| prompt | completion | api |
|---|---|---|
| stringlengths 19–879k | stringlengths 3–53.8k | stringlengths 8–59 |
import numpy as np
from plico_interferometer import interferometer
from plico_dm import deformableMirror
from astropy.io import fits
from functools import reduce
import matplotlib.pyplot as plt
from arte.types.mask import CircularMask
from arte.utils.zernike_generator import ZernikeGenerator
from tesi_ao.mems_command_to_position_linearization_measurer import CommandToPositionLinearizationMeasurer
from tesi_ao.mems_command_to_position_linearization_analyzer import CommandToPositionLinearizationAnalyzer
from tesi_ao.mems_command_linearization import MemsCommandLinearization
def create_devices():
wyko = interferometer('172.16.17.32', 7300)
bmc = deformableMirror('192.168.3.11', 7000)
return wyko, bmc
# def _2dgaussian(self, X, amplitude, x0, y0, sigmax, sigmay, offset):
# y, x = X
# z = np.zeros((len(y), len(x)), dtype='float')
# N = amplitude # *0.5 / (np.pi * sigmax * sigmay)
# for xi in np.arange(len(x)):
# a = 0.5 * ((xi - x0) / sigmax)**2
# for yi in np.arange(len(y)):
# b = 0.5 * ((yi - y0) / sigmay)**2
#
# z[yi, xi] = N * np.exp(-(a + b)) + offset
# return z.ravel()
#
# def _gaussian_fitting(self, act_idx, cmd_index):
# wf = self._wfs[act_idx, cmd_index]
# wf2 = self._wfs[act_idx, 2]
# b, t, l, r = self._get_max_roi(act_idx)
# wfroi = wf[b:t, l:r]
# wfroi2 = wf2[b:t, l:r]
# coord_max = np.argwhere(np.abs(wfroi2) == np.max(np.abs(wfroi2)))[0]
# x0 = coord_max[1]
# y0 = coord_max[0]
# #z = wfroi[wfroi.data != 0.]
#
# #z = wfroi
#
# NvalidX = (wfroi.mask[y0, :] == False).sum()
# NvalidY = (wfroi.mask[:, x0] == False).sum()
# x = np.arange(NvalidX, dtype='float')
# y = np.arange(NvalidY, dtype='float')
#
# Z = []
# for yi in range(wfroi.shape[0]):
# for xi in range(wfroi.shape[1]):
# if(wfroi[yi, xi].data != 0.):
# Z.append(wfroi[yi, xi])
#
# Z = np.array(Z, dtype='float')
#
# Z = wfroi.compressed()
#
# A0 = self._max_wavefront(act_idx, cmd_index)
#
# sigma0 = 25.
# sigmax = sigma0
# sigmay = sigma0
# offset = 0.
# starting_values = [A0, x0, y0, sigmax, sigmay, offset]
# X = y, x
#
# #err_z = Z.std() * np.ones(len(x) * len(y))
#
# fpar, fcov = curve_fit(self._2dgaussian, X, Z,
# p0=starting_values, absolute_sigma=True)
# #err_fpar = np.sqrt(np.diag(fcov))
# print('1curve_fit done')
# error = (Z - self._2dgaussian(X, *fpar))
# starting_values = [fpar[0], fpar[1],
# fpar[2], fpar[3], fpar[4], fpar[5]]
# fpar, fcov = curve_fit(
# self._2dgaussian, X, Z, p0=starting_values, sigma=error, absolute_sigma=True)
# print('2curve_fit done')
# return fpar[0]
#
# def _compute_gaussian_amplitude_deflection(self):
# self._max_deflection = np.zeros(
# (self._cmd_vector.shape[0], self._cmd_vector.shape[1]))
# for act in range(self._cmd_vector.shape[0]):
# for cmd_idx in range(self._cmd_vector.shape[1]):
# self._max_deflection[act, cmd_idx] = self._gaussian_fitting(
# act, cmd_idx)
#
# def compute_gaussian_linearization(self):
# self._compute_gaussian_amplitude_deflection()
#
# return MemsCommandLinearization(
# self._actuators_list,
# self._cmd_vector,
# self._max_deflection,
# self._reference_shape_tag)
# def plot_interpolated_function(mcl):
# '''
# F_int(pos)=cmd
# '''
# plt.figure()
# plt.clf()
# for idx, act in enumerate(mcl._actuators_list):
# a = np.min(mcl._deflection[act])
# b = np.max(mcl._deflection[act])
# xx = np.linspace(a, b, 1000)
# plt.plot(mcl._finter[act](xx), xx / 1.e-9, '.-')
# plt.xlabel('Command [au]', size=25)
# plt.ylabel('Deflection [nm]', size=25)
# plt.title('Calibration curve per actuator', size=25)
# plt.grid()
def _plot_acquired_measures(mcl):
plt.figure()
plt.clf()
for idx, act in enumerate(mcl._actuators_list):
plt.plot(mcl._cmd_vector[idx], mcl._deflection[idx] / 1.e-9, '.-')
plt.xlabel('Command [au]', size=25)
plt.ylabel('Deflection [nm]', size=25)
plt.title('Acquired Measures per actuator', size=25)
plt.grid()
# def plot_single_curve(mcl, act):
# '''
# F_int(pos)=cmd
# '''
# plt.figure()
# plt.clf()
# a = np.min(mcl._deflection[act])
# b = np.max(mcl._deflection[act])
# xx = np.linspace(a, b, 1000)
# plt.plot(mcl._cmd_vector[act], mcl._deflection[act] /
# 1.e-9, 'or', label='sampling points')
# plt.plot(mcl._finter[act](xx), xx / 1.e-9, '-', label='finter')
# plt.title('Calibration Curve: act#%d' % act, size=25)
# plt.xlabel('Commands [au]', size=25)
# plt.ylabel('Deflection [nm]', size=25)
# plt.grid()
# plt.legend(loc='best')
def _plot_pos_vs_cmd(mcl, act):
'''
F_int(cmd)=pos
'''
plt.figure()
plt.clf()
plt.plot(mcl._cmd_vector[act], mcl._deflection[act] /
1.e-9, 'or', label='sampling points')
plt.title('act=%d' % act, size=25)
plt.ylabel('pos[nm]')
plt.xlabel('cmd[au]')
plt.grid()
a = np.min(mcl._cmd_vector[act])
b = np.max(mcl._cmd_vector[act])
vv = np.linspace(a, b, 1000)
plt.plot(vv, mcl._finter[act](vv) / 1.e-9, '-', label='finter')
plt.legend(loc='best')
def _plot_all_int_funcs(mcl):
plt.figure()
plt.clf()
for idx, act in enumerate(mcl._actuators_list):
a = np.min(mcl._cmd_vector[act])
b = np.max(mcl._cmd_vector[act])
vv = np.linspace(a, b, 1000)
plt.plot(vv, mcl._finter[act](vv) / 1.e-9, '.-', label='finter')
plt.xlabel('Command [au]', size=25)
plt.ylabel('Deflection [nm]', size=25)
plt.title('Calibration curve per actuator', size=25)
plt.grid()
class PupilMaskBuilder():
def __init__(self, wfmask):
self._wfmask = wfmask # is the interferometer mask!
def get_circular_mask(self, radius, center):
mask = CircularMask(self._wfmask.shape,
maskRadius=radius, maskCenter=center)
return mask # .mask()
def get_centred_circular_mask_wrt_interferometer_mask(self):
# TODO: check that the False-valued data form a rectangular map
# take a generic False pixel to reconstruct the width
# and height of the rectangular False map
yFalsePixel = np.where(self._wfmask == False)[0][0]
xFalsePixel = np.where(self._wfmask == False)[1][0]
HeightInPixels = (self._wfmask[:, xFalsePixel] == False).sum()
WidthInPixels = (self._wfmask[yFalsePixel, :] == False).sum()
offsetX = (self._wfmask[yFalsePixel, 0:xFalsePixel] == True).sum()
offsetY = (self._wfmask[0:yFalsePixel, xFalsePixel] == True).sum()
# center of False map and origin of circular pupil in pixel
yc0 = offsetY + 0.5 * HeightInPixels
xc0 = offsetX + 0.5 * WidthInPixels
MaxRadiusInPixel = min(WidthInPixels, HeightInPixels) * 0.5
cmask = self.get_circular_mask(MaxRadiusInPixel, (yc0, xc0))
return cmask
def get_barycenter_of_false_pixels(self):
N_of_pixels = self._wfmask.shape[0] * self._wfmask.shape[1]
True_pixels = self._wfmask.sum()
False_pixels = N_of_pixels - True_pixels
coord_yi = np.where(self._wfmask == False)[0]
coord_xi = np.where(self._wfmask == False)[1]
yc = coord_yi.sum() / float(False_pixels)
xc = coord_xi.sum() / float(False_pixels)
return yc, xc
def get_number_of_false_pixels_along_barycenter_axis(self):
y, x = self.get_barycenter_of_false_pixels()
y = int(y)
x = int(x)
n_pixels_along_x = (self._wfmask[y, :] == False).sum()
n_pixels_along_y = (self._wfmask[:, x] == False).sum()
return n_pixels_along_y, n_pixels_along_x
def get_number_of_false_pixels_along_pixel_axis(self, yp, xp):
y = int(yp)
x = int(xp)
n_pixels_along_x = (self._wfmask[y, :] == False).sum()
n_pixels_along_y = (self._wfmask[:, x] == False).sum()
return n_pixels_along_y, n_pixels_along_x
def get_number_of_false_pixels_along_frame_axis(self):
n_pixels_along_x_axis = np.zeros(
self._wfmask.shape[1]) # shape[1]== len(y_axis)
n_pixels_along_y_axis = np.zeros(
self._wfmask.shape[0]) # shape[0]== len(x_axis)
n_pixels_along_x_axis = (self._wfmask == False).sum(axis=1)
n_pixels_along_y_axis = (self._wfmask == False).sum(axis=0)
return n_pixels_along_y_axis, n_pixels_along_x_axis
def build_max_radius_and_pupil_in_imask(self):
self._max_radius_in_imask, self._max_pupil_in_imask = self.get_centred_circular_mask_wrt_interferometer_mask()
# to be tested on the cplm_all_fixed file produced on 17/3
class ModeGenerator():
NORM_AT_THIS_CMD = 19 # such that wyko noise and saturation are avoided
VISIBLE_AT_THIS_CMD = 19 # command index used to assess actuator visibility in the given mask
THRESHOLD_RMS = 0.5 # threshold used to reject poorly visible actuators outside the given mask
def __init__(self, cpla, mcl):
self._cpla = cpla
self._mcl = mcl
self._n_of_act = self._cpla._wfs.shape[0]
self._build_intersection_mask()
def _build_intersection_mask(self):
self._imask = reduce(lambda a, b: np.ma.mask_or(
a, b), self._cpla._wfs[:, self.NORM_AT_THIS_CMD].mask)
def _check_actuators_visibility(self, cmd=None):
if cmd is None:
cmd = self.VISIBLE_AT_THIS_CMD
self._rms_wf = np.zeros(self._n_of_act)
for act in range(self._n_of_act):
self._rms_wf[act] = np.ma.array(data=self._cpla._wfs[act, cmd],
mask=self._pupil_mask).std()
def _show_actuators_visibility(self):
plt.figure()
plt.clf()
plt.ion()
plt.plot(self._rms_wf / 1.e-9, 'o', label='cmd=%d' %
self.VISIBLE_AT_THIS_CMD)
plt.xlabel('#N actuator', size=25)
plt.ylabel('Wavefront rms [nm]', size=25)
plt.grid()
plt.legend(loc='best')
def _build_valid_actuators_list(self, cmd=None):
self._check_actuators_visibility(cmd)
self._acts_in_pupil = np.where(
self._rms_wf > self.THRESHOLD_RMS * self._rms_wf.max())[0]
self._n_of_selected_acts = len(self._acts_in_pupil)
def _normalize_influence_function(self, act):
return (self._cpla._wfs[act, self.NORM_AT_THIS_CMD][self._pupil_mask == False] /
self._mcl._deflection[act, self.NORM_AT_THIS_CMD]).data
def _build_interaction_matrix(self):
if self._acts_in_pupil is None:
selected_act_list = self._cpla._actuators_list
else:
selected_act_list = self._acts_in_pupil
self._im = np.column_stack([self._normalize_influence_function(
act) for act in selected_act_list])
def _build_reconstruction_matrix(self):
self._rec = np.linalg.pinv(self._im)  # api: numpy.linalg.pinv
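# Added sketch (not part of the original class): the reconstructor built above
# is the Moore-Penrose pseudo-inverse of the interaction matrix, so applying it
# to a wavefront sampled on the same valid pixels gives the least-squares
# actuator amplitudes. A minimal standalone version, with hypothetical inputs:
def _example_wavefront_to_commands(interaction_matrix, wf_meas):
    # interaction_matrix: (n_valid_pixels, n_selected_acts); columns are the
    # normalized influence functions. wf_meas: (n_valid_pixels,) wavefront.
    rec = np.linalg.pinv(interaction_matrix)
    return rec.dot(wf_meas)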
import pycocotools.coco as cocoapi
import sys
import cv2
import numpy as np
import pickle
import json
SPLITS = ['train']  # , 'val'
ANN_PATH = '../data/coco/annotations/instances_{}2017.json'
OUT_PATH = '../data/coco/annotations/instances_small_{}2017.json'
IMG_DIR = '../data/coco/{}2017/'
DEBUG = False
from scipy.spatial import ConvexHull
def _coco_box_to_bbox(box):
bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]],
dtype=np.int32)
return bbox
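# Quick illustration (hypothetical values, not part of the original script) of
# the conversion above: a COCO-style [x, y, width, height] box becomes the
# corner format [x1, y1, x2, y2].
_example_bbox = _coco_box_to_bbox([10, 20, 30, 40])  # -> array([10, 20, 40, 60])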
def _get_extreme_points(pts):
l, t = min(pts[:, 0]), min(pts[:, 1])
r, b = max(pts[:, 0]), max(pts[:, 1])
# 3 degrees
thresh = 0.02
w = r - l + 1
h = b - t + 1
pts = np.concatenate([pts[-1:], pts, pts[:1]], axis=0)
t_idx = np.argmin(pts[:, 1])  # api: numpy.argmin
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.interpolate import griddata
from scipy.sparse import csr_matrix, lil_matrix, hstack, vstack
from geometry import rotation_matrix, proj_iso_plane # , make_fluence_maps
from time import time
# from scipy.signal import convolve2d
from scipy.ndimage import convolve
from scipy.ndimage.interpolation import rotate
# import trimesh as tm # https://github.com/mikedh/trimesh
# commissioning data and fit fxns
pdd_data = {
"6X": {
"buildup": [-0.00015749459237802087, 0.018456397544299074, -0.88448139491242872, 22.163062813849965,
-312.23926598651917, 2449.7961711094094, 1749.682558831852],
"split": 18.75,
"falloff": [-2.8264719677060061e-07, 0.00024313850219755478, -0.036093426359969094, -28.230918530108855,
11245.762396352433]
},
"15X": {
"split": 35.0,
"buildup": [-0.0009464313106873083, 0.19875057524433598, -16.539586683888302, 692.4124379156118,
-15519.52470334705, 185289.8082771371, 38443.305072768264],
"falloff": [-3.1861193991006273e-10, 5.497344697565649e-07, -0.0003803517731495236, 0.1334223080989128,
-22.60982496684418, -479.32980224649026, 1113733.8377053856]
},
"15X_MC": {
"buildup": [-2.7723087151505166e-06, 0.00055879347539751413, -0.053759468984408219, 3.0197899077600456,
-101.31274784968605, 1888.8581630228164, 1293.1597039077351],
"split": 32.8125,
"falloff": [-1.9597228831831153e-11, 2.693437995470181e-08, -1.4915457123262073e-05, 0.0042146835045338083,
-0.58755541834481695, -2.688095323220637, 17061.029792989608]
},
# "15X": {
# "buildup": [-2.781688925124089e-06, 0.0005606841739703134, -0.054105297895991306, 3.0520358074793372,
# -102.57441264586714, 1907.2660184066269, 1267.6475603080792],
# "split": 32.8125,
# "falloff": [-2.8839890418852902e-11, 3.887703303176435e-08, -2.0889148193952867e-05, 0.0056726958756894907,
# -0.7677552995266792, 7.7496351540534905, 16882.372184793865]
# },
"15X_old": {
"buildup": [-1.6507633296164477e-06, 0.00028630426498036006, -0.02888629114824896, 1.7451265889913861,
-61.497244761739545, 1171.9987395979024, 1674.4131730133356],
"split": 28.125,
"falloff": [-2.954626231433345e-07, 0.00024567974262585063, -0.052934868220503181, -18.176864694198056,
11639.846648127208]
}
}
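# Note on the tables above (added comment): the 'buildup' and 'falloff' entries
# are polynomial coefficients in the order expected by np.poly1d (highest power
# first). In the dose calculation below, depths <= 'split' are evaluated with
# the buildup polynomial and depths > 'split' with the falloff polynomial,
# where 'split' is the depth in mm at which the fit switches.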
kernel_data = {
"15X_TPS": [4.245974260699353, 3.017027753379914, 0.14762857922875838,
2.041032903900953, 5.76614346628947, 49.547092289488255], #came from TPS lines
# "15X": [4873.696975027252, 1404.0366659346853, 2455.7177653736917,
# 49.56740857240596, 8.417599570230726, 2.1880620468364484],
"15X": [0.00028964017385020818, 0.00011667873579437889, 0.0024779599104120744, 6.4674171413250718,
18.237437627703674, 1.5545102702143783], ## CAME FROM MC
"6X": [0.00028964017385020818, 0.00011667873579437889, 0.0024779599104120744, 6.4674171413250718,
18.237437627703674, 1.5545102702143783], ## COPY OF 15X kernel
# (f1,f2,f3,s1,s2,r3)
# {
# 'f2':.2,#.32,
# 'f3':0.0,#.0052, # ODDS ARE THIS FACTOR IS WRONG
# 'sig1': 1.0,#1.1, # calibrated with 2 mm kernel
# 'sig2': 2.0,#2.9,
# },
# "6X":{
# 'calib_beam_size_mm': 2.0, # calibrated with 2 mm kernel
# 'f2':.09,
# 'f3':.0043,
# 'sig1':.8,
# 'sig2':1.9,
# },
}
def compute_Dij(dose_shape, idxs_oi, pts_3d, pts_3d_shell, SAD=1000., gantry_angle=0., field_size=100.,
beamlet_size_x=1., beamlet_size_z=5., field_buffer=20., beam_energy=None, show_plots=False,
anti_alias=False, pdd_dose = None):
"""
all units in mm (DICOM)
returns a "ray-trace" Dij matrix (no scatter)
:param SAD:
:param gantry_angle:
:param field_size:
:param beamlet_size:
:param field_buffer: added to all sides of the field
:param beam_energy:
:param show_plots:
:param anti_alias:
1)
:return:
"""
assert beam_energy is not None, "please provide a beam energy"
assert len(pdd_data[beam_energy]), "please provide beam data"
big_tic = time()
# dose calc settings
# SAD = 1000. # mm
# gantry_angle = 0. # degrees
# all geometry defined on iso-center plane as usual
# field_size = 100. # mm (square)
# beamlet_size = 1. # mm (square)
# field_buffer = 20. # mm (width of region beyond field - allows for scattering outside of field)
# BUILD FLUENCE MAP ###############################################################################################
tic = time()
# pre-sanity check
assert field_size % beamlet_size_x == 0, "field_size must be integer multiple of beamlet_size"
assert field_buffer % beamlet_size_x == 0, "field_buffer must be integer multiple of beamlet_size"
assert field_size % beamlet_size_z == 0, "field_size must be integer multiple of beamlet_size"
assert field_buffer % beamlet_size_z == 0, "field_buffer must be integer multiple of beamlet_size"
# some pre-calculated variables which are re-used throughout the code
src = [0, SAD, 0] # source point @ 0 degrees rotation in (isocenter-shifted) DICOM ref. frame
src_rot = np.dot(rotation_matrix([0., 0., np.radians(180. - gantry_angle)]), src)
# print(src_rot)
# setup fluence grid
expanded_field_size = field_size + 2. * field_buffer # [buffer][field][buffer]
field_buffer_x_px = int(field_buffer / beamlet_size_x) # number of fluence pixels in buffer region
field_size_x_px = int(field_size / beamlet_size_x) # number of fluence pixels in field
field_buffer_z_px = int(field_buffer / beamlet_size_z) # number of fluence pixels in buffer region
field_size_z_px = int(field_size / beamlet_size_z) # number of fluence pixels in field
# compute BOUNDARIES of fluence map pixels (note the +1)
x_map_boundaries = np.linspace(-expanded_field_size / 2., expanded_field_size / 2.,
int(expanded_field_size / beamlet_size_x) + 1)
z_map_boundaries = np.linspace(-expanded_field_size / 2., expanded_field_size / 2.,
int(expanded_field_size / beamlet_size_z) + 1)
# z_map_boundaries = x_map_boundaries
x_mesh_boundaries, z_mesh_boundaries = np.meshgrid(x_map_boundaries, z_map_boundaries)
# sanity check
# print(x_mesh_boundaries.shape[1], 2*field_buffer_x_px + field_size_x_px + 1, "x field dimensions")
# print(x_mesh_boundaries.shape[0], 2*field_buffer_z_px + field_size_z_px + 1, "x field dimensions")
assert x_mesh_boundaries.shape[1] == 2 * field_buffer_x_px + field_size_x_px + 1, "error computing field dimensions"
assert x_mesh_boundaries.shape[0] == 2 * field_buffer_z_px + field_size_z_px + 1, "error computing field dimensions"
# compute CENTRAL POINTS of pixels on fluence map
x_map_centers = np.linspace(-expanded_field_size / 2. + beamlet_size_x / 2,
expanded_field_size / 2. - beamlet_size_x / 2.,
int(expanded_field_size / beamlet_size_x))
z_map_centers = np.linspace(-expanded_field_size / 2. + beamlet_size_z / 2,
expanded_field_size / 2. - beamlet_size_z / 2.,
int(expanded_field_size / beamlet_size_z))
# z_map_centers = x_map_centers
x_mesh_centers, z_mesh_centers = np.meshgrid(x_map_centers, z_map_centers)
# print(len(x_map))
# print(x_map.min(), x_map.max())
# get point data
# vox_size = study['{}/voxel_size'.format(ct_group)]
# pts_3d = study['{}/voxel_coords'.format(ct_group)] - isocenter
# try grabbing body contour instead?
# pts_3d_shell = (study['ct/voxel_coords'.format(ct_group)] - isocenter)[np.where(study[body_shell_path])] # shell path in full resolution?
# print(round(time() - tic, 3), ' sec for init')
# BUILD SURFACE DISTANCE MAP ######################################################################################
tic = time()
# rotate and project points to isocenter plane
pts_proj = proj_iso_plane(pts_3d_shell.T, SAD, gantry_angle)
# pick out relevant points (y dimension should always be zero)
x = pts_proj[0]
z = pts_proj[2]
# optional, adds ~30 compute time to this section
assert sum(pts_proj[1]) == 0.0, "unexpected behavior of projection operation"
# create digitize bin boundaries at a lower resolution: make space wider by 1cm on each side (buffer zone)
# THESE SET BOUNDARIES - indexing is not a problem here since we use matching rather than slicing on this step
x_bins = np.linspace(-field_size / 2. - 1.5 * field_buffer, field_size / 2. + 1.5 * field_buffer, 25)
z_bins = np.linspace(-field_size / 2. - 1.5 * field_buffer, field_size / 2. + 1.5 * field_buffer, 25)
x_dig = np.digitize(x, x_bins) # the binned indices along x dimension: x_bins[i-1] <= x < x_bins[i]
# the actual limits of the projected data
# print('x', x.min(), x.max())
# print('z', z.min(), z.max())
# stores distance values for each bin
d_min = []
# d_max = []
# stores the point values for each bin
p_min = []
# p_max = []
# for each x-bin "i" (digitize returns valid index between 1 <= i < len(x_bins))
for i in range(1, len(x_bins)):
# find indices of points within the current x-bin "i"
idx_x = np.array(np.where(x_dig == i))
# get binned indices along z dimension for the points within x-bin "i"
z_dig = np.digitize(z[idx_x], z_bins) # z_bins[j-1] <= z < z_bins[j]
# for each z-bin "j"
for j in range(1, len(z_bins)):
# find the indices of points within current x-bin "i" and z-bin "j"
idx_xz = idx_x[np.where(z_dig == j)]
# if there is more than one point in the current xz-bin "i,j"
if len(idx_xz) > 1:
# get the 3d coordinate for each point
pix_pts = pts_3d_shell[idx_xz, :]
# get the 2d coordinate for each point (on projection plane)
pln_pts = pts_proj[:, idx_xz]
# compute distances to source for the 3d points
dists = np.linalg.norm(pix_pts - src_rot, axis=1) # faster
# save the 2d coordinates of the minimum and maximum distance points
p_min.append(pln_pts[::2, dists.argmin()]) # only selecting x-y component
# p_max.append(pln_pts[::2, dists.argmax()]) # only selecting x-y component
# save the distances to the minimum and maximum 3d points
d_min.append(dists.min())
# d_max.append(dists.max())
del z_dig
del x_dig
# cast to numpy array
p_min = np.array(p_min)
d_min = np.array(d_min)
# p_max = np.array(p_max)
# d_max = np.array(d_max)
# print(round(time() - tic, 3), 'sec for surface map computation')
if show_plots:
# example of interpolated distance map @ fluence map resolution
# create interpolated distance map
d_map = griddata(p_min, d_min, (x_mesh_centers, z_mesh_centers), method='cubic')
print(d_map.shape)
plt.imshow(d_map, interpolation='none')
plt.colorbar()
print("dmap mean SSD:", d_map.mean(), " mm")
plt.show()
fig = plt.figure()
ax = fig.gca(projection='3d')
# surf = ax.plot_surface(x_mesh,z_mesh,d_min.max()-d_map)
ax.scatter(p_min[:, 0], p_min[:, 1], d_min)
plt.show()
# COMPUTE DEPTH TO EACH POINT #####################################################################################
tic = time()
# create dose grid (@ CT resolution for now)
dose_test = np.zeros(dose_shape)
# only select points inside body contour
# idxs_oi = np.where(study[body_contour_path] > 0)
pts_3d_oi = pts_3d[idxs_oi] # points within body contour
# project all points to isocenter plane
vol_proj = proj_iso_plane(pts_3d_oi.T, SAD, gantry_angle)
# compute physical distance between source and all points of interest
dist_pts = np.linalg.norm(pts_3d_oi - src_rot, axis=1)
# compute physical distance between body surface and all points of interest
dx_map = griddata(p_min, d_min, (vol_proj[0], vol_proj[2]), method='cubic')
dose_test[idxs_oi] = dist_pts - dx_map
# only used for testing/validation
# dose_test[idxs_oi] = np.divide(dist_pts,np.square(dist_pts))
# print(round(time() - tic, 3), "sec for depth calculation")
if show_plots:
plt.imshow(dose_test[:, :, int(dose_test.shape[2] / 2)])
plt.colorbar()
plt.show()
# plt.imshow(dose_test[:,dose_test.shape[1]/2,:])
# plt.colorbar()
# plt.show()
del dx_map
# APPLY PDD #######################################################################################################
if pdd_dose is None:
tic = time()
pdd_f_fxn = np.poly1d(pdd_data[beam_energy]['falloff'])
pdd_b_fxn = np.poly1d(pdd_data[beam_energy]['buildup'])
# make copy of distance data
pdd_dose = dose_test.copy()
# optional cleanup
# nan_vals = np.where(np.isnan(pdd_dose))
# pdd_dose[nan_vals] = 0.
# select buildup region by index
bu_idx = np.where(pdd_dose <= pdd_data[beam_energy]['split'])
# select fall off region by index
fo_idx = np.where(pdd_dose > pdd_data[beam_energy]['split'])
# apply buildup and falloff PDD filter
# TODO: can we narrow the indexing here rather than applying to full dose grid?
pdd_dose[bu_idx] = pdd_b_fxn(pdd_dose[bu_idx])
pdd_dose[fo_idx] = pdd_f_fxn(pdd_dose[fo_idx])
# normalize by physical distance (1/square(r))
# TESTING NO NORM
pdd_dose[idxs_oi] = np.divide(pdd_dose[idxs_oi], np.square(dist_pts))
# cleanup dose grid
pdd_dose[np.where(np.isnan(pdd_dose))] = 0.0
pdd_dose[np.where(pdd_dose < 0.0)] = 0.0
# print(time() - tic, 'sec to apply PDD')
del bu_idx
del fo_idx
else:
print("USING ECLIPSE BEAM FOR DEPTH DOSE")
assert pdd_dose.shape == dose_shape, "PDD shape does not match dose shape"
# BUILD SPARSE MATRIX #############################################################################################
# here we form the "ray trace" matrix for computing dose given a fluence map
# TODO: double check behavior of np.digitize
# some variable shortcuts
x_map_n = len(x_map_centers)
z_map_n = len(z_map_centers)
def digitize_voxel_mtx(vol_pts, x_map_bounds, z_map_bounds, x_shift=0.0, z_shift=0.0):
# digitize the location of each voxel point on the fluence plane
v_dig_x = np.digitize(vol_pts[0] + x_shift, x_map_bounds) - 1
v_dig_z = np.digitize(vol_pts[2] + z_shift, z_map_bounds) - 1
# keep only valid indices within the fluence map
v_dig_valid = np.where((0 <= v_dig_x) & (v_dig_x < x_map_n) & (0 <= v_dig_z) & (v_dig_z < z_map_n))
tmp = pdd_dose[idxs_oi]
# form sparse dose matrix:
sparse_dose = tmp[v_dig_valid].flatten().copy()
fmap_width = len(x_map_bounds) - 1 # we subtract 1 here because boundaries are of length x_map_n+1
col_idx = v_dig_x[v_dig_valid] + fmap_width * (v_dig_z[v_dig_valid])
row_idx = v_dig_valid[0] # np.array(range(len(sparse_dose)))
del tmp
del v_dig_x
del v_dig_z
return csr_matrix((sparse_dose.astype(np.float32), (row_idx, col_idx)),
shape=(
len(idxs_oi[0]),
(x_map_boundaries.shape[0] - 1) * (z_map_boundaries.shape[0] - 1)
))
if anti_alias:
# this averages out beamlet contributions across neighboring voxels
# d = 2.
csr = None
N = 4 #20
for x_shift in np.linspace(-beamlet_size_x/2.0, beamlet_size_x/2.0, N, endpoint=True):
for z_shift in np.linspace(-beamlet_size_z/2.0, beamlet_size_z/2.0, N, endpoint=True):
if csr is None:
csr = digitize_voxel_mtx(vol_proj, x_map_boundaries, z_map_boundaries, x_shift, z_shift)
else:
csr += digitize_voxel_mtx(vol_proj, x_map_boundaries, z_map_boundaries, x_shift, z_shift)
csr /= float(N)
else:
csr = digitize_voxel_mtx(vol_proj, x_map_boundaries, z_map_boundaries)
# print(col_idx.max())
# print(csr.shape)
# print(csr.nnz)
print("beam Dij time: ", round(time() - big_tic, 3), " sec")
return csr # , idxs_oi# v_dig_valid #, x_bins, z_bins
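# Usage note (added; the variable names below are illustrative, not from the
# original code): the returned CSR matrix has one row per voxel of interest
# (len(idxs_oi[0])) and one column per fluence-map pixel, so a dose estimate
# at those voxels is a sparse matrix-vector product:
#     Dij = compute_Dij(dose_shape, idxs_oi, pts_3d, pts_3d_shell,
#                       beam_energy='6X', SAD=1000., gantry_angle=0.,
#                       field_size=100., field_buffer=20.,
#                       beamlet_size_x=1., beamlet_size_z=5.)
#     dose_at_voxels = Dij.dot(fluence_map.ravel())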
def compute_Dij_bodymesh(dose_shape, idxs_oi, pts_3d, bodymesh, SAD=1000., gantry_angle=0., field_size=100.,
beamlet_size_x=1., beamlet_size_z=5., field_buffer=20., beam_energy=None, show_plots=False,
anti_alias=False, pdd_dose = None):
"""
all units in mm (DICOM)
returns a "ray-trace" Dij matrix (no scatter)
:param SAD:
:param gantry_angle:
:param field_size:
:param beamlet_size:
:param field_buffer: added to all sides of the field
:param beam_energy:
:param show_plots:
:param anti_alias:
1)
:return:
"""
assert beam_energy is not None, "please provide a beam energy"
assert len(pdd_data[beam_energy]), "please provide beam data"
big_tic = time()
# dose calc settings
# SAD = 1000. # mm
# gantry_angle = 0. # degrees
# all geometry defined on iso-center plane as usual
# field_size = 100. # mm (square)
# beamlet_size = 1. # mm (square)
# field_buffer = 20. # mm (width of region beyond field - allows for scattering outside of field)
# BUILD FLUENCE MAP ###############################################################################################
tic = time()
# pre-sanity check
assert field_size % beamlet_size_x == 0, "field_size must be integer multiple of beamlet_size"
assert field_buffer % beamlet_size_x == 0, "field_buffer must be integer multiple of beamlet_size"
assert field_size % beamlet_size_z == 0, "field_size must be integer multiple of beamlet_size"
assert field_buffer % beamlet_size_z == 0, "field_buffer must be integer multiple of beamlet_size"
# some pre-calculated variables which are re-used throughout the code
src = [0, SAD, 0] # source point @ 0 degrees rotation in (isocenter-shifted) DICOM ref. frame
src_rot = np.dot(rotation_matrix([0., 0., np.radians(180. - gantry_angle)]), src)
# print(src_rot)
# setup fluence grid
expanded_field_size = field_size + 2. * field_buffer # [buffer][field][buffer]
field_buffer_x_px = int(field_buffer / beamlet_size_x) # number of fluence pixels in buffer region
field_size_x_px = int(field_size / beamlet_size_x) # number of fluence pixels in field
field_buffer_z_px = int(field_buffer / beamlet_size_z) # number of fluence pixels in buffer region
field_size_z_px = int(field_size / beamlet_size_z) # number of fluence pixels in field
# compute BOUNDARIES of fluence map pixels (note the +1)
x_map_boundaries = np.linspace(-expanded_field_size / 2., expanded_field_size / 2.,
int(expanded_field_size / beamlet_size_x) + 1)
z_map_boundaries = np.linspace(-expanded_field_size / 2., expanded_field_size / 2.,
int(expanded_field_size / beamlet_size_z) + 1)
# z_map_boundaries = x_map_boundaries
x_mesh_boundaries, z_mesh_boundaries = np.meshgrid(x_map_boundaries, z_map_boundaries)
# sanity check
# print(x_mesh_boundaries.shape[1], 2*field_buffer_x_px + field_size_x_px + 1, "x field dimensions")
# print(x_mesh_boundaries.shape[0], 2*field_buffer_z_px + field_size_z_px + 1, "x field dimensions")
assert x_mesh_boundaries.shape[1] == 2 * field_buffer_x_px + field_size_x_px + 1, "error computing field dimensions"
assert x_mesh_boundaries.shape[0] == 2 * field_buffer_z_px + field_size_z_px + 1, "error computing field dimensions"
# compute CENTRAL POINTS of pixels on fluence map
x_map_centers = np.linspace(-expanded_field_size / 2. + beamlet_size_x / 2,
expanded_field_size / 2. - beamlet_size_x / 2.,
int(expanded_field_size / beamlet_size_x))
z_map_centers = np.linspace(-expanded_field_size / 2. + beamlet_size_z / 2,
expanded_field_size / 2. - beamlet_size_z / 2.,
int(expanded_field_size / beamlet_size_z))
# z_map_centers = x_map_centers
x_mesh_centers, z_mesh_centers = np.meshgrid(x_map_centers, z_map_centers)
# print(len(x_map))
# print(x_map.min(), x_map.max())
# get point data
# vox_size = study['{}/voxel_size'.format(ct_group)]
# pts_3d = study['{}/voxel_coords'.format(ct_group)] - isocenter
# try grabbing body contour instead?
# pts_3d_shell = (study['ct/voxel_coords'.format(ct_group)] - isocenter)[np.where(study[body_shell_path])] # shell path in full resolution?
# print(round(time() - tic, 3), ' sec for init')
# BUILD SURFACE DISTANCE MAP ######################################################################################
tic = time()
num_bixels = x_mesh_centers.size  # total number of fluence-map pixels
src_pts = np.array([_ for _ in src_rot] * num_bixels).reshape((-1, 3)) # mm
# raise Exception("BODY MESH FLUENCE PLANE ROTATION NOT IMPLEMENTED")
isocenter_plane = np.array([x_mesh_centers.flatten(), [0.0] * num_bixels, z_mesh_centers.flatten()]).T
iso_plane_rot = np.dot(
isocenter_plane,
rotation_matrix([0., 0., np.radians(180. - gantry_angle)]).T,
)
assert iso_plane_rot.shape == src_pts.shape, "iso_plane shape: {}, src_pts.shape: {}".format(iso_plane_rot.shape,src_pts.shape)
intersections = bodymesh.ray.intersects_location(ray_origins=src_pts, ray_directions=iso_plane_rot - src_pts)
locations = intersections[0]
ray_idxs = intersections[1]
dist_map = np.ones_like(x_mesh_centers) * np.inf
for i, idx in enumerate(ray_idxs):
temp_dist = np.sqrt(np.square(locations[i] - src_pts[idx]).sum())
if (temp_dist < dist_map.flat[idx]):
dist_map.flat[idx] = temp_dist
# COMPUTE DEPTH TO EACH POINT #####################################################################################
tic = time()
# create dose grid (@ CT resolution for now)
dose_test = np.zeros(dose_shape)
# only select points inside body contour
# idxs_oi = np.where(study[body_contour_path] > 0)
pts_3d_oi = pts_3d[idxs_oi] # points within body contour
# project all points to isocenter plane
vol_proj = proj_iso_plane(pts_3d_oi.T, SAD, gantry_angle)
# compute physical distance between source and all points of interest
dist_pts = np.linalg.norm(pts_3d_oi - src_rot, axis=1)
# compute physical distance between body surface and all points of interest
dx_map = griddata((iso_plane_rot.T[0], isocenter_plane.T[2]), dist_map.flat, (vol_proj[0], vol_proj[2]), method='linear')
dose_test[idxs_oi] = dist_pts - dx_map
# only used for testing/validation
# dose_test[idxs_oi] = np.divide(dist_pts,np.square(dist_pts))
# print(round(time() - tic, 3), "sec for depth calculation")
if show_plots:
plt.imshow(dose_test[:, :, int(dose_test.shape[2] / 2)])
plt.colorbar()
plt.show()
# plt.imshow(dose_test[:,dose_test.shape[1]/2,:])
# plt.colorbar()
# plt.show()
# del dx_map
# APPLY PDD #######################################################################################################
if pdd_dose is None:  # 'is None' avoids an elementwise comparison when an array is passed
tic = time()
pdd_f_fxn = np.poly1d(pdd_data[beam_energy]['falloff'])
pdd_b_fxn = np.poly1d(pdd_data[beam_energy]['buildup'])
# make copy of distance data
pdd_dose = dose_test.copy()
# optional cleanup
# nan_vals = np.where(np.isnan(pdd_dose))
# pdd_dose[nan_vals] = 0.
# select buildup region by index
bu_idx = np.where(pdd_dose <= pdd_data[beam_energy]['split'])
# select fall off region by index
fo_idx = np.where(pdd_dose > pdd_data[beam_energy]['split'])
# apply buildup and falloff PDD filter
# TODO: can we narrow the indexing here rather than applying to full dose grid?
pdd_dose[bu_idx] = pdd_b_fxn(pdd_dose[bu_idx])
pdd_dose[fo_idx] = pdd_f_fxn(pdd_dose[fo_idx])
# normalize by physical distance (1/square(r))
# TESTING NO NORM
pdd_dose[idxs_oi] = np.divide(pdd_dose[idxs_oi], np.square(dist_pts))
# cleanup dose grid
pdd_dose[np.where(np.isnan(pdd_dose))] = 0.0
pdd_dose[np.where(pdd_dose < 0.0)] = 0.0
# print(time() - tic, 'sec to apply PDD')
del bu_idx
del fo_idx
else:
assert pdd_dose.shape == dose_shape, "PDD shape does not match dose shape"
# BUILD SPARSE MATRIX #############################################################################################
# here we form the "ray trace" matrix for computing dose given a fluence map
# TODO: double check behavior of np.digitize
# some variable shortcuts
x_map_n = len(x_map_centers)
z_map_n = len(z_map_centers)
def digitize_voxel_mtx(vol_pts, x_map_bounds, z_map_bounds, x_shift=0.0, z_shift=0.0):
# digitize the location of each voxel point on the fluence plane
v_dig_x = np.digitize(vol_pts[0] + x_shift, x_map_bounds) - 1
v_dig_z = np.digitize(vol_pts[2] + z_shift, z_map_bounds) - 1
# keep only valid indices within the fluence map
v_dig_valid = np.where((0 <= v_dig_x) & (v_dig_x < x_map_n) & (0 <= v_dig_z) & (v_dig_z < z_map_n))
tmp = pdd_dose[idxs_oi]
# form sparse dose matrix:
sparse_dose = tmp[v_dig_valid].flatten().copy()
fmap_width = len(x_map_bounds) - 1 # we subtract 1 here because boundaries are of length x_map_n+1
col_idx = v_dig_x[v_dig_valid] + fmap_width * (v_dig_z[v_dig_valid])
row_idx = v_dig_valid[0] # np.array(range(len(sparse_dose)))
del tmp
del v_dig_x
del v_dig_z
return csr_matrix((sparse_dose.astype(np.float32), (row_idx, col_idx)),
shape=(
len(idxs_oi[0]),
(x_map_boundaries.shape[0] - 1) * (z_map_boundaries.shape[0] - 1)
))
if anti_alias:
# this averages out beamlet contributions across neighboring voxels
# d = 2.
csr = None
N = 20
for x_shift in np.linspace(-beamlet_size_x/2.0, beamlet_size_x/2.0, N, endpoint=True):
for z_shift in np.linspace(-beamlet_size_z/2.0, beamlet_size_z/2.0, N, endpoint=True):
if csr is None:
csr = digitize_voxel_mtx(vol_proj, x_map_boundaries, z_map_boundaries, x_shift, z_shift)
else:
csr += digitize_voxel_mtx(vol_proj, x_map_boundaries, z_map_boundaries, x_shift, z_shift)
csr /= float(N)
else:
csr = digitize_voxel_mtx(vol_proj, x_map_boundaries, z_map_boundaries)
# print(col_idx.max())
# print(csr.shape)
# print(csr.nnz)
print("beam Dij time: ", round(time() - big_tic, 3), " sec")
return csr # , idxs_oi# v_dig_valid #, x_bins, z_bins
def _g_func(r, sig):
return np.exp(- np.square(r / sig) / 2.0) / sig / np.sqrt(2.0 * np.pi)
def _e_func(r, sig):
return np.exp(- np.abs(r / sig)) / 2.0
def _scatt_func(r, f1, f2, f3, sig1, sig2, sig3):
return f1 * _g_func(r, sig1) + f2 * _g_func(r, sig2) + f3 * _e_func(r, sig3)
def _scatter_kernel(x, y, popt, max_radius):
radii = np.sqrt(np.square(x) + np.square(y))
scatter_temp = _scatt_func(radii, *popt)
scatter_temp[np.where(radii > max_radius)] = 0.0
return scatter_temp / scatter_temp.max()
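# In formula form (matching the Python definitions above), the kernel
# assembled by _scatt_func is
#     k(r) = f1 * exp(-r^2 / (2*sig1^2)) / (sig1*sqrt(2*pi))
#          + f2 * exp(-r^2 / (2*sig2^2)) / (sig2*sqrt(2*pi))
#          + f3 * exp(-|r| / sig3) / 2,
# and _scatter_kernel evaluates it on the radius grid sqrt(x^2 + y^2), zeroes
# it beyond max_radius, and normalizes the result to a peak value of 1.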
# def _scatter_kernel(x, y, bx_size_x,bx_size_y,centers=None,energy='6X'):
# """
# :param x: grid of x locations in fluence map [mm]
# :param y: grid of y locations in fluence map [mm]
# :param bx_size_x: beamlet size along x dimension (i know i could just calculate it, sorry)
# :param bx_size_y:
# :param centers:
# :param energy: string for kernel configuration
# :return: 2d scatter kernel
# """
#
# _r = np.sqrt( np.square(x-centers[0]) + np.square(y-centers[1]) )
# return _scatt_func(_r,kernel_data[energy]['f1'],kernel_data[energy]['f2'],kernel_data[energy]['f3'],
# kernel_data[energy]['sig1'],kernel_data[energy]['sig2'],kernel_data[energy]['sig3'])
def make_sh2o_Dij_template(study, template, ct_group='ct_lowres', body_shell_path='structures/body/shell',
dose_mask_path='structures_lowres/body/mask', dij_cutoff=1e-4):
"""
Should create a Dij for each beam in the template, with the template containing field sizes, isocenters, and gantry angles
Parameters
----------
study
template
ct_group
body_shell_path
dose_mask_path
trial_name
anti_alias
Returns
-------
"""
pass
def _make_csr(_group):
m = _group['DijT_csr']
return csr_matrix((m['data'], m['indices'], m['indptr']), shape=m['shape'])
def get_DijT(study, dose_group='dose_sh2o'):
return _make_csr(study.h5[dose_group])
def make_maps_from_x(study, dose_group='dose_sh2o', alt_x_path=None):
if alt_x_path is None:
x = study['{}/x'.format(dose_group)]
else:
x = study[alt_x_path]
# count beams
n_beams = len(study['{}/beams'.format(dose_group)].keys())
aps = x.reshape((n_beams, -1))
N = aps.shape[1]
beamlet_size_xz_mm = study[
'{}/beams/{}/beamlet_size_xz_mm'.format(dose_group, 1)] # assume at least 1 beam all same size
xy_beamlet_ratio = beamlet_size_xz_mm[1] / beamlet_size_xz_mm[0]
n_flu_x = int(np.sqrt(N / xy_beamlet_ratio))
n_flu_y = int(xy_beamlet_ratio * n_flu_x)
assert n_flu_y * n_flu_x == N, "wrong xy_beamlet_ratio"
print(n_flu_x, n_flu_y)
print(n_beams)
for beam_number in range(1, n_beams + 1):
# we also compute jaw information; however, it's probably not much different from the original TPS plan
flu = aps[(beam_number - 1)].reshape(n_flu_y, n_flu_x)
idx = np.where(flu.sum(0) > 0)
x_idx_min = idx[0][0] - 1
x_idx_max = idx[0][-1] + 1
x_midpoint = float(n_flu_x) / 2.0
x_1 = (x_idx_min - x_midpoint) * beamlet_size_xz_mm[0]
x_2 = (x_idx_max - x_midpoint) * beamlet_size_xz_mm[0]
# print("x:", x_idx_min, x_idx_max, x_midpoint)
# print("x1mm:", x_1)
# print("x2mm:", x_2)
idx = np.where(flu.sum(1) > 0)
y_idx_min = idx[0][0] - 1
y_idx_max = idx[0][-1] + 1
y_midpoint = float(n_flu_y) / 2.0
y_1 = (y_idx_min - y_midpoint) * beamlet_size_xz_mm[1]
y_2 = (y_idx_max - y_midpoint) * beamlet_size_xz_mm[1]
# print("y", y_idx_min, y_idx_max, y_midpoint)
# print("y1mm:", y_1)
# print("y2mm:", y_2)
study.create_dataset('{}/beams/{}/{}'.format(dose_group, beam_number, 'jaw_x1'), x_1, compression=None) # mm
study.create_dataset('{}/beams/{}/{}'.format(dose_group, beam_number, 'jaw_x2'), x_2, compression=None)
study.create_dataset('{}/beams/{}/{}'.format(dose_group, beam_number, 'jaw_y1'), y_1, compression=None)
study.create_dataset('{}/beams/{}/{}'.format(dose_group, beam_number, 'jaw_y2'), y_2, compression=None)
study.create_dataset('{}/beams/{}/map'.format(dose_group, beam_number), flu.T)
def _make_sh2o_Dij_beam(dose_shape, idxs_oi, pts_3d_ct, pts_3d_shell_ct, pysapi_beam, field_size_mm,
field_buffer_mm,
beamlet_size_x_mm, beamlet_size_z_mm, anti_alias, use_beam_dose=False, ref_image=None):
""" Ready for use in PySAPI """
# since each beam could have a different isocenter
isocenter_mm = [pysapi_beam.IsocenterPosition.x, pysapi_beam.IsocenterPosition.y,
pysapi_beam.IsocenterPosition.z] # already in mm
pts_3d = pts_3d_ct - isocenter_mm
pts_3d_shell = pts_3d_shell_ct - isocenter_mm
gantry_angle_deg = pysapi_beam.ControlPoints[0].GantryAngle
assert np.all(
[cp.GantryAngle == gantry_angle_deg for cp in pysapi_beam.ControlPoints]), "Arc beams not implemented."
open_field_dose = None
if use_beam_dose:
if ref_image is None:
# use dose grid resolution
open_field_dose = pysapi_beam.Dose.np_array_like() # dose grid res
else:
# use image grid resolution
open_field_dose = pysapi_beam.Dose.np_array_like(ref_image)
assert open_field_dose.shape == dose_shape, "dose shape does not match beam dose or image shape"
# get dose from beam
csr = compute_Dij( # v_dig_valid, x_bins, y_bins
dose_shape,
idxs_oi,
pts_3d,
pts_3d_shell,
beam_energy=pysapi_beam.EnergyModeDisplayName,
SAD=pysapi_beam.TreatmentUnit.get_SourceAxisDistance(),
gantry_angle=gantry_angle_deg,
field_size=field_size_mm,
field_buffer=field_buffer_mm, # added to all sides
beamlet_size_x=beamlet_size_x_mm,
beamlet_size_z=beamlet_size_z_mm,
show_plots=False,
anti_alias=anti_alias,
pdd_dose=open_field_dose
)
return csr
def _make_sh2o_Dij_beam_bodymesh(dose_shape, idxs_oi, pts_3d_ct, bodymeshgeo, pysapi_beam, field_size_mm,
field_buffer_mm,
beamlet_size_x_mm, beamlet_size_z_mm, anti_alias, use_beam_dose=False, ref_image=None):
""" Ready for use in PySAPI """
import trimesh as tm # https://github.com/mikedh/trimesh
# since each beam could have a different isocenter
iso = pysapi_beam.IsocenterPosition # mm
isocenter_mm = [iso.x, iso.y, iso.z] # already in mm
pts_3d = pts_3d_ct - isocenter_mm
body_verts = np.array([(_.X - iso.x, _.Y - iso.y, _.Z - iso.z) for _ in bodymeshgeo.Positions])
body_faces = np.array([_ for _ in bodymeshgeo.TriangleIndices]).reshape((-1, 3))
gantry_angle_deg = pysapi_beam.ControlPoints[0].GantryAngle
assert np.all(
[cp.GantryAngle == gantry_angle_deg for cp in pysapi_beam.ControlPoints]), "Arc beams not implemented."
open_field_dose = None
if use_beam_dose:
if ref_image is None:
# use dose grid resolution
open_field_dose = pysapi_beam.Dose.np_array_like() # dose grid res
else:
# use image grid resolution
open_field_dose = pysapi_beam.Dose.np_array_like(ref_image)
assert open_field_dose.shape == dose_shape, "dose shape does not match beam dose or image shape"
# get dose from beam
csr = compute_Dij_bodymesh( # v_dig_valid, x_bins, y_bins
dose_shape,
idxs_oi,
pts_3d,
bodymesh=tm.Trimesh(vertices=body_verts, faces=body_faces),
beam_energy=pysapi_beam.EnergyModeDisplayName,
SAD=pysapi_beam.TreatmentUnit.get_SourceAxisDistance(),
gantry_angle=gantry_angle_deg,
field_size=field_size_mm,
field_buffer=field_buffer_mm, # added to all sides
beamlet_size_x=beamlet_size_x_mm,
beamlet_size_z=beamlet_size_z_mm,
show_plots=False,
anti_alias=anti_alias,
pdd_dose=open_field_dose
)
return csr
def _calc_num_px_for_field(field_size_mm, field_buffer_mm, beamlet_size_x_mm, beamlet_size_z_mm):
field_buffer_x_px = int(field_buffer_mm / beamlet_size_x_mm) # number of fluence pixels in buffer region
field_size_x_px = int(field_size_mm / beamlet_size_x_mm) # number of fluence pixels in field
x_map_n = 2 * field_buffer_x_px + field_size_x_px
field_buffer_z_px = int(field_buffer_mm / beamlet_size_z_mm) # number of fluence pixels in buffer region
field_size_z_px = int(field_size_mm / beamlet_size_z_mm) # number of fluence pixels in field
z_map_n = 2 * field_buffer_z_px + field_size_z_px
return x_map_n, z_map_n
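# Quick check (illustrative values, not from the original code): with a 100 mm
# field, a 5 mm buffer and 2.5 mm beamlets in both directions, the helper above
# returns 2*(5/2.5) + 100/2.5 = 44 fluence pixels along each axis.
_example_px = _calc_num_px_for_field(100., 5., 2.5, 2.5)  # -> (44, 44)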
def dose_influence_matrix(pysapi_plan, body_surface_pts, pts_3d_ct, dose_mask, max_scatter_radius_mm=20.0,
use_scatter = True,
anti_alias=False, verbose=False,
beamlet_size_x_mm=2.5, beamlet_size_z_mm=2.5, field_buffer_mm=5.0,
use_plan_dose=False, return_scatter_matrix=False, ref_image=None
):
""" Ready for use in PySAPI """
beam_energy = pysapi_plan.BeamsLot(0).EnergyModeDisplayName
assert np.all([beam_energy == b.EnergyModeDisplayName for b in
pysapi_plan.Beams]), "Beams having different energies is not implemented."
## 2.5 mm is the only resolution supported by ESAPI; other resolutions would require resampling
# beamlet_size_x_mm = 2.5 # 1.0 # this is minimum leaf resolution, set to 1.0 mm
# beamlet_size_z_mm = 2.5 # 5.0 # truebeam HD is 5.0mm and 10.0mm clinac is 10.0mm
# field_buffer_mm = 20.0 # or 5 cm, needed for penumbra?
idxs_oi = np.where(dose_mask > 0) # indexes Of Interest
# this should happen at full resulution if possible
pts_3d_shell_ct = body_surface_pts #pts_3d_ct[np.where(body_shell)]
field_size_mm = 0.
# scan control points to get max square field size
for beam_obj in pysapi_plan.Beams:
jaws_max_mm = np.max([np.abs([cp.get_JawPositions().get_X1() for cp in beam_obj.ControlPoints]).max(),
np.abs([cp.get_JawPositions().get_X2() for cp in beam_obj.ControlPoints]).max(),
np.abs([cp.get_JawPositions().get_Y1() for cp in beam_obj.ControlPoints]).max(),
np.abs([cp.get_JawPositions().get_Y2() for cp in beam_obj.ControlPoints]).max()])
tst_fsize = 2. * jaws_max_mm
if tst_fsize > field_size_mm:
field_size_mm = tst_fsize
# snap to nearest reasonable size (evenly divisible by beamlet size)
# enforce an even number of bixels for a better match with the TPS (strictly, this is only needed along the vertical axis)
temp_field_size = np.ceil(field_size_mm / beamlet_size_z_mm)  # api: numpy.ceil
import pandas as pd
import numpy as np
from gensim.models.wrappers import LdaMallet
from sklearn.metrics.pairwise import cosine_similarity
from gensim.corpora import Dictionary
from gensim import corpora
import pickle
import os
"""This class trains the Latent Dirichlet Allocation (LDA) Model on
painting description corpus.,
we want to compare the paintings by computing a similarity measure : cosine similarity"""
class LdaTraining:
path_to_mallet_bin = "/resources/mallet-2.0.6/bin/mallet" #path has to be absolute
os.environ['MALLET_HOME'] = "/resources/mallet-2.0.6/" #path has to be absolute
path_save_score = 'resources/datasets/'
path_save_outputs = 'resources/matrices/lda/'
path_save_model = 'resources/models/'
path_to_listwords = 'resources/datasets/preprocessed/list_words.txt'
path_to_dict = 'resources/datasets/preprocessed/dict'
path_to_corpus = 'resources/datasets/preprocessed/corpus'
painting_df = pd.read_csv('resources/datasets/ng-dataset.csv')
def __init__(self, num_topics):
self.num_topics = num_topics
def load_list_words(self, path_to_listwords):
"""Load the list of words"""
with open(path_to_listwords, "rb") as fp: # Unpickling
list_words = pickle.load(fp)
return list_words
def load_dictionary(self, path_to_dict):
"""Load the dictionary"""
dictionary = Dictionary.load(path_to_dict)
return dictionary
def load_corpus(self, path_to_corpus):
"""Load the corpus"""
corpus = corpora.MmCorpus(path_to_corpus)
return corpus
def LdaModel(self, num_topics, corpus, dictionary):
"""Create a LDA topic model
Input:
num_topics: number of topics for the model
corpus: gensim corpus
dictionary: gensim dictionary
Output:
lda_model: a topic model using Latent Dirichlet Allocation (LDA)
"""
lda_model = LdaMallet(mallet_path=self.path_to_mallet_bin, num_topics=num_topics, corpus=corpus, id2word=dictionary, random_seed=123)
return lda_model
def transform_output(self, lda_model, corpus):
"""Transform the topic document matrix into an ordered array of topic distribution
Input:
lda_model: LDA model
corpus: gensim corpus
Output:
lda_output: the sorted per-document topic distributions as a numpy array
"""
topdoc_mat = lda_model[corpus]
topdoc_sorted = self.sort_tuples(topdoc_mat)
lda_output = np.asarray(topdoc_sorted)  # api: numpy.asarray
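# Added sketch (not part of the original class): once per-painting topic
# distributions are arranged as a 2-D array (documents x topics), the
# comparison described in the module docstring is a cosine-similarity matrix.
# `doc_topic_matrix` is a hypothetical name for that array.
def _example_painting_similarity(doc_topic_matrix):
    # entry (i, j) is the cosine similarity between paintings i and j
    return cosine_similarity(doc_topic_matrix)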
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import glob
import os
import subprocess
import sys
import numpy as np
import chainer
import cv2 as cv
from c3d_ft import C3DVersion1
from chainer import Variable
sys.path.insert(0, '.')
from infer import get_models # isort:skip
from infer import vgen_forward # isort:skip
def calc_inception(ys):
p_all = np.mean(ys, axis=0, keepdims=True)
kl = ys * np.log(ys + 1e-7) - ys * np.log(p_all + 1e-7)
return np.exp(kl)  # api: numpy.exp
import numpy as np
import matplotlib.pyplot as plt
import torch
import pandas as pd
import torch.nn.functional as f
import sys
import os
sys.path.append(os.path.abspath('../loss_surface_vis'))
sys.path.append(os.path.abspath('../scripts'))
from loss_functions_electromagnetic import multiply_Eg_C
# Calculates | HC - EC |/ |HC|
def getEigError(eigenVector, eigenval, H):
eigenVector_shape_half = int(eigenVector.shape[1]/2)
batchC = eigenVector.view(-1, eigenVector.shape[1], 1)
batchEg = eigenval.view(-1, 2, 1)
HC = torch.matmul(H, batchC)
EC = multiply_Eg_C(batchEg, batchC)
EC = multiply_Eg_C(batchEg, EC)
EC = EC*2*2
EC = EC*(torch.tensor(np.pi))
EC = EC*(torch.tensor(np.pi))# This assumes lam0=1
Total = (HC - EC)**2
loss_phy = torch.sqrt(torch.sum(Total, dim=1))
loss_phy /= torch.sqrt(torch.sum(HC ** 2, dim=1))
return loss_phy
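# In symbols (a reading of the code above, assuming lam0 = 1 as noted in the
# comment): with C the eigenvector and E the predicted eigenvalue components,
# the returned quantity per sample is the relative residual
#     || H C - 4 * pi^2 * E^2 * C ||_2 / || H C ||_2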
def getCosSimMeasure(prediction, target):
return torch.nn.CosineSimilarity()(prediction, target)
def getMSE(prediction, target):
# scale the vectors so that the first element of both is equal
scale = torch.div(prediction[:, 0], target[:, 0]).view(prediction.shape[0], -1)
prediction_v = torch.div(prediction, scale)
# Calculate error
num_of_examples = target.shape[0]
error=torch.zeros(num_of_examples)
for i in range(num_of_examples):
error[i] = torch.nn.MSELoss()(prediction_v[i, :], target[i, :])
return error
def getOverlapIntegral(prediction, target, useRealComp=False):
# Convert to numpy complex format
shape_half = int(target.shape[1]/2)
prediction2 = prediction[:,:shape_half].cpu().detach().numpy() + 1j * prediction[:,shape_half:].cpu().detach().numpy()
target2 = target[:,:shape_half].cpu().detach().numpy() + 1j * target[:,shape_half:].cpu().detach().numpy()
# Calculate error
num_of_examples = prediction2.shape[0]
error=torch.zeros(num_of_examples)
for i in range(num_of_examples):
error1 = np.vdot(prediction2[i, :], target2[i, :]) # vdot takes care of conjugating automatically.
if useRealComp:
error1 = np.real(error1)
else:
error1 = np.absolute(error1)
error[i] = torch.tensor(error1)
return error
def getBxVsCosineSimilarity(Bx, batchInput, prediction, target, measure="Cos"):
numOfSamples = prediction.shape[0]
error_per_sample = torch.zeros(numOfSamples, 1)
# Normalize vectors
prediction_v = prediction[:, :-2]
prediction_v = f.normalize(prediction_v, p=2, dim=-1)
target_v = target[:, :-2]
target_v = f.normalize(target_v, p=2, dim=-1)
if measure=="Cos":
error = getCosSimMeasure(prediction_v, target_v)
elif measure=="OverInt":
error = getOverlapIntegral(prediction_v, target_v)
elif measure=="OverIntRealVal":
error = getOverlapIntegral(prediction_v, target_v, True)
elif measure=="MSE":
error = getMSE(prediction_v, target_v)
elif measure=="EigenError":
error = getEigError(prediction_v, prediction[:, -2:], batchInput).view(-1)
elif measure=="EigenError2":
error = getEigError(prediction_v, target[:, -2:], batchInput).view(-1)
else:
raise ValueError("unknown measure: %s" % measure)
error_per_sample[:, 0] = error
Bx_inds = torch.argsort(Bx, dim=0)
Bx_sorted = Bx[Bx_inds].view(Bx.shape[0], -1)
error_per_sample_sorted = error_per_sample[Bx_inds].view(error_per_sample.shape)
return torch.cat((Bx_sorted, error_per_sample_sorted), dim=1)
def getFittedData(Bx_vs_cosineSimilarity, x_range):
x_axis = (Bx_vs_cosineSimilarity)[:, 0].t().cpu().detach().numpy()
y_axis = (Bx_vs_cosineSimilarity)[:, 1].t().cpu().detach().numpy()
return np.transpose(y_axis)
def GetStatistics(Bx, Bx_vs_cosineSimilarity_list):
Bx_vs_cosineSimilarity_array = np.hstack(Bx_vs_cosineSimilarity_list).reshape(1, -1)
Bx_ = np.tile(Bx,(1,len(Bx_vs_cosineSimilarity_list)))
Bx_vs_cosineSimilarity_array = np.concatenate((Bx_, Bx_vs_cosineSimilarity_array), axis=0)
df = pd.DataFrame(Bx_vs_cosineSimilarity_array.T, columns=[ 'Bx','cosineSimilarity'])
df2 = df.groupby('Bx').mean().reset_index()
df3 = df.groupby('Bx').std().reset_index()
Bx_vs_cosineSimilarity_mean = df2['cosineSimilarity'].values.tolist()
Bx_vs_cosineSimilarity_std = df3['cosineSimilarity'].values.tolist()
return (Bx_vs_cosineSimilarity_mean, Bx_vs_cosineSimilarity_std)
# Plot helper
class PlotHelper():
def __init__(self, title, xLabel, yLabel, xticks, line_location):
self.fig, self.ax = plt.subplots()
self.ax.set_xlabel(xLabel)
self.ax.set_ylabel(yLabel)
self.ax.set_xticks(xticks)
self.colors = ['g', 'b', 'c', 'r', 'm', 'y', 'k']
self.ax.axvline(x=line_location,dashes=(5,5),color="black", lw=0.5)
def getColor(self, lineID, dashed):
clr = ''
if lineID != -1:
clr = self.colors[lineID % len(self.colors)]
if dashed == True:
clr = clr + '--'
return clr
def updatePlotXYSTD(self, x, y, std, legend, lineID=-1, dashed=False):
legend = legend.replace('BB', 'NN')
if legend == 'CoPhy':
legend = r'\emph{CoPhy}-PGNN'
if legend == 'Analogue':
legend = r'PGNN-\emph{analogue}'
y_np = np.asarray(y)  # api: numpy.asarray
import numpy as np
import cv2
from skimage import measure
def get_connected_components(y):
cc = measure.label(y, neighbors=8)
return np.array(cc, dtype='float32')
def interpolate_np(x, scale_factor, axes=(-1, -2, -3)):
for ax in axes:
x = np.repeat(x, scale_factor, axis=ax)
return x
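# Quick illustration (toy input, not from the original script): interpolate_np
# is nearest-neighbour upsampling by repetition along the last three axes, so a
# (1, 2, 2) volume with scale_factor=2 becomes (2, 4, 4).
_example_volume = np.zeros((1, 2, 2))
assert interpolate_np(_example_volume, 2).shape == (2, 4, 4)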
def fill3d(img3d, nodule, z_origin, z_spacing):
"""Fills LUNA target ``img3d`` with given ``nodule`` roi."""
img3d = np.float32(img3d)
for roi in nodule:
z = int((roi[0] - z_origin) / z_spacing)
pts = np.int32([roi[1]])
img = np.zeros_like(img3d[..., z], dtype='float32').T
img3d[::, ::, z] += cv2.fillPoly(img.copy(), pts, 1).T
return np.clip(img3d, 0, 1)  # api: numpy.clip
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 9 13:22:31 2021
Model Simulation & Grid Interpolation
@authors: <NAME> & <NAME>
"""
import numpy as np
import sys
from scipy.stats import norm
from scipy.stats import uniform
import scipy.special as sc
import mpmath
import scipy.integrate as si
import scipy.interpolate as interp
import scipy.optimize as optim
from scipy.stats import genextreme
## integration.cpp
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## Import C++ function library
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
##
##
## i.e., RW_marginal, pRW_me_interp, find_xrange_pRW_me
##
import os, ctypes
# g++ -std=c++11 -shared -fPIC -o p_integrand.so p_integrand.cpp
lib = ctypes.CDLL(os.path.abspath('./nonstat_model_noXs_global/p_integrand.so'))
i_and_o_type = np.ctypeslib.ndpointer(ndim=1, dtype=np.float64)
grid_type = np.ctypeslib.ndpointer(ndim=1, dtype=np.float64)
bool_type = np.ctypeslib.ndpointer(ndim=1, dtype='bool')
lib.pRW_me_interp_C.restype = ctypes.c_int
lib.pRW_me_interp_C.argtypes = (i_and_o_type, grid_type, grid_type,
ctypes.c_double, ctypes.c_double, ctypes.c_double, ctypes.c_int, ctypes.c_int,
i_and_o_type)
lib.RW_marginal_C.restype = ctypes.c_int
lib.RW_marginal_C.argtypes = (i_and_o_type,
ctypes.c_double, ctypes.c_double, ctypes.c_int,
i_and_o_type)
lib.RW_me_2_unifs.restype = ctypes.c_int
lib.RW_me_2_unifs.argtypes = (i_and_o_type, grid_type, grid_type, ctypes.c_double, i_and_o_type, ctypes.c_double,
ctypes.c_int, ctypes.c_int, ctypes.c_int, i_and_o_type)
lib.find_xrange_pRW_me_C.restype = ctypes.c_int
lib.find_xrange_pRW_me_C.argtypes = (ctypes.c_double, ctypes.c_double, ctypes.c_double, ctypes.c_double,
grid_type, grid_type, ctypes.c_double, ctypes.c_double, ctypes.c_double,
ctypes.c_int, i_and_o_type)
lib.pchip.restype = ctypes.c_int
lib.pchip.argtypes = (grid_type, grid_type, grid_type, grid_type, ctypes.c_int, ctypes.c_int)
lib.qRW_me_interp.restype = ctypes.c_int
lib.qRW_me_interp.argtypes = (i_and_o_type, grid_type, grid_type, ctypes.c_double, ctypes.c_double, ctypes.c_double,
ctypes.c_int, ctypes.c_int, i_and_o_type,
grid_type, grid_type, ctypes.c_int, ctypes.c_double, ctypes.c_double)
lib.RW_density_C.restype = ctypes.c_int
lib.RW_density_C.argtypes = (i_and_o_type,
ctypes.c_double, ctypes.c_double, ctypes.c_int,
i_and_o_type)
lib.dRW_me_interp_C.restype = ctypes.c_int
lib.dRW_me_interp_C.argtypes = (i_and_o_type, grid_type, grid_type,
ctypes.c_double, ctypes.c_double, ctypes.c_double, ctypes.c_int, ctypes.c_int,
i_and_o_type)
lib.density_interp_grid.restype = ctypes.c_int
lib.density_interp_grid.argtypes = (grid_type, i_and_o_type,
ctypes.c_double, ctypes.c_int, ctypes.c_int,
i_and_o_type, i_and_o_type)
lib.dgev_C.restype = ctypes.c_double
lib.dgev_C.argtypes = (ctypes.c_double, ctypes.c_double, ctypes.c_double, ctypes.c_double, ctypes.c_bool)
lib.dnorm_C.restype = ctypes.c_double
lib.dnorm_C.argtypes = (ctypes.c_double, ctypes.c_double, ctypes.c_double, ctypes.c_bool)
lib.marg_transform_data_mixture_me_likelihood_C.restype = ctypes.c_double
lib.marg_transform_data_mixture_me_likelihood_C.argtypes = (i_and_o_type, i_and_o_type, i_and_o_type,
bool_type, bool_type, i_and_o_type, i_and_o_type, i_and_o_type,
ctypes.c_double, i_and_o_type, ctypes.c_double,
grid_type, grid_type, ctypes.c_int, ctypes.c_int)
lib.marg_transform_data_mixture_me_likelihood_F.restype = ctypes.c_double
lib.marg_transform_data_mixture_me_likelihood_F.argtypes = (i_and_o_type, i_and_o_type, i_and_o_type,
bool_type, bool_type, i_and_o_type, i_and_o_type, i_and_o_type,
ctypes.c_double, ctypes.c_double, ctypes.c_double,
grid_type, grid_type, ctypes.c_int, ctypes.c_int)
lib.marg_transform_data_mixture_me_likelihood_global.restype = ctypes.c_double
lib.marg_transform_data_mixture_me_likelihood_global.argtypes = (i_and_o_type, i_and_o_type, i_and_o_type,
bool_type, bool_type, i_and_o_type, i_and_o_type, i_and_o_type,
ctypes.c_double, i_and_o_type, ctypes.c_double,
grid_type, grid_type, ctypes.c_int, ctypes.c_int, ctypes.c_int)
lib.Thresh_X_try.restype = ctypes.c_int
lib.Thresh_X_try.argtypes = (i_and_o_type, grid_type, grid_type, ctypes.c_double, ctypes.c_double, ctypes.c_double, ctypes.c_double,
ctypes.c_double, ctypes.c_double,
ctypes.c_int, ctypes.c_int, i_and_o_type, i_and_o_type)
lib.X_update.restype = ctypes.c_int
lib.X_update.argtypes = (i_and_o_type, grid_type, grid_type, i_and_o_type, ctypes.c_double, ctypes.c_double,
ctypes.c_double, ctypes.c_double,
ctypes.c_int, ctypes.c_int, ctypes.c_int, i_and_o_type)
lib.unifs_2_RW_me.restype = ctypes.c_int
lib.unifs_2_RW_me.argtypes = (i_and_o_type, grid_type, grid_type, ctypes.c_double, i_and_o_type, ctypes.c_double,
ctypes.c_double, ctypes.c_double,
ctypes.c_int, ctypes.c_int, ctypes.c_int, i_and_o_type)
lib.print_Vec.restype = ctypes.c_double
lib.print_Vec.argtypes = (i_and_o_type, ctypes.c_int, ctypes.c_int)
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## Generate Levy random samples
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
##
##
## i.e., Stable variables with alpha=1/2
##
def rlevy(n, m = 0, s = 1):
if np.any(s < 0):
sys.exit("s must be positive")
return s/norm.ppf(uniform.rvs(0,1,n)/2)**2 + m
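## Illustrative usage (commented out; a sanity check added here, not part of the original code).
## Levy(m=0, s) is heavy tailed with survival P(X > x) ~ sqrt(2*s/(pi*x)) for large x:
# samples = rlevy(100000, m=0, s=1)
# np.mean(samples > 100.) # roughly np.sqrt(2/(100*np.pi)) ~ 0.08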
## The density for R^phi in which R is levy distributed
def dR_power_phi(x, phi, m=0, s=1, log=False):
x_phi = x**(1/phi)
if np.any(x_phi <= m):
sys.exit("some x**phi <= m")
if np.any(s <= 0):
sys.exit("s must be positive")
tmp = np.sum(np.log(s/(2 * np.pi))/2 - 3 * np.log(x_phi - m)/2 - s/(2 * (x_phi -
m)) + (1/phi-1)*np.log(x)-np.log(phi))
    if not log:
tmp = np.exp(tmp)
return tmp
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## Calculate unregularized upper incomplete gamma function
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
##
##
## Negative values of a are allowed
##
def gammaincc_unregulized(a,x):
if(isinstance(x, (int, np.int64, float))): x=np.array([x])
    if np.any(x < 0): sys.exit("x must be positive")
if a>0:
return sc.gamma(a)*sc.gammaincc(a,x)
elif a<0:
return gammaincc_unregulized(a+1,x)/a-(x**a)*np.exp(-x)/a
else:
return mpmath.gammainc(0,x)
## Compare with mpmath.gammainc
## gammaincc_unregulized is more efficient
# import time
#
# start_time = time.time()
# gammaincc_unregulized(-3.62,5)
# time.time() - start_time
# start_time = time.time()
# mpmath.gammainc(-3.62,5)
# time.time() - start_time
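## Additional illustrative check (commented out; not part of the original code): for a < 0
## the function uses the recursion Gamma(a, x) = (Gamma(a+1, x) - x**a * exp(-x)) / a, e.g.
# gammaincc_unregulized(-0.5, 2.0)
# (gammaincc_unregulized(0.5, 2.0) - 2.0**(-0.5)*np.exp(-2.0)) / (-0.5) # same value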
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## Calculate the exact marginal survival function for R^phi*W
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
##
##
##
def RW_marginal_uni(x,phi,gamma,survival = True):
tmp1 = gamma/(2*(x**(1/phi)))
tmp2 = (gamma/2)**phi/sc.gamma(0.5)
res = sc.gammainc(0.5,tmp1) + tmp2*gammaincc_unregulized(0.5-phi,tmp1)/x
if survival:
return res
else:
return 1-res
RW_marginal = np.vectorize(RW_marginal_uni)
def RW_marginal_asymp(x,phi,gamma):
if phi<0.5:
moment = ((2*gamma)**phi)*sc.gamma(1-2*phi)/sc.gamma(1-phi)
return moment/x
elif phi>0.5:
return np.sqrt(2*gamma/np.pi)*(x**(-1/(2*phi)))/(1-1/(2*phi))
else:
return np.sqrt(2*gamma/np.pi)*np.log(x)/x
def RW_quantile_asymp(p,phi,gamma):
if phi<0.5:
moment = ((2*gamma)**phi)*sc.gamma(1-2*phi)/sc.gamma(1-phi)
return moment/(1-p)
elif phi>0.5:
return (np.sqrt(2*gamma/np.pi)/(1-1/(2*phi))/(1-p))**(2*phi)
else:
tmp = (1-p)/np.sqrt(2*gamma/np.pi)
return tmp/sc.lambertw(tmp)
# # Compare the exact and asymptotic CDF
# gamma = 1.2; x =10; phi=0.3
# import matplotlib.pyplot as plt
# axes = plt.gca()
# axes.set_ylim([0,0.125])
# X_vals = np.linspace(100,1500,num=200)
# P_vals = RW_marginal(X_vals,phi,gamma)
# P_asym = RW_marginal_asymp(X_vals,phi,gamma)
# plt.plot(X_vals, P_vals, 'b')
# plt.plot(X_vals, P_asym, 'r',linestyle='--');
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## Calculate the marginal survival function for R^phi*W + epsilon
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
##
##
# ---------------- 1. Define integrand in Python: exact form ---------------- #
def mix_distn_integrand(t, xval, phi, tmp1, tmp2, tau_sqd):
diff = xval - t
tmp3 = tmp1/(diff**(1/phi))
res = sc.gammainc(0.5,tmp3) + tmp2*gammaincc_unregulized(0.5-phi,tmp3)/diff
result = res * np.exp(-t**2/(2*tau_sqd))
return result
def pRW_me_uni(xval, phi, gamma, tau_sqd):
tmp1 = gamma/2
tmp2 = ((gamma/2)**phi)/sc.gamma(0.5)
sd = np.sqrt(tau_sqd)
I_1 = si.quad(mix_distn_integrand, -np.inf, xval, args=(xval, phi, tmp1, tmp2, tau_sqd)) # 0.00147
tmp = norm.cdf(xval, loc=0.0, scale=sd)-I_1[0]/np.sqrt(2*np.pi*tau_sqd)
if tmp<0.999:
return tmp
else:
return RW_marginal_uni(xval,phi,gamma,survival = False)
pRW_me = np.vectorize(pRW_me_uni)
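## Illustrative comparison (commented out; parameter values are arbitrary, not from the
## original code): for small tau_sqd the noisy CDF approaches the smooth-process CDF.
# phi = 0.3; gamma = 1.2; x_try = np.linspace(1, 50, 10)
# pRW_me(x_try, phi, gamma, tau_sqd=0.01)
# RW_marginal(x_try, phi, gamma, survival=False) # should be close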
# ----------- 2. Define integrand in Python: linear interpolation ----------- #
# Actually BETTER than direct numerical integration because the integrand has no singularities.
# We use the Trapezoidal rule.
## **** (0). Generate a GRIDDED set of values for P(RW>x) ****
def survival_interp_grid(phi, gamma, grid_size=800):
xp_1 = np.linspace(0.000001, 200, grid_size, endpoint = False)
xp_2 = np.linspace(200.5, 900, int(grid_size/4), endpoint = False)
xp_3 = np.linspace(900.5, 100000, int(grid_size/10), endpoint = False)
xp = np.concatenate((xp_1, xp_2, xp_3))
xp = xp[::-1] # reverse order
xp = np.ascontiguousarray(xp, np.float64) #C contiguous order: xp.flags['C_CONTIGUOUS']=True?
n_xval = len(xp); surv_p = np.empty(n_xval)
tmp_int = lib.RW_marginal_C(xp, phi, gamma, n_xval, surv_p)
    if tmp_int!=1: sys.exit('C implementation failed.')
# surv_p = RW_marginal(xp, phi, gamma)
return (xp, surv_p)
## **** (1). Vectorize univariate function ****
def pRW_me_uni_interp(xval, xp, surv_p, tau_sqd):
tp = xval-xp
integrand_p = np.exp(-tp**2/(2*tau_sqd)) * surv_p
sd = np.sqrt(tau_sqd)
I_1 = sum(np.diff(tp)*(integrand_p[:-1] + integrand_p[1:])/2) # 0.00036
tmp = norm.cdf(xval, loc=0.0, scale=sd)-I_1/np.sqrt(2*np.pi*tau_sqd)
return tmp
def pRW_me_interp_slower(xval, xp, surv_p, tau_sqd):
return np.array([pRW_me_uni_interp(xval_i, xp, surv_p, tau_sqd) for xval_i in xval])
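## Illustrative usage (commented out; a sketch assuming the compiled C library `lib` loaded
## above is available -- otherwise the pure-Python RW_marginal line inside
## survival_interp_grid can be substituted):
# phi = 0.3; gamma = 1.2; tau_sqd = 1.0
# xp, surv_p = survival_interp_grid(phi, gamma)
# pRW_me_uni_interp(10.0, xp, surv_p, tau_sqd) # one CDF value via the trapezoidal rule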
## **** (2). Broadcast matrices and vectorize columns ****
def pRW_me_interp_py(xval, xp, surv_p, tau_sqd, phi, gamma):
if(isinstance(xval, (int, np.int64, float))): xval=np.array([xval], dtype='float64')
tmp = np.zeros(xval.shape) # Store the results
# Use the smooth process CDF values if tau_sqd<0.05
if tau_sqd>0.05:
which = (xval<820)
else:
which = np.repeat(False, xval.shape)
# Calculate for values that are less than 820
if(np.sum(which)>0):
xval_less = xval[which]
tp = xval_less-xp[:,np.newaxis]
integrand_p = np.exp(-tp**2/(2*tau_sqd)) * surv_p[:,np.newaxis]
sd = | np.sqrt(tau_sqd) | numpy.sqrt |
import os.path as osp
import numpy as np
from torch.utils import data
from PIL import Image
from dataset.transforms import *
import torchvision.transforms as standard_transforms
class cityscapesDataSet(data.Dataset):
def __init__(self, args, root, list_path, max_iters=None, set='val'):
self.root = root
self.list_path = list_path
train_input_transform = []
train_input_transform += [standard_transforms.ToTensor(),
standard_transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]
cityscape_transform_list = [joint_transforms.RandomSizeAndCrop(args.input_size, False, pre_size=None,
scale_min=0.5, scale_max=1.0, ignore_index=255),
joint_transforms.Resize(args.input_size),
joint_transforms.RandomHorizontallyFlip()]
self.joint_transform = joint_transforms.Compose(cityscape_transform_list)
self.target_transform = extended_transforms.MaskToTensor()
self.transform = standard_transforms.Compose(train_input_transform)
self.img_ids = [i_id.strip() for i_id in open(list_path)]
        if max_iters is not None:
self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))
self.files = []
self.set = set
for name in self.img_ids:
img_file = osp.join(self.root, "leftImg8bit/%s/%s" % (self.set, name))
label_file = osp.join(self.root, "gtFine_trainvaltest/gtFine/%s/%s" % (self.set, name)).replace("leftImg8bit", "gtFine_labelIds")
self.files.append({
"img": img_file,
"label": label_file,
"name": name
})
self.id_to_trainid = {7: 0, 8: 1, 11: 2, 12: 3, 13: 4, 17: 5,
19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12,
26: 13, 27: 14, 28: 15, 31: 16, 32: 17, 33: 18}
print('{} images are loaded!'.format(len(self.files)))
def __len__(self):
return len(self.files)
def __getitem__(self, index):
datafiles = self.files[index]
image = Image.open(datafiles["img"]).convert('RGB')
label = Image.open(datafiles["label"])
name = datafiles["name"]
label = | np.asarray(label, np.uint8) | numpy.asarray |
import logging
import threading
import os
import sys
import numpy as np
import cv2
from collections import deque
from argparse import ArgumentParser, SUPPRESS
from math import exp as exp
from enum import Enum
from time import perf_counter
from openvino.inference_engine import IECore
import monitors
class YoloParams:
def __init__(self,param, side):
self.num = 3 if 'num' not in param else int(param['num'])
self.coords = 4 if 'coords' not in param else int(param['coords'])
self.classes = 80 if 'classes' not in param else int(param['classes'])
self.side = side
self.anchors = [10.0, 13.0, 16.0, 30.0, 33.0, 23.0, 30.0, 61.0, 62.0, 45.0, 59.0, 119.0, 116.0, 90.0, 156.0,
198.0,
373.0, 326.0] if 'anchors' not in param else [float(a) for a in param['anchors'].split(',')]
self.isYoloV3 = False
if param.get('mask'):
mask = [int(idx) for idx in param['mask'].split(',')]
self.num = len(mask)
maskedAnchors = []
for idx in mask:
maskedAnchors += [self.anchors[idx * 2], self.anchors[idx * 2 + 1]]
self.anchors = maskedAnchors
self.isYoloV3 = True # Weak way to determine but the only one.
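# Illustrative example (hypothetical values, not from the original code): a masked
# YOLOv3 output layer keeps only the anchor pairs selected by 'mask'.
# params = YoloParams({'classes': '80', 'num': '9', 'mask': '3,4,5',
#                      'anchors': '10,13,16,30,33,23,30,61,62,45,59,119,116,90,156,198,373,326'},
#                     side=26)
# params.num == 3 # True
# params.anchors == [30.0, 61.0, 62.0, 45.0, 59.0, 119.0] # True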
class Modes(Enum):
USER_SPECIFIED = 0
MIN_LATENCY = 1
class Mode():
def __init__(self, value):
self.current = value
def next(self):
if self.current.value + 1 < len(Modes):
self.current = Modes(self.current.value + 1)
else:
self.current = Modes(0)
class ModeInfo():
def __init__(self):
self.last_start_time = perf_counter()
self.last_end_time = None
self.frames_count = 0
self.latency_sum = 0
def scale_bbox(x, y, height, width, class_id, confidence, im_h, im_w, is_proportional):
if is_proportional:
scale = np.array([min(im_w/im_h, 1), min(im_h/im_w, 1)])
offset = 0.5*( | np.ones(2) | numpy.ones |
import numpy as np
from ..utils.construct_observation_matrix import construct_model_based_A_matrix
class AnisotropicSignalModelProperties:
def __init__(self):
pass
def __call__(self):
raise NotImplementedError()
def rotational_harmonics_representation(
self, acquisition_scheme, **kwargs):
r""" The rotational harmonics of the model, such that Y_lm = Yl0.
Axis aligned with z-axis to be used as kernel for spherical
convolution. Returns an array with rotational harmonics for each shell.
Parameters
----------
acquisition_scheme : DmipyAcquisitionScheme instance,
An acquisition scheme that has been instantiated using dMipy.
kwargs: keyword arguments to the model parameter values,
Is internally given as **parameter_dictionary.
Returns
-------
rh_array : array, shape(Nshells, N_rh_coef),
Rotational harmonics coefficients for each shell.
"""
rh_scheme = acquisition_scheme.rotational_harmonics_scheme
kwargs.update({'mu': [0., 0.]})
E_kernel_sf = self(rh_scheme, **kwargs)
E_reshaped = E_kernel_sf.reshape([-1, rh_scheme.Nsamples])
max_sh_order = max(rh_scheme.shell_sh_orders.values())
rh_array = np.zeros((len(E_reshaped), max_sh_order // 2 + 1))
for i, (shell_index, sh_order) in enumerate(
rh_scheme.shell_sh_orders.items()):
rh_array[i, :sh_order // 2 + 1] = (
np.dot(
rh_scheme.inverse_rh_matrix[sh_order],
E_reshaped[i])
)
return rh_array
def spherical_mean(self, acquisition_scheme, **kwargs):
"""
Estimates spherical mean for every shell in acquisition scheme.
Parameters
----------
acquisition_scheme : DmipyAcquisitionScheme instance,
An acquisition scheme that has been instantiated using dMipy.
kwargs: keyword arguments to the model parameter values,
Is internally given as **parameter_dictionary.
Returns
-------
E_mean : float,
spherical mean of the model for every acquisition shell.
"""
E_mean = | np.ones_like(acquisition_scheme.shell_bvalues) | numpy.ones_like |
import numpy as np
import imghdr
from enum import Enum
import tensorflow as tf
import matplotlib.pyplot as plt
import cv2
import csv
from .dataset import createDataset
from .model_interface import ModelInterface
from .query_strategy.prototypical import Prototypical
from active_tester.query_strategy.noisy_label_uncertainty import LabelUncertainty
from active_tester.query_strategy.classifier_uncertainty import ClassifierUncertainty
from .estimators.learned import Learned
class InteractiveType(Enum):
PLOTLIB = 1
OPENFILE = 2
VISUALIZER = 3
RAWX = 4
IMGBYTEX = 5
class ActiveTester:
'''
ActiveTester class
Description: the main interface for users to perform their active
    testing. The expected user flow is to call gen_data -> gen_model_predictions()
    -> query_oracle (optional) -> test(). From there, the user can call various
    getter methods to grab metrics of interest, or retrieve the result map and
    access metrics directly from the dictionary.
'''
# Set in constructor or setters
estimator = None # function
query_strategy = None # function
# Set in standardize_data
X = np.asarray([]) # np.array()
X_is_img = False
X_feature_label = np.asarray([]) # np.array()
Y_ground_truth = np.asarray([]) # np.array()
Y_noisy = np.asarray([]) # np.array()
classes = {} # dict {'class': int}
dataset = None # dataset._Dataset object
# Set in gen_model_predictions()
model_results = {} # map {'model_labels': np.array(), 'Y_prime_prob': np.array(), ...(unsure)}
# Set in gen_data or query_oracle
Y_vetted = np.asarray([]) # np.array()
# Set in test()
test_results = {} # map {'tester_labels': np.array(), tester_prob: np.array(), tester_metric: np.array()}
def __init__(self, estimator, query_strategy):
'''
Description: constructor that takes estimator and query_strategy as input
:param estimator: function used to estimate ground truth labels
:param query_strategy: function used to specify samples for querying oracle
:return: None
'''
self.estimator = estimator
self.query_strategy = query_strategy
self.rearrange=None
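    # Illustrative workflow sketch (hedged: constructor arguments for Learned and
    # ClassifierUncertainty are omitted because their signatures are not shown here,
    # and the method calls simply mirror the flow described in the class docstring):
    # at = ActiveTester(estimator=Learned(...), query_strategy=ClassifierUncertainty(...))
    # at.gen_data(...)
    # at.gen_model_predictions()
    # at.query_oracle(...) # optional
    # at.test()
    # results = at.get_test_results()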
# Getters and setters
def get_X(self):
return self.X
def set_X(self, X):
self.X = X
def set_Y_noisy(self, Y_noisy):
self.Y_noisy = Y_noisy
def set_confidence(self, confidence):
self.confidence = confidence
def get_model_results(self):
return self.model_results
def set_model_results(self, model_results):
self.model_results = model_results
def set_prob_array(self, model_results):
self.model_results['probabilities'] = model_results
def get_test_results(self):
return self.test_results
def get_Y_vetted(self):
return self.Y_vetted
def set_Y_vetted(self, Y_vetted):
self.Y_vetted = Y_vetted
def _check_ground_truth(self):
check = True
if self.Y_ground_truth.size != 0:
for i in self.Y_ground_truth:
if i == -1:
check = False
break
else:
check = False
return check
def _check_Y_vetted(self):
num_classes = len(self.classes)
class_found = [False for x in range(num_classes)]
found_count = 0
for _class in self.Y_vetted:
if _class != -1:
if not class_found[_class]:
class_found[_class] = True
found_count += 1
if found_count == num_classes:
return True
return False
def _query_vetted_index(self, indices, interactive, interactive_type, raw=None, visualizer=None, class_labels=None):
for index in indices:
# Check if indices are integers
if not isinstance(index, (int, np.integer)):
print('Error: indices are not int values')
return
# Condition if interactive; display and query user input
current_vetted_label = 0
if interactive:
self._visualize_row(interactive_type, index, raw)
print("The available labels are: %s" % list(self.classes.keys()))
# Query for label
vetted_label = None
valid = False
while not valid:
try:
vetted_label = input("Label the provided item: ").lower()
self.Y_vetted[index] = self.classes[vetted_label]
current_vetted_label = self.classes[vetted_label]
valid = True
print("\n")
except KeyError:
print("Only accept the following labels: %s" % list(self.classes.keys()))
# Query ground truth if interactive is disabled
else:
self.Y_vetted[index] = self.Y_ground_truth[index]
current_vetted_label = self.Y_ground_truth[index]
# Class_labels only used if calling _query_vetted_index from pre-processing step
if class_labels is not None:
class_labels[current_vetted_label] = True
return class_labels
def _visualize_row(self, interactive_type, index, raw):
'''
Description: visualize row to user
:param interactive_type: Enum type of how to visualize row to user
:param index: <INT> index of row to display
:param raw: <STRING> file to display to user
:return: None
'''
if interactive_type == InteractiveType.PLOTLIB:
img = cv2.imread( | np.array(raw) | numpy.array |
import numpy as np
# CREATING AN ARRAY:
# Array created from a list with np.array():
vetor1 = | np.array([0, 1, 2, 3, 4, 5, 6, 7, 8]) | numpy.array |
import numpy as np
import logging
logger = logging.getLogger(name=__name__)
from ...sgmcmc_sampler import SGMCMCHelper
from ..._utils import random_categorical, lower_tri_mat_inv
class SLDSHelper(SGMCMCHelper):
""" LGSSM Helper
forward_message (dict) with keys
x (dict):
log_constant (double) log scaling const
mean_precision (ndarray) mean precision
precision (ndarray) precision
z (dict):
prob_vector (ndarray) dimension num_states
log_constant (double) log scaling const
x_prev (ndarray)
z_prev (ndarray)
backward_message (dict) with keys
x (dict):
log_constant (double) log scaling const
mean_precision (ndarray) mean precision
precision (ndarray) precision
z (dict):
likelihood_vector (ndarray) dimension num_states
log_constant (double) log scaling const
x_next (ndarray)
z_next (ndarray)
"""
def __init__(self, num_states, n, m,
forward_message=None, backward_message=None,
**kwargs):
self.num_states = num_states
self.n = n
self.m = m
if forward_message is None:
forward_message = {
'x': {
'log_constant': 0.0,
'mean_precision': np.zeros(self.n),
'precision': np.eye(self.n)/10,
},
'z': {
'log_constant': 0.0,
'prob_vector': np.ones(self.num_states)/self.num_states,
},
}
self.default_forward_message=forward_message
if backward_message is None:
backward_message = {
'x': {
'log_constant': 0.0,
'mean_precision': np.zeros(self.n),
'precision': np.zeros((self.n, self.n)),
},
'z': {
'log_constant': np.log(self.num_states),
'likelihood_vector':
np.ones(self.num_states)/self.num_states,
},
}
self.default_backward_message=backward_message
return
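    # Illustrative construction (commented out; a minimal sketch, not from the original code):
    # helper = SLDSHelper(num_states=2, n=1, m=1)
    # helper.default_forward_message['z']['prob_vector'] # array([0.5, 0.5])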
def _forward_messages(self, observations, parameters, forward_message,
x=None, z=None, **kwargs):
if z is not None:
if x is not None:
raise ValueError("Either x or z can be conditioned on")
# Forward Messages conditioned on z
return self._x_forward_messages(
observations=observations,
z=z,
parameters=parameters,
forward_message=forward_message,
**kwargs
)
elif x is not None:
# Forward Messages conditioned on z
return self._z_forward_messages(
observations=observations,
x=x,
parameters=parameters,
forward_message=forward_message,
**kwargs
)
else:
raise ValueError("Requires x or z be passed to condition on")
def _backward_messages(self, observations, parameters, backward_message, x=None, z=None, **kwargs):
if z is not None:
if x is not None:
raise ValueError("Either x or z can be conditioned on")
# Forward Messages conditioned on z
return self._x_backward_messages(
observations=observations,
z=z,
parameters=parameters,
backward_message=backward_message,
**kwargs
)
elif x is not None:
# Forward Messages conditioned on z
return self._z_backward_messages(
observations=observations,
x=x,
parameters=parameters,
backward_message=backward_message,
**kwargs
)
else:
raise ValueError("Requires x or z be passed to condition on")
## Helper Functions conditioned on z
def _x_forward_messages(self, observations, z, parameters, forward_message,
weights=None, tqdm=None, only_return_last=False):
# Return list of forward messages Pr(x_{t} | y_{<=t}, z)
# y is num_obs x m matrix
num_obs = np.shape(observations)[0]
if not only_return_last:
forward_messages = [None]*(num_obs+1)
forward_messages[0] = forward_message
mean_precision = forward_message['x']['mean_precision']
precision = forward_message['x']['precision']
log_constant = forward_message['x']['log_constant']
z_prev = forward_message.get('z_prev', None)
Pi = parameters.pi
A = parameters.A
LQinv = parameters.LQinv
Qinv = np.array([np.dot(LQinv_k, LQinv_k.T)
for LQinv_k in LQinv])
AtQinv = np.array([np.dot(A_k.T, Qinv_k)
for (A_k, Qinv_k) in zip(A, Qinv)])
AtQinvA = np.array([np.dot(AtQinv_k, A_k)
for (A_k, AtQinv_k) in zip(A, AtQinv)])
C = parameters.C
LRinv = parameters.LRinv
Rinv = np.dot(LRinv, LRinv.T)
CtRinv = np.dot(C.T, Rinv)
CtRinvC = np.dot(CtRinv, C)
pbar = range(num_obs)
if tqdm is not None:
pbar = tqdm(pbar)
pbar.set_description("forward messages")
for t in pbar:
y_cur = observations[t]
z_cur = z[t]
weight_t = 1.0 if weights is None else weights[t]
# Calculate Predict Parameters
J = np.linalg.solve(AtQinvA[z_cur] + precision, AtQinv[z_cur])
pred_mean_precision = np.dot(J.T, mean_precision)
pred_precision = Qinv[z_cur] - np.dot(AtQinv[z_cur].T, J)
# Calculate Observation Parameters
y_mean = np.dot(C,
np.linalg.solve(pred_precision, pred_mean_precision))
y_precision = Rinv - np.dot(CtRinv.T,
np.linalg.solve(CtRinvC + pred_precision, CtRinv))
log_constant += weight_t * (
-0.5 * np.dot(y_cur-y_mean,
np.dot(y_precision, y_cur-y_mean)) + \
0.5 * np.linalg.slogdet(y_precision)[1] + \
-0.5 * self.m * np.log(2*np.pi)
)
if z_prev is not None:
log_constant += weight_t * np.log(Pi[z_prev, z_cur])
# Calculate Filtered Parameters
new_mean_precision = pred_mean_precision + np.dot(CtRinv, y_cur)
new_precision = pred_precision + CtRinvC
# Save Messages
mean_precision = new_mean_precision
precision = new_precision
z_prev = z_cur
if not only_return_last:
forward_messages[t+1] = dict(
x={
'mean_precision': mean_precision,
'precision': precision,
'log_constant': log_constant,
},
z_prev=z_prev,
)
if only_return_last:
last_message = dict(
x={
'mean_precision': mean_precision,
'precision': precision,
'log_constant': log_constant,
},
z_prev=z_prev,
)
return last_message
else:
return forward_messages
def _x_backward_messages(self, observations, z, parameters, backward_message,
weights=None, tqdm=None, only_return_last=False):
# Return list of backward messages Pr(y_{>t} | x_t, z)
        # y is num_obs x m matrix
num_obs = np.shape(observations)[0]
if not only_return_last:
backward_messages = [None]*(num_obs+1)
backward_messages[-1] = backward_message
mean_precision = backward_message['x']['mean_precision']
precision = backward_message['x']['precision']
log_constant = backward_message['x']['log_constant']
z_next = backward_message.get('z_next', None)
Pi = parameters.pi
A = parameters.A
LQinv = parameters.LQinv
Qinv = np.array([np.dot(LQinv_k, LQinv_k.T)
for LQinv_k in LQinv])
AtQinv = np.array([np.dot(A_k.T, Qinv_k)
for (A_k, Qinv_k) in zip(A, Qinv)])
AtQinvA = np.array([np.dot(AtQinv_k, A_k)
for (A_k, AtQinv_k) in zip(A, AtQinv)])
C = parameters.C
LRinv = parameters.LRinv
Rinv = np.dot(LRinv, LRinv.T)
CtRinv = np.dot(C.T, Rinv)
CtRinvC = np.dot(CtRinv, C)
pbar = reversed(range(num_obs))
if tqdm is not None:
pbar = tqdm(pbar)
pbar.set_description("backward messages")
for t in pbar:
y_cur = observations[t]
z_cur = z[t]
weight_t = 1.0 if weights is None else weights[t]
# Helper Values
xi = Qinv[z_cur] + precision + CtRinvC
L = np.linalg.solve(xi, AtQinv[z_cur].T)
vi = mean_precision + np.dot(CtRinv, y_cur)
# Calculate new parameters
log_constant += weight_t * (
-0.5 * self.m * np.log(2.0*np.pi) + \
np.sum(np.log(np.diag(LRinv))) + \
np.sum(np.log(np.diag(LQinv[z_cur]))) + \
-0.5 * np.linalg.slogdet(xi)[1] + \
-0.5 * np.dot(y_cur, np.dot(Rinv, y_cur)) + \
0.5 * np.dot(vi, np.linalg.solve(xi, vi))
)
if z_next is not None:
log_constant += weight_t * np.log(Pi[z_cur, z_next])
new_mean_precision = np.dot(L.T, vi)
new_precision = AtQinvA[z_cur] - np.dot(AtQinv[z_cur], L)
# Save Messages
mean_precision = new_mean_precision
precision = new_precision
z_next = z_cur
if not only_return_last:
backward_messages[t] = dict(x={
'mean_precision': mean_precision,
'precision': precision,
'log_constant': log_constant,
}, z_next=z_next)
if only_return_last:
last_message = dict(x={
'mean_precision': mean_precision,
'precision': precision,
'log_constant': log_constant,
}, z_next=z_next)
return last_message
else:
return backward_messages
def _x_marginal_loglikelihood(self, observations, z, parameters,
forward_message=None, backward_message=None, weights=None,
**kwargs):
# Run forward pass + combine with backward pass
# y is num_obs x m matrix
# forward_pass is Pr(x_{T-1} | y_{<=T-1})
forward_pass = self._forward_message(
observations=observations,
z=z,
parameters=parameters,
forward_message=forward_message,
weights=weights,
**kwargs)
weight_T = 1.0 if weights is None else weights[-1]
# Calculate the marginal loglikelihood of forward + backward message
f_mean_precision = forward_pass['x']['mean_precision']
f_precision = forward_pass['x']['precision']
c_mean_precision = f_mean_precision + backward_message['x']['mean_precision']
c_precision = f_precision + backward_message['x']['precision']
loglikelihood = forward_pass['x']['log_constant'] + \
(backward_message['x']['log_constant'] + \
+0.5 * np.linalg.slogdet(f_precision)[1] + \
-0.5 * np.linalg.slogdet(c_precision)[1] + \
-0.5 * np.dot(f_mean_precision,
np.linalg.solve(f_precision, f_mean_precision)
) + \
0.5 * np.dot(c_mean_precision,
np.linalg.solve(c_precision, c_mean_precision)
)
) * weight_T
z_next = backward_message.get('z_next')
z_prev = forward_pass.get('z_prev')
if (z_next is not None) and (z_prev is not None):
loglikelihood = loglikelihood + weight_T * np.log(
parameters.pi[z_prev, z_next])
return loglikelihood
def _x_gradient_marginal_loglikelihood(self, observations, z, parameters,
forward_message=None, backward_message=None, weights=None,
tqdm=None):
Pi, expanded_pi = parameters.pi, parameters.expanded_pi
A, LQinv, C, LRinv = \
parameters.A, parameters.LQinv, parameters.C, parameters.LRinv
# Forward Pass
# forward_messages = [Pr(x_{t} | z, y_{-inf:t}), y{t}] for t=-1,...,T-1
forward_messages = self.forward_pass(
observations=observations,
z=z,
parameters=parameters,
forward_message=forward_message,
include_init_message=True)
# Backward Pass
# backward_messages = [Pr(y_{t+1:inf} | z,x_{t}), y{t}] for t=-1,...,T-1
backward_messages = self.backward_pass(
observations=observations,
z=z,
parameters=parameters,
backward_message=backward_message,
include_init_message=True)
# Gradients
grad = {var: np.zeros_like(value)
for var, value in parameters.as_dict().items()}
grad['LQinv'] = np.zeros_like(parameters.LQinv)
grad['LRinv'] = np.zeros_like(parameters.LRinv)
# Helper Constants
Rinv = np.dot(LRinv, LRinv.T)
RinvC = np.dot(Rinv, C)
CtRinvC = np.dot(C.T, RinvC)
LRinv_diaginv = np.diag(np.diag(LRinv)**-1)
Qinv = np.array([np.dot(LQinv_k, LQinv_k.T)
for LQinv_k in LQinv])
QinvA = np.array([np.dot(Qinv_k, A_k)
for (A_k, Qinv_k) in zip(A, Qinv)])
AtQinvA = np.array([np.dot(A_k.T, QinvA_k)
for (A_k, QinvA_k) in zip(A, QinvA)])
LQinv_diaginv = np.array([np.diag(np.diag(LQinv_k)**-1)
for LQinv_k in LQinv])
# Emission Gradients
p_bar = zip(forward_messages[1:], backward_messages[1:], observations)
if tqdm is not None:
            p_bar = tqdm(p_bar)
            p_bar.set_description("emission gradient loglike")
for t, (forward_t, backward_t, y_t) in enumerate(p_bar):
weight_t = 1.0 if weights is None else weights[t]
# Pr(x_t | y)
c_mean_precision = \
forward_t['x']['mean_precision'] + \
backward_t['x']['mean_precision']
c_precision = \
forward_t['x']['precision'] + backward_t['x']['precision']
x_mean = np.linalg.solve(c_precision, c_mean_precision)
xxt_mean = np.linalg.inv(c_precision) + np.outer(x_mean, x_mean)
# Gradient of C
grad['C'] += weight_t * (np.outer(np.dot(Rinv, y_t), x_mean) + \
-1.0 * np.dot(RinvC, xxt_mean))
# Gradient of LRinv
#raise NotImplementedError("SHOULD CHECK THE MATH FOR LRINV")
Cxyt = np.outer(np.dot(C, x_mean), y_t)
CxxtCt = np.dot(C, np.dot(xxt_mean, C.T))
grad['LRinv'] += weight_t * (LRinv_diaginv + \
-1.0*np.dot(np.outer(y_t, y_t) - Cxyt - Cxyt.T + CxxtCt, LRinv))
# Transition Gradients
p_bar = zip(forward_messages[0:-1], backward_messages[1:], observations, z)
if tqdm is not None:
            p_bar = tqdm(p_bar)
            p_bar.set_description("transition gradient loglike")
for t, (forward_t, backward_t, y_t, z_t) in enumerate(p_bar):
weight_t = 1.0 if weights is None else weights[t]
# Pr(x_t, x_t+1 | y)
c_mean_precision = \
np.concatenate([
forward_t['x']['mean_precision'],
backward_t['x']['mean_precision'] + np.dot(RinvC.T,y_t)
])
c_precision = \
np.block([
[forward_t['x']['precision'] + AtQinvA[z_t],
-QinvA[z_t].T],
[-QinvA[z_t],
backward_t['x']['precision'] + CtRinvC + Qinv[z_t]]
])
c_mean = np.linalg.solve(c_precision, c_mean_precision)
c_cov = np.linalg.inv(c_precision)
xp_mean = c_mean[0:self.n]
xn_mean = c_mean[self.n:]
xpxpt_mean = c_cov[0:self.n, 0:self.n] + np.outer(xp_mean, xp_mean)
xnxpt_mean = c_cov[self.n:, 0:self.n] + np.outer(xn_mean, xp_mean)
xnxnt_mean = c_cov[self.n:, self.n:] + np.outer(xn_mean, xn_mean)
# Gradient of A
grad['A'][z_t] += weight_t * (np.dot(Qinv[z_t],
xnxpt_mean - np.dot(A[z_t],xpxpt_mean)))
# Gradient of LQinv
Axpxnt = np.dot(A[z_t], xnxpt_mean.T)
AxpxptAt = np.dot(A[z_t], np.dot(xpxpt_mean, A[z_t].T))
grad['LQinv'][z_t] += weight_t * (LQinv_diaginv[z_t] + \
-1.0*np.dot(xnxnt_mean - Axpxnt - Axpxnt.T + AxpxptAt,
LQinv[z_t]))
# Latent State Gradients
z_prev = forward_message.get('z_prev') if forward_message is not None else None
for t, z_t in enumerate(z):
weight_t = 1.0 if weights is None else weights[t]
if z_prev is not None:
if parameters.pi_type == "logit":
logit_pi_grad_t = -Pi[z_prev] + 0.0
logit_pi_grad_t[z_t] += 1.0
grad['logit_pi'][z_prev] += weight_t * logit_pi_grad_t
elif parameters.pi_type == "expanded":
expanded_pi_grad_t = - Pi[z_prev] / expanded_pi[z_prev]
expanded_pi_grad_t[z_t] += 1.0 / expanded_pi[z_prev, z_t]
grad['expanded_pi'][z_prev] += weight_t * expanded_pi_grad_t
z_prev = z_t
grad['LQinv_vec'] = np.array([grad_LQinv_k[np.tril_indices(self.n)]
for grad_LQinv_k in grad.pop('LQinv')])
grad['LRinv_vec'] = grad.pop('LRinv')[np.tril_indices(self.m)]
return grad
def _x_predictive_loglikelihood(self, observations, z, parameters, lag=10,
forward_message=None, backward_message=None, **kwargs):
if forward_message is None:
forward_message = self.default_forward_message
if backward_message is None:
backward_message = self.default_backward_message
# Calculate Filtered
if lag == 0:
forward_messages = self.forward_pass(
observations=observations,
z=z,
parameters=parameters,
forward_message=forward_message,
**kwargs)
else:
forward_messages = self.forward_pass(
observations=observations[0:-lag],
z=z[0:-lag],
parameters=parameters,
forward_message=forward_message,
**kwargs)
loglike = 0.0
A = parameters.A
Q = parameters.Q
C = parameters.C
R = parameters.R
for t in range(lag, np.shape(observations)[0]):
y_cur = observations[t]
z_cur = z[t]
# Calculate Pr(x_t | y_{<=t-lag}, theta)
mean_precision = forward_messages[t-lag]['x']['mean_precision']
precision = forward_messages[t-lag]['x']['precision']
mean = np.linalg.solve(precision, mean_precision)
var = np.linalg.inv(precision)
for l in range(lag):
mean = np.dot(A[z_cur], mean)
var = np.dot(A[z_cur], np.dot(var, A[z_cur].T)) + Q[z_cur]
y_mean = np.dot(C, mean)
y_var = np.dot(C, np.dot(var, C.T)) + R
log_like_t = -0.5 * np.dot(y_cur - y_mean,
np.linalg.solve(y_var, y_cur - y_mean)) + \
-0.5 * np.linalg.slogdet(y_var)[1] + \
-0.5 * self.m * np.log(2*np.pi)
loglike += log_like_t
return loglike
def _x_latent_var_sample(self, observations, z, parameters,
forward_message=None, backward_message=None,
distribution='smoothed', tqdm=None):
""" Sample latent vars from observations
Args:
observations (ndarray): num_obs by n observations
z (ndarray): num_obs latent states
parameters (LGSSMParameters): parameters
forward_message (dict): alpha message
backward_message (dict): beta message
distr (string): 'smoothed', 'filtered', 'predict'
smoothed: sample X from Pr(X | Y, theta)
filtered: sample X_t from Pr(X_t | Y_<=t, theta) iid for all t
predictive: sample X_t from Pr(X_t | Y_<t, theta) iid for all t
Returns
x (ndarray): num_obs sampled latent values (in R^n)
"""
if forward_message is None:
forward_message = self.default_forward_message
if backward_message is None:
backward_message = self.default_backward_message
A = parameters.A
LQinv = parameters.LQinv
Qinv = np.array([np.dot(LQinv_k, LQinv_k.T)
for LQinv_k in LQinv])
AtQinv = np.array([np.dot(A_k.T, Qinv_k)
for (A_k, Qinv_k) in zip(A, Qinv)])
AtQinvA = np.array([np.dot(AtQinv_k, A_k)
for (A_k, AtQinv_k) in zip(A, AtQinv)])
L = | np.shape(observations) | numpy.shape |
"""
Implementation of the paper 'ATOMO: Communication-efficient Learning via Atomic Sparsification'
This is mainly based on the code available at https://github.com/hwang595/ATOMO
Since the basic (transform-domain) version was not available there, I implemented Alg. 1.
"""
import numpy as np
import scipy.linalg as sla
class atomo_quantizer:
def __init__(self, rank, spectral_method=True, T=None):
self._spectral = spectral_method
self._rank = rank
self._T = T
def quantize(self, X, reconstructed=True):
if self._spectral:
return self._spectral_atomo(X, reconstructed)
else:
return self._transform_atomo(X, reconstructed)
def _spectral_atomo(self, X, reconstructed):
orig_shape = X.shape
if X.ndim != 2:
X = _resize_to_2d(X)
u, s, vT = sla.svd(X, full_matrices=False)
i, probs = _sample_svd(s, self._rank)
u = u[:, i]
s = s[i] / probs
vT = vT[i, :]
if reconstructed:
xh = np.dot(np.dot(u, np.diag(s)), vT)
Xh = np.reshape(xh, newshape=orig_shape)
return Xh
else:
return u, s, vT
def _transform_atomo(self, X, reconstructed):
"""
Original ATOMO formulation
It assumes that transform matrix is orthonormal.
"""
x = np.reshape(X, -1)
coeffs = np.matmul(self._T.T, x)
abs_c = np.abs(coeffs)
sort_idx = np.argsort(abs_c)[::-1]
i, probs = _atomo_probabilities(abs_c[sort_idx], self._rank)
i = sort_idx[i]
coeffs = coeffs[i] / probs
if reconstructed:
xh = np.matmul(self._T[:, i], coeffs)
Xh = np.reshape(xh, newshape=X.shape)
return Xh
else:
return i, coeffs, probs
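# Illustrative usage (commented out; shapes and values are hypothetical, not from the
# original code):
# q = atomo_quantizer(rank=3, spectral_method=True)
# X = np.random.randn(64, 32)
# Xh = q.quantize(X) # unbiased low-rank reconstruction built from sampled singular triplets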
def _resize_to_2d(x):
"""
    Reshape an array with x.ndim > 2 into a 2-D matrix.
    If x.shape = (a, b, *c), it is assumed that each (a, b) pair has its relevant information in c.
"""
shape = x.shape
if x.ndim == 1:
n = x.shape[0]
return x.reshape((n // 2, 2))
if all([s == 1 for s in shape[2:]]):
return x.reshape((shape[0], shape[1]))
# each of (a, b) has related features
x = x.reshape((shape[0], shape[1], -1))
# stack those related features into a tall matrix
x_tmp = x.reshape((shape[0] * shape[1], -1))
tmp_shape = x_tmp.shape
return x_tmp.reshape((int(tmp_shape[0] / 2), int(tmp_shape[1] * 2)))
def _sample_svd(s, rank=0):
if s[0] < 1e-6:
return [0], np.array([1.0])
probs = s / s[0] if rank == 0 else rank * s / s.sum()
for i, p in enumerate(probs):
if p > 1:
probs[i] = 1
sampled_idx = []
sample_probs = []
for i, p in enumerate(probs):
#if np.random.rand() < p:
        # random sampling from a Bernoulli distribution
if np.random.binomial(1, p):
sampled_idx += [i]
sample_probs += [p]
rank_hat = len(sampled_idx)
if rank_hat == 0: # or (rank != 0 and np.abs(rank_hat - rank) >= 3):
return _sample_svd(s, rank=rank)
return | np.array(sampled_idx, dtype=int) | numpy.array |
import numpy as np
from ..preprocessing.stack import StackedObservation
from ..dataset import TransitionMiniBatch
def _make_batches(episode, window_size, n_frames):
n_batches = len(episode) // window_size
if len(episode) % window_size != 0:
n_batches += 1
for i in range(n_batches):
head_index = i * window_size
last_index = min(head_index + window_size, len(episode))
transitions = episode.transitions[head_index:last_index]
batch = TransitionMiniBatch(transitions, n_frames)
yield batch
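# Illustrative note (not from the original code): an episode with 2500 transitions and
# window_size=1024 yields 3 batches containing 1024, 1024 and 452 transitions.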
def td_error_scorer(algo, episodes, window_size=1024):
""" Returns average TD error (in negative scale).
This metics suggests how Q functions overfit to training sets.
If the TD error is large, the Q functions are overfitting.
.. math::
\\mathbb{E}_{s_t, a_t, r_{t+1}, s_{t+1} \\sim D}
            [(Q_\\theta (s_t, a_t)
              - (r_{t+1} + \\gamma \\max_a Q_\\theta (s_{t+1}, a)))^2]
Args:
algo (d3rlpy.algos.base.AlgoBase): algorithm.
episodes (list(d3rlpy.dataset.Episode)): list of episodes.
window_size (int): mini-batch size to compute.
Returns:
float: negative average TD error.
"""
total_errors = []
for episode in episodes:
for batch in _make_batches(episode, window_size, algo.n_frames):
# estimate values for current observations
values = algo.predict_value(batch.observations, batch.actions)
# estimate values for next observations
next_actions = algo.predict(batch.next_observations)
next_values = algo.predict_value(batch.next_observations,
next_actions)
# calculate td errors
mask = (1.0 - np.asarray(batch.terminals)).reshape(-1)
rewards = | np.asarray(batch.next_rewards) | numpy.asarray |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Operator collections as math/calculation objects for Model classes"""
from abc import ABC, abstractmethod
from typing import Union, List, Optional
from copy import copy
import numpy as np
from scipy.sparse import issparse
from scipy.sparse.csr import csr_matrix
from qiskit import QiskitError
from qiskit.quantum_info.operators.operator import Operator
from qiskit_dynamics.array import Array, wrap
from qiskit_dynamics.type_utils import to_array, to_csr, to_BCOO, vec_commutator, vec_dissipator
try:
import jax.numpy as jnp
from jax.experimental import sparse as jsparse
# sparse versions of jax.numpy operations
jsparse_sum = jsparse.sparsify(jnp.sum)
jsparse_matmul = jsparse.sparsify(jnp.matmul)
jsparse_add = jsparse.sparsify(jnp.add)
jsparse_subtract = jsparse.sparsify(jnp.subtract)
def jsparse_linear_combo(coeffs, mats):
"""Method for computing a linear combination of sparse arrays."""
return jsparse_sum(jnp.broadcast_to(coeffs[:, None, None], mats.shape) * mats, axis=0)
# sparse version of computing A @ X @ B
jsparse_triple_product = jsparse.sparsify(lambda A, X, B: A @ X @ B)
except ImportError:
pass
class BaseOperatorCollection(ABC):
r"""Abstract class representing a two-variable matrix function.
This class represents a function :math:`c,y \mapsto \Lambda(c, y)`,
which is assumed to be decomposed as
:math:`\Lambda(c, y) = (G_d + \sum_jc_jG_j) y`
for matrices :math:`G_d` and :math:`G_j`, with
:math:`G_d` referred to as the static operator.
Describes an interface for evaluating the map or its action on ``y``,
given the 1d set of values :math:`c_j`.
"""
def __init__(
self,
static_operator: Optional[any] = None,
operators: Optional[any] = None,
):
"""Initialize.
Accepted types are determined by concrete subclasses.
Args:
operators: (k,n,n) Array specifying the terms :math:`G_j`.
static_operator: (n,n) Array specifying the extra static_operator :math:`G_d`.
"""
self.operators = operators
self.static_operator = static_operator
@property
def static_operator(self) -> Array:
"""Returns static part of operator collection."""
@static_operator.setter
def static_operator(self, new_static_operator: Optional[Array] = None):
"""Sets static_operator term."""
@property
def operators(self) -> Array:
"""Return operators."""
@operators.setter
def operators(self, new_operators: Array) -> Array:
"""Return operators."""
@abstractmethod
def evaluate(self, signal_values: Array) -> Array:
r"""Evaluate the map."""
@abstractmethod
def evaluate_rhs(self, signal_values: Union[List[Array], Array], y: Array) -> Array:
r"""Compute the function."""
def __call__(
self, signal_values: Union[List[Array], Array], y: Optional[Array] = None
) -> Array:
"""Call either ``self.evaluate`` or ``self.evaluate_rhs`` depending on number of
arguments.
"""
if y is None:
return self.evaluate(signal_values)
return self.evaluate_rhs(signal_values, y)
def copy(self):
"""Return a copy of self."""
return copy(self)
class DenseOperatorCollection(BaseOperatorCollection):
r"""Concrete operator collection representing a function computing left
multiplication by an affine combination of matrices.
Concrete instance of ``BaseOperatorCollection`` in which
:math:`G_d` and :math:`G_j` are dense arrays.
"""
@property
def static_operator(self) -> Array:
"""Returns static part of operator collection."""
return self._static_operator
@static_operator.setter
def static_operator(self, new_static_operator: Array):
"""Sets static_operator term."""
self._static_operator = to_array(new_static_operator)
@property
def operators(self) -> Array:
"""Operators in the collection."""
return self._operators
@operators.setter
def operators(self, new_operators: Array):
self._operators = to_array(new_operators)
def evaluate(self, signal_values: Union[Array, None]) -> Array:
r"""Evaluate the affine combination of matrices.
Returns:
Evaluated model.
Raises:
QiskitError: if both static_operator and operators are None
"""
if self._static_operator is not None and self._operators is not None:
return np.tensordot(signal_values, self._operators, axes=1) + self._static_operator
elif self._static_operator is None and self._operators is not None:
return np.tensordot(signal_values, self._operators, axes=1)
elif self._static_operator is not None:
return self._static_operator
else:
raise QiskitError(
self.__class__.__name__
+ """ with None for both static_operator and
operators cannot be evaluated."""
)
def evaluate_rhs(self, signal_values: Union[Array, None], y: Array) -> Array:
"""Evaluates the function."""
return np.dot(self.evaluate(signal_values), y)
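# Illustrative usage (commented out; a hedged sketch, not from the original code): a
# single-qubit generator G(c) = Z + c_0 * X applied to a statevector.
# Z = np.array([[1., 0.], [0., -1.]]); X = np.array([[0., 1.], [1., 0.]])
# coll = DenseOperatorCollection(static_operator=Z, operators=np.array([X]))
# coll(np.array([0.5]), np.array([1., 0.])) # equivalent to (Z + 0.5 * X) @ [1., 0.]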
class SparseOperatorCollection(BaseOperatorCollection):
r"""Sparse version of DenseOperatorCollection."""
def __init__(
self,
static_operator: Optional[Union[Array, Operator]] = None,
operators: Optional[Union[Array, List[Operator]]] = None,
decimals: Optional[int] = 10,
):
"""Initialize.
Args:
static_operator: (n,n) Array specifying the static_operator term :math:`G_d`.
operators: (k,n,n) Array specifying the terms :math:`G_j`.
decimals: Values will be rounded at ``decimals`` places after decimal.
"""
self._decimals = decimals
super().__init__(static_operator=static_operator, operators=operators)
@property
def static_operator(self) -> csr_matrix:
return self._static_operator
@static_operator.setter
def static_operator(self, new_static_operator: csr_matrix):
if new_static_operator is not None:
self._static_operator = np.round(to_csr(new_static_operator), self._decimals)
else:
self._static_operator = None
@property
def operators(self) -> List[csr_matrix]:
if self._operators is None:
return None
return list(self._operators)
@operators.setter
def operators(self, new_operators: List[csr_matrix]):
if new_operators is not None:
new_operators_to_csr = to_csr(list(new_operators))
new_operators = np.empty(shape=len(new_operators_to_csr), dtype="O")
for idx, new_op in enumerate(new_operators_to_csr):
new_operators[idx] = csr_matrix(np.round(new_op, self._decimals))
self._operators = new_operators
def evaluate(self, signal_values: Union[Array, None]) -> csr_matrix:
r"""Sparse version of ``DenseOperatorCollection.evaluate``.
Args:
signal_values: Coefficients :math:`c_j`.
Returns:
Generator as sparse array.
Raises:
QiskitError: If collection cannot be evaluated.
"""
if self._static_operator is not None and self._operators is not None:
return (
np.tensordot(signal_values, self._operators, axes=1).item() + self._static_operator
)
elif self._static_operator is None and self._operators is not None:
return np.tensordot(signal_values, self._operators, axes=1).item()
elif self.static_operator is not None:
return self._static_operator
raise QiskitError(
self.__class__.__name__
+ """ with None for both static_operator and
operators cannot be evaluated."""
)
def evaluate_rhs(self, signal_values: Union[Array, None], y: Array) -> Array:
if len(y.shape) == 2:
# For 2d array, compute linear combination then multiply
gen = self.evaluate(signal_values)
return gen.dot(y)
elif len(y.shape) == 1:
# For a 1d array, multiply individual matrices then compute linear combination
tmparr = np.empty(shape=(1), dtype="O")
tmparr[0] = y
if self._static_operator is not None and self._operators is not None:
return np.dot(signal_values, self._operators * tmparr) + self.static_operator.dot(y)
elif self._static_operator is None and self._operators is not None:
return np.dot(signal_values, self._operators * tmparr)
elif self.static_operator is not None:
return self.static_operator.dot(y)
raise QiskitError(
self.__class__.__name__
+ """ with None for both static_operator and
operators cannot be evaluated."""
)
raise QiskitError(self.__class__.__name__ + """ cannot evaluate RHS for y.ndim > 3.""")
class JAXSparseOperatorCollection(BaseOperatorCollection):
"""Jax version of SparseOperatorCollection built on jax.experimental.sparse.BCOO."""
@property
def static_operator(self) -> "BCOO":
return self._static_operator
@static_operator.setter
def static_operator(self, new_static_operator: Union["BCOO", None]):
self._static_operator = to_BCOO(new_static_operator)
@property
def operators(self) -> Union["BCOO", None]:
return self._operators
@operators.setter
def operators(self, new_operators: Union["BCOO", None]):
self._operators = to_BCOO(new_operators)
def evaluate(self, signal_values: Union[Array, None]) -> "BCOO":
r"""Jax sparse version of ``DenseOperatorCollection.evaluate``.
Args:
signal_values: Coefficients :math:`c_j`.
Returns:
Generator as sparse jax array.
Raises:
QiskitError: If collection cannot be evaluated.
"""
if signal_values is not None and isinstance(signal_values, Array):
signal_values = signal_values.data
if self._static_operator is not None and self._operators is not None:
return jsparse_linear_combo(signal_values, self._operators) + self._static_operator
elif self._static_operator is None and self._operators is not None:
return jsparse_linear_combo(signal_values, self._operators)
elif self.static_operator is not None:
return self._static_operator
raise QiskitError(
self.__class__.__name__
+ """ with None for both static_operator and
operators cannot be evaluated."""
)
def evaluate_rhs(self, signal_values: Union[Array, None], y: Array) -> Array:
if y.ndim < 3:
if isinstance(y, Array):
y = y.data
return Array(jsparse_matmul(self.evaluate(signal_values), y))
raise QiskitError(self.__class__.__name__ + """ cannot evaluate RHS for y.ndim >= 3.""")
class BaseLindbladOperatorCollection(ABC):
r"""Abstract class representing a two-variable matrix function for evaluating
the right hand side of the Lindblad equation.
In particular, this object represents the function:
.. math::
\Lambda(c_1, c_2, \rho) = -i[H_d + \sum_j c_{1,j}H_j,\rho]
+ \sum_j(D_j\rho D_j^\dagger
                                      - (1/2) * \{D_j^\dagger D_j, \rho\})
                                    + \sum_j c_{2,j}(L_j\rho L_j^\dagger
                                      - (1/2) * \{L_j^\dagger L_j, \rho\})
    where :math:`[\cdot,\cdot]` and :math:`\{\cdot,\cdot\}` are the operator
commutator and anticommutator, respectively.
Describes an interface for evaluating the map or its action on :math:`\rho`,
given a pair of 1d sets of values :math:`c_1, c_2`.
"""
def __init__(
self,
static_hamiltonian: Optional[any] = None,
hamiltonian_operators: Optional[any] = None,
static_dissipators: Optional[any] = None,
dissipator_operators: Optional[any] = None,
):
r"""Initialize collection. Argument types depend on concrete subclass.
Args:
static_hamiltonian: Constant term :math:`H_d` to be added to the Hamiltonian of the
system.
hamiltonian_operators: Specifies breakdown of Hamiltonian
as :math:`H(t) = \sum_j s(t) H_j+H_d` by specifying H_j. (k,n,n) array.
static_dissipators: Constant dissipator terms.
dissipator_operators: the terms :math:`L_j` in Lindblad equation. (m,n,n) array.
"""
self.static_hamiltonian = static_hamiltonian
self.hamiltonian_operators = hamiltonian_operators
self.static_dissipators = static_dissipators
self.dissipator_operators = dissipator_operators
@property
@abstractmethod
def static_hamiltonian(self) -> Array:
"""Returns static part of the hamiltonian."""
@static_hamiltonian.setter
@abstractmethod
def static_hamiltonian(self, new_static_operator: Optional[Array] = None):
"""Sets static_operator term."""
@property
@abstractmethod
def hamiltonian_operators(self) -> Array:
"""Returns operators for non-static part of Hamiltonian."""
@hamiltonian_operators.setter
@abstractmethod
def hamiltonian_operators(self, new_hamiltonian_operators: Optional[Array] = None):
"""Set operators for non-static part of Hamiltonian."""
@property
@abstractmethod
def static_dissipators(self) -> Array:
"""Returns operators for static part of dissipator."""
@static_dissipators.setter
@abstractmethod
def static_dissipators(self, new_static_dissipators: Optional[Array] = None):
"""Sets operators for static part of dissipator."""
@property
@abstractmethod
def dissipator_operators(self) -> Array:
"""Returns operators for non-static part of dissipator."""
@dissipator_operators.setter
@abstractmethod
def dissipator_operators(self, new_dissipator_operators: Optional[Array] = None):
"""Sets operators for non-static part of dissipator."""
@abstractmethod
def evaluate_hamiltonian(self, ham_sig_vals: Union[None, Array]) -> Union[csr_matrix, Array]:
"""Evaluate the Hamiltonian of the model."""
@abstractmethod
def evaluate(
self, ham_sig_vals: Union[None, Array], dis_sig_vals: Union[None, Array]
) -> Union[csr_matrix, Array]:
r"""Evaluate the map."""
@abstractmethod
def evaluate_rhs(
self, ham_sig_vals: Union[None, Array], dis_sig_vals: Union[None, Array], y: Array
) -> Array:
r"""Compute the function."""
def __call__(
self, ham_sig_vals: Union[None, Array], dis_sig_vals: Union[None, Array], y: Optional[Array]
) -> Union[csr_matrix, Array]:
"""Evaluate the model, or evaluate the RHS."""
if y is None:
return self.evaluate(ham_sig_vals, dis_sig_vals)
return self.evaluate_rhs(ham_sig_vals, dis_sig_vals, y)
def copy(self):
"""Return a copy of self."""
return copy(self)
class DenseLindbladCollection(BaseLindbladOperatorCollection):
r"""Object for computing the right hand side of the Lindblad equation
with dense arrays.
"""
@property
def static_hamiltonian(self) -> Array:
return self._static_hamiltonian
@static_hamiltonian.setter
def static_hamiltonian(self, new_static_hamiltonian: Optional[Array] = None):
self._static_hamiltonian = to_array(new_static_hamiltonian)
@property
def hamiltonian_operators(self) -> Array:
return self._hamiltonian_operators
@hamiltonian_operators.setter
def hamiltonian_operators(self, new_hamiltonian_operators: Optional[Array] = None):
self._hamiltonian_operators = to_array(new_hamiltonian_operators)
@property
def static_dissipators(self) -> Array:
return self._static_dissipators
@static_dissipators.setter
def static_dissipators(self, new_static_dissipators: Optional[Array] = None):
self._static_dissipators = to_array(new_static_dissipators)
if self._static_dissipators is not None:
self._static_dissipators_adj = np.conjugate(
np.transpose(self._static_dissipators, [0, 2, 1])
).copy()
self._static_dissipators_product_sum = -0.5 * np.sum(
np.matmul(self._static_dissipators_adj, self._static_dissipators), axis=0
)
@property
def dissipator_operators(self) -> Array:
return self._dissipator_operators
@dissipator_operators.setter
def dissipator_operators(self, new_dissipator_operators: Optional[Array] = None):
self._dissipator_operators = to_array(new_dissipator_operators)
if self._dissipator_operators is not None:
self._dissipator_operators_adj = np.conjugate(
np.transpose(self._dissipator_operators, [0, 2, 1])
).copy()
self._dissipator_products = np.matmul(
self._dissipator_operators_adj, self._dissipator_operators
)
def evaluate(self, ham_sig_vals: Array, dis_sig_vals: Array) -> Array:
raise ValueError("Non-vectorized Lindblad collections cannot be evaluated without a state.")
def evaluate_hamiltonian(self, ham_sig_vals: Union[None, Array]) -> Array:
r"""Compute the Hamiltonian.
Args:
ham_sig_vals: [Real] values of :math:`s_j` in :math:`H = \sum_j s_j(t) H_j + H_d`.
Returns:
Hamiltonian matrix.
Raises:
QiskitError: If collection not sufficiently specified.
"""
if self._static_hamiltonian is not None and self._hamiltonian_operators is not None:
return (
np.tensordot(ham_sig_vals, self._hamiltonian_operators, axes=1)
+ self._static_hamiltonian
)
elif self._static_hamiltonian is None and self._hamiltonian_operators is not None:
return np.tensordot(ham_sig_vals, self._hamiltonian_operators, axes=1)
elif self._static_hamiltonian is not None:
return self._static_hamiltonian
else:
raise QiskitError(
self.__class__.__name__
+ """ with None for both static_hamiltonian and
hamiltonian_operators cannot evaluate Hamiltonian."""
)
def evaluate_rhs(
self, ham_sig_vals: Union[None, Array], dis_sig_vals: Union[None, Array], y: Array
) -> Array:
r"""Evaluates Lindblad equation RHS given a pair of signal values
for the hamiltonian terms and the dissipator terms. Expresses
the RHS of the Lindblad equation as :math:`(A+B)y + y(A-B) + C`, where
.. math::
A = (-1/2)*\sum_jD_j^\dagger D_j + (-1/2)*\sum_j\gamma_j(t) L_j^\dagger L_j,
B = -iH,
C = \sum_j \gamma_j(t) L_j y L_j^\dagger.
Args:
ham_sig_vals: hamiltonian coefficient values, :math:`s_j(t)`.
dis_sig_vals: dissipator signal values, :math:`\gamma_j(t)`.
y: density matrix as (n,n) Array representing the state at time :math:`t`.
Returns:
RHS of Lindblad equation
.. math::
                -i[H,y] + \sum_j \gamma_j(t)(L_j y L_j^\dagger - (1/2) * \{L_j^\dagger L_j, y\}).
Raises:
QiskitError: If operator collection is underspecified.
"""
hamiltonian_matrix = None
if self._static_hamiltonian is not None or self._hamiltonian_operators is not None:
hamiltonian_matrix = -1j * self.evaluate_hamiltonian(ham_sig_vals) # B matrix
# if dissipators present (includes both hamiltonian is None and is not None)
if self._dissipator_operators is not None or self._static_dissipators is not None:
# A matrix
if self._static_dissipators is None:
dissipators_matrix = np.tensordot(
-0.5 * dis_sig_vals, self._dissipator_products, axes=1
)
elif self._dissipator_operators is None:
dissipators_matrix = self._static_dissipators_product_sum
else:
dissipators_matrix = self._static_dissipators_product_sum + np.tensordot(
-0.5 * dis_sig_vals, self._dissipator_products, axes=1
)
if hamiltonian_matrix is not None:
left_mult_contribution = np.matmul(hamiltonian_matrix + dissipators_matrix, y)
right_mult_contribution = np.matmul(y, dissipators_matrix - hamiltonian_matrix)
else:
left_mult_contribution = np.matmul(dissipators_matrix, y)
right_mult_contribution = np.matmul(y, dissipators_matrix)
if len(y.shape) == 3:
# Must do array broadcasting and transposition to ensure vectorization works
y = np.broadcast_to(y, (1, y.shape[0], y.shape[1], y.shape[2])).transpose(
[1, 0, 2, 3]
)
if self._static_dissipators is None:
both_mult_contribution = np.tensordot(
dis_sig_vals,
np.matmul(
self._dissipator_operators, np.matmul(y, self._dissipator_operators_adj)
),
axes=(-1, -3),
)
elif self._dissipator_operators is None:
both_mult_contribution = np.sum(
np.matmul(self._static_dissipators, np.matmul(y, self._static_dissipators_adj)),
axis=-3,
)
else:
both_mult_contribution = np.sum(
np.matmul(self._static_dissipators, np.matmul(y, self._static_dissipators_adj)),
axis=-3,
) + np.tensordot(
dis_sig_vals,
np.matmul(
self._dissipator_operators, np.matmul(y, self._dissipator_operators_adj)
),
axes=(-1, -3),
)
return left_mult_contribution + right_mult_contribution + both_mult_contribution
# if just hamiltonian
elif hamiltonian_matrix is not None:
return np.dot(hamiltonian_matrix, y) - np.dot(y, hamiltonian_matrix)
else:
raise QiskitError(
"""DenseLindbladCollection with None for static_hamiltonian,
hamiltonian_operators, static_dissipators, and
dissipator_operators, cannot evaluate rhs."""
)
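# Illustrative usage (commented out; a hedged sketch, not from the original code):
# single-qubit amplitude damping with a drive on X.
# X = np.array([[0., 1.], [1., 0.]]); Z = np.array([[1., 0.], [0., -1.]])
# sm = np.array([[0., 1.], [0., 0.]]) # lowering operator
# coll = DenseLindbladCollection(static_hamiltonian=Z, hamiltonian_operators=np.array([X]),
#                                dissipator_operators=np.array([sm]))
# rho = np.array([[0., 0.], [0., 1.]]) # excited-state density matrix
# coll.evaluate_rhs(np.array([0.1]), np.array([0.05]), rho)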
class SparseLindbladCollection(DenseLindbladCollection):
"""Sparse version of DenseLindbladCollection."""
def __init__(
self,
static_hamiltonian: Optional[Union[csr_matrix, Operator]] = None,
hamiltonian_operators: Optional[Union[List[csr_matrix], List[Operator]]] = None,
static_dissipators: Optional[Union[List[csr_matrix], List[Operator]]] = None,
dissipator_operators: Optional[Union[List[csr_matrix], List[Operator]]] = None,
decimals: Optional[int] = 10,
):
r"""Initializes sparse lindblad collection.
Args:
static_hamiltonian: Constant term :math:`H_d` to be added to the Hamiltonian of the
system.
hamiltonian_operators: Specifies breakdown of Hamiltonian
as :math:`H(t) = \sum_j s(t) H_j+H_d` by specifying H_j. (k,n,n) array.
            static_dissipators: Dissipators with coefficient 1, the terms :math:`D_j` in the
                Lindblad equation. (l,n,n) array.
            dissipator_operators: the terms :math:`L_j` in the Lindblad equation. (m,n,n) array.
decimals: operator values will be rounded to ``decimals`` places after the
decimal place to avoid excess storage of near-zero values
in sparse format.
"""
self._decimals = decimals
super().__init__(
static_hamiltonian=static_hamiltonian,
hamiltonian_operators=hamiltonian_operators,
static_dissipators=static_dissipators,
dissipator_operators=dissipator_operators,
)
@property
def static_hamiltonian(self) -> csr_matrix:
return self._static_hamiltonian
@static_hamiltonian.setter
def static_hamiltonian(self, new_static_hamiltonian: Optional[csr_matrix] = None):
if new_static_hamiltonian is not None:
new_static_hamiltonian = np.round(
to_csr(new_static_hamiltonian), decimals=self._decimals
)
self._static_hamiltonian = new_static_hamiltonian
@property
def hamiltonian_operators(self) -> np.ndarray:
if self._hamiltonian_operators is None:
return None
return list(self._hamiltonian_operators)
@hamiltonian_operators.setter
def hamiltonian_operators(self, new_hamiltonian_operators: Optional[List[csr_matrix]] = None):
if new_hamiltonian_operators is not None:
new_hamiltonian_operators = to_csr(new_hamiltonian_operators)
new_hamiltonian_operators = [
np.round(op, decimals=self._decimals) for op in new_hamiltonian_operators
]
new_hamiltonian_operators = np.array(new_hamiltonian_operators, dtype="O")
self._hamiltonian_operators = new_hamiltonian_operators
@property
def static_dissipators(self) -> Union[None, csr_matrix]:
if self._static_dissipators is None:
return None
return list(self._static_dissipators)
@static_dissipators.setter
def static_dissipators(self, new_static_dissipators: Optional[List[csr_matrix]] = None):
"""Set up the dissipators themselves, as well as their adjoints, and the product of
adjoint with operator.
"""
self._static_dissipators = None
if new_static_dissipators is not None:
# setup new dissipators
new_static_dissipators = to_csr(new_static_dissipators)
new_static_dissipators = [
np.round(op, decimals=self._decimals) for op in new_static_dissipators
]
# setup adjoints
static_dissipators_adj = [op.conj().transpose() for op in new_static_dissipators]
# wrap in object arrays
new_static_dissipators = np.array(new_static_dissipators, dtype="O")
static_dissipators_adj = np.array(static_dissipators_adj, dtype="O")
# pre-compute products
static_dissipators_product_sum = -0.5 * np.sum(
static_dissipators_adj * new_static_dissipators, axis=0
)
self._static_dissipators = new_static_dissipators
self._static_dissipators_adj = static_dissipators_adj
self._static_dissipators_product_sum = static_dissipators_product_sum
@property
def dissipator_operators(self) -> Union[None, List[csr_matrix]]:
if self._dissipator_operators is None:
return None
return list(self._dissipator_operators)
@dissipator_operators.setter
def dissipator_operators(self, new_dissipator_operators: Optional[List[csr_matrix]] = None):
"""Set up the dissipators themselves, as well as their adjoints, and the product of
adjoint with operator.
"""
self._dissipator_operators = None
if new_dissipator_operators is not None:
# setup new dissipators
new_dissipator_operators = to_csr(new_dissipator_operators)
new_dissipator_operators = [
np.round(op, decimals=self._decimals) for op in new_dissipator_operators
]
# setup adjoints
dissipator_operators_adj = [op.conj().transpose() for op in new_dissipator_operators]
# wrap in object arrays
new_dissipator_operators = np.array(new_dissipator_operators, dtype="O")
dissipator_operators_adj = np.array(dissipator_operators_adj, dtype="O")
            # pre-compute products
dissipator_products = dissipator_operators_adj * new_dissipator_operators
self._dissipator_operators = new_dissipator_operators
self._dissipator_operators_adj = dissipator_operators_adj
self._dissipator_products = dissipator_products
def evaluate_hamiltonian(self, ham_sig_vals: Union[None, Array]) -> csr_matrix:
r"""Compute the Hamiltonian.
Args:
ham_sig_vals: [Real] values of :math:`s_j` in :math:`H = \sum_j s_j(t) H_j + H_d`.
Returns:
Hamiltonian matrix.
Raises:
QiskitError: If collection not sufficiently specified.
"""
if self._static_hamiltonian is not None and self._hamiltonian_operators is not None:
return (
np.sum(ham_sig_vals * self._hamiltonian_operators, axis=-1)
+ self.static_hamiltonian
)
elif self._static_hamiltonian is None and self._hamiltonian_operators is not None:
return np.sum(ham_sig_vals * self._hamiltonian_operators, axis=-1)
elif self._static_hamiltonian is not None:
return self._static_hamiltonian
else:
raise QiskitError(
self.__class__.__name__
+ """ with None for both static_hamiltonian and
hamiltonian_operators cannot evaluate Hamiltonian."""
)
def evaluate_rhs(
self, ham_sig_vals: Union[None, Array], dis_sig_vals: Union[None, Array], y: Array
) -> Array:
r"""Evaluates the RHS of the LindbladModel for a given list of signal values.
Args:
ham_sig_vals: stores Hamiltonian signal values :math:`s_j(t)`.
dis_sig_vals: stores dissipator signal values :math:`\gamma_j(t)`.
                Pass None if no dissipator operators are involved.
y: density matrix of system. (k,n,n) Array.
Returns:
RHS of Lindbladian.
Raises:
QiskitError: If RHS cannot be evaluated due to insufficient collection data.
Calculation details:
            * the ``*`` operator for csr_matrices is equivalent to matrix multiplication.
We use numpy array broadcasting rules, combined with the above
fact, to achieve speeds that are substantially faster than a for loop.
First, in the case of a single (n,n) density matrix, we package the entire
array as a single-element array whose entry is the array. In the case of
multiple density matrices a (k,n,n) Array, we package everything as a
(k,1) Array whose [j,0] entry is the [j,:,:] density matrix.
In calculating the left- and right-mult contributions, we package
H+L and H-L as (1) object arrays whose single entry stores the relevant
sparse matrix. We can then multiply our packaged density matrix and
[H\pm L]. Using numpy broadcasting rules, [H\pm L] will be broadcast
to a (k,1) Array for elementwise multiplication with our packaged density
matrices. After this, elementwise multiplication is applied. This in turn
references each object's __mul__ function, which–for our csr_matrix components
means matrix multiplication.
In calculating the left-right-multiplication part, we use our (m)-shape
object arrays holding the dissipator operators to perform multiplication.
We can take an elementwise product with our packaged density matrix, at which
point our dissipator operators are broadcast as (m) -> (1,m) -> (k,m) shaped,
and our packaged density matrix as (k,1) -> (k,m). Elementwise multiplication
is then applied, which is interpreted as matrix multiplication. This yields
        an array where entry [i,j] is an object storing the result of s_j L_j \rho_i L_j^\dagger.
We can then sum over j and unpackage our object array to get our desired result.
"""
hamiltonian_matrix = None
if self._static_hamiltonian is not None or self._hamiltonian_operators is not None:
hamiltonian_matrix = -1j * self.evaluate_hamiltonian(ham_sig_vals) # B matrix
# package (n,n) Arrays as (1)
# Arrays of dtype object, or (k,n,n) Arrays as (k,1) Arrays of dtype object
y = package_density_matrices(y)
        # if dissipators are present (handles both the case with and without a Hamiltonian)
if self._dissipator_operators is not None or self._static_dissipators is not None:
# A matrix
if self._static_dissipators is None:
dissipators_matrix = np.sum(
-0.5 * dis_sig_vals * self._dissipator_products, axis=-1
)
elif self._dissipator_operators is None:
dissipators_matrix = self._static_dissipators_product_sum
else:
dissipators_matrix = self._static_dissipators_product_sum + np.sum(
-0.5 * dis_sig_vals * self._dissipator_products, axis=-1
)
if hamiltonian_matrix is not None:
left_mult_contribution = np.squeeze([hamiltonian_matrix + dissipators_matrix] * y)
right_mult_contribution = np.squeeze(y * [dissipators_matrix - hamiltonian_matrix])
else:
left_mult_contribution = np.squeeze([dissipators_matrix] * y)
                right_mult_contribution = np.squeeze(y * [dissipators_matrix])
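# --- Illustrative sketch (added for clarity, not part of the library): the object-array
# packaging trick described in the evaluate_rhs docstring above. Wrapping csr_matrices in
# numpy object arrays makes elementwise "*" dispatch to csr_matrix.__mul__ (matrix
# multiplication), and numpy broadcasting applies it across a batch of density matrices.
# Sizes and values below are arbitrary example choices.
def _sketch_object_array_broadcasting():
    from scipy.sparse import csr_matrix
    k, n = 3, 2
    rhos = np.empty((k, 1), dtype="O")               # (k,1) object array of density matrices
    for i in range(k):
        rhos[i, 0] = csr_matrix(np.eye(n) * (i + 1.0))
    op = np.empty((1,), dtype="O")                   # single operator wrapped as a (1,) object array
    op[0] = csr_matrix([[0.0, 1.0], [0.0, 0.0]])
    out = op * rhos                                  # broadcasts to (k,1); each entry is op[0] @ rhos[i,0]
    np.testing.assert_allclose(out[2, 0].toarray(), (op[0] @ rhos[2, 0]).toarray())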
# Tools for analyzing data from microSWIFTs
# Import modules
import numpy as np
import matplotlib.pyplot as plt
import netCDF4 as nc
import cftime
import datetime
def missionMap(mission_num, mission_dir_path, mission_nc_path):
'''
@edwinrainville
'''
# Load in netCDF file as a dataset
mission_dataset = nc.Dataset(mission_nc_path, mode='r')
# Get list of all microSWIFTs on the mission
microSWIFTs_on_mission = list(mission_dataset.groups.keys())
# Create map of all drift tracks during mission
fig, ax = plt.subplots(figsize=(8,6))
ax.set_xlabel('FRF X Location [meters]')
ax.set_ylabel('FRF Y Location [meters]')
# Add the FRF Bathymetry to the map
# Data from September 28th, 2021
bathy_url = 'https://chlthredds.erdc.dren.mil/thredds/dodsC/frf/geomorphology/DEMs/surveyDEM/data/FRF_geomorphology_DEMs_surveyDEM_20210928.nc'
bathy_dataset = nc.Dataset(bathy_url)
# Create grid from coordinates
xFRF_grid, yFRF_grid = np.meshgrid(bathy_dataset['xFRF'][:],bathy_dataset['yFRF'][:])
bathy = bathy_dataset['elevation'][0,:,:]
ax.contourf(xFRF_grid, yFRF_grid, bathy, cmap='gray')
    # Find initial time label bounds from the first microSWIFT with GPS data
initial_values_set = False
while initial_values_set == False:
for microSWIFT in microSWIFTs_on_mission:
if 'GPS' in list(mission_dataset[microSWIFT].groups.keys()):
if 'time' in list(mission_dataset[microSWIFT]['GPS'].variables):
# Set initial time labels
min_time_label = mission_dataset[microSWIFT]['GPS']['time'][0]
max_time_label = mission_dataset[microSWIFT]['GPS']['time'][-1]
# Sort x and y locations for map
x, y = transform2FRF(mission_dataset[microSWIFT]['GPS']['lat'], mission_dataset[microSWIFT]['GPS']['lon'])
                    min_x = np.min(x)
import pandas as pd
sample = pd.read_csv('/home/vitorbootz/research/aux_files/galaxy_list.csv')
sdss = pd.read_csv('/home/vitorbootz/research/flux_measurements/sdss_flux_lines.csv')
main = pd.read_csv('/home/vitorbootz/research/results_starlight/STARLIGHT_MAIN_RESULTS.csv', sep=',')
cut = pd.read_csv('/home/vitorbootz/research/results_starlight/CUT_STARLIGHT_MAIN_RESULTS.csv', sep=',')
pd.set_option('max_columns', None)
pd.set_option('max_rows', None)
sample['lgm_tot_p50'] = -99.9
for i in range(len(sample)):
for j in range(len(sdss)):
if round(sample.ra[i], 2) == round(sdss.ra[j], 2):
sample.lgm_tot_p50[i] = sdss.lgm_tot_p50[j]
selection_gem = (sample['onoff'] == 1) & (sample['flag_lcg'] == 1) & (sample['extension'] != 1) & (sample['flag_sdss'] == 0) & (sample['extension'] < 100)
selection_sdss = (sample['onoff'] == 1) & (sample['flag_lcg'] == 1) & (sample['extension'] != 1) & (sample['flag_sdss'] == 1)
sample_gem = sample[selection_gem]
sample_sdss = sample[selection_sdss]
sample_gem.index = range(len(sample_gem))
sample_sdss.index = range(len(sample_sdss))
mass_star_gem = pd.DataFrame([])
mass_star_sdss = pd.DataFrame([])
mass_star_cut = pd.DataFrame([])
lgm_tot_p50 = pd.DataFrame([])
for i in range(len(sample_gem)):
selection_gem = (main['lcgID'] == sample_gem.lcgID[i]) & (main['extension'] == sample_gem.extension[i])
selection_sdss = (main['lcgID'] == sample_sdss.lcgID[i]) & (main['extension'] == sample_sdss.extension[i])
selection_cut = (cut['lcgID'] == sample_gem.lcgID[i]) & (cut['extension'] == sample_gem.extension[i]+1000)
mass_star_gem = mass_star_gem.append(main[selection_gem])
mass_star_sdss = mass_star_sdss.append(main[selection_sdss])
mass_star_cut = mass_star_cut.append(cut[selection_cut])
mass_star_gem.index = range(len(mass_star_gem))
mass_star_sdss.index = range(len(mass_star_sdss))
mass_star_cut.index = range(len(mass_star_cut))
mass_comparsion = sample_sdss[['lcgID', 'logMstar_lcg_sdss', 'lgm_tot_p50']].drop_duplicates()
mass_comparsion.index = range(len(mass_comparsion))
mass_comparsion = pd.concat([mass_comparsion,round(mass_star_gem.Mcor_log_Mo, 2), round(mass_star_sdss.Mcor_log_Mo, 2), round(mass_star_cut.Mcor_log_Mo, 2)], axis=1)
mass_comparsion.columns = ['lcgID', 'lgm_Izotov', 'lgm_tot_p50_SDSS', 'lgm_starlight_gem', 'lgm_starlight_sdss', 'lgm_starlight_sdss_cut']
############
### PLOT ###
############
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
pp = PdfPages('/home/vitorbootz/research/TCC_images/comparacao_massas/mass_comparsion.pdf')
labels = mass_comparsion.lcgID
x = np.arange(len(labels)) # the label locations
width = 0.15 # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.bar(x - 2*width, mass_comparsion.lgm_tot_p50_SDSS, width, label='lgm_tot_p50 SDSS')
rects2 = ax.bar(x - 1*width, mass_comparsion.lgm_Izotov, width, label='Izotov et al.')
rects3 = ax.bar(x, mass_comparsion.lgm_starlight_gem, width, label='STARLIGHT Gemini')
rects4 = ax.bar(x + 1*width, mass_comparsion.lgm_starlight_sdss, width, label='STARLIGHT SDSS')
rects5 = ax.bar(x + 2*width, mass_comparsion.lgm_starlight_sdss_cut, width, label='STARLIGHT SDSS-Gemini')
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('log(M$\star/$M$\odot$)', size=12)
ax.set_xlabel('LCG ID', size=12)
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.set_ylim(7,10.5)
ax.legend(loc='lower right', shadow=True, fontsize='x-small')
def autolabel(rects):
    """Attach a text label above each bar in *rects*, displaying its height."""
    for rect in rects:
        height = rect.get_height()
        ax.annotate('{:.2f}'.format(height), xy=(rect.get_x() + rect.get_width() / 2, height),
                    xytext=(0, 3), textcoords='offset points', ha='center', va='bottom')
autolabel(rects1)
autolabel(rects2)
plt.grid(alpha=0.3, color='black')
ax.set_axisbelow(True)
plt.yticks(np.arange(7, 11, 0.5))
# Third-party imports
import numpy as np
import pandas as pd
import pytest
# First-party imports
from gluonts.core.serde import dump_code, load_code
from gluonts.dataset.common import (
BasicFeatureInfo,
CategoricalFeatureInfo,
MetaData,
)
from gluonts.dataset.artificial import RecipeDataset
from gluonts.dataset.artificial.recipe import (
Add,
BinaryMarkovChain,
Constant,
ConstantVec,
Debug,
Expr,
ForEachCat,
Lag,
LinearTrend,
Mul,
NanWhere,
NanWhereNot,
RandomBinary,
RandomCat,
RandomGaussian,
RandomSymmetricDirichlet,
SmoothSeasonality,
Stack,
evaluate_recipe,
generate,
take_as_list,
)
BASE_RECIPE = [("foo", ConstantVec(1.0)), ("cat", RandomCat([10]))]
@pytest.mark.parametrize(
"func",
[
Debug(),
RandomGaussian(),
RandomBinary(),
RandomSymmetricDirichlet(),
BinaryMarkovChain(0.1, 0.1),
Constant(1),
LinearTrend(),
RandomCat([10]),
Lag("foo", 1),
ForEachCat(RandomGaussian()),
Expr("np.random.rand(length)"),
SmoothSeasonality(Constant(12), Constant(0)),
Add(["foo", "foo"]),
Mul(["foo", "foo"]),
NanWhere("foo", "foo"),
NanWhereNot("foo", "foo"),
Stack(["foo", "foo"]),
RandomGaussian() + RandomGaussian(),
RandomGaussian() * RandomGaussian(),
RandomGaussian() / RandomGaussian(),
],
)
def test_call_and_repr(func) -> None:
global_state = {}
x = evaluate_recipe(BASE_RECIPE, length=10, global_state=global_state)
kwargs = dict(foo=42, bar=23)
np.random.seed(0)
ret = func(
x,
field_name="bar",
length=10,
global_state=global_state.copy(),
**kwargs,
)
func_reconstructed = load_code(dump_code(func))
np.random.seed(0)
ret2 = func_reconstructed(
x,
field_name="foo",
length=10,
global_state=global_state.copy(),
**kwargs,
)
    np.testing.assert_allclose(ret2, ret)
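# --- Illustrative sketch (added for clarity, not a test from the original suite):
# evaluate_recipe, called the same way as in test_call_and_repr above, returns a dict
# keyed by the recipe field names. The length and assertions below are example choices.
def _sketch_evaluate_base_recipe():
    out = evaluate_recipe(BASE_RECIPE, length=10, global_state={})
    assert "foo" in out and "cat" in out
    assert len(out["foo"]) == 10      # ConstantVec(1.0) produces a constant length-10 vector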
import logging
import math
import sys
import unittest
import numpy as np
import tensorflow.keras as keras
import lottery_ticket_pruner
from lottery_ticket_pruner.lottery_ticket_pruner import _prune_func_smallest_weights, \
_prune_func_smallest_weights_global, _prune_func_large_final
TEST_NUM_CLASSES = 3
TEST_DENSE_INPUT_DIMS = (32, )
TEST_DENSE_LAYER_INPUTS = np.prod(TEST_DENSE_INPUT_DIMS)
TEST_DENSE_WEIGHT_COUNT = TEST_DENSE_LAYER_INPUTS * TEST_NUM_CLASSES
TEST_DNN_INPUT_DIMS = (64, 64, 3)
TEST_DNN_NUM_CLASSES = 10
def enable_debug_logging():
logger = logging.getLogger('lottery_ticket_pruner')
logger.setLevel('DEBUG')
logger.addHandler(logging.StreamHandler(sys.stdout))
# enable_debug_logging()
class TestLotteryTicketPruner(unittest.TestCase):
def _create_test_model(self):
input = keras.Input(shape=TEST_DENSE_INPUT_DIMS, dtype='float32')
x = keras.layers.Dense(TEST_NUM_CLASSES)(input)
model = keras.Model(inputs=input, outputs=x)
return model
def _create_test_model_diff_shape(self, diff_input_shape=False, diff_output_shape=False):
input_dims = (64, ) if diff_input_shape else TEST_DENSE_INPUT_DIMS
output_dims = (TEST_NUM_CLASSES + 1) if diff_output_shape else TEST_NUM_CLASSES
input = keras.Input(shape=input_dims, dtype='float32')
x = keras.layers.Dense(output_dims)(input)
model = keras.Model(inputs=input, outputs=x)
return model
def _create_test_mode_extra_layer(self):
input = keras.Input(shape=TEST_DENSE_INPUT_DIMS, dtype='float32')
x = keras.layers.Dense(TEST_NUM_CLASSES)(input)
x = keras.layers.Softmax()(x)
model = keras.Model(inputs=input, outputs=x)
return model
def _create_test_dnn_model(self):
input = keras.Input(shape=TEST_DNN_INPUT_DIMS, dtype='float32')
x = keras.layers.Conv2D(4,
kernel_size=3,
strides=(2, 2),
padding='valid',
use_bias=True,
name='Conv1')(input)
x = keras.layers.BatchNormalization(axis=1,
epsilon=1e-3,
momentum=0.999,
name='bn_Conv1')(x)
x = keras.layers.ReLU(6., name='Conv1_relu')(x)
x = keras.layers.Conv2D(3,
kernel_size=1,
padding='same',
use_bias=False,
activation=None,
name='Conv2')(x)
x = keras.layers.BatchNormalization(axis=1,
epsilon=1e-3,
momentum=0.999,
name='bn_Conv2')(x)
x = keras.layers.ReLU(6., name='Conv2_relu')(x)
x = keras.layers.GlobalAveragePooling2D()(x)
x = keras.layers.Dense(TEST_DNN_NUM_CLASSES, activation='softmax',
use_bias=True, name='Logits')(x)
model = keras.Model(inputs=input, outputs=x)
return model
def _get_test_dnn_training_data(self):
num_samples = 10
X = np.random.random((num_samples,) + TEST_DNN_INPUT_DIMS)
y = np.random.choice([0, 1], num_samples, replace=True)
y = keras.utils.to_categorical(y, num_classes=TEST_DNN_NUM_CLASSES)
return X, y
def _summed_model_weights(self, model):
weights_sum = 0.0
for layer in model.layers:
weights = layer.get_weights()
weights_sum += sum(np.sum(w) for w in weights)
return weights_sum
#
# _prune_func_smallest_weights()
#
def test_prune_func_smallest_weights(self):
actual_mask = _prune_func_smallest_weights(np.array([]), None, np.array([1, 2, 3, 4], dtype=float),
np.array([1, 1, 1, 1]), prune_percentage=0.25)
self.assertTrue(np.array_equal([0, 1, 1, 1], actual_mask))
# Just changed order of weights
actual_mask = _prune_func_smallest_weights(np.array([]), None, np.array([3, 1, 2, 4], dtype=float),
np.array([1, 1, 1, 1]), prune_percentage=0.5)
self.assertTrue(np.array_equal([1, 0, 0, 1], actual_mask))
# Odd number of weights
actual_mask = _prune_func_smallest_weights(np.array([]), None, np.array([5, 3, 1, 2, 4], dtype=float),
np.array([1, 1, 1, 1, 1]), prune_percentage=0.5)
self.assertTrue(np.array_equal([1, 1, 0, 0, 1], actual_mask))
# Current mask masks out one of the lowest weights
actual_mask = _prune_func_smallest_weights(np.array([]), None, np.array([1, 2, 3, 4, 5], dtype=float),
np.array([0, 1, 1, 1, 1]), prune_percentage=0.25)
self.assertTrue(np.array_equal([0, 0, 1, 1, 1], actual_mask))
# Current mask masks out one of the lowest weights
actual_mask = _prune_func_smallest_weights(np.array([]), None, np.array([1, 2, 3, 4], dtype=float),
np.array([0, 1, 1, 0]), prune_percentage=0.25)
self.assertTrue(np.array_equal([0, 0, 1, 0], actual_mask))
# Some negative and some positive weights should be masked
actual_mask = _prune_func_smallest_weights(np.array([]), None, np.array([-1, 2, -3, 4], dtype=float),
np.array([1, 1, 1, 1]), prune_percentage=0.5)
self.assertTrue(np.array_equal([0, 0, 1, 1], actual_mask))
# Many identical values but only some of them should get masked
actual_mask = _prune_func_smallest_weights(np.array([]), None, np.array([1, 1, 1, 1, 2, 2], dtype=float),
np.array([1, 1, 1, 1, 1, 1]), prune_percentage=0.5)
self.assertEqual(3, np.sum(actual_mask))
# Many identical absolute values but only some of them should get masked
actual_mask = _prune_func_smallest_weights(np.array([]), None, np.array([1, -1, -1, 1, 2, -2], dtype=float),
np.array([1, 1, 1, 1, 1, 1]), prune_percentage=0.5)
self.assertEqual(3, np.sum(actual_mask))
#
# _prune_func_smallest_weights_global()
#
def test_prune_func_smallest_weights_global_negative(self):
model = self._create_test_model()
pruner = lottery_ticket_pruner.LotteryTicketPruner(model)
# Both percentage and count are unspecified
with self.assertRaises(ValueError) as ex:
_ = _prune_func_smallest_weights_global(None, None, prune_percentage=None, prune_count=None)
self.assertIn('prune_percentage', str(ex.exception))
self.assertIn('prune_count', str(ex.exception))
# Prune percentage is zero
with unittest.mock.patch('logging.Logger.warning') as warning:
_ = _prune_func_smallest_weights_global(pruner.iterate_prunables(model), None, prune_percentage=0.0,
prune_count=None)
self.assertEqual(1, warning.call_count)
# Prune count is zero
with unittest.mock.patch('logging.Logger.warning') as warning:
_ = _prune_func_smallest_weights_global(pruner.iterate_prunables(model), None, prune_percentage=None,
prune_count=0)
self.assertEqual(1, warning.call_count)
#
# _prune_func_large_final()
#
def test_prune_func_large_final_negative(self):
# Both percentage and count are unspecified
with self.assertRaises(ValueError) as ex:
_ = _prune_func_large_final(None, None, prune_percentage=None, prune_count=None)
self.assertIn('prune_percentage', str(ex.exception))
self.assertIn('prune_count', str(ex.exception))
#
# constructor
#
def test_constructor(self):
model1 = self._create_test_model()
pruner = lottery_ticket_pruner.LotteryTicketPruner(model1)
# Disabled since there are legit cases where the two models may different. E.g when using transfer learning
# one may choose to replace, say, a single head layer in the original model with 2 or more layers in the new
# model.
# # Different number of layers
# model2 = self._create_test_mode_extra_layer()
# with self.assertRaises(ValueError) as ex:
# pruner.calc_prune_mask(model2, 0.2, 'smallest_weights')
# self.assertIn('must have the same number of layers', str(ex.exception))
# Different shapes
model2 = self._create_test_model_diff_shape(diff_input_shape=True)
with self.assertRaises(ValueError) as ex:
pruner.apply_pruning(model2)
self.assertIn('must have the same input shape', str(ex.exception))
model2 = self._create_test_model_diff_shape(diff_output_shape=True)
with self.assertRaises(ValueError) as ex:
pruner.calc_prune_mask(model2, 0.2, 'smallest_weights')
self.assertIn('must have the same output shape', str(ex.exception))
#
# reset_masks()
#
def test_reset_masks(self):
model = self._create_test_model()
pruner = lottery_ticket_pruner.LotteryTicketPruner(model)
interesting_layer_index = 1
interesting_weights_index = 0
tpl = tuple([interesting_layer_index, tuple([interesting_weights_index])])
original_mask = np.array(pruner.prune_masks_map[tpl][interesting_weights_index])
self.assertEqual(TEST_DENSE_WEIGHT_COUNT, np.sum(original_mask))
# Prune and make sure prune mask has changed
pruner.calc_prune_mask(model, 0.2, 'smallest_weights')
pruned_mask = pruner.prune_masks_map[tpl][interesting_weights_index]
num_pruned = np.sum(pruned_mask)
self.assertLess(num_pruned, TEST_DENSE_WEIGHT_COUNT)
# Now reset
pruner.reset_masks()
reset_mask = np.array(pruner.prune_masks_map[tpl][interesting_weights_index])
self.assertEqual(TEST_DENSE_WEIGHT_COUNT, np.sum(reset_mask))
#
# apply_dwr()
#
def test_apply_dwr(self):
model = self._create_test_model()
pruner = lottery_ticket_pruner.LotteryTicketPruner(model)
interesting_layer_index = 1
interesting_weights_index = 0
tpl = (interesting_layer_index, (interesting_weights_index, ))
interesting_layer = model.layers[interesting_layer_index]
# Assign the weights values between 0..N, with 1/2 the weights being negative
weights = interesting_layer.get_weights()
interesting_weights = weights[interesting_weights_index]
num_interesting_layer_weights = np.prod(interesting_weights.shape)
test_weights = np.array(np.random.choice(range(num_interesting_layer_weights),
size=num_interesting_layer_weights, replace=False))
test_weights = test_weights.reshape(interesting_weights.shape)
weights[interesting_weights_index] = test_weights
interesting_layer.set_weights(weights)
prune_rate1 = 0.5
pruner.calc_prune_mask(model, prune_rate1, 'smallest_weights')
pruner.apply_pruning(model)
pruner.apply_dwr(model)
# Mask out any pruned weights
pruned_weights = interesting_layer.get_weights()[interesting_weights_index]
expected_test_weights = test_weights * pruner.prune_masks_map[tpl][interesting_weights_index]
# We expect DWR to have increased the value of unmasked weight by a factor of 2.0 (1.0 / 0.5 = 2.0)
expected_test_weights *= (1.0 / prune_rate1)
np.testing.assert_array_equal(expected_test_weights, pruned_weights)
# Prune again to make sure we accumulate the DWR multiplier as expected
weights[interesting_weights_index] = test_weights
interesting_layer.set_weights(weights)
prune_rate2 = 0.2
pruner.calc_prune_mask(model, prune_rate2, 'smallest_weights')
pruner.apply_pruning(model)
pruner.apply_dwr(model)
# Mask out any pruned weights
pruned_weights = interesting_layer.get_weights()[interesting_weights_index]
expected_test_weights = test_weights * pruner.prune_masks_map[tpl][interesting_weights_index]
# We expect DWR to have increased the value of unmasked weight by a factor of 2.5
# (1.0 / ((1.0 - 0.5) * 0.2) = 2.5)
# But since there is rounding due to counting the number of 1s in the prune mask (an int) the rescaling factor
# is not quite exactly 2.5
num_first_prune_ones = int(num_interesting_layer_weights * prune_rate1)
denominator = (num_interesting_layer_weights - (num_first_prune_ones + int(num_first_prune_ones * prune_rate2)))
rescale_factor = num_interesting_layer_weights / denominator
expected_test_weights *= rescale_factor
np.testing.assert_array_almost_equal(expected_test_weights, pruned_weights, decimal=3)
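    def _sketch_pruning_workflow(self):
        """Illustrative sketch (added for clarity, not one of the original tests): the
        typical prune workflow exercised throughout this file, using only API calls that
        already appear above. The prune rate is an arbitrary example value.
        """
        model = self._create_test_model()
        pruner = lottery_ticket_pruner.LotteryTicketPruner(model)
        pruner.calc_prune_mask(model, 0.2, 'smallest_weights')  # decide which weights to zero
        pruner.apply_pruning(model)                             # zero them in the model
        pruner.apply_dwr(model)                                 # optionally rescale surviving weights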
#
# calc_prune_mask()
# 'smallest_weights'
#
def test_smallest_weights(self):
model = self._create_test_model()
# First layer is the input layer; ignore it
# Second layer is Dense layer with 2 weights. First is fully connected weights. Second is output weights.
interesting_layer_index = 1
interesting_layer = model.layers[interesting_layer_index]
interesting_layer_shape = interesting_layer.weights[0].shape
interesting_layer_weight_count = int(np.prod(interesting_layer_shape))
interesting_key = tuple([interesting_layer_index, tuple([0])])
dl_test_weights = np.random.choice(TEST_DENSE_LAYER_INPUTS * TEST_NUM_CLASSES,
size=TEST_DENSE_LAYER_INPUTS * TEST_NUM_CLASSES, replace=False)
# Get rid of zero weights since we count those below during verification
dl_test_weights += 1
dl_test_weights = dl_test_weights.reshape(interesting_layer_shape)
interesting_layer.set_weights([dl_test_weights, interesting_layer.get_weights()[1]])
pruner = lottery_ticket_pruner.LotteryTicketPruner(model)
pruner.calc_prune_mask(model, 0.5, 'smallest_weights')
num_masked = np.sum(pruner.prune_masks_map[interesting_key][0] == 0)
self.assertEqual(num_masked, int(TEST_DENSE_LAYER_INPUTS * TEST_NUM_CLASSES * 0.5))
pruner.apply_pruning(model)
actual_weights = interesting_layer.get_weights()
actual_weights[0][actual_weights[0] == 0.0] = math.inf
min_weight = np.min(actual_weights[0])
self.assertGreaterEqual(min_weight, int(interesting_layer_weight_count * 0.5))
pruner.calc_prune_mask(model, 0.2, 'smallest_weights')
num_masked = np.sum(pruner.prune_masks_map[interesting_key][0] == 0)
self.assertEqual(num_masked, int(TEST_DENSE_LAYER_INPUTS * TEST_NUM_CLASSES * 0.6))
pruner.apply_pruning(model)
actual_weights = interesting_layer.get_weights()
actual_weights[0][actual_weights[0] == 0.0] = math.inf
min_weight = np.min(actual_weights[0])
self.assertGreaterEqual(min_weight, int(interesting_layer_weight_count * 0.6))
def test_smallest_weights_2(self):
model = self._create_test_model()
# First layer is the input layer; ignore it
# Second layer is Dense layer with 2 weights. First is fully connected weights. Second is output weights.
interesting_layer = model.layers[1]
interesting_layer_shape = interesting_layer.weights[0].shape
dl_test_weights = np.random.choice(TEST_DENSE_LAYER_INPUTS * TEST_NUM_CLASSES,
size=TEST_DENSE_LAYER_INPUTS * TEST_NUM_CLASSES, replace=False)
# Make some weights negative
dl_test_weights -= TEST_DENSE_LAYER_INPUTS * TEST_NUM_CLASSES // 2
dl_test_weights = dl_test_weights.reshape(interesting_layer_shape)
interesting_layer.set_weights([dl_test_weights, interesting_layer.get_weights()[1]])
pruner = lottery_ticket_pruner.LotteryTicketPruner(model)
prune_rate = 0.5
pruner.calc_prune_mask(model, prune_rate, 'smallest_weights')
pruner.apply_pruning(model)
actual_weights = interesting_layer.get_weights()
min_expected_pos = TEST_DENSE_LAYER_INPUTS * TEST_NUM_CLASSES * prune_rate // 2 - 1
max_expected_neg = -TEST_DENSE_LAYER_INPUTS * TEST_NUM_CLASSES * prune_rate // 2 + 1
unpruned_pos = np.sum(actual_weights[0] >= min_expected_pos)
unpruned_neg = np.sum(actual_weights[0] <= max_expected_neg)
unpruned = unpruned_pos + unpruned_neg
self.assertIn(unpruned, [int(TEST_DENSE_LAYER_INPUTS * TEST_NUM_CLASSES * prune_rate),
int(TEST_DENSE_LAYER_INPUTS * TEST_NUM_CLASSES * prune_rate) - 1])
expected_to_be_pruned = TEST_DENSE_LAYER_INPUTS * TEST_NUM_CLASSES - unpruned - 1
self.assertLessEqual(abs(int(TEST_DENSE_LAYER_INPUTS * TEST_NUM_CLASSES * prune_rate) - expected_to_be_pruned),
1)
# Prune again
prune_rate2 = 0.1
expected_to_be_pruned2 = int(TEST_DENSE_LAYER_INPUTS * TEST_NUM_CLASSES * prune_rate2 * (1.0 - prune_rate))
pruner.calc_prune_mask(model, prune_rate2, 'smallest_weights')
pruner.apply_pruning(model)
actual_weights = interesting_layer.get_weights()
min_expected_pos = expected_to_be_pruned2 // 2 - 1
max_expected_neg = -expected_to_be_pruned2 // 2 + 1
unpruned_pos = np.sum(actual_weights[0] >= min_expected_pos)
unpruned_neg = np.sum(actual_weights[0] <= max_expected_neg)
unpruned = unpruned_pos + unpruned_neg
expected_unpruned = TEST_DENSE_LAYER_INPUTS * TEST_NUM_CLASSES - expected_to_be_pruned - expected_to_be_pruned2
self.assertLessEqual(abs(expected_unpruned - unpruned), 1)
def test_smallest_weights_similar_weights(self):
""" Tests case where many or all weights are same value. Hence we might be tempted to mask on all of the
smallest weights rather than honoring only up to the prune rate
"""
model = self._create_test_model()
# First layer is the input layer; ignore it
# Second layer is Dense layer with 2 weights. First is fully connected weights. Second is output weights.
interesting_layer = model.layers[1]
interesting_layer_shape = interesting_layer.weights[0].shape
# Make all weights the same
dl_test_weights = np.ones([TEST_DENSE_LAYER_INPUTS, TEST_NUM_CLASSES], dtype=int)
# Make some weights negative
dl_test_weights = dl_test_weights.reshape(interesting_layer_shape)
interesting_layer.set_weights([dl_test_weights, interesting_layer.get_weights()[1]])
pruner = lottery_ticket_pruner.LotteryTicketPruner(model)
prune_rate = 0.5
pruner.calc_prune_mask(model, prune_rate, 'smallest_weights')
pruner.apply_pruning(model)
actual_weights = interesting_layer.get_weights()
expected = int(TEST_DENSE_LAYER_INPUTS * TEST_NUM_CLASSES * prune_rate)
actual = np.sum(actual_weights[0])
self.assertEqual(expected, actual)
#
# calc_prune_mask()
# 'large_final'
#
def test_prune_func_large_final(self):
""" Tests case where many or all weights are same value. Hence we might be tempted to mask on all of the
smallest weights rather than honoring only up to the prune rate
"""
model = self._create_test_dnn_model()
interesting_layer = model.layers[1]
interesting_weights_index = 0
pruner = lottery_ticket_pruner.LotteryTicketPruner(model)
# Assign the weights values between 0..N, with 1/2 the weights being negative
weights = interesting_layer.get_weights()
interesting_weights = weights[interesting_weights_index]
num_interesting_layer_weights = np.prod(interesting_weights.shape)
new_weights = np.array(np.random.choice(range(num_interesting_layer_weights),
size=num_interesting_layer_weights, replace=False))
rand_multiplier = np.random.choice([1, -1], size=num_interesting_layer_weights, replace=True)
new_weights *= rand_multiplier
new_weights = new_weights.reshape(interesting_weights.shape)
weights[interesting_weights_index] = new_weights
interesting_layer.set_weights(weights)
pruner.set_pretrained_weights(model)
# Now verify that the absolute value of all unpruned weights are as large or larger than the smallest expected
# non-zero weight
prune_rate = 0.2
pruner.calc_prune_mask(model, prune_rate, 'large_final')
pruner.apply_pruning(model)
weights = interesting_layer.get_weights()
pruned_weights = weights[interesting_weights_index]
pruned_weights = np.abs(pruned_weights)
num_zero = np.sum(pruned_weights == 0.0)
self.assertEqual(int(num_interesting_layer_weights * prune_rate), num_zero)
        expected_non_zero_min = int(np.prod(pruned_weights.shape) * prune_rate)
from skimage import io
import os
import numpy as np
DEBUG = 0
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
if DEBUG:
print(BASE_DIR)
PICS_DIR = os.path.join(BASE_DIR,"..\\pics\\test_match")
if DEBUG:
print(PICS_DIR)
GREY = [247, 247, 247]
GREEN = [148, 211, 77]
WHITE = [255, 255, 255]
vertex_top = 1233
vertex_left = 174
box_width_all = 735
box_height_all = 112
start_top = 1257
start_left = 352
box_width = int(735 / 2)
box_height = int(112 * 2/3)
interval_height = int((1738 - 1233) / 3)
question_pos = [1054, 1215, 59, 1000]
def crop_answer(whole_img):
answer_1 = whole_img[start_top+interval_height*0:start_top+box_height+interval_height*0, start_left:start_left+box_width, 0:3]
answer_2 = whole_img[start_top+interval_height*1:start_top+box_height+interval_height*1, start_left:start_left+box_width, 0:3]
answer_3 = whole_img[start_top+interval_height*2:start_top+box_height+interval_height*2, start_left:start_left+box_width, 0:3]
answer_4 = whole_img[start_top+interval_height*3:start_top+box_height+interval_height*3, start_left:start_left+box_width, 0:3]
return answer_1, answer_2, answer_3, answer_4
def cal_num_scalar(image, color):
num =0
for loop in range(image.shape[0]):
for loop2 in range(image.shape[1]):
if image[loop][loop2][0] == color[0] :# and image[loop][loop2][1] == color[1] and image[loop][loop2][2] == color[2]:
continue
else:
#print(image[loop][loop2][0:3])
num = num+1
return num
def cal_num(image, color):
num = 0
image_useful = image[:, :, 0] != color[0]
num = np.sum(np.sum(image_useful))
return int(num)
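# --- Illustrative sketch (added for clarity, not part of the original script): cal_num
# counts pixels whose red channel differs from the reference colour using a vectorised
# boolean mask. The tiny test image below is an arbitrary example.
def _sketch_cal_num():
    img = np.zeros((2, 2, 3), dtype=np.uint8)
    img[0, 0] = GREY                 # one pixel matches the reference colour's red channel
    assert cal_num(img, GREY) == 3   # the remaining three pixels differ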
def cal_num_cat(image, color):
if 0:
height_split = int(image.shape[0]/3)
num = ""
for i in range(3):
image_useful = image[height_split * i:height_split * (i+1), :, 0] != color[0]
num1 = np.sum(np.sum(image_useful))
num += str(num1)
return int(np.int(num))
else:
width_split = int(image.shape[1]/2)
data_str = ""
for i in range(2):
image_useful = image[:, width_split * i:width_split * (i+1), 0] != color[0]
num = np.sum(np.sum(image_useful))
num_str = str(num)
if num_str.__len__() == 1:
num_str = "0000" + num_str
elif num_str.__len__() == 2:
num_str = "000" + num_str
elif num_str.__len__() == 3:
num_str = "00" + num_str
elif num_str.__len__() == 4:
num_str = "0" + num_str
elif num_str.__len__() == 5:
pass
else:
assert False, "num_str length error. length: %d" % num_str.__len__()
data_str += num_str
return data_str
def cal_num1(image, color):
num =0
for loop in range(image.shape[0]):
for loop2 in range(image.shape[1]):
if sum(image[loop][loop2][0:3] == color) == 3:
continue
else:
#print(image[loop][loop2][0:3])
num = num+1
return num
def selection(correct_loss, loss1, loss2, loss3, loss4):
    a = np.array([loss1, loss2, loss3, loss4])
#Copyright 2018 KAIST under XAI Project supported by Ministry of Science and ICT, Korea
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
import numpy as np
from layers import *
"""
These functions are for the GP class and data generation.
Only functions related to data generation is in use and others are not needed anymore,
since they are replaced by GPflow.
"""
# Define Kernel (covariance matrix)
def Kernel(X1, X2, t0, t1):
sqdist = square_dist(X1, X2, t1)
return t0**2 * np.exp(-.5 * sqdist)
# Description: Data generation from GP specified by W, theta's
# 'N' is a number of data points in one dimension line.
def data_from_GP(params, N, low=-5., high=5., dim=1):
# Parameters defining generation model
W, B, t0, t1 = params['W'], params['B'], params['t0'], params['t1']
# Dimension check
assert (dim == len(W)-1) and (dim == B.shape[0]-1 == B.shape[1]-1), 'Dimension match error'
# Define input data points
X = [np.linspace(low, high, N) for _ in range(dim)]
X = np.meshgrid(*X)
D_X = np.empty((N**dim, 0))
for i in range(dim):
D_X = np.concatenate((D_X, X[i].reshape(-1, 1)), axis=1)
# Generate y values
D_X_aug = np.concatenate((np.ones((N**dim, 1)), D_X), axis=1)
K = Kernel(D_X, D_X, t0, t1)
L = np.linalg.cholesky(K + 1e-12*np.eye(N**dim))
U = np.random.normal(size=(N**dim, 1))
y = np.matmul(D_X_aug, W) + np.matmul(L, U)
K_noisy = Kernel(D_X, D_X, t0, t1) + np.dot(np.dot(D_X_aug, B), D_X_aug.T)
    L_noisy = np.linalg.cholesky(K_noisy + 1e-12*np.eye(N**dim))
y_noisy = np.matmul(D_X_aug, W) + np.matmul(L_noisy, U)
if dim == 1:
X = np.array(X).reshape(-1, 1)
y = y.reshape(-1, 1)
else:
y = y.reshape([N for _ in range(dim)])
return X, y, y_noisy, None
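# --- Illustrative sketch (added for clarity, not part of the original module): the
# Cholesky sampling step used inside data_from_GP. A draw from N(m, K) is obtained as
# m + L u with K = L L^T and u ~ N(0, I); here the mean is zero and the kernel
# parameters are arbitrary example values.
def _sketch_gp_sample(N=50, t0=1.0, t1=1.0):
    X = np.linspace(-5.0, 5.0, N).reshape(-1, 1)
    K = Kernel(X, X, t0, t1)                        # squared-exponential covariance
    L = np.linalg.cholesky(K + 1e-12 * np.eye(N))   # jitter keeps the factorization stable
    u = np.random.normal(size=(N, 1))
    return X, np.matmul(L, u)                       # one zero-mean sample path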
def simulation_data(num_data_each=100, num_patients=10, low=-5., high=5., \
input_dim=1, hidden_dim=5, output_dim=1, ratio=0.5, classification=False):
"""
The data generation for simulating our model.
    Data are generated from a model in which all GPs (patients) share an MLP mean function,
    and each has its own covariance function.
    Input points for each GP might be different.
    The MLP is a one-hidden-layer model.
Args:
num_data_each: Number of data for each patient, N.
num_patients: Number of paitents, P.
low: Low limit value of range of input, x.
high: High limit value of range of input, x.
input_dim: Dimension of inputs.
hidden_dim: Dimension of hiddens.
output_dim: Dimension of outputs.
ratio: Ratio between training data and testing data.
"""
mu_W1 = 2*np.random.rand(input_dim*hidden_dim)-1
mu_W2 = 2*np.random.rand(hidden_dim*output_dim)-1
mu_b1 = 2*np.random.rand(hidden_dim)-1
mu_b2 = 2*np.random.rand(output_dim)-1
cov_W1 = 0.01*np.eye(input_dim*hidden_dim)
cov_W2 = 0.01*np.eye(hidden_dim*output_dim)
cov_b1 = 0.01*np.eye(hidden_dim)
cov_b2 = 0.01*np.eye(output_dim)
N = num_data_each*num_patients
X = np.random.uniform(low=low, high=high, size=N).reshape(-1, 1)
sample_W1 = np.random.multivariate_normal(mu_W1, cov_W1, N).reshape(N, input_dim, hidden_dim)
sample_W2 = np.random.multivariate_normal(mu_W2, cov_W2, N).reshape(N, hidden_dim, output_dim)
sample_b1 = np.random.multivariate_normal(mu_b1, cov_b1, N).reshape(N, hidden_dim)
sample_b2 = np.random.multivariate_normal(mu_b2, cov_b2, N).reshape(N, output_dim)
M = np.zeros((N, 1))
for i in range(N):
M[i] = np.matmul(np.maximum(np.matmul(X[i].reshape(1, -1), sample_W1[i]) + sample_b1[i], 0), \
sample_W2[i]) + sample_b2[i]
data = {'X_train': np.empty((0, input_dim)), 'y_train': np.empty((0, output_dim)), \
'X_test': np.empty((0, input_dim)), 'y_test': np.empty((0, output_dim))}
for i in range(num_patients):
N_each = num_data_each
N_each_train = int(N_each*ratio)
N_each_test = N_each - N_each_train
t0, t1 = np.random.uniform(low=0.7, high=1.414, size=2)
_X = X[i*N_each:(i+1)*N_each, :]
K = Kernel(_X, _X, t0, t1)
L = np.linalg.cholesky(K + 1e-12*np.eye(N_each))
U = np.random.normal(size=(N_each, 1))
y = M[i*N_each:(i+1)*N_each, :] + np.matmul(L, U)
if classification:
y_mean = np.mean(y)
for j in range(N_each):
if y[j] <= y_mean:
y[j] = 0
else:
y[j] = 1
#y_mean = np.mean(y)
#y[y<=y_mean] = 0
#y[y>y_mean] = 1
        idx_shuffled = np.arange(N_each)
# Copyright 2022 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
import numpy as np
from PIL import Image
import albumentations as A
from nnabla.utils.data_iterator import data_iterator_simple
from nnabla.logger import logger
def data_iterator_celeba(img_path, attributes,
transform=None, batch_size=32, num_samples=-1, shuffle=True, rng=None):
"""
create celebA data iterator
Args:
img_path(list) : list of image paths
attributes (dict) : attribute list
transform : transform the image(data augmentation)
batch_size (int) : number of samples contained in each generated batch
num_samples (int) : number of samples taken in data loader
(if num_samples=-1, it will take all the images in the dataset)
shuffle (bool) : shuffle the data
Returns:
simple data iterator
"""
imgs = img_path
attr = attributes
if num_samples == -1:
num_samples = len(imgs)
else:
logger.info(
"Num. of data ({}) is used for debugging".format(num_samples))
def load_func(i):
pillow_image = Image.open(imgs[i])
        image = np.array(pillow_image)
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime
from numpy import random
import numpy as np
from pandas.compat import lrange, lzip, u
from pandas import (compat, DataFrame, Series, Index, MultiIndex,
date_range, isnull)
import pandas as pd
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
from pandas.core.common import PerformanceWarning
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameSelectReindex(tm.TestCase, TestData):
# These are specific reindex-based tests; other indexing tests should go in
# test_indexing
_multiprocess_can_split_ = True
def test_drop_names(self):
df = DataFrame([[1, 2, 3], [3, 4, 5], [5, 6, 7]],
index=['a', 'b', 'c'],
columns=['d', 'e', 'f'])
df.index.name, df.columns.name = 'first', 'second'
df_dropped_b = df.drop('b')
df_dropped_e = df.drop('e', axis=1)
df_inplace_b, df_inplace_e = df.copy(), df.copy()
df_inplace_b.drop('b', inplace=True)
df_inplace_e.drop('e', axis=1, inplace=True)
for obj in (df_dropped_b, df_dropped_e, df_inplace_b, df_inplace_e):
self.assertEqual(obj.index.name, 'first')
self.assertEqual(obj.columns.name, 'second')
self.assertEqual(list(df.columns), ['d', 'e', 'f'])
self.assertRaises(ValueError, df.drop, ['g'])
self.assertRaises(ValueError, df.drop, ['g'], 1)
# errors = 'ignore'
dropped = df.drop(['g'], errors='ignore')
expected = Index(['a', 'b', 'c'], name='first')
self.assert_index_equal(dropped.index, expected)
dropped = df.drop(['b', 'g'], errors='ignore')
expected = Index(['a', 'c'], name='first')
self.assert_index_equal(dropped.index, expected)
dropped = df.drop(['g'], axis=1, errors='ignore')
expected = Index(['d', 'e', 'f'], name='second')
self.assert_index_equal(dropped.columns, expected)
dropped = df.drop(['d', 'g'], axis=1, errors='ignore')
expected = Index(['e', 'f'], name='second')
self.assert_index_equal(dropped.columns, expected)
def test_drop_col_still_multiindex(self):
arrays = [['a', 'b', 'c', 'top'],
['', '', '', 'OD'],
['', '', '', 'wx']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(np.random.randn(3, 4), columns=index)
del df[('a', '', '')]
assert(isinstance(df.columns, MultiIndex))
def test_drop(self):
simple = DataFrame({"A": [1, 2, 3, 4], "B": [0, 1, 2, 3]})
assert_frame_equal(simple.drop("A", axis=1), simple[['B']])
assert_frame_equal(simple.drop(["A", "B"], axis='columns'),
simple[[]])
assert_frame_equal(simple.drop([0, 1, 3], axis=0), simple.ix[[2], :])
assert_frame_equal(simple.drop(
[0, 3], axis='index'), simple.ix[[1, 2], :])
self.assertRaises(ValueError, simple.drop, 5)
self.assertRaises(ValueError, simple.drop, 'C', 1)
self.assertRaises(ValueError, simple.drop, [1, 5])
self.assertRaises(ValueError, simple.drop, ['A', 'C'], 1)
# errors = 'ignore'
assert_frame_equal(simple.drop(5, errors='ignore'), simple)
assert_frame_equal(simple.drop([0, 5], errors='ignore'),
simple.ix[[1, 2, 3], :])
assert_frame_equal(simple.drop('C', axis=1, errors='ignore'), simple)
assert_frame_equal(simple.drop(['A', 'C'], axis=1, errors='ignore'),
simple[['B']])
# non-unique - wheee!
nu_df = DataFrame(lzip(range(3), range(-3, 1), list('abc')),
columns=['a', 'a', 'b'])
assert_frame_equal(nu_df.drop('a', axis=1), nu_df[['b']])
assert_frame_equal(nu_df.drop('b', axis='columns'), nu_df['a'])
nu_df = nu_df.set_index(pd.Index(['X', 'Y', 'X']))
nu_df.columns = list('abc')
assert_frame_equal(nu_df.drop('X', axis='rows'), nu_df.ix[["Y"], :])
assert_frame_equal(nu_df.drop(['X', 'Y'], axis=0), nu_df.ix[[], :])
# inplace cache issue
# GH 5628
df = pd.DataFrame(np.random.randn(10, 3), columns=list('abc'))
expected = df[~(df.b > 0)]
df.drop(labels=df[df.b > 0].index, inplace=True)
assert_frame_equal(df, expected)
def test_drop_multiindex_not_lexsorted(self):
# GH 11640
# define the lexsorted version
lexsorted_mi = MultiIndex.from_tuples(
[('a', ''), ('b1', 'c1'), ('b2', 'c2')], names=['b', 'c'])
lexsorted_df = DataFrame([[1, 3, 4]], columns=lexsorted_mi)
self.assertTrue(lexsorted_df.columns.is_lexsorted())
# define the non-lexsorted version
not_lexsorted_df = DataFrame(columns=['a', 'b', 'c', 'd'],
data=[[1, 'b1', 'c1', 3],
[1, 'b2', 'c2', 4]])
not_lexsorted_df = not_lexsorted_df.pivot_table(
index='a', columns=['b', 'c'], values='d')
not_lexsorted_df = not_lexsorted_df.reset_index()
self.assertFalse(not_lexsorted_df.columns.is_lexsorted())
# compare the results
tm.assert_frame_equal(lexsorted_df, not_lexsorted_df)
expected = lexsorted_df.drop('a', axis=1)
with tm.assert_produces_warning(PerformanceWarning):
result = not_lexsorted_df.drop('a', axis=1)
tm.assert_frame_equal(result, expected)
def test_merge_join_different_levels(self):
# GH 9455
# first dataframe
df1 = DataFrame(columns=['a', 'b'], data=[[1, 11], [0, 22]])
# second dataframe
columns = MultiIndex.from_tuples([('a', ''), ('c', 'c1')])
df2 = DataFrame(columns=columns, data=[[1, 33], [0, 44]])
# merge
columns = ['a', 'b', ('c', 'c1')]
expected = DataFrame(columns=columns, data=[[1, 11, 33], [0, 22, 44]])
with tm.assert_produces_warning(UserWarning):
result = pd.merge(df1, df2, on='a')
tm.assert_frame_equal(result, expected)
# join, see discussion in GH 12219
columns = ['a', 'b', ('a', ''), ('c', 'c1')]
expected = DataFrame(columns=columns,
data=[[1, 11, 0, 44], [0, 22, 1, 33]])
with tm.assert_produces_warning(UserWarning):
result = df1.join(df2, on='a')
tm.assert_frame_equal(result, expected)
def test_reindex(self):
newFrame = self.frame.reindex(self.ts1.index)
for col in newFrame.columns:
for idx, val in compat.iteritems(newFrame[col]):
if idx in self.frame.index:
if np.isnan(val):
self.assertTrue(np.isnan(self.frame[col][idx]))
else:
self.assertEqual(val, self.frame[col][idx])
else:
self.assertTrue(np.isnan(val))
for col, series in compat.iteritems(newFrame):
self.assertTrue(tm.equalContents(series.index, newFrame.index))
emptyFrame = self.frame.reindex(Index([]))
self.assertEqual(len(emptyFrame.index), 0)
# Cython code should be unit-tested directly
nonContigFrame = self.frame.reindex(self.ts1.index[::2])
for col in nonContigFrame.columns:
for idx, val in compat.iteritems(nonContigFrame[col]):
if idx in self.frame.index:
if np.isnan(val):
self.assertTrue(np.isnan(self.frame[col][idx]))
else:
self.assertEqual(val, self.frame[col][idx])
else:
self.assertTrue(np.isnan(val))
for col, series in compat.iteritems(nonContigFrame):
self.assertTrue(tm.equalContents(series.index,
nonContigFrame.index))
# corner cases
# Same index, copies values but not index if copy=False
newFrame = self.frame.reindex(self.frame.index, copy=False)
self.assertIs(newFrame.index, self.frame.index)
# length zero
newFrame = self.frame.reindex([])
self.assertTrue(newFrame.empty)
self.assertEqual(len(newFrame.columns), len(self.frame.columns))
# length zero with columns reindexed with non-empty index
newFrame = self.frame.reindex([])
newFrame = newFrame.reindex(self.frame.index)
self.assertEqual(len(newFrame.index), len(self.frame.index))
self.assertEqual(len(newFrame.columns), len(self.frame.columns))
# pass non-Index
newFrame = self.frame.reindex(list(self.ts1.index))
self.assert_index_equal(newFrame.index, self.ts1.index)
# copy with no axes
result = self.frame.reindex()
assert_frame_equal(result, self.frame)
self.assertFalse(result is self.frame)
def test_reindex_nan(self):
df = pd.DataFrame([[1, 2], [3, 5], [7, 11], [9, 23]],
index=[2, np.nan, 1, 5],
columns=['joe', 'jim'])
i, j = [np.nan, 5, 5, np.nan, 1, 2, np.nan], [1, 3, 3, 1, 2, 0, 1]
assert_frame_equal(df.reindex(i), df.iloc[j])
df.index = df.index.astype('object')
assert_frame_equal(df.reindex(i), df.iloc[j], check_index_type=False)
# GH10388
df = pd.DataFrame({'other': ['a', 'b', np.nan, 'c'],
'date': ['2015-03-22', np.nan,
'2012-01-08', np.nan],
'amount': [2, 3, 4, 5]})
df['date'] = pd.to_datetime(df.date)
df['delta'] = (pd.to_datetime('2015-06-18') - df['date']).shift(1)
left = df.set_index(['delta', 'other', 'date']).reset_index()
right = df.reindex(columns=['delta', 'other', 'date', 'amount'])
assert_frame_equal(left, right)
def test_reindex_name_remains(self):
s = Series(random.rand(10))
df = DataFrame(s, index=np.arange(len(s)))
i = Series(np.arange(10), name='iname')
df = df.reindex(i)
self.assertEqual(df.index.name, 'iname')
df = df.reindex(Index(np.arange(10), name='tmpname'))
self.assertEqual(df.index.name, 'tmpname')
s = Series(random.rand(10))
df = DataFrame(s.T, index=np.arange(len(s)))
i = Series(np.arange(10), name='iname')
df = df.reindex(columns=i)
self.assertEqual(df.columns.name, 'iname')
def test_reindex_int(self):
smaller = self.intframe.reindex(self.intframe.index[::2])
self.assertEqual(smaller['A'].dtype, np.int64)
bigger = smaller.reindex(self.intframe.index)
self.assertEqual(bigger['A'].dtype, np.float64)
smaller = self.intframe.reindex(columns=['A', 'B'])
self.assertEqual(smaller['A'].dtype, np.int64)
def test_reindex_like(self):
other = self.frame.reindex(index=self.frame.index[:10],
columns=['C', 'B'])
assert_frame_equal(other, self.frame.reindex_like(other))
def test_reindex_columns(self):
newFrame = self.frame.reindex(columns=['A', 'B', 'E'])
assert_series_equal(newFrame['B'], self.frame['B'])
self.assertTrue(np.isnan(newFrame['E']).all())
self.assertNotIn('C', newFrame)
# length zero
newFrame = self.frame.reindex(columns=[])
self.assertTrue(newFrame.empty)
def test_reindex_axes(self):
# GH 3317, reindexing by both axes loses freq of the index
df = DataFrame(np.ones((3, 3)),
index=[datetime(2012, 1, 1),
datetime(2012, 1, 2),
datetime(2012, 1, 3)],
columns=['a', 'b', 'c'])
time_freq = date_range('2012-01-01', '2012-01-03', freq='d')
some_cols = ['a', 'b']
index_freq = df.reindex(index=time_freq).index.freq
both_freq = df.reindex(index=time_freq, columns=some_cols).index.freq
seq_freq = df.reindex(index=time_freq).reindex(
columns=some_cols).index.freq
self.assertEqual(index_freq, both_freq)
self.assertEqual(index_freq, seq_freq)
def test_reindex_fill_value(self):
df = DataFrame(np.random.randn(10, 4))
# axis=0
result = df.reindex(lrange(15))
self.assertTrue(np.isnan(result.values[-5:]).all())
result = df.reindex(lrange(15), fill_value=0)
expected = df.reindex(lrange(15)).fillna(0)
assert_frame_equal(result, expected)
# axis=1
result = df.reindex(columns=lrange(5), fill_value=0.)
expected = df.copy()
expected[4] = 0.
assert_frame_equal(result, expected)
result = df.reindex(columns=lrange(5), fill_value=0)
expected = df.copy()
expected[4] = 0
assert_frame_equal(result, expected)
result = df.reindex(columns=lrange(5), fill_value='foo')
expected = df.copy()
expected[4] = 'foo'
assert_frame_equal(result, expected)
# reindex_axis
result = df.reindex_axis(lrange(15), fill_value=0., axis=0)
expected = df.reindex(lrange(15)).fillna(0)
assert_frame_equal(result, expected)
result = df.reindex_axis(lrange(5), fill_value=0., axis=1)
expected = df.reindex(columns=lrange(5)).fillna(0)
assert_frame_equal(result, expected)
# other dtypes
df['foo'] = 'foo'
result = df.reindex(lrange(15), fill_value=0)
expected = df.reindex(lrange(15)).fillna(0)
assert_frame_equal(result, expected)
def test_reindex_dups(self):
# GH4746, reindex on duplicate index error messages
        arr = np.random.randn(10)
import json
import math
import os
import cv2
from PIL import Image
import numpy as np
from keras import layers
from keras.applications import DenseNet121
from keras.callbacks import Callback, ModelCheckpoint
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.optimizers import Adam
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import cohen_kappa_score, accuracy_score
import scipy
import tensorflow as tf
from tqdm import tqdm
import matplotlib.pyplot as plt
import cv2
from pathlib import Path
import os
from PIL import *
import matplotlib.image as mpimg
import numpy as np
from keras.preprocessing import image
import json
import random
from sklearn.model_selection import train_test_split
from keras.utils import to_categorical
from keras.layers import Dense, Dropout, Flatten
from keras.applications.resnet50 import ResNet50
from keras.applications.inception_v3 import InceptionV3
import keras
from keras.models import Sequential,Input,Model
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D,GlobalAveragePooling2D, ReLU, MaxPool2D,InputLayer
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import LeakyReLU
from keras import optimizers, regularizers
from sklearn.metrics import classification_report
from keras.callbacks import TensorBoard
import datetime
from keras.preprocessing.image import ImageDataGenerator
from keras.applications.resnet50 import preprocess_input
from keras.applications import DenseNet121
from keras import layers
import sys
np.random.seed(2019)
tf.set_random_seed(2019)
def preprocess_image(image_path, desired_size=224):
im = Image.open(image_path)
im = im.resize((desired_size, )*2, resample=Image.LANCZOS)
return im
def create_datagen():
return ImageDataGenerator(
zoom_range=0.15, # set range for random zoom
# set mode for filling points outside the input boundaries
fill_mode='constant',
cval=0., # value used for fill_mode = "constant"
horizontal_flip=True, # randomly flip images
vertical_flip=True, # randomly flip images
)
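# --- Illustrative sketch (added for clarity, not part of the original script): batches of
# augmented images are drawn from create_datagen() with .flow(). The array shapes below
# are arbitrary placeholders, not the real training data.
def _sketch_augmented_batches():
    datagen = create_datagen()
    x = np.zeros((8, 224, 224, 3), dtype=np.uint8)
    y = np.zeros((8, 5), dtype=np.uint8)
    x_batch, y_batch = next(datagen.flow(x, y, batch_size=4))
    assert x_batch.shape == (4, 224, 224, 3) and y_batch.shape == (4, 5)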
'''
resnet50
'''
def build_model(input_shape):
base_model =ResNet50(weights='imagenet', include_top=False, input_shape=input_shape)
#for layer in base_model.layers[:10]:
#layer.trainable = False
#layer.padding='same'
#for layer in base_model.layers[10:]:
#layer.trainable = True
#layer.padding='same'
# x = base_model.get_layer('avg_pool').output
x = base_model.output
x = GlobalAveragePooling2D()(x)
# x = BatchNormalization()(x)
x = Dropout(0.5)(x)
# x = Flatten() (x)
# x = Dropout(0.5)(x)
# x = Dense(512, activation='relu', kernel_regularizer=regularizers.l2(0.001))(x)
# x = BatchNormalization()(x)
# x = Dropout(0.5)(x)
# x = Dense(32, activation='relu')(x)
# x = Dense(128, activation='relu', kernel_regularizer=regularizers.l2(0.001))(x)
# x = Dropout(0.5)(x)
# x = BatchNormalization()(x)
# x = Dense(64, activation='relu', kernel_regularizer=regularizers.l2(0.001))(x)
# x = Dropout(0.5)(x)
# x = BatchNormalization()(x)
# x = Dense(512, activation='relu')(x)
# x = LeakyReLU(alpha=0.1)(x)
# x = Dropout(0.3)(x)
#x = Dense(5, activation='softmax')(x)
#model = Model(base_model.input, x)
predictions = Dense(5, activation='sigmoid')(x)
model = Model(inputs=base_model.input, outputs=predictions)
# for layer in model.layers[:-2]:
# layer.trainable = False
model.compile(
loss='binary_crossentropy',
optimizer=Adam(lr=0.00005),
metrics=['accuracy']
)
return model
# def build_model(input_shape):
# densenet = DenseNet121(
# weights='/home/z5163479/code/adapted_deep_embeddings/DenseNet-BC-121-32-no-top.h5',
# include_top=False,
# input_shape=input_shape
# )
# model = Sequential()
# model.add(densenet)
# model.add(layers.GlobalAveragePooling2D())
# model.add(layers.Dropout(0.5))
# model.add(layers.Dense(5, activation='sigmoid'))
# model.compile(
# loss='binary_crossentropy',
# optimizer=Adam(lr=0.00005),
# metrics=['accuracy']
# )
# return model
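# Illustrative wrapper (an assumption, mirroring the preprocessing in main()
# below): the head is built for 224x224 RGB crops and returns five sigmoid
# scores, one per ordinal "grade >= k" target.
def build_default_model():
    return build_model(input_shape=(224, 224, 3))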
def get_preds(arr):
    # Decode the ordinal multilabel encoding back to a single grade: the predicted
    # class is the index of the first 0 minus one, clipped to 0-4 (4 when every
    # "grade >= k" flag is set).
    mask = arr == 0
    return np.clip(np.where(mask.any(1), mask.argmax(1), 5) - 1, 0, 4)
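# Illustrative decoding sketch (not part of the original script): thresholding
# the sigmoid outputs at 0.5 yields the "grade >= k" flags that get_preds maps
# back to a single grade; the toy check below mirrors that mapping.
def predict_grades(model, x, threshold=0.5):
    y_pred = model.predict(x) > threshold
    return get_preds(y_pred)

def _demo_get_preds():
    demo = np.array([[1, 0, 0, 0, 0],   # only "grade >= 0"  -> grade 0
                     [1, 1, 1, 0, 0],   # "grade >= 0..2"    -> grade 2
                     [1, 1, 1, 1, 1]])  # every flag set     -> grade 4
    assert (get_preds(demo) == np.array([0, 2, 4])).all()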
def main():
train_df = pd.read_csv('/srv/scratch/z5163479/aptos/labels/trainLabels19.csv')
print(train_df.shape)
# train_df.head()
N = train_df.shape[0]
x_train = np.empty((N, 224, 224, 3), dtype=np.uint8)
for i, image_id in enumerate(tqdm(train_df['id_code'])):
x_train[i, :, :, :] = preprocess_image(
f'/srv/scratch/z5163479/aptos/resized_train_19/{image_id}.jpg'
)
y_train = pd.get_dummies(train_df['diagnosis']).values
print(x_train.shape)
print(y_train.shape)
x_train, x_val, y_train, y_val = train_test_split(
x_train, y_train,
test_size=0.15,
random_state=2019
)
y_train_multi = np.empty(y_train.shape, dtype=y_train.dtype)
y_train_multi[:, 4] = y_train[:, 4]
for i in range(3, -1, -1):
y_train_multi[:, i] = np.logical_or(y_train[:, i], y_train_multi[:, i+1])
print("Original y_train:", y_train.sum(axis=0))
print("Multilabel version:", y_train_multi.sum(axis=0))
assert (np.argmax(y_train, 1) == get_preds(y_train_multi)).all()
y_val_multi = np.empty(y_val.shape, dtype=y_val.dtype)
y_val_multi[:, 4] = y_val[:, 4]
for i in range(3, -1, -1):
y_val_multi[:, i] = np.logical_or(y_val[:, i], y_val_multi[:, i+1])
print("Original y_val:", y_train.sum(axis=0))
print("Multilabel version:", y_train_multi.sum(axis=0))
assert ( | np.argmax(y_val, 1) | numpy.argmax |
# SPDX-License-Identifier: Apache-2.0
"""Unit Tests for optimizers such as TransposeOptimizer."""
import unittest
import numpy as np
from onnx import helper, numpy_helper, TensorProto, OperatorSetIdProto
from parameterized import parameterized
from backend_test_base import Tf2OnnxBackendTestBase
from common import unittest_main, group_nodes_by_type, check_opset_min_version, check_opset_max_version, get_test_config
from tf2onnx import utils, constants
from tf2onnx.graph import GraphUtil
# pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test
class OptimizerTests(Tf2OnnxBackendTestBase):
"""Run original model proto and modified model proto with onnxruntime, compare the results."""
def run_and_compare(self, output_names_with_port, onnx_feed_dict, origin_proto, op_type,
remaining_op_num, debug=False, rtol=1e-07):
utils.make_sure(op_type is not None, "op_type should be specified")
utils.make_sure(remaining_op_num is not None, "remaining_op_num should be specified")
utils.make_sure(self.config.is_onnxruntime_backend, "only onnxruntime is supported to test transpose optimizer")
origin_model_path = self.save_onnx_model(origin_proto, onnx_feed_dict, postfix="_origin")
expected = self.run_onnxruntime(origin_model_path, onnx_feed_dict, output_names_with_port)
new_proto, new_graph = GraphUtil.optimize_model_proto(origin_proto, catch_errors=False, return_graph=True)
self.assertTrue(new_proto, msg="model proto after optimizer should not be None")
new_model_path = self.save_onnx_model(new_proto, onnx_feed_dict, postfix="_opt")
current = GraphUtil.get_node_count_from_onnx_graph(new_proto.graph)
actual = self.run_onnxruntime(new_model_path, onnx_feed_dict, output_names_with_port)
for expected_val, actual_val in zip(expected, actual):
self.assertAllClose(expected_val, actual_val, rtol=rtol, atol=1e-5)
self.assertEqual(expected_val.dtype, actual_val.dtype)
self.assertEqual(expected_val.shape, actual_val.shape)
self.assertTrue(current[op_type] == remaining_op_num,
msg="Expect " + str(remaining_op_num) + " " + op_type + " ops left, but actually " + str(
current[op_type]) + " left")
self.assert_shapes_correct(new_graph, allow_missing=False, run_checker=True)
return new_proto
@staticmethod
def _make_onnx_const(np_val, output_name):
node = helper.make_node(
'Constant',
inputs=[],
outputs=[output_name],
value=helper.make_tensor(
name=output_name,
data_type=utils.map_numpy_to_onnx_dtype(np_val.dtype),
dims=np_val.shape,
vals=np_val.flatten().astype(np_val.dtype).tolist(),
),
)
return node
def make_model(self, graph, producer_name="onnx-tests"):
imp = OperatorSetIdProto()
imp.version = self.config.opset
model_proto = helper.make_model(graph, producer_name=producer_name, opset_imports=[imp])
try:
model_proto.ir_version = constants.OPSET_TO_IR_VERSION.get(self.config.opset, model_proto.ir_version)
except: # pylint: disable=bare-except
pass
return model_proto
    # Transpose Optimizer Tests Start
def run_transpose_compare(self, output_names_with_port, onnx_feed_dict, origin_proto,
remaining_transpose_num=None, debug=False, rtol=1e-07):
return self.run_and_compare(output_names_with_port, onnx_feed_dict, origin_proto, op_type="Transpose",
remaining_op_num=remaining_transpose_num, debug=debug, rtol=rtol)
def check_transpose_perm(self, model_proto, expected_perm):
for node in model_proto.graph.node:
if node.op_type == "Transpose":
perm = list(node.attribute[0].ints)
self.assertEqual(perm, expected_perm)
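    # Illustrative helper (not used by the tests): in the cancellation cases below
    # the output permutation is simply the inverse of the input permutation, which
    # is what allows the optimizer to remove the Transpose pair entirely.
    @staticmethod
    def _invert_perm(perm):
        """Return q such that transposing by perm and then by q is the identity."""
        inverse = [0] * len(perm)
        for axis, p in enumerate(perm):
            inverse[p] = axis
        return inverse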
@parameterized.expand([
((2, 3, 4, 5), [0, 3, 1, 2], [0, 2, 3, 1]),
((2, 3, 4, 5, 6), [0, 4, 1, 2, 3], [0, 2, 3, 4, 1]),
])
def test_transpose_with_concat(self, input_shape, perm, inner_perm):
input_shape_with_trans = [input_shape[i] for i in perm]
for axis in range(len(input_shape)):
output_before_trans = list(input_shape)
output_before_trans[axis] *= 2
output_shape = [output_before_trans[i] for i in perm]
node1 = helper.make_node("Transpose", ["input_data1"], ["Y"], perm=inner_perm, name="trans")
node2 = helper.make_node("Concat", ["Y", "input_data2"], ["Z"], axis=axis, name="concat")
node3 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm, name="trans2")
graph = helper.make_graph(
[node1, node2, node3],
"test_transpose_with_concat",
[helper.make_tensor_value_info("input_data1", TensorProto.FLOAT, input_shape_with_trans),
helper.make_tensor_value_info("input_data2", TensorProto.FLOAT, input_shape),
],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
feed_dict = {"input_data1": np.random.randn(*input_shape_with_trans).astype(np.float32),
"input_data2": np.random.randn(*input_shape).astype(np.float32),
}
self.run_transpose_compare(["res"], feed_dict, model_proto, remaining_transpose_num=1)
@parameterized.expand([
((2, 3, 4), [2, 0, 1], [1, 2, 0]),
((2, 3, 4, 5), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_transpose_with_add1(self, input_shape, perm_input, perm_output):
        # when a transpose is followed by a broadcasting op, the constant input has to be
        # reshaped when the transpose is pushed past that op so broadcasting still lines up
        # (see the helper sketch after this test)
node1 = helper.make_node("Transpose", ["input_data1"], ["Y"], perm=perm_input, name="trans")
node2 = helper.make_node("Add", ["Y", "input_data2"], ["Z"], name="add")
node3 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans2")
graph = helper.make_graph(
[node1, node2, node3],
"transpose_with_shape",
[helper.make_tensor_value_info("input_data1", TensorProto.FLOAT, input_shape),
helper.make_tensor_value_info("input_data2", TensorProto.FLOAT, (input_shape[1],)),
],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, input_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
feed_dict = {"input_data1": np.random.randn(*input_shape).astype(np.float32),
"input_data2": np.random.randn(input_shape[1]).astype(np.float32),
}
self.run_transpose_compare(["res"], feed_dict, model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4, 5), (2, 4, 5, 3), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), (2, 4, 5, 6, 3), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_transpose_with_add2(self, input_shape1, input_shape2, perm_input, perm_output):
node1 = helper.make_node("Transpose", ["input_data1"], ["Y"], perm=perm_input, name="trans")
node2 = helper.make_node("Add", ["Y", "input_data2"], ["Z"], name="add")
node3 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans2")
output_shape = input_shape1
graph = helper.make_graph(
[node1, node2, node3],
"transpose_with_shape",
[helper.make_tensor_value_info("input_data1", TensorProto.FLOAT, input_shape1),
helper.make_tensor_value_info("input_data2", TensorProto.FLOAT, input_shape2),
],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
feed_dict = {"input_data1": np.random.randn(*input_shape1).astype(np.float32),
"input_data2": np.random.randn(*input_shape2).astype(np.float32),
}
self.run_transpose_compare(["res"], feed_dict, model_proto, remaining_transpose_num=1)
@parameterized.expand([
((2, 3, 4), [2, 0, 1], [1, 2, 0]),
((2, 3, 4, 5), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_transpose_relu(self, shape, perm_input, perm_output):
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node2 = helper.make_node("Relu", ["Y"], ["Z"], name="relu")
node3 = helper.make_node("Transpose", ["Z"], ["Z1"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[node1, node2, node3],
"relu-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, shape)],
[helper.make_tensor_value_info("Z1", TensorProto.FLOAT, shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z1"], {"X": np.random.randn(*shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4), [2, 0, 1], [1, 2, 0]),
((2, 3, 4, 5), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_transpose_leaky_relu(self, shape, perm_input, perm_output):
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node2 = helper.make_node("LeakyRelu", ["Y"], ["Z"], alpha=0.02, name="relu")
node3 = helper.make_node("Transpose", ["Z"], ["Z1"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[node1, node2, node3],
"LeakyRelu-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, shape)],
[helper.make_tensor_value_info("Z1", TensorProto.FLOAT, shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z1"], {"X": np.random.randn(*shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4), [2, 0, 1], [1, 2, 0]),
((2, 3, 4, 5), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
@check_opset_min_version(10, "QuantizeLinear")
def test_transpose_quantize(self, shape, perm_input, perm_output):
scale = numpy_helper.from_array(np.array(0.75, dtype=np.float32), name='scale')
zero_point = numpy_helper.from_array(np.array(3, dtype=np.uint8), name='zero_point')
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node2 = helper.make_node("QuantizeLinear", ["Y", "scale", "zero_point"], ["Z"], name="quantize")
node3 = helper.make_node("Transpose", ["Z"], ["Z1"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[node1, node2, node3],
"quantize-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, shape)],
[helper.make_tensor_value_info("Z1", TensorProto.UINT8, shape)],
[scale, zero_point]
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z1"], {"X": np.random.randn(*shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4), [0, 2, 1], [0, 2, 1]),
((2, 3, 4, 5), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
@check_opset_min_version(13, "QuantizeLinear with axis")
def test_transpose_quantize_with_axis(self, shape, perm_input, perm_output):
scale = numpy_helper.from_array(np.array([0.75, 0.1, 2.3, 0.3], dtype=np.float32), name='scale')
zero_point = numpy_helper.from_array(np.array([2, 4, 6, 8], dtype=np.uint8), name='zero_point')
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node2 = helper.make_node("QuantizeLinear", ["Y", "scale", "zero_point"], ["Z"], name="quantize", axis=1)
node3 = helper.make_node("Transpose", ["Z"], ["Z1"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[node1, node2, node3],
"quantize-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, shape)],
[helper.make_tensor_value_info("Z1", TensorProto.UINT8, shape)],
[scale, zero_point]
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z1"], {"X": np.random.randn(*shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4), [2, 0, 1], [1, 2, 0]),
((2, 3, 4, 5), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
@check_opset_min_version(10, "DequantizeLinear")
def test_transpose_dequantize(self, shape, perm_input, perm_output):
scale = numpy_helper.from_array(np.array(0.75, dtype=np.float32), name='scale')
zero_point = numpy_helper.from_array(np.array(3, dtype=np.uint8), name='zero_point')
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node2 = helper.make_node("DequantizeLinear", ["Y", "scale", "zero_point"], ["Z"], name="dequantize")
node3 = helper.make_node("Transpose", ["Z"], ["Z1"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[node1, node2, node3],
"dequantize-test",
[helper.make_tensor_value_info("X", TensorProto.UINT8, shape)],
[helper.make_tensor_value_info("Z1", TensorProto.FLOAT, shape)],
[scale, zero_point]
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z1"], {"X": np.random.randint(0, 100, shape, np.uint8)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4), [0, 2, 1], [0, 2, 1]),
((2, 3, 4, 5), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
@check_opset_min_version(13, "DequantizeLinear with axis")
def test_transpose_dequantize_with_axis(self, shape, perm_input, perm_output):
scale = numpy_helper.from_array(np.array([0.75, 0.1, 2.3, 0.3], dtype=np.float32), name='scale')
zero_point = numpy_helper.from_array(np.array([2, 4, 6, 8], dtype=np.uint8), name='zero_point')
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node2 = helper.make_node("DequantizeLinear", ["Y", "scale", "zero_point"], ["Z"], name="dequantize", axis=1)
node3 = helper.make_node("Transpose", ["Z"], ["Z1"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[node1, node2, node3],
"dequantize-test",
[helper.make_tensor_value_info("X", TensorProto.UINT8, shape)],
[helper.make_tensor_value_info("Z1", TensorProto.FLOAT, shape)],
[scale, zero_point]
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z1"], {"X": np.random.randint(0, 100, shape, np.uint8)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
([2, 3, 4], [1, 2, 1], [1], [0, 2, 1], [0, 2, 1]),
([2, 3, 4, 5], [1, 2, 1, 2], [1], [0, 2, 3, 1], [0, 3, 1, 2]),
([2, 3, 4, 5], [1, 2, 1, 2], [1, 2], [0, 2, 3, 1], [0, 3, 1, 2]),
([2, 3, 4, 5], [1, 2, 1, 2], [0, 1, 2, 3], [0, 2, 3, 1], [0, 3, 1, 2]),
([2, 3, 4, 5, 6], [1, 2, 1, 2, 1], [2], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
([2, 3, 4, 5, 6], [1, 2, 1, 2, 1], [2, 3], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
([2, 3, 4, 5, 6], [1, 2, 1, 2, 1], [0, 1, 2, 3, 4], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
    @check_opset_max_version(9, "Slice in opset 9 takes 'axes', 'starts' and 'ends' as attributes")
def test_transpose_slice(self, input_shape, slice_size, axes, perm_input, perm_output):
axes = np.array(axes, dtype=np.int64)
starts = np.array([0] * axes.size, dtype=np.int64)
ends = []
for i in range(axes.size):
ends.append(slice_size[axes[i]])
ends = np.array(ends, dtype=np.int64)
output_shape = input_shape.copy()
for axis in axes:
output_shape[perm_input[axis]] = slice_size[axis]
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node2 = helper.make_node("Slice", ["Y"], ["Z"], starts=starts, ends=ends, axes=axes, name="slice")
node3 = helper.make_node("Transpose", ["Z"], ["Z1"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[node1, node2, node3],
"slice-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z1", TensorProto.FLOAT, output_shape)],
[
helper.make_tensor("starts", TensorProto.INT64, starts.shape, starts),
helper.make_tensor("ends", TensorProto.INT64, ends.shape, ends),
helper.make_tensor("axes", TensorProto.INT64, axes.shape, axes)
]
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z1"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
([2, 3, 4], [1, 2, 1], [1], [0, 2, 1], [0, 2, 1]),
([2, 3, 4, 5], [1, 2, 1, 2], [1], [0, 2, 3, 1], [0, 3, 1, 2]),
([2, 3, 4, 5], [1, 2, 1, 2], [1, 2], [0, 2, 3, 1], [0, 3, 1, 2]),
([2, 3, 4, 5], [1, 2, 1, 2], [0, 1, 2, 3], [0, 2, 3, 1], [0, 3, 1, 2]),
([2, 3, 4, 5, 6], [1, 2, 1, 2, 1], [2], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
([2, 3, 4, 5, 6], [1, 2, 1, 2, 1], [2, 3], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
([2, 3, 4, 5, 6], [1, 2, 1, 2, 1], [0, 1, 2, 3, 4], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
@check_opset_min_version(10, "Slice in opset 10 can accept dynamic 'start' and 'ends'")
def test_transpose_slice_opset_10(self, input_shape, slice_size, axes, perm_input, perm_output):
axes = np.array(axes, dtype=np.int32)
starts = np.array([0] * axes.size, dtype=np.int32)
ends = []
for i in range(axes.size):
ends.append(slice_size[axes[i]])
ends = np.array(ends, dtype=np.int32)
output_shape = input_shape.copy()
for axis in axes:
output_shape[perm_input[axis]] = slice_size[axis]
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node2 = helper.make_node("Slice", ["Y", "starts", "ends", "axes"], ["Z"], name="slice")
node3 = helper.make_node("Transpose", ["Z"], ["Z1"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[node1, node2, node3],
"slice-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z1", TensorProto.FLOAT, output_shape)],
[
helper.make_tensor("starts", TensorProto.INT32, starts.shape, starts),
helper.make_tensor("ends", TensorProto.INT32, ends.shape, ends),
helper.make_tensor("axes", TensorProto.INT32, axes.shape, axes)
]
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z1"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4), (4, 2, 3), (2, 0, 1), (1, 2, 0)),
((2, 3, 4, 5), (2, 4, 5, 3), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), (2, 4, 5, 6, 3), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
    @check_opset_min_version(8, "Max in opset 8 supports broadcasting")
def test_transpose_max(self, input_shape1, input_shape2, perm_input, perm_output):
const_1_val = [2.0]
const_1 = helper.make_tensor("const_1", TensorProto.FLOAT, (1,), const_1_val)
const_1_node = helper.make_node("Constant", [], ["const_1"], value=const_1, name="const_1")
const_2_val = np.random.randn(*input_shape2).astype(np.float32)
const_2 = helper.make_tensor("const_2", TensorProto.FLOAT, input_shape2, const_2_val.flatten())
const_2_node = helper.make_node("Constant", [], ["const_2"], value=const_2, name="const_2")
const_3_val = np.random.randn(*input_shape2).astype(np.float32)
const_3 = helper.make_tensor("const_3", TensorProto.FLOAT, input_shape2, const_3_val.flatten())
const_3_node = helper.make_node("Constant", [], ["const_3"], value=const_3, name="const_3")
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node2 = helper.make_node("Max", ["Y", "const_3", "const_2", "const_1"], ["Z"], name="max")
node3 = helper.make_node("Transpose", ["Z"], ["Z1"], perm=perm_output, name="trans_2")
output_shape = input_shape1
graph = helper.make_graph(
[const_1_node, const_2_node, const_3_node, node1, node2, node3],
"Max-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape1)],
[helper.make_tensor_value_info("Z1", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z1"], {"X": np.random.randn(*input_shape1).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4, 5), (2, 4, 5, 3), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), (2, 4, 5, 6, 3), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
    @check_opset_min_version(8, "Max in opset 8 supports broadcasting")
def test_transpose_max_input_non_const(self, input_shape1, input_shape2, perm_input, perm_output):
const_1_val = [2.0]
const_1 = helper.make_tensor("const_1", TensorProto.FLOAT, (1,), const_1_val)
const_1_node = helper.make_node("Constant", [], ["const_1"], value=const_1, name="const_1")
const_2_val = np.random.randn(*input_shape2).astype(np.float32)
const_2 = helper.make_tensor("const_2", TensorProto.FLOAT, input_shape2, const_2_val.flatten())
const_2_node = helper.make_node("Constant", [], ["const_2"], value=const_2, name="const_2")
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node2 = helper.make_node("Max", ["Y", "non_const", "const_2", "const_1"], ["Z"], name="max")
node3 = helper.make_node("Transpose", ["Z"], ["Z1"], perm=perm_output, name="trans_2")
output_shape = input_shape1
graph = helper.make_graph(
[const_1_node, const_2_node, node1, node2, node3],
"Max-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape1),
helper.make_tensor_value_info("non_const", TensorProto.FLOAT, input_shape2)],
[helper.make_tensor_value_info("Z1", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z1"], {"X": np.random.randn(*input_shape1).astype(np.float32),
"non_const": np.random.randn(*input_shape2).astype(np.float32)},
model_proto, remaining_transpose_num=1)
@parameterized.expand([
((2, 3, 4, 5), (2, 4, 5, 3), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), (2, 4, 5, 6, 3), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
    @check_opset_min_version(8, "Max in opset 8 supports broadcasting")
def test_transpose_max_no_cancel(self, input_shape1, input_shape2, perm_input, perm_output):
const_1_val = [2.0]
const_1 = helper.make_tensor("const_1", TensorProto.FLOAT, (1,), const_1_val)
const_1_node = helper.make_node("Constant", [], ["const_1"], value=const_1, name="const_1")
const_2_val = np.random.randn(*input_shape2).astype(np.float32)
const_2 = helper.make_tensor("const_2", TensorProto.FLOAT, input_shape2, const_2_val.flatten())
const_2_node = helper.make_node("Constant", [], ["const_2"], value=const_2, name="const_2")
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node2 = helper.make_node("Max", ["Y", "non_const", "const_2", "const_1"], ["Z"], name="max")
output_shape = [None] * len(input_shape1)
graph = helper.make_graph(
[const_1_node, const_2_node, node1, node2],
"Max-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape1),
helper.make_tensor_value_info("non_const", TensorProto.FLOAT, input_shape2)],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z"], {"X": np.random.randn(*input_shape1).astype(np.float32),
"non_const": np.random.randn(*input_shape2).astype(np.float32)},
model_proto, remaining_transpose_num=2)
@parameterized.expand([
((2, 3, 4, 5), (2, 4, 5, 3), [0, 2, 3, 1]),
((2, 3, 4, 5, 6), (2, 4, 5, 6, 3), [0, 2, 3, 4, 1]),
])
def test_transpose_merge(self, input_shape1, input_shape2, perm):
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm, name="trans")
node1 = helper.make_node("Transpose", ["X"], ["Y_1"], perm=perm, name="trans_1")
node2 = helper.make_node("Mul", ["Y", "Y_1"], ["OUT"], name="mul")
output_shape = input_shape2
graph = helper.make_graph(
[node0, node1, node2],
"transpose-merge-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape1)],
[helper.make_tensor_value_info("OUT", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["OUT"], {"X": np.random.randn(*input_shape1).astype(np.float32)},
model_proto, remaining_transpose_num=1)
@parameterized.expand([
((2, 3, 4), [2, 0, 1], [1, 2, 0]),
((2, 3, 4, 5), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_transpose_mul_as_square(self, shape, perm_input, perm_output):
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans")
node1 = helper.make_node("Mul", ["Y", "Y"], ["Z"], name="mul")
node2 = helper.make_node("Transpose", ["Z"], ["OUT"], perm=perm_output, name="trans_1")
graph = helper.make_graph(
[node0, node1, node2],
"transpose-mul-as-sqr-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, shape)],
[helper.make_tensor_value_info("OUT", TensorProto.FLOAT, shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["OUT"], {"X": np.random.randn(*shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4), [2, 0, 1], [1, 2, 0]),
((2, 3, 4, 5), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_transpose_mul_broadcastable_const(self, shape, perm_input, perm_output):
const = numpy_helper.from_array(np.random.random((1, shape[1])).astype(np.float32), name='const')
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans")
node1 = helper.make_node("Mul", ["Y", "const"], ["Z"], name="mul")
node2 = helper.make_node("Transpose", ["Z"], ["OUT"], perm=perm_output, name="trans_1")
graph = helper.make_graph(
[node0, node1, node2],
"transpose-mul-const-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, shape)],
[helper.make_tensor_value_info("OUT", TensorProto.FLOAT, shape)],
[const],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["OUT"], {"X": np.random.randn(*shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4), [2, 0, 1]),
((2, 3, 4, 5), [0, 2, 3, 1]),
((2, 3, 4, 5, 6), [0, 2, 3, 4, 1]),
])
def test_transpose_with_shape(self, shape, perm):
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm, name="trans")
node2 = helper.make_node("Shape", ["Y"], ["Z"], name="shape")
graph = helper.make_graph(
[node1, node2],
"transpose_with_shape",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, shape)],
[helper.make_tensor_value_info("Z", TensorProto.INT64, [len(shape)])],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z"], {"X": np.random.randn(*shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4), (4, 2, 3), [2, 0, 1]),
((2, 3, 4, 5), (2, 4, 5, 3), [0, 2, 3, 1]),
((2, 3, 4, 5, 6), (2, 4, 5, 6, 3), [0, 2, 3, 4, 1]),
])
def test_transpose_with_identity(self, input_shape, output_shape, perm):
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm, name="trans")
node2 = helper.make_node("Identity", ["Y"], ["Z"], name="identity")
graph = helper.make_graph(
[node1, node2],
"transpose_with_identity",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=1)
@parameterized.expand([
((2, 3, 4), [2, 0, 1], [1, 2, 0]),
((2, 3, 4, 5), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_transpose_sqrt(self, shape, perm_input, perm_output):
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans1")
node1 = helper.make_node("Sqrt", ["Y"], ["Z"], name="sqrt")
node2 = helper.make_node("Transpose", ["Z"], ["OUT"], perm=perm_output, name="trans2")
graph = helper.make_graph(
[node0, node1, node2],
"transpose-sqrt-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, shape)],
[helper.make_tensor_value_info("OUT", TensorProto.FLOAT, shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["OUT"], {"X": np.random.randn(*shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((1, 3, 4), [4, 3], [0, 2, 1], [1, 0]),
((1, 3, 4, 5), (4, 5, 3), [0, 2, 3, 1], [1, 2, 0]),
((1, 3, 4, 5, 6), (4, 5, 6, 3), [0, 2, 3, 4, 1], [1, 2, 3, 0]),
])
@check_opset_max_version(12, "Squeeze/Unsqueeze changed in opset 13")
def test_transpose_with_squeeze1(self, input_shape, output_shape, perm, expected_perm):
# squeeze the first dim
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm, name="trans")
node2 = helper.make_node("Squeeze", ["Y"], ["Z"], name="squeeze", axes=[0])
graph = helper.make_graph(
[node1, node2],
"transpose_with_squeeze",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
model_after_opt = self.run_transpose_compare(["Z"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=1)
self.check_transpose_perm(model_after_opt, expected_perm)
@parameterized.expand([
((1, 3, 4), (1, 4, 1, 3, 1, 1), [2, 0, 1], [0, 4, 5], [2, 3, 0, 1, 4, 5]),
((1, 3, 4, 5), (1, 1, 4, 5, 1, 3, 1), [0, 2, 3, 1], [0, 4, 6], [0, 1, 4, 5, 2, 3, 6]),
((1, 3, 4, 5, 6), (1, 1, 4, 5, 1, 6, 1, 3), [0, 2, 3, 4, 1], [0, 4, 6], [0, 1, 4, 5, 6, 7, 2, 3]),
])
def test_transpose_with_unsqueeze(self, input_shape, output_shape, perm, axes_val, expected_perm):
# unsqueeze the first dim
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm, name="trans")
if self.config.opset <= 12:
node2 = helper.make_node("Unsqueeze", ["Y"], ["Z"], name="unsqueeze", axes=axes_val)
nodes = [node1, node2]
else:
axes = self._make_onnx_const(np.array(axes_val, dtype=np.int64), "axes")
node2 = helper.make_node("Unsqueeze", ["Y", "axes"], ["Z"], name="unsqueeze")
nodes = [axes, node1, node2]
graph = helper.make_graph(
nodes,
"transpose_with_unsqueeze",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
model_after_opt = self.run_transpose_compare(["Z"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=1)
self.check_transpose_perm(model_after_opt, expected_perm)
@parameterized.expand([
((1, 3, 4), [4, 3], [0, 2, 1], [1, 0]),
((1, 3, 4, 5), (4, 5, 3), [0, 2, 3, 1], [1, 2, 0]),
((1, 3, 4, 5, 6), (4, 5, 6, 3), [0, 2, 3, 4, 1], [1, 2, 3, 0]),
])
@check_opset_min_version(13, "Squeeze/Unsqueeze changed in opset 13")
def test_transpose_with_squeeze1_13(self, input_shape, output_shape, perm, expected_perm):
# squeeze the first dim
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm, name="trans")
axes = self._make_onnx_const(np.array([0], dtype=np.int64), "axes")
node2 = helper.make_node("Squeeze", ["Y", "axes"], ["Z"], name="squeeze")
graph = helper.make_graph(
[node1, node2, axes],
"transpose_with_squeeze",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
model_after_opt = self.run_transpose_compare(["Z"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=1)
self.check_transpose_perm(model_after_opt, expected_perm)
@parameterized.expand([
((3, 4, 1, 5), (3, 5, 4), [0, 2, 3, 1], [0, 2, 1]),
((3, 4, 1, 5, 6), (3, 5, 6, 4), [0, 2, 3, 4, 1], [0, 2, 3, 1]),
])
@check_opset_max_version(12, "Squeeze/Unsqueeze changed in opset 13")
def test_transpose_with_squeeze2(self, input_shape, output_shape, perm, expected_perm):
# squeeze the second dim
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm, name="trans")
node2 = helper.make_node("Squeeze", ["Y"], ["Z"], name="squeeze", axes=[1])
graph = helper.make_graph(
[node1, node2],
"transpose_with_squeeze",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
model_after_opt = self.run_transpose_compare(["Z"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=1)
self.check_transpose_perm(model_after_opt, expected_perm)
@parameterized.expand([
((3, 4, 1, 5), (3, 5, 4), [0, 2, 3, 1], [0, 2, 1]),
((3, 4, 1, 5, 6), (3, 5, 6, 4), [0, 2, 3, 4, 1], [0, 2, 3, 1]),
])
@check_opset_min_version(13, "Squeeze/Unsqueeze changed in opset 13")
def test_transpose_with_squeeze2_13(self, input_shape, output_shape, perm, expected_perm):
# squeeze the second dim
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm, name="trans")
axes = self._make_onnx_const(np.array([1], dtype=np.int64), "axes")
node2 = helper.make_node("Squeeze", ["Y", "axes"], ["Z"], name="squeeze")
graph = helper.make_graph(
[node1, node2, axes],
"transpose_with_squeeze",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
model_after_opt = self.run_transpose_compare(["Z"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=1)
self.check_transpose_perm(model_after_opt, expected_perm)
@parameterized.expand([
((3, 1, 4, 5), (3, 4, 5), [0, 2, 3, 1]),
((3, 1, 4, 5, 6), (3, 4, 5, 6), [0, 2, 3, 4, 1]),
])
@check_opset_max_version(12, "Squeeze/Unsqueeze changed in opset 13")
def test_transpose_with_squeeze3(self, input_shape, output_shape, perm):
# squeeze the last dim
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm, name="trans")
node2 = helper.make_node("Squeeze", ["Y"], ["Z"], name="squeeze", axes=[len(input_shape) - 1])
graph = helper.make_graph(
[node1, node2],
"transpose_with_squeeze",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((3, 1, 4, 5), (3, 4, 5), [0, 2, 3, 1]),
((3, 1, 4, 5, 6), (3, 4, 5, 6), [0, 2, 3, 4, 1]),
])
@check_opset_min_version(13, "Squeeze/Unsqueeze changed in opset 13")
def test_transpose_with_squeeze3_13(self, input_shape, output_shape, perm):
# squeeze the last dim
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm, name="trans")
axes = self._make_onnx_const(np.array([len(input_shape) - 1], dtype=np.int64), "axes")
node2 = helper.make_node("Squeeze", ["Y", "axes"], ["Z"], name="squeeze")
graph = helper.make_graph(
[node1, node2, axes],
"transpose_with_squeeze",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((3, 1, 1, 5), (3, 5), [0, 2, 3, 1]),
((3, 1, 1, 5, 4), (3, 5, 4), [0, 2, 3, 4, 1]),
])
@check_opset_max_version(12, "Squeeze/Unsqueeze changed in opset 13")
def test_transpose_with_squeeze4(self, input_shape, output_shape, perm):
# squeeze the two dims
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm, name="trans")
node2 = helper.make_node("Squeeze", ["Y"], ["Z"], name="squeeze", axes=[1, len(input_shape) - 1])
graph = helper.make_graph(
[node1, node2],
"transpose_with_squeeze",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((3, 1, 1, 5), (3, 5), [0, 2, 3, 1]),
((3, 1, 1, 5, 4), (3, 5, 4), [0, 2, 3, 4, 1]),
])
@check_opset_min_version(13, "Squeeze/Unsqueeze changed in opset 13")
def test_transpose_with_squeeze4_13(self, input_shape, output_shape, perm):
# squeeze the two dims
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm, name="trans")
axes = self._make_onnx_const(np.array([1, len(input_shape) - 1], dtype=np.int64), "axes")
node2 = helper.make_node("Squeeze", ["Y", "axes"], ["Z"], name="squeeze")
graph = helper.make_graph(
[node1, node2, axes],
"transpose_with_squeeze",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((10, 3, 4), [0, 2, 1], [0, 2, 1]),
((10, 3, 4, 5), [0, 2, 3, 1], [0, 3, 1, 2]),
((10, 3, 4, 5, 6), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_transpose_with_loop(self, shape, perm_input, perm_output):
def _define_loop_graph(external_inputs):
# external_inputs: external node which will be used by this graph
# graph without loop carried
# computation
# for(...){a = external_inputs[i]; b = trans(a), c = squeeze(b)}, c is scan output
node1 = helper.make_node("Gather", [external_inputs[0], "loop_iter_num"], ["Y0"])
node2 = helper.make_node("Transpose", ["Y0"], ["Z0"], perm=perm_input)
# graph output
if get_test_config().opset <= 12:
node3 = helper.make_node("Squeeze", ["Z0"], ["scan_output"], axes=[0])
const_node = None
else:
const_tensor = helper.make_tensor(name='const', data_type=TensorProto.INT64, dims=[1],
vals=np.array([0], dtype=np.int64))
const_node = helper.make_node("Constant", [], ["axes_const"], value=const_tensor, name="const")
node3 = helper.make_node("Squeeze", ["Z0", "axes_const"], ["scan_output"])
node4 = helper.make_node("Identity", ["loop_condition"], ["loop_cond_output"])
node5 = helper.make_node("Identity", ["loop_condition"], ["loop_carried_output"])
nodes = [node1, node2, node3, node4, node5]
if const_node is not None:
nodes.append(const_node)
graph = helper.make_graph(
nodes,
"loop_subgraph",
[helper.make_tensor_value_info("loop_iter_num", TensorProto.INT64, (1,)), # iteration_num
helper.make_tensor_value_info("loop_condition", TensorProto.BOOL, ()), # condition
helper.make_tensor_value_info("loop_carried", TensorProto.BOOL, ()) # loop_carried
],
[helper.make_tensor_value_info("loop_cond_output", TensorProto.BOOL, ()),
helper.make_tensor_value_info("loop_carried_output", TensorProto.BOOL, ()),
helper.make_tensor_value_info("scan_output", TensorProto.FLOAT, ["unknown"] * (len(shape) - 1))
],
)
return graph
def _make_loop(external_inputs, outputs):
trip_cnt = self._make_onnx_const(np.array(10, dtype=np.int64), "trip_cnt")
            cond = self._make_onnx_const(np.array(True, dtype=np.bool_), "cond")
sub_graph = _define_loop_graph(external_inputs)
loop_node = helper.make_node("Loop", ["trip_cnt", "cond", "cond"], outputs,
name="loop", body=sub_graph)
return trip_cnt, cond, loop_node
nodes = _make_loop(["array"], ["loop_carried", "scan_out"])
res = helper.make_node("Transpose", ["scan_out"], ["Y"], perm=perm_output, name="trans")
graph = helper.make_graph(
[*nodes, res],
"transpose_with_loop",
[helper.make_tensor_value_info("array", TensorProto.FLOAT, ["unknow"] * len(shape))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, ["unknow"] * len(shape))],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Y"], {"array": np.random.randn(*shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4), [4, 2, 3], [2, 0, 1], [1, 2, 0]),
((2, 3, 4, 5), [2, 4, 5, 3], [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), [2, 4, 5, 6, 3], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_trans_with_sub(self, io_shape, const_shape_base, perm_input, perm_output):
const_shapes = []
for i in range(len(const_shape_base)):
const_shapes.append(const_shape_base[i:])
for trans_is_first_input in [True, False]:
for const_shape in const_shapes:
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_a")
const_tensor = helper.make_tensor(name='const', data_type=TensorProto.FLOAT, dims=const_shape,
vals=np.random.randn(*const_shape).flatten().astype(np.float32))
node2 = helper.make_node("Constant", [], ["const"], value=const_tensor, name="const")
if trans_is_first_input:
node3 = helper.make_node("Sub", ["Y", "const"], ["Z"], name="sub")
else:
node3 = helper.make_node("Sub", ["const", "Y"], ["Z"], name="sub")
node4 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans_b")
graph = helper.make_graph(
[node1, node2, node3, node4],
"test_trans_with_sub",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, io_shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, io_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"X": np.random.randn(*io_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4, 5), [2, 4, 5, 3], [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), [2, 4, 5, 6, 3], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_trans_with_sub_input_non_const(self, io_shape, non_const_shape_base, perm_input, perm_output):
non_const_shapes = []
for i in range(len(non_const_shape_base) - 1):
non_const_shapes.append(non_const_shape_base[i:])
for trans_is_first_input in [True, False]:
for non_const_shape in non_const_shapes:
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_a")
if trans_is_first_input:
node2 = helper.make_node("Sub", ["Y", "non_const"], ["Z"], name="sub")
else:
node2 = helper.make_node("Sub", ["non_const", "Y"], ["Z"], name="sub")
node3 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans_b")
graph = helper.make_graph(
[node1, node2, node3],
"test_trans_with_sub_input_non_const",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, io_shape),
helper.make_tensor_value_info("non_const", TensorProto.FLOAT, non_const_shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, io_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"X": np.random.randn(*io_shape).astype(np.float32),
"non_const": np.random.randn(*non_const_shape).astype(np.float32)},
model_proto, remaining_transpose_num=1)
@parameterized.expand([
((1, 1, 3, 3), (1, 3, 3, 1), [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 1, 3, 3, 3), (1, 3, 3, 3, 1), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_transpose_add_with_input_non_const(self, input_shape1, input_shape2, perm_input, perm_output):
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node1 = helper.make_node("Add", ["Y", "A"], ["Z"], name="add")
node2 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans_2")
output_shape = input_shape1
graph = helper.make_graph(
[node0, node1, node2],
"transpose-add-test-input-non-const",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape1),
helper.make_tensor_value_info("A", TensorProto.FLOAT, input_shape2)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"X": np.random.randn(*input_shape1).astype(np.float32),
"A": np.random.randn(*input_shape2).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4), [4, 2, 3], [2, 0, 1], [1, 2, 0]),
((1, 1, 3, 3), (1, 3, 3, 1), [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 1, 3, 3, 3), (1, 3, 3, 3, 1), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_transpose_add_with_input_const(self, input_shape1, input_shape2, perm_input, perm_output):
const_1_val = np.random.randn(*input_shape2).astype(np.float32)
const_1 = helper.make_tensor("const_1", TensorProto.FLOAT, input_shape2, const_1_val.flatten())
const_1_node = helper.make_node("Constant", [], ["const_1"], value=const_1, name="const_1")
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node1 = helper.make_node("Add", ["Y", "const_1"], ["Z"], name="add")
node2 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans_2")
output_shape = input_shape1
graph = helper.make_graph(
[const_1_node, node0, node1, node2],
"transpose-add-test-input-const",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape1)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"X": np.random.randn(*input_shape1).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((1, 5, 3, 3), (16, 5, 3, 3), (1, 16, 1, 1), (1, 1, 1, 16), [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 5, 3, 3, 3), (16, 5, 3, 3, 3), (1, 16, 1, 1, 1), (1, 1, 1, 1, 16), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_transpose_add_with_conv_1(self, input_shape, weights_shape, output_shape,
const_shape, perm_input, perm_output):
# case where bias's dim is 1D and can be merged into Conv
const_b_val = np.random.randn(*const_shape).astype(np.float32)
const_b = helper.make_tensor("const_b", TensorProto.FLOAT, const_shape, const_b_val.flatten())
const_b_node = helper.make_node("Constant", [], ["const_b"], value=const_b, name="const_b")
node0 = helper.make_node("Conv", ["x", "W"], ["X"], name="conv", pads=[0] * 2 * (len(input_shape) - 2))
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node2 = helper.make_node("Add", ["Y", "const_b"], ["Z"], name="add")
node3 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[const_b_node, node0, node1, node2, node3],
"transpose-add-test-with-conv-1",
[helper.make_tensor_value_info("x", TensorProto.FLOAT, input_shape),
helper.make_tensor_value_info("W", TensorProto.FLOAT, weights_shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"x": np.random.randn(*input_shape).astype(np.float32),
"W": np.random.randn(*weights_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((1, 1, 5, 5), (1, 1, 3, 3), (1, 1, 3, 3), (1, 3, 3, 1), [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 1, 5, 5, 5), (1, 1, 3, 3, 3), (1, 1, 3, 3, 3), (1, 3, 3, 3, 1), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_transpose_add_with_conv_2(self, input_shape, weights_shape, output_shape,
const_shape, perm_input, perm_output):
        # case where the bias is not effectively 1-D and therefore can't be merged into Conv;
        # the Add handler then only removes the Transposes around the Add node
        # (see the helper sketch after this test)
const_b_val = np.random.randn(*const_shape).astype(np.float32)
const_b = helper.make_tensor("const_b", TensorProto.FLOAT, const_shape, const_b_val.flatten())
const_b_node = helper.make_node("Constant", [], ["const_b"], value=const_b, name="const_b")
node0 = helper.make_node("Conv", ["x", "W"], ["X"], name="conv", pads=[0] * 2 * (len(input_shape) - 2))
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node2 = helper.make_node("Add", ["Y", "const_b"], ["Z"], name="add")
node3 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[const_b_node, node0, node1, node2, node3],
"transpose-add-test-with-conv-2",
[helper.make_tensor_value_info("x", TensorProto.FLOAT, input_shape),
helper.make_tensor_value_info("W", TensorProto.FLOAT, weights_shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"x": np.random.randn(*input_shape).astype(np.float32),
"W": np.random.randn(*weights_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
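    # Illustrative helper (an assumption, not the optimizer's actual code): a bias
    # constant can be folded into Conv's optional 1-D bias input only when it has a
    # single non-trivial axis; otherwise only the surrounding Transposes go away.
    @staticmethod
    def _try_channel_bias_for_conv(const_val):
        if const_val.size != max(const_val.shape):
            return None  # e.g. shape (1, 3, 3, 1): not foldable into Conv's bias
        return const_val.reshape(-1)  # e.g. shape (1, 1, 1, 16) -> (16,)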
@parameterized.expand([
((3, 4, 5), (8, 4, 6), [1, 3, 0, 0, 2, 0], [2, 0, 1], [1, 2, 0]),
((1, 3, 4, 5), (2, 6, 4, 8), [1, 0, 1, 3, 0, 0, 2, 0], [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 3, 4, 5, 6), (2, 5, 6, 8, 10), [1, 0, 1, 3, 1, 0, 2, 2, 1, 1], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
@check_opset_max_version(10, "pad")
def test_transpose_pad(self, input_shape, output_shape, pads, perm_input, perm_output):
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node1 = helper.make_node("Pad", ["Y"], ["Z"], pads=pads, name="pad")
node2 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[node0, node1, node2],
"transpose-pad-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((3, 4, 5), (8, 4, 6), [1, 3, 0, 0, 2, 0], [2, 0, 1], [1, 2, 0]),
((1, 3, 4, 5), (2, 6, 4, 8), [1, 0, 1, 3, 0, 0, 2, 0], [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 3, 4, 5, 6), (2, 5, 6, 8, 10), [1, 0, 1, 3, 1, 0, 2, 2, 1, 1], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
@check_opset_min_version(11, "pad")
def test_transpose_pad11(self, input_shape, output_shape, pads, perm_input, perm_output):
pads_val = np.array(pads, dtype=np.int64)
pads_tensor = helper.make_tensor("Pads", TensorProto.INT64, [len(input_shape) * 2], pads_val)
pads_const = helper.make_node("Constant", [], ["Pads"], value=pads_tensor, name="Pads")
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node1 = helper.make_node("Pad", ["Y", "Pads"], ["Z"], name="pad")
node2 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[node0, node1, node2, pads_const],
"transpose-pad-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((3, 4, 5), (8, 4, 6), [1, 3, 0, 0, 2, 0], [2, 0, 1], [1, 2, 0]),
((1, 3, 4, 5), (2, 6, 4, 8), [1, 0, 1, 3, 0, 0, 2, 0], [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 3, 4, 5, 6), (2, 5, 6, 8, 10), [1, 0, 1, 3, 1, 0, 2, 2, 1, 1], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
@check_opset_min_version(11, "pad")
def test_transpose_pad11_non_const_pads(self, input_shape, output_shape, pads, perm_input, perm_output):
pads_val = np.array(pads, dtype=np.int64)
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node1 = helper.make_node("Pad", ["Y", "Pads"], ["Z"], name="pad")
node2 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[node0, node1, node2],
"transpose-pad-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape),
helper.make_tensor_value_info("Pads", TensorProto.INT64, pads_val.shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"],
{
"X": np.random.randn(*input_shape).astype(np.float32),
"Pads": pads_val
},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4), [2, 0, 1], [1, 2, 0]),
((2, 3, 4, 5), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_transpose_reciprocal(self, shape, perm_input, perm_output):
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans1")
node1 = helper.make_node("Reciprocal", ["Y"], ["Z"], name="reciprocal")
node2 = helper.make_node("Transpose", ["Z"], ["OUT"], perm=perm_output, name="trans2")
graph = helper.make_graph(
[node0, node1, node2],
"transpose-reciprocal-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, shape)],
[helper.make_tensor_value_info("OUT", TensorProto.FLOAT, shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["OUT"], {"X": np.random.randn(*shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((3, 4, 5), (3, 4, 1), [0, 2, 1], [0, 2, 1]),
((1, 3, 4, 5), (1, 3, 1, 1), [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 3, 4, 5, 6), (1, 3, 1, 1, 1), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_transpose_reducemean(self, input_shape, output_shape, perm_input, perm_output):
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node1 = helper.make_node("ReduceMean", ["Y"], ["Z"], axes=list(range(1, len(input_shape) - 1)),
keepdims=1, name="reducemean")
node2 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[node0, node1, node2],
"transpose-reducemean-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((3, 4, 5), (3, 4, 1), [1], [0, 2, 1], [0, 2, 1]),
((1, 3, 4, 5), (1, 3, 4, 1), [2], [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 3, 4, 5), (1, 3, 1, 1), [1, 2], [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 3, 4, 5), (1, 1, 1, 1), [0, 1, 2, 3], [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 3, 4, 5, 6), (1, 3, 1, 5, 6), [1], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
((1, 3, 4, 5, 6), (1, 3, 1, 1, 1), [1, 2, 3], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
((1, 3, 4, 5, 6), (1, 1, 1, 1, 1), [0, 1, 2, 3, 4], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
@check_opset_max_version(12, "ReduceSum from opset <= 12 has axes as an attribute")
def test_transpose_reducesum(self, input_shape, output_shape, axes, perm_input, perm_output):
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node1 = helper.make_node("ReduceSum", ["Y"], ["Z"], axes=axes,
keepdims=1, name="reducesum")
node2 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[node0, node1, node2],
"transpose-reducesum-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((1, 3, 4, 5), (1, 3, 4), [2], [0, 2, 3, 1], [0, 2, 1]),
((1, 3, 4, 5), (1, 3), [1, 2], [0, 2, 3, 1], [0, 1]),
((1, 3, 4, 5), (), [0, 1, 2, 3], [0, 2, 3, 1], []),
((1, 3, 4, 5, 6), (1, 3, 5, 6), [1], [0, 2, 3, 4, 1], [0, 3, 1, 2]),
((1, 3, 4, 5, 6), (1, 3), [1, 2, 3], [0, 2, 3, 4, 1], [0, 1]),
((1, 3, 4, 5, 6), (), [0, 1, 2, 3, 4], [0, 2, 3, 4, 1], []),
])
def test_transpose_reducemax(self, input_shape, output_shape, axes, perm_input, perm_output):
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node1 = helper.make_node("ReduceMax", ["Y"], ["Z"], axes=axes,
keepdims=0, name="reducemax")
if perm_output:
node2 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans_2")
else:
node2 = helper.make_node("Identity", ["Z"], ["res"], name="trans_2")
graph = helper.make_graph(
[node0, node1, node2],
"transpose-reducemax-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
def test_transpose_argmax(self):
input_shape = [1, 2, 3, 4]
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=[0, 2, 3, 1], name="trans_1")
node1 = helper.make_node("ArgMax", ["Y"], ["Z"], axis=3, keepdims=0, name="argmax")
node2 = helper.make_node("Cast", ["Z"], ["res"], to=TensorProto.INT32, name="cast")
graph = helper.make_graph(
[node0, node1, node2],
"transpose-argmax-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("res", TensorProto.INT32, [1, 3, 4])],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
def test_transpose_tile(self):
input_shape = [1, 2, 3, 4]
repeats_value = [3, 6, 5, 11]
repeats_tensor = helper.make_tensor("A", TensorProto.INT64, [len(input_shape)], repeats_value)
repeats_const = helper.make_node("Constant", [], ["A"], value=repeats_tensor, name="repeats_const")
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=[0, 2, 3, 1], name="trans_1")
node1 = helper.make_node("Tile", ["Y", "A"], ["Z"], name="tile")
node2 = helper.make_node("Transpose", ["Z"], ["res"], perm=[0, 3, 1, 2], name="trans_2")
graph = helper.make_graph(
[repeats_const, node0, node1, node2],
"transpose-tile-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, [3, 22, 18, 20])],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((3, 4, 5), (3, 4, 1), [1], [0, 2, 1], [0, 2, 1]),
((1, 3, 4, 5), (1, 3, 4, 1), [2], [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 3, 4, 5), (1, 3, 1, 1), [1, 2], [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 3, 4, 5), (1, 1, 1, 1), [0, 1, 2, 3], [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 3, 4, 5, 6), (1, 3, 1, 5, 6), [1], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
((1, 3, 4, 5, 6), (1, 3, 1, 1, 1), [1, 2, 3], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
((1, 3, 4, 5, 6), (1, 1, 1, 1, 1), [0, 1, 2, 3, 4], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
@check_opset_min_version(13, "ReduceSum from opset >= 13 has axes as an input")
def test_transpose_reducesum_opset_13(self, input_shape, output_shape, axes, perm_input, perm_output):
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node1 = helper.make_node("ReduceSum", ["Y", "axes"], ["Z"], keepdims=1, name="reducesum")
node2 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans_2")
axes = np.array(axes, dtype=np.int64)
graph = helper.make_graph(
[node0, node1, node2],
"transpose-reducesum-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, output_shape)],
[helper.make_tensor("axes", TensorProto.INT64, axes.shape, axes)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4), (4, 2, 3), [2, 0, 1]),
((2, 3, 4, 5), (2, 4, 5, 3), [0, 2, 3, 1]),
((2, 3, 4, 5, 6), (2, 4, 5, 6, 3), [0, 2, 3, 4, 1]),
])
def test_trans_output_as_graph_outputs(self, input_shape, output_shape, perm):
"""
If transpose's output is graph's output, don't optimize it.
"""
trans = helper.make_node("Transpose", ["X"], ["Y"], name="trans", perm=perm)
graph_proto = helper.make_graph(
[trans],
"trans-to-graph-output",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, output_shape)],
)
graph = GraphUtil.create_graph_from_onnx_graph(graph_proto)
# remove identity to graph output
identity_op = graph.get_node_by_output(graph.outputs[0])
graph.outputs = [identity_op.input[0]]
graph.remove_node(identity_op.name)
optimized_graph = GraphUtil.optimize_graph(graph)
self.assertTrue(optimized_graph, msg="graph after optimizer should not be None")
trans_cnt = len(group_nodes_by_type(optimized_graph)["Transpose"])
self.assertTrue(trans_cnt == 1, msg="Expect 1 Transpose ops left, but actually " + str(trans_cnt) + " left")
@parameterized.expand([
((2, 3, 4, 1), (2, 3, 4, 1), [0, 3, 1, 2]),
((2, 1, 1, 4), (2, 1, 1, 4), [0, 3, 1, 2]),
((2, 3, 4, 1), (2, -1, -1, 1), [0, 3, 1, 2]),
((2, 3, 4, 2, 1), (2, 3, 4, 2, 1), [0, 4, 1, 2, 3]),
((2, 1, 1, 1, 4), (2, 1, 1, 1, 4), [0, 4, 1, 2, 3]),
((2, 3, 4, 2, 1), (2, -1, -1, -1, 1), [0, 4, 1, 2, 3]),
])
def test_trans_can_be_replaced_with_reshape1(self, input_shape_np, input_shape, perm):
# test trans-NHWC
result_shape = [input_shape[i] for i in perm]
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm, name="trans")
graph = helper.make_graph(
[node1],
"test_trans_can_be_replaced_with_reshape",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, result_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Y"], {"X": np.random.randn(*input_shape_np).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 1, 3, 4), (2, 1, 3, 4), [0, 2, 3, 1]),
((2, 4, 1, 1), (2, 4, 1, 1), [0, 2, 3, 1]),
((2, 1, 3, 4), (2, 1, -1, -1), [0, 2, 3, 1]),
((2, 1, 3, 4, 2), (2, 1, 3, 4, 2), [0, 2, 3, 4, 1]),
((2, 4, 1, 1, 1), (2, 4, 1, 1, 1), [0, 2, 3, 4, 1]),
((2, 1, 3, 4, 2), (2, 1, -1, -1, -1), [0, 2, 3, 4, 1]),
])
def test_trans_can_be_replaced_with_reshape2(self, input_shape_np, input_shape, perm):
# test trans-NCHW
result_shape = [input_shape[i] for i in perm]
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm, name="trans")
graph = helper.make_graph(
[node1],
"test_trans_can_be_replaced_with_reshape",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, result_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Y"], {"X": np.random.randn(*input_shape_np).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((1, 6, 8), [2, 0, 1], [1, 2, 0]),
((1, 6, 8, 9), [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 6, 8, 9, 2), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_two_transposes_switch_with_mul(self, shape, perm_input, perm_output):
const_node = self._make_onnx_const(np.array(np.random.random(6), dtype=np.float32), "const_10")
node0 = helper.make_node("Transpose", ["u1"], ["v1"], perm=perm_input, name="trans_0")
node1 = helper.make_node("Transpose", ["u2"], ["v2"], perm=perm_input, name="trans_1")
node2 = helper.make_node("Mul", ["v1", "v2"], ["x"], name="mul_1")
node3 = helper.make_node("Mul", ["x", const_node.output[0]], ["y"], name="mul_2")
node4 = helper.make_node("Transpose", ["y"], ["res"], perm=perm_output, name="trans_3")
graph = helper.make_graph(
[const_node, node0, node1, node2, node3, node4],
"test-transpose-mul",
[helper.make_tensor_value_info("u1", TensorProto.FLOAT, shape),
helper.make_tensor_value_info("u2", TensorProto.FLOAT, shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"u1": np.random.randn(*shape).astype(np.float32),
"u2": np.random.randn(*shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((1, 6, 8), (8, 1, 6), [2, 0, 1], [1, 2, 0]),
((1, 6, 8, 9), (1, 8, 9, 6), [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 6, 8, 9, 2), (1, 8, 9, 2, 6), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_many_transposes_and_constant_switch_with_sum(self, input_shape1, input_shape2, perm_input, perm_output):
constnode = self._make_onnx_const(np.array(np.random.random(input_shape2), dtype=np.float32), "v4")
node0 = helper.make_node("Transpose", ["u1"], ["v1"], perm=perm_input, name="trans_0")
node1 = helper.make_node("Transpose", ["u2"], ["v2"], perm=perm_input, name="trans_1")
node11 = helper.make_node("Transpose", ["u3"], ["v3"], perm=perm_input, name="trans_2")
node2 = helper.make_node("Sum", ["v1", "v2", "v3", "v4"], ["x"], name="sum_1")
node3 = helper.make_node("Sum", ["x", "v1"], ["y"], name="sum_2")
node4 = helper.make_node("Transpose", ["y"], ["res"], perm=perm_output, name="trans_4")
output_shape = input_shape1
graph = helper.make_graph(
[constnode, node0, node1, node11, node2, node3, node4],
"test-transpose-mul",
[helper.make_tensor_value_info("u1", TensorProto.FLOAT, input_shape1),
helper.make_tensor_value_info("u2", TensorProto.FLOAT, input_shape1),
helper.make_tensor_value_info("u3", TensorProto.FLOAT, input_shape1)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"u1": np.random.randn(*input_shape1).astype(np.float32),
"u2": np.random.randn(*input_shape1).astype(np.float32),
"u3": np.random.randn(*input_shape1).astype(np.float32)},
model_proto, remaining_transpose_num=0)
# Transpose Optimizer Tests End
# Identity Optimizer Tests Start
def run_identity_compare(self, output_names_with_port, onnx_feed_dict, origin_proto,
remaining_identity_num=None, debug=False, rtol=1e-07):
self.run_and_compare(output_names_with_port, onnx_feed_dict, origin_proto, op_type="Identity",
remaining_op_num=remaining_identity_num, debug=debug, rtol=rtol)
def test_identity_non_graph_output(self):
node1 = helper.make_node("Add", ["X", "X"], ["Y"], name="add")
node2 = helper.make_node("Identity", ["Y"], ["Z"], name="identity")
node3 = helper.make_node("Shape", ["Z"], ["Z1"], name="shape")
graph = helper.make_graph(
[node1, node2, node3],
"identity-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4, 5))],
[helper.make_tensor_value_info("Z1", TensorProto.INT64, [4])],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_identity_compare(["Z1"], {"X": np.random.randn(2, 3, 4, 5).astype(np.float32)},
model_proto, remaining_identity_num=0)
def test_identity_unremovable_identity(self):
# should not remove!!
node1 = helper.make_node("Identity", ["X"], ["Y"], name="identity")
graph = helper.make_graph(
[node1],
"identity-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4, 5))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 3, 4, 5))],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_identity_compare(["Y"], {"X": np.random.randn(2, 3, 4, 5).astype(np.float32)},
model_proto, remaining_identity_num=1)
def test_identity_output_as_multiple_graph_outputs(self):
# handle case like this, both Identity nodes are graph outputs,
# Add
# / \
# Identity Identity
# We at most can remove one Identity for this case.
node1 = helper.make_node("Add", ["X", "X"], ["Y"], name="identity")
node2 = helper.make_node("Identity", ["Y"], ["Z1"], name="identity2")
node3 = helper.make_node("Identity", ["Y"], ["Z2"], name="identity3")
graph = helper.make_graph(
[node1, node2, node3],
"identity-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4, 5))],
[helper.make_tensor_value_info("Z1", TensorProto.FLOAT, (2, 3, 4, 5)),
helper.make_tensor_value_info("Z2", TensorProto.FLOAT, (2, 3, 4, 5))],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_identity_compare(["Z1", "Z2"], {"X": np.random.randn(2, 3, 4, 5).astype(np.float32)},
model_proto, remaining_identity_num=1)
def test_identity_in_subgraph_non_graph_output(self):
node1 = helper.make_node("Add", ["X", "X"], ["Y"], name="add")
iter_num_value = np.array(1, dtype=np.int64)
node2 = helper.make_node(
'Constant',
inputs=[],
outputs=['iterate_num_value'],
value=helper.make_tensor(
name='iterate_num_value',
data_type=TensorProto.INT64,
dims=iter_num_value.shape,
vals=iter_num_value.flatten().astype(np.int64).tolist(),
),
)
cond_value = np.array(True, dtype=bool)
node3 = helper.make_node(
'Constant',
inputs=[],
outputs=['cond_value'],
value=helper.make_tensor(
name='cond_value',
data_type=TensorProto.BOOL,
dims=cond_value.shape,
vals=cond_value.flatten().astype(bool).tolist(),
),
)
# sub graph
sub_node1 = helper.make_node("Add", ["loop_var_1", "loop_var_1"], ["SubY"], name="sub_add")
sub_node2 = helper.make_node("Identity", ["SubY"], ["SubIdentity1"], name="sub_identity_1")
sub_node3 = helper.make_node("Identity", ["SubIdentity1"], ["loop_var_out_1"], name="sub_identity_2")
sub_node4 = helper.make_node("Identity", ["loop_condition"], ["loop_cond_output"], name="sub_identity_3")
sub_graph = helper.make_graph(
[sub_node1, sub_node2, sub_node3, sub_node4],
"identity_subgraph-test",
[helper.make_tensor_value_info("loop_iter_num", TensorProto.INT64, (1,)), # iteration_num
helper.make_tensor_value_info("loop_condition", TensorProto.BOOL, ()), # condition
helper.make_tensor_value_info("loop_var_1", TensorProto.FLOAT, ()), # loop-carried dependency
],
[helper.make_tensor_value_info("loop_cond_output", TensorProto.BOOL, ()),
helper.make_tensor_value_info("loop_var_out_1", TensorProto.FLOAT, ())
],
)
# sub graph ends
loop_node = helper.make_node("Loop", ["iterate_num_value", "cond_value", "Y"], ["loop_var_1_output"],
name="loop", body=sub_graph)
node4 = helper.make_node("Identity", ["loop_var_1_output"], ["Z"], name="identity")
node5 = helper.make_node("Shape", ["Z"], ["Z1"], name="shape")
graph = helper.make_graph(
[node1, node2, node3, loop_node, node4, node5],
"identity-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4, 5))],
[helper.make_tensor_value_info("Z1", TensorProto.INT64, [4])],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_identity_compare(["Z1"], {"X": np.random.randn(2, 3, 4, 5).astype(np.float32)},
model_proto, remaining_identity_num=0)
# Identity Optimizer Tests End
# Merge Duplicated Nodes Optimizer Tests Start
def run_merge_duplicated_nodes_compare(self, output_names_with_port, onnx_feed_dict, origin_proto,
op_type=None, remaining_op_num=None, debug=False, rtol=1e-07,
graph_validator=None):
new_proto = self.run_and_compare(output_names_with_port, onnx_feed_dict, origin_proto, op_type=op_type,
remaining_op_num=remaining_op_num, debug=debug, rtol=rtol)
if graph_validator:
self.assertTrue(graph_validator(new_proto.graph))
def test_duplicated_duplicated_input(self):
# same input or not
node0 = helper.make_node('Add', inputs=["X", "X"], outputs=["value0"])
node1 = helper.make_node('Add', inputs=["X", "X"], outputs=["value1"])
node2 = helper.make_node('Add', inputs=["value1", "X"], outputs=["value2"])
node3 = helper.make_node("Mul", ["value0", "value2"], ["value3"])
node4 = helper.make_node("Mul", ["value1", "value3"], ["OUT"])
graph = helper.make_graph(
[node0, node1, node2, node3, node4],
"test_duplicated_duplicated_input",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5, 5))],
[helper.make_tensor_value_info("OUT", TensorProto.FLOAT, (5, 5))],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_merge_duplicated_nodes_compare(["OUT"], {"X": np.random.randn(5, 5).astype(np.float32)}, model_proto,
op_type="Add", remaining_op_num=2)
def test_duplicated_duplicated_attributes(self):
# same attr or not
node0 = helper.make_node('ReduceMin', inputs=["X"], outputs=["value0"], axes=[0], keepdims=0)
node1 = helper.make_node('ReduceMin', inputs=["X"], outputs=["value1"], axes=[0], keepdims=0)
node2 = helper.make_node('ReduceMin', inputs=["X"], outputs=["value2"], axes=[1], keepdims=0)
node3 = helper.make_node('Add', inputs=["value0", "value1"], outputs=["value3"])
node4 = helper.make_node("Mul", ["value2", "value3"], ["OUT"])
graph = helper.make_graph(
[node0, node1, node2, node3, node4],
"test_duplicated_duplicated_attributes",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5, 5))],
[helper.make_tensor_value_info("OUT", TensorProto.FLOAT, (5,))],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_merge_duplicated_nodes_compare(["OUT"], {"X": np.random.randn(5, 5).astype(np.float32)}, model_proto,
op_type="ReduceMin", remaining_op_num=2)
def _check_initializer_num(self, graph_proto, num):
return num == len(graph_proto.initializer)
def test_duplicated_duplicated_constant(self):
const_val = np.array([1, 2, 3], dtype=np.float32)
tensor_1 = helper.make_tensor("tensor_1", TensorProto.FLOAT, const_val.shape, const_val)
tensor_2 = helper.make_tensor("tensor_2", TensorProto.FLOAT, const_val.shape, const_val)
tensor_3 = helper.make_tensor("tensor_3", TensorProto.FLOAT, const_val.shape, const_val)
tensor_4 = helper.make_tensor("tensor_4", TensorProto.FLOAT, const_val.shape, const_val)
node0 = helper.make_node('Constant', inputs=[], outputs=["value0"], value=tensor_1)
node1 = helper.make_node('Constant', inputs=[], outputs=["value1"], value=tensor_2)
node2 = helper.make_node('Constant', inputs=[], outputs=["value2"], value=tensor_3)
node3 = helper.make_node('Constant', inputs=[], outputs=["value3"], value=tensor_4)
node4 = helper.make_node("Mul", ["value0", "value1"], ["output1"])
node5 = helper.make_node("Mul", ["value2", "output1"], ["output2"])
node6 = helper.make_node("Mul", ["value3", "output2"], ["OUT"])
graph = helper.make_graph(
[node0, node1, node2, node3, node4, node5, node6],
"test_duplicated_duplicated_constant",
[],
[helper.make_tensor_value_info("OUT", TensorProto.FLOAT, (3,))],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_merge_duplicated_nodes_compare(["OUT"], {}, model_proto, op_type="Constant", remaining_op_num=0,
graph_validator=lambda g: self._check_initializer_num(g, 1))
def test_duplicated_duplicated_constant_and_initializer(self):
const_val = np.array([1, 2, 3], dtype=np.float32)
import soxbindings
import soundfile as sf
import numpy as np
import tempfile
import pytest
import subprocess
import logging

logger = logging.getLogger(__name__)
INPUT_FILES = [
'tests/data/input.wav',
]
def sox(args):
if args[0].lower() != "sox":
args.insert(0, "sox")
else:
args[0] = "sox"
try:
process_handle = subprocess.Popen(
args, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
out, err = process_handle.communicate()
#out = out.decode("utf-8")
err = err.decode("utf-8")
status = process_handle.returncode
return status, out, err
except OSError as error_msg:
logger.error("OSError: SoX failed! %s", error_msg)
except TypeError as error_msg:
logger.error("TypeError: %s", error_msg)
return 1, None, None
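# Hedged usage sketch (not part of the original test file; the paths are placeholders):
#
#   status, out, err = sox(["sox", "tests/data/input.wav", "out.wav", "reverb"])
#   assert status == 0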
@pytest.mark.parametrize("input_file", INPUT_FILES)
def test_read(input_file):
sox_data, sox_rate = soxbindings.read(input_file)
sf_data, sf_rate = sf.read(input_file, always_2d=True)
assert np.allclose(sox_data, sf_data)
assert sox_rate == sf_rate
@pytest.mark.parametrize("input_file", INPUT_FILES)
def test_write(input_file):
sox_data, sox_rate = soxbindings.read(input_file)
with tempfile.NamedTemporaryFile(suffix='.wav', delete=True) as tmp:
soxbindings.write(tmp.name, sox_data, sox_rate)
sf_data, sf_rate = sf.read(tmp.name, always_2d=True)
sox_data_2, _ = soxbindings.read(tmp.name)
assert np.allclose(sox_data, sf_data)
assert np.allclose(sox_data, sox_data_2)
with open('tests/commands.txt', 'r') as f:
COMMANDS = f.readlines()
COMMANDS = [c.rstrip() for c in COMMANDS]
@pytest.mark.parametrize("command", COMMANDS)
def test_against_sox(command):
status, out, err = sox(command.split())
thresh = 1e-1 if 'reverb' in command or 'silence' in command or 'mcompand' in command else 1e-4
if 'output.wav' in command:
try:
cmd_sox_data, sr = soxbindings.read('tests/data/output.wav')
py_sox_data, sr = soxbindings.sox(command)
min_length = min(cmd_sox_data.shape[0], py_sox_data.shape[0])
cmd_sox_data = cmd_sox_data[:min_length]
py_sox_data = py_sox_data[:min_length]
assert np.max((cmd_sox_data - py_sox_data) ** 2) < thresh
import tensorflow as tf
import numpy as np
import gym
import trfl
import copy
from types import FunctionType
from itertools import chain
from stable_baselines.common import tf_util, SimpleRLModel, SetVerbosity
from stable_baselines.common.schedules import LinearSchedule
from stable_baselines.common.replay_buffer import ReplayBuffer, EpisodicBuffer, her_final, her_future, her_future_landmark,\
HerFutureAchievedPastActual, HerFutureAchievedPastActualLandmark
from stable_baselines.common.policies import observation_input
from stable_baselines.common.landmark_generator import AbstractLandmarkGenerator
class SimpleDQN(SimpleRLModel):
"""
Simplified version of DQN model class. DQN paper: https://arxiv.org/pdf/1312.5602.pdf
:param policy: (BasePolicy or str) The policy model to use (MlpPolicy, CnnPolicy, LnMlpPolicy, ...)
:param env: (Gym environment or str) The environment to learn from (if registered in Gym, can be str)
:param gamma: (float) discount factor
:param learning_rate: (float) learning rate for adam optimizer
:param exploration_fraction: (float) fraction of entire training period over which the exploration rate is annealed
:param exploration_final_eps: (float) final value of random action probability
:param param_noise: (bool) Whether or not to apply noise to the parameters of the policy.
:param buffer_size: (int) size of the replay buffer
:param train_freq: (int) update the model every `train_freq` steps
:param batch_size: (int) size of a batched sampled from replay buffer for training
:param learning_starts: (int) how many steps of the model to collect transitions for before learning starts
:param target_network_update_frac: (float) fraction by which to update the target network every time.
:param target_network_update_freq: (int) update the target network every `target_network_update_freq` steps.
:param hindsight_mode: (str) e.g., "final", "none", "future_4"
:param double_q: (bool) whether to use double q learning
:param grad_norm_clipping: (float) amount of gradient norm clipping
:param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug
:param tensorboard_log: (str) the log location for tensorboard (if None, no logging)
:param _init_setup_model: (bool) Whether or not to build the network at the creation of the instance
"""
def __init__(self,
policy,
env,
gamma=0.99,
learning_rate=5e-4,
*,
exploration_fraction=0.1,
exploration_final_eps=0.02,
param_noise=False,
buffer_size=50000,
train_freq=1,
batch_size=32,
learning_starts=1000,
target_network_update_frac=1.,
target_network_update_freq=500,
hindsight_mode=None,
hindsight_frac=0.,
landmark_training=False,
landmark_training_per_batch=1,
landmark_generator=None,
landmark_mode='unidirectional',
landmark_error='linear',
landmark_width=1,
double_q=True,
grad_norm_clipping=10.,
verbose=0,
tensorboard_log=None,
eval_env=None,
eval_every=10,
_init_setup_model=True):
super(SimpleDQN, self).__init__(policy=policy, env=env, verbose=verbose, requires_vec_env=True)
self.learning_rate = learning_rate
self.gamma = gamma
self.exploration_final_eps = exploration_final_eps
self.exploration_fraction = exploration_fraction
self.param_noise = param_noise
if param_noise:
raise NotImplementedError('param_noise to be added later')
self.learning_starts = learning_starts
self.train_freq = train_freq
self.batch_size = batch_size
self.buffer_size = buffer_size
self.target_network_update_frac = target_network_update_frac
self.target_network_update_freq = target_network_update_freq
self.hindsight_mode = hindsight_mode
self.hindsight_frac = hindsight_frac
self.landmark_training = landmark_training
self.landmark_training_per_batch = landmark_training_per_batch
self.landmark_generator = landmark_generator
self.landmark_mode = landmark_mode
self.landmark_error = landmark_error
self.landmark_width = landmark_width
self.double_q = double_q
self.grad_norm_clipping = grad_norm_clipping
self.tensorboard_log = tensorboard_log
self.eval_env = eval_env
self.eval_every = eval_every
# Below props are set in self._setup_new_task()
self.reset = None
self.hindsight_subbuffer = None
self.hindsight_fn = None
self.global_step = 0
self.task_step = 0
self.replay_buffer = None
self.replay_buffer_hindsight = None
self.state_buffer = None
self.exploration = None
# Several additional props to be set in self._setup_model()
# The reason for _init_setup_model = False is to set the action/env space from a saved model, without
# loading an environment (e.g., to do transfer learning)
if _init_setup_model:
self._setup_model()
def _setup_model(self):
with SetVerbosity(self.verbose):
assert isinstance(self.action_space, gym.spaces.Discrete), \
"Error: SimpleDQN only supports gym.spaces.Discrete action space."
self.graph = tf.Graph()
with self.graph.as_default():
self.sess = tf_util.make_session(graph=self.graph)
ob_space = self.observation_space
with tf.variable_scope("deepq"):
# policy function
policy = self.policy(
self.sess,
self.observation_space,
self.action_space,
n_env=self.n_envs,
n_steps=1,
n_batch=None,
is_DQN=True,
goal_space=self.goal_space)
# exploration placeholders & online action with exploration noise
epsilon_ph = tf.placeholder_with_default(0., shape=(), name="epsilon_ph")
threshold_ph = tf.placeholder_with_default(0., shape=(), name="param_noise_threshold_ph")
reset_ph = tf.placeholder(tf.float32, shape=[None, 1], name="reset_ph")
# Set these to None for now
goal_phs = goal_ph = goal_or_goalstate_ph = landmark_state_ph = landmark_goal_ph = None
if not self.param_noise:
act = epsilon_greedy_wrapper(policy, epsilon_ph)
else:
act = param_noise_wrapper(policy, reset_ph=reset_ph, threshold_ph=threshold_ph)
# create target q network for training
with tf.variable_scope("target_q_func", reuse=False):
target_policy = self.policy(
self.sess,
self.observation_space,
self.action_space,
self.n_envs,
1,
None,
reuse=False,
is_DQN=True,
goal_space=self.goal_space)
# setup double q network; because of the outer_scope_getter, this reuses policy variables
with tf.variable_scope("double_q", reuse=True, custom_getter=tf_util.outer_scope_getter("double_q")):
double_policy = self.policy(
self.sess,
self.observation_space,
self.action_space,
self.n_envs,
1,
None,
reuse=True,
obs_phs=(target_policy.obs_ph, target_policy.processed_x),
is_DQN=True,
goal_space=self.goal_space,
goal_phs=(target_policy.goal_ph, target_policy.processed_g))
# landmark placeholder and processing
if self.landmark_training:
landmarks_q_s_lg = []
landmarks_q_l_g = []
# HC: Sketchy...borrowing processing code from Policies
# and assuming goal processing is the same as observation processing
landmark_goal_ph, landmark_goal = observation_input(self.goal_space, batch_size=None, scale=policy.scale,
name='landmark_goal')
landmark_state_ph, landmark_state = observation_input(ob_space, batch_size=None, scale=policy.scale,
name='landmark_state')
joined_landmark_state_and_goal = tf.concat([landmark_state, landmark_goal], axis=1)
for k in range(self.landmark_training_per_batch):
if k > 1:
shuffled_landmark_state_and_goal = tf.random_shuffle(joined_landmark_state_and_goal)
else:
shuffled_landmark_state_and_goal = joined_landmark_state_and_goal
landmark_state, landmark_goal = tf.split(shuffled_landmark_state_and_goal,
(ob_space.shape[0], self.goal_space.shape[0]), 1)
# Q(s, a, lg)
with tf.variable_scope("q_landmark_s_lg", reuse=True, custom_getter=tf_util.outer_scope_getter("q_landmark_s_lg")):
landmark_q_s_lg = self.policy(
self.sess,
self.observation_space,
self.action_space,
self.n_envs,
1,
None,
reuse=True,
obs_phs=(policy.obs_ph, policy.processed_x),
is_DQN=True,
goal_space=self.goal_space,
goal_phs=(landmark_goal_ph, landmark_goal))
landmarks_q_s_lg.append(landmark_q_s_lg)
# Q(l, a*, g)
with tf.variable_scope("q_landmark_l_g", reuse=True, custom_getter=tf_util.outer_scope_getter("q_landmark_l_g")):
landmark_q_l_g = self.policy(
self.sess,
self.observation_space,
self.action_space,
self.n_envs,
1,
None,
reuse=True,
obs_phs=(landmark_state_ph, landmark_state),
is_DQN=True,
goal_space=self.goal_space,
goal_phs=(policy.goal_ph, policy.processed_g))
landmarks_q_l_g.append(landmark_q_l_g)
with tf.variable_scope("loss"):
# note: naming conventions from trfl (see https://github.com/deepmind/trfl/blob/master/docs/index.md)
# placeholders for bellman equation
a_tm1 = tf.placeholder(tf.int32, [None], name="action")
r_t = tf.placeholder(tf.float32, [None], name="reward")
done_mask_ph = tf.placeholder(tf.float32, [None], name="done")
# gamma
pcont_t = tf.constant([self.gamma])
pcont_t = tf.tile(pcont_t, tf.shape(r_t))
pcont_t *= (1 - done_mask_ph)
# target q values based on 1-step bellman
if self.double_q:
l2_loss, loss_info = trfl.double_qlearning(policy.q_values, a_tm1, r_t, pcont_t, target_policy.q_values,
double_policy.q_values)
else:
l2_loss, loss_info = trfl.qlearning(policy.q_values, a_tm1, r_t, pcont_t, target_policy.q_values)
tf_util.NOT_USED(l2_loss) # because using huber loss (next line)
mean_huber_loss = tf.reduce_mean(tf_util.huber_loss(loss_info.td_error))
landmark_scores, landmark_ratios = None, None
if self.landmark_training:
landmark_losses = []
for k in range(self.landmark_training_per_batch):
# Q(s_t, a_t, l_g)
qa_tm1_s_lg = trfl.indexing_ops.batched_index(landmarks_q_s_lg[k].q_values, a_tm1)
# max_a Q(l, a, g)
q_l_g = tf.reduce_max(landmarks_q_l_g[k].q_values, axis=1)
# Q(s_t, a_t, g)
qa_tm1_g = trfl.indexing_ops.batched_index(policy.q_values, a_tm1)
if self.landmark_mode == 'unidirectional':
landmark_lower_bound = tf.stop_gradient(qa_tm1_s_lg * q_l_g * (self.gamma ** self.landmark_width))
elif self.landmark_mode == 'bidirectional':
landmark_lower_bound = qa_tm1_s_lg * q_l_g * (self.gamma ** self.landmark_width)
else:
raise ValueError('landmark_mode must be one of "unidirectional" or "bidirectional"')
if self.landmark_error == 'linear':
landmark_losses.append(tf.maximum(0., landmark_lower_bound - qa_tm1_g))
elif self.landmark_error == 'squared':
landmark_losses.append(tf.square(tf.maximum(0., landmark_lower_bound - qa_tm1_g)))
else:
raise ValueError('Unsupported landmark_error!')
if k == 0:
landmark_scores = tf.expand_dims(tf.clip_by_value(landmark_lower_bound / qa_tm1_g, 0., 1.05), 1)
landmark_ratios = tf.log(
tf.clip_by_value(landmarks_q_s_lg[k].value_fn / landmarks_q_l_g[k].value_fn, 1e-1, 1e1))
tf.summary.histogram('landmark_scores', landmark_scores)
tf.summary.histogram('landmark_ratios', landmark_ratios)
landmark_losses = tf.concat(landmark_losses, 0)
tf.summary.histogram('landmark_losses', landmark_losses)
mean_huber_loss += self.landmark_training * tf.reduce_mean(landmark_losses)
tf.summary.scalar("td_error", tf.reduce_mean(loss_info.td_error))
tf.summary.histogram("td_error", loss_info.td_error)
tf.summary.scalar("loss", mean_huber_loss)
if self.landmark_training:
tf.summary.scalar("landmark_loss", tf.reduce_mean(landmark_losses))
# compute optimization op (potentially with gradient clipping)
optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
gradients = optimizer.compute_gradients(mean_huber_loss, var_list=policy.trainable_vars)
if self.grad_norm_clipping is not None:
for i, (grad, var) in enumerate(gradients):
if grad is not None:
gradients[i] = (tf.clip_by_norm(grad, self.grad_norm_clipping), var)
training_step = optimizer.apply_gradients(gradients)
with tf.name_scope('update_target_network_ops'):
init_target_network = []
update_target_network = []
for var, var_target in zip(
sorted(policy.trainable_vars, key=lambda v: v.name),
sorted(target_policy.trainable_vars, key=lambda v: v.name)):
new_target = self.target_network_update_frac * var +\
(1 - self.target_network_update_frac) * var_target
update_target_network.append(var_target.assign(new_target))
init_target_network.append(var_target.assign(var))
update_target_network = tf.group(*update_target_network)
init_target_network = tf.group(*init_target_network)
with tf.variable_scope("input_info", reuse=False):
tf.summary.scalar('rewards', tf.reduce_mean(r_t))
tf.summary.histogram('rewards', r_t)
if len(policy.obs_ph.shape) == 3:
tf.summary.image('observation', policy.obs_ph)
else:
tf.summary.histogram('observation', policy.obs_ph)
with tf_util.COMMENT("attribute assignments:"):
self._act = act
self._train_step = training_step
self._obs1_ph = policy.obs_ph
self._action_ph = a_tm1
self._reward_ph = r_t
self._obs2_ph = target_policy.obs_ph
self._dones_ph = done_mask_ph
self._goal_ph = policy.goal_ph
self._goal2_ph = target_policy.goal_ph
self._landmark_state_ph = landmark_state_ph
self._landmark_goal_ph = landmark_goal_ph
self._landmark_scores = landmark_scores
self._landmark_ratios = landmark_ratios
self.update_target_network = update_target_network
self.init_target_network = init_target_network
self.model = policy
self.target_model = target_policy
self.epsilon_ph = epsilon_ph
self.reset_ph = reset_ph
self.threshold_ph = threshold_ph
with tf.variable_scope("deepq"):
self.params = tf.trainable_variables()
# Log the state, action, goal, and landmark state
# with tf.variable_scope("input_info_viz", reuse=False):
tf.summary.image('input state', tf.cast(self._obs1_ph, tf.float32), max_outputs=1)
tf.summary.image('input goal', tf.cast(self._goal_ph, tf.float32), max_outputs=1)
action_shape = tf.reshape(self._action_ph, [-1, self.action_space.n, 1, 1])
tf.summary.image('input action', tf.cast(action_shape, tf.float32), max_outputs=1)
if self.landmark_training:
tf.summary.image('generated landmark', tf.cast(self._landmark_goal_ph, tf.float32), max_outputs=1)
with tf_util.COMMENT("attribute assignments:"):
self._summary_op = tf.summary.merge_all()
# Initialize the parameters and copy them to the target network.
with tf_util.COMMENT("graph initialization"):
tf_util.initialize(self.sess)
self.sess.run(self.init_target_network)
self.summary = tf.summary.merge_all()
def _setup_new_task(self, total_timesteps):
"""Sets up new task by reinitializing step, replay buffer, and exploration schedule"""
self.task_step = 0
self.reset = np.ones([self.n_envs, 1])
items = [("observations0", self.observation_space.shape), ("actions", self.action_space.shape), ("rewards", (1, )),
("observations1", self.observation_space.shape), ("terminals1", (1, ))]
if self.goal_space is not None:
items += [("desired_goal", self.env.observation_space.spaces['desired_goal'].shape)]
self.replay_buffer = ReplayBuffer(self.buffer_size, items)
if self.hindsight_mode == 'final':
self.hindsight_fn = lambda trajectory: her_final(trajectory, self.env.compute_reward)
elif isinstance(self.hindsight_mode, str) and 'future_' in self.hindsight_mode:
_, k = self.hindsight_mode.split('_')
if self.landmark_generator is not None:
# Assume that landmark generator needs to have a separate landmark buffer containing [s,a,l,g] tuples
self.hindsight_fn = lambda trajectory: her_future_landmark(trajectory, int(k), self.env.compute_reward)
else:
self.hindsight_fn = lambda trajectory: her_future(trajectory, int(k), self.env.compute_reward)
self.hindsight_frac = 1. - 1. / (1. + float(k))
elif isinstance(self.hindsight_mode,
str) and 'futureactual_' in self.hindsight_mode:
_, k, p = self.hindsight_mode.split('_')
if self.landmark_generator is not None:
self.hindsight_fn = HerFutureAchievedPastActualLandmark(int(k), int(p), self.env.compute_reward)
else:
self.hindsight_fn = HerFutureAchievedPastActual(int(k), int(p), self.env.compute_reward)
self.hindsight_frac = 1. - 1. / (1. + float(k) + float(p))
else:
self.hindsight_fn = None
# Add additional fields for the hindsight replay buffer, if using landmark
# When using landmark, current observation becomes the landmark when goal_space
# is the same as the observation space, for now
hindsight_items = copy.deepcopy(items)
# Create a secondary replay buffer
if self.hindsight_fn is not None:
self.replay_buffer_hindsight = ReplayBuffer(self.buffer_size, hindsight_items)
self.hindsight_subbuffer = EpisodicBuffer(self.n_envs, self.hindsight_fn, n_cpus=min(self.n_envs, 8))
if self.goal_space is not None and self.landmark_training:
if isinstance(self.landmark_generator, FunctionType):
self.landmark_generator = self.landmark_generator(self.buffer_size, self.env)
elif self.landmark_generator is None:
self.state_buffer = ReplayBuffer(self.buffer_size, [
("state", self.env.observation_space.spaces['observation'].shape),
("as_goal", self.env.observation_space.spaces['achieved_goal'].shape)])
else:
assert isinstance(self.landmark_generator, AbstractLandmarkGenerator)
# Create the schedule for exploration starting from 1.
self.exploration = LinearSchedule(
schedule_timesteps=int(self.exploration_fraction * total_timesteps),
initial_p=1.0,
final_p=self.exploration_final_eps)
def _get_action_for_single_obs(self, obs):
"""Called during training loop to get online action (with exploration)"""
if self.goal_space is not None:
feed_dict = {
self.model.obs_ph: np.array(obs["observation"]),
self.model.goal_ph: np.array(obs["desired_goal"]),
self.epsilon_ph: self.exploration.value(self.task_step),
self.reset_ph: self.reset
}
else:
feed_dict = {
self.model.obs_ph: np.array(obs),
self.epsilon_ph: self.exploration.value(self.task_step),
self.reset_ph: self.reset
}
return self.sess.run(self._act, feed_dict=feed_dict)
def _process_experience(self, obs, action, rew, new_obs, done):
"""Called during training loop after action is taken; includes learning;
returns a summary"""
summaries = []
expanded_done = np.expand_dims(done.astype(np.float32), 1)
rew = np.expand_dims(rew, 1)
goal_agent = self.goal_space is not None
# Reset the episode if done
self.reset = expanded_done
# Store transition in the replay buffer, and hindsight subbuffer
if goal_agent:
self.replay_buffer.add_batch(obs['observation'], action, rew, new_obs['observation'], expanded_done, new_obs['desired_goal'])
if self.landmark_training:
if self.landmark_generator is not None:
self.landmark_generator.add_state_data(obs['observation'], obs['achieved_goal'])
else:
self.state_buffer.add_batch(obs['observation'], obs['achieved_goal'])
else:
self.replay_buffer.add_batch(obs, action, rew, new_obs, expanded_done)
if self.hindsight_fn is not None:
for idx in range(self.n_envs):
# add the transition to the HER subbuffer for that worker
self.hindsight_subbuffer.add_to_subbuffer(
idx, [obs['observation'][idx], action[idx], rew[idx], new_obs['observation'][idx], new_obs['achieved_goal'][idx], new_obs['desired_goal'][idx]])
if done[idx]:
# commit the subbuffer
self.hindsight_subbuffer.commit_subbuffer(idx)
if len(self.hindsight_subbuffer) == self.n_envs:
if self.landmark_generator is not None:
# Using landmarks will return also the landmark transitions
hindsight_experiences, landmark_experiences = zip(*self.hindsight_subbuffer.process_trajectories())
else:
hindsight_experiences = self.hindsight_subbuffer.process_trajectories()
# for hindsight_experience in chain.from_iterable(self.hindsight_subbuffer.process_trajectories()):
# self.replay_buffer_hindsight.add(*hindsight_experience)
for hindsight_experience in chain.from_iterable(hindsight_experiences):
self.replay_buffer_hindsight.add(*hindsight_experience)
if self.landmark_generator is not None:
s, a, l, g = [np.array(a) for a in zip(*chain.from_iterable(landmark_experiences))]
additional = None
if hasattr(self.landmark_generator,
'get_scores_with_experiences') and self.landmark_generator.get_scores_with_experiences:
feed_dict = {
self._obs1_ph: s,
self._action_ph: a,
self._goal_ph: g,
self._landmark_state_ph: l,
self._landmark_goal_ph: self.landmark_generator.goal_extraction_function(l),
}
landmark_scores, landmark_ratios = self.sess.run(
[self._landmark_scores, self._landmark_ratios],
feed_dict=feed_dict)
additional = np.concatenate([landmark_scores, landmark_ratios], 1)
landmark_summaries = self.landmark_generator.add_landmark_experience_data(s, a, l, g, additional)
if landmark_summaries:
summaries.append(landmark_summaries)
self.hindsight_subbuffer.clear_main_buffer()
self.global_step += self.n_envs
for _ in range(self.n_envs):
self.task_step += 1
# If have enough data, train on it.
if self.task_step > self.learning_starts:
if self.task_step % self.train_freq == 0:
if goal_agent:
if self.replay_buffer_hindsight is not None and len(
self.replay_buffer_hindsight) and self.hindsight_frac > 0.:
hindsight_batch_size = round(self.batch_size * self.hindsight_frac)
real_batch_size = self.batch_size - hindsight_batch_size
# Sample from real batch
obses_t, actions, rewards, obses_tp1, dones, desired_g = \
self.replay_buffer.sample(real_batch_size)
# Sample from hindsight batch
obses_t_hs, actions_hs, rewards_hs, obses_tp1_hs, dones_hs, desired_g_hs = \
self.replay_buffer_hindsight.sample(hindsight_batch_size)
# Concatenate the two
obses_t = np.concatenate([obses_t, obses_t_hs], 0)
actions = np.concatenate([actions, actions_hs], 0)
rewards = np.concatenate([rewards, rewards_hs], 0)
obses_tp1 = np.concatenate([obses_tp1, obses_tp1_hs], 0)
dones = np.concatenate([dones, dones_hs], 0)
# PRISM CONVERSION FROM ASCII GRIDS -- TASMIN / TASMAX
# header info
# ncols 2015
# nrows 1320
# xllcorner -2301787.7731349
# yllcorner 108069.7858797
# cellsize 2000
# NODATA_value -9999
import rasterio, glob, os
from rasterio import Affine
import numpy as np
from pathos import multiprocessing as mp
# input_path = '/Data/Base_Data/Climate/AK_CAN_2km/historical/singleBand/pr'
# #'/Data/Base_Data/Climate/AK_CAN_2km/historical/singleBand/prism/AK_2KM_PRISM/Temperature/2km/older'
# output_path = '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/prism_v2'
# groups = ['min_temp', 'max_temp']
# # # STEP 1 -- CONVERT TO GTIFF FROM ASC AND TXT
# list the data we want
variables = [ 'tmin', 'tmax' ]
input_path_ak = '/Data/Base_Data/Climate/AK_CAN_2km/historical/singleBand/prism/AK_2KM_PRISM/Temperature/2km/older'
input_path_can = '/Data/Base_Data/Climate/AK_CAN_2km/historical/singleBand/prism/AK_CAN_2km_PRISM/CAN_originals/older'
for variable in variables:
for ak_test, input_path in zip( [True,False], [input_path_ak,input_path_can] ):
output_path = os.path.join( '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/prism_v2', variable,'raw_converted' )
if not os.path.exists( output_path ):
os.makedirs( output_path )
if ak_test:
input_path = input_path_ak
if variable == 'tmin':
v = 'min_temp'
elif variable == 'tmax':
v = 'max_temp'
else:
raise NotImplementedError( 'only tmax / tmin currently supported' )
files = glob.glob( os.path.join( input_path, v, '*'+variable+'*.txt' ) )
else:
input_path = input_path_can
files = glob.glob( os.path.join( input_path, '*'+variable+'*.asc' ) )
ext = files[0].split('.')[1]
output_filenames = [ os.path.join( output_path, os.path.basename( fn ).replace( '.'+ext, '.tif' ) ) for fn in files ]
crs = {'init':'epsg:4326'}
args = [ (i,j,crs) for i,j in zip(files, output_filenames) ]
def bounds_to_extent( bounds ):
'''
take input rasterio bounds object and return an extent
'''
l,b,r,t = bounds
return [ (l,b), (r,b), (r,t), (l,t), (l,b) ]
def convert_to_gtiff( fn, output_filename, crs={'init':'epsg:3338'} ):
'''
convert the ascii rasters from PRISM to gtiff
'''
print( fn )
rst = rasterio.open( fn )
arr = rst.read( 1 ) # get the first and only band
meta = rst.meta
meta.update( compress='lzw', driver='GTiff', crs=crs )
# drop the transform to overcome rasterio warnings
if 'transform' in meta.keys():
meta.pop( 'transform' )
# write them out
with rasterio.open( output_filename, 'w', **meta ) as out:
out.write( arr, 1 )
return output_filename
if __name__ == '__main__':
pool = mp.Pool( 32 )
pool.map( lambda x: convert_to_gtiff( *x ), args )
pool.close()
pool.join()
# # # STEP 2 -- MERGE IT WITH GDAL TOOLS
# list the data
caw = sorted( glob.glob( os.path.join( '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/prism_v2',variable,'raw_converted', 'caw*.tif' ) ) )
ak = sorted( glob.glob( os.path.join( '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/prism_v2',variable,'raw_converted', 'ak_*.tif' ) ) )
grouped = zip( ak, caw )
# merge these files:
# log = open( '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/prism_v2/batch_run.bat', 'w' )
for ak,ca in grouped:
out = ak.replace( 'ak_', 'akcan_')
ca_out = ca.replace( '.tif', '_3338.tif' )
os.system( 'gdalwarp -overwrite -r near -t_srs EPSG:3338 -s_srs EPSG:4326 -ot Float32 ' + ca + ' ' + ca_out )
ca_scale = ca_out.replace( '.tif', '_scaled.tif' )
os.system( 'gdal_calc.py --overwrite -A ' + ca_out + ' --outfile=' + ca_scale + ' --calc="A*(0.1)" --NoDataValue=-9999 --type=Float32' )
os.system( 'gdal_merge.py -init -9999 -n -9999 -a_nodata -9999 -ot Float32 -o ' + out + ' ' + ak + ' ' + ca_scale )
final = ca.replace( '.tif', '_merged.tif' ).replace( 'raw_converted', 'merged' ).replace( 'caw_', 'akcan_' )
if not os.path.exists( os.path.dirname(final) ):
os.makedirs(os.path.dirname(final))
os.system( 'gdal_translate -co "COMPRESS=LZW" ' + out + ' ' + final )
# # DUE TO SOME WEIRDNESS WITH VIRTUALENV AND GDAL_MERGE.PY I am writing this out to a text file and running it when not in virtualenv
# out = ak.replace( 'ak_', 'akcan_')
# ca_out = ca.replace( '.tif', '_3338.tif' )
# log.write( 'gdalwarp -overwrite -r near -t_srs EPSG:3338 -s_srs EPSG:4326 -ot Float32 ' + ca + ' ' + ca_out + '\n' )
# ca_scale = ca_out.replace( '.tif', '_scaled.tif' )
# log.write( 'gdal_calc.py --overwrite -A ' + ca_out + ' --outfile=' + ca_scale + ' --calc="A*(0.1)" --NoDataValue=-9999 --type=Float32' + '\n' )
# log.write( 'gdal_merge.py -init -9999 -n -9999 -a_nodata -9999 -ot Float32 -o ' + out + ' ' + ak + ' ' + ca_scale + '\n' )
# final = ca.replace( '.tif', '_merged.tif' )
# log.write( 'gdal_translate -co "COMPRESS=LZW" ' + out + ' ' + final + '\n' )
# # # STEP 3 -- INTERPOLATE / REGRID / MASK to match existing SNAP resources
def coordinates( fn=None, meta=None, numpy_array=None, input_crs=None, to_latlong=False ):
'''
take a raster file as input and return the centroid coords for each
of the grid cells as a pair of numpy 2d arrays (longitude, latitude)
'''
import rasterio
import numpy as np
from affine import Affine
from pyproj import Proj, transform
if fn:
# Read raster
with rasterio.open( fn ) as r:
T0 = r.affine # upper-left pixel corner affine transform
p1 = Proj( r.crs )
A = r.read( 1 ) # pixel values
elif (meta is not None) & (numpy_array is not None):
A = numpy_array
if input_crs != None:
p1 = Proj( input_crs )
T0 = meta[ 'affine' ]
else:
p1 = None
T0 = meta[ 'affine' ]
else:
raise BaseException( 'check inputs' )
# All rows and columns
cols, rows = np.meshgrid( np.arange(A.shape[1]), np.arange(A.shape[0]) )
from unittest import TestCase
from sklearn_evaluation.metrics import (precision_at, labels_at,
tp_at, fp_at)
import numpy as np
from numpy import nan
class Test_precision_at(TestCase):
def test_perfect_precision(self):
labels = np.array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
scores = np.array([100, 90, 80, 70, 60, 50, 40, 30, 20, 10])
prec, cutoff = precision_at(labels, scores, top_proportion=0.10)
self.assertEqual(prec, 1.0)
self.assertEqual(cutoff, 100)
def test_perfect_precision_with_nas(self):
labels = np.array([1, nan, 1, 1, 1, nan, 0, 0, 0, 0])
scores = np.array([100, 90, 80, 70, 60, 50, 40, 30, 20, 10])
prec, cutoff = precision_at(
labels, scores, top_proportion=0.10, ignore_nas=True)
self.assertEqual(prec, 1.0)
self.assertEqual(cutoff, 100)
def test_baseline_precision(self):
labels = np.array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
scores = np.array([100, 90, 80, 70, 60, 50, 40, 30, 20, 10])
prec, cutoff = precision_at(labels, scores, top_proportion=1.0)
self.assertEqual(prec, 0.5)
self.assertEqual(cutoff, 10)
def test_baseline_precision_with_nas(self):
labels = np.array([nan, 1, nan, 1, 1, nan, nan, 0, 0, 0])
scores = np.array([100, 90, 80, 70, 60, 50, 40, 30, 20, 10])
prec, cutoff = precision_at(
labels, scores, top_proportion=1.0, ignore_nas=True)
self.assertEqual(prec, 0.5)
self.assertEqual(cutoff, 10)
def test_proportion_less_than_zero(self):
self.assertRaises(ValueError, precision_at, [1], [0], -0.1)
def test_proportion_more_than_one(self):
self.assertRaises(ValueError, precision_at, [1], [0], top_proportion=1.1)
class Test_labels_at(TestCase):
def test_no_labels_at_1(self):
y_true = np.array([nan, nan, nan, nan, nan, nan, nan, nan, nan, nan])
y_score = np.random.rand(1, 10)
labels = labels_at(y_true, y_score, top_proportion=0.01, normalize=False)
self.assertEqual(labels, 0)
def test_no_labels_at_50(self):
y_true = np.array([nan, nan, nan, nan, nan, nan, nan, nan, nan, nan])
y_score = np.random.rand(1, 10)
labels = labels_at(y_true, y_score, top_proportion=0.5, normalize=False)
self.assertEqual(labels, 0)
def test_no_labels_at_100(self):
y_true = np.array([nan, nan, nan, nan, nan, nan, nan, nan, nan, nan])
y_score = np.random.rand(1, 10)
labels = labels_at(y_true, y_score, top_proportion=1.0, normalize=False)
self.assertEqual(labels, 0)
def test_one_label_at_10(self):
y_true = np.array([1, nan, nan, nan, nan, nan, nan, nan, nan, nan])
y_score = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])
labels = labels_at(y_true, y_score, top_proportion=0.1, normalize=False)
self.assertEqual(labels, 1)
def test_one_label_at_10_norm(self):
y_true = np.array([1, nan, nan, nan, nan, nan, nan, nan, nan, nan])
y_score = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])
labels = labels_at(y_true, y_score, top_proportion=0.1, normalize=True)
self.assertEqual(labels, 1.0)
def test_one_label_at_50(self):
y_true = np.array([1, nan, nan, nan, nan, nan, nan, nan, nan, nan])
y_score = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])
labels = labels_at(y_true, y_score, top_proportion=0.5, normalize=False)
self.assertEqual(labels, 1)
def test_one_label_at_100(self):
y_true = np.array([1, nan, nan, nan, nan, nan, nan, nan, nan, nan])
y_score = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])
labels = labels_at(y_true, y_score, top_proportion=1.0, normalize=False)
self.assertEqual(labels, 1)
def test_60_labels_at_60(self):
y_true = np.array([1, 1, 1, 1, 1, 1, nan, nan, nan, nan])
y_score = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])
labels = labels_at(y_true, y_score, top_proportion=0.6, normalize=False)
self.assertEqual(labels, 6)
def test_60_labels_at_60_norm(self):
y_true = np.array([1, 1, 1, 1, 1, 1, nan, nan, nan, nan])
y_score = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])
labels = labels_at(y_true, y_score, top_proportion=0.6, normalize=True)
self.assertEqual(labels, 1.0)
def test_60_labels_at_60_mixed_values(self):
y_true = np.array([1, 0, 0, 1, 0, 1, nan, nan, nan, nan])
y_score = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])
labels = labels_at(y_true, y_score, top_proportion=0.6, normalize=False)
self.assertEqual(labels, 6)
def test_60_labels_at_60_norm_mixed_values(self):
y_true = np.array([0, 0, 0, 1, 0, 1, nan, nan, nan, nan])
y_score = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])
labels = labels_at(y_true, y_score, top_proportion=0.6, normalize=True)
self.assertEqual(labels, 1.0)
def test_60_labels_at_30(self):
y_true = np.array([1, 1, 1, 1, 1, 1, nan, nan, nan, nan])
y_score = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])
labels = labels_at(y_true, y_score, top_proportion=0.3, normalize=False)
self.assertEqual(labels, 3)
def test_60_labels_at_30_norm(self):
y_true = np.array([1, 1, 1, 1, 1, 1, nan, nan, nan, nan])
y_score = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])
labels = labels_at(y_true, y_score, top_proportion=0.3, normalize=True)
self.assertEqual(labels, 0.5)
def test_proportion_less_than_zero(self):
self.assertRaises(ValueError, labels_at, [1], [0], -0.1)
def test_proportion_more_than_one(self):
self.assertRaises(ValueError, labels_at, [1], [0], top_proportion=1.1)
class Test_tp_at(TestCase):
def test_with_nas(self):
y_true = np.array([1, nan, 1, 1, 1, 1, 1, 1, 1, nan])
y_score = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])
tps = tp_at(y_true, y_score, top_proportion=0.1)
self.assertEqual(tps, 1)
def test_all_tp_at_10(self):
y_true = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
y_score = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])
tps = tp_at(y_true, y_score, top_proportion=0.1)
self.assertEqual(tps, 1)
def test_all_tp_at_50(self):
y_true = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
y_score = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])
tps = tp_at(y_true, y_score, top_proportion=0.5)
self.assertEqual(tps, 5)
#******************************************Rolling Funcs ******************************************
import numpy as np
def exp_2_avg(x):
x = x[::-1]
weights = np.array([2**(len(x) - 1 - y) /(2**(len(x))-1) for y in range(len(x))])
average = np.dot(np.array(x),weights)
return average
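# Worked example (added comment, not in the original): for x = [1, 2, 3] the series is
# reversed to [3, 2, 1] and the weights are [4/7, 2/7, 1/7], so the most recent value
# gets the largest weight:
#   exp_2_avg([1, 2, 3])  # (3*4 + 2*2 + 1*1) / 7 = 17/7 ≈ 2.43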
def exp_e_avg(x):
x = x[::-1]
weights = np.array([np.e**(len(x) - 1 - y) /(np.e**(len(x))-1) for y in range(len(x))])
average = np.dot(np.array(x),weights)
return average
def exp_x_avg(x,constant=10):
x = x[::-1]
weights = np.array([constant**(len(x) - 1 - y) /(constant**(len(x))-1) for y in range(len(x))])
average = np.dot(np.array(x),weights)
return average
def pond_avg(x):
x = x[::-1]
weights = np.array([((len(x)-y))/(int(((len(x))*((len(x)+1))))/2) for y in range(len(x))])
average = np.dot(np.array(x),weights)
return average
def relu(x):
return max(0,x)
def relu_neg(x):
return min(0,x)
def RSI(x, agg=exp_e_avg):
"vender 70-100, comprar 0-30, 30-70 neutral. x es el precio Adj close"
x = | np.array(x) | numpy.array |
# -*- coding: utf-8 -*-
import sys
import warnings
from collections import namedtuple
import cv2
import noteshrink
import numpy as np
import PIL
from imutils import object_detection
from itertools import combinations
from simplification.cutil import simplify_coords, simplify_coords_vw
from skimage import exposure
from skimage import feature
from skimage import morphology
from skimage import filters
from skimage.util import invert as invert_image
from sklearn import preprocessing
from .utils import (
convert,
get_color_histogram,
get_inner_paths,
get_palette,
get_quantize_method,
get_shortest_paths,
get_shortest_paths_astar,
image_as_array,
kmeans,
match_template_mask,
output_as_mask,
sample_histogram,
)
@image_as_array
def adjust_contrast(image, contrast):
if (contrast < 0):
contrast = 0
elif (contrast > 200):
contrast = 200
contrast = contrast / 100
img = image.astype(np.float64) * contrast
img[img > 255] = 255
img[img < 0] = 0
return img.astype(np.ubyte)
@image_as_array
def adjust_brightness(image, brightness):
if (brightness < 0):
brightness = 0
elif (brightness > 200):
brightness = 200
brightness = (((brightness) * (510)) / 200) - 255
img = image.astype(np.float64) + brightness
img[img > 255] = 255
img[img < 0] = 0
return img.astype(np.ubyte)
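# Note on the mapping above (added comment): the 0-200 brightness slider is rescaled to
# an additive offset in [-255, 255]; e.g. adjust_brightness(img, 150) adds
# (150*510)/200 - 255 = 127.5 to every channel before clipping to [0, 255].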
@image_as_array
def smooth_image(image, kernel):
if (kernel < 0):
kernel = 0
elif (kernel > 100):
kernel = 100
return cv2.bilateralFilter(image, kernel, kernel, kernel)
@image_as_array
def histogram_equalization(image, tile):
if (tile < 0):
tile = 0
elif (tile > 100):
tile = 100
tile = int(tile / 10)
img_yuv = cv2.cvtColor(image, cv2.COLOR_BGR2YCrCb)
clahe = cv2.createCLAHE(clipLimit=1.0, tileGridSize=(2 ** tile, 2 ** tile))
img_yuv[:, :, 0] = clahe.apply(img_yuv[:, :, 0])
img_out = cv2.cvtColor(img_yuv, cv2.COLOR_YCrCb2BGR)
img = exposure.rescale_intensity(img_out)
return img
@image_as_array
def denoise_image(image, value):
if (value < 0):
value = 0
elif (value > 100):
value = 100
return cv2.fastNlMeansDenoisingColored(image, None, value, value)
@image_as_array
def color_reduction(image, n_colors, method='kmeans', palette=None):
"""Reduce the number of colors in image to n_colors using method"""
method = method.lower()
if method not in ('kmeans', 'linear', 'max', 'median', 'octree'):
method = 'kmeans'
if n_colors < 2:
n_colors = 2
elif n_colors > 128:
n_colors = 128
if method == 'kmeans':
n_clusters = n_colors
h, w = image.shape[:2]
img = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
img = img.reshape((-1, 3)) # -1 -> img.shape[0] * img.shape[1]
centers, labels = kmeans(img, n_clusters)
if palette is not None:
# palette comes in RGB
centers = cv2.cvtColor(np.array([palette]), cv2.COLOR_RGB2LAB)[0]
quant = centers[labels].reshape((h, w, 3))
output = cv2.cvtColor(quant, cv2.COLOR_LAB2BGR)
else:
img = PIL.Image.fromarray(image[:, :, ::-1], mode='RGB')
quant = img.quantize(colors=n_colors,
method=get_quantize_method(method))
if palette is not None:
palette = np.array(palette, dtype=np.uint8)
quant.putpalette(palette.flatten())
output = np.array(quant.convert('RGB'), dtype=np.uint8)[:, :, ::-1]
return output
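# Hedged usage sketch (not part of the original module; "photo.jpg" is a placeholder path):
#
#   img = cv2.imread("photo.jpg")
#   reduced = color_reduction(img, n_colors=8, method='median')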
@image_as_array
def auto_clean(image, background_value=25, background_saturation=20,
colors=8, sample_fraction=5, white_background=False,
saturate=True, palette=None):
"""Clean image with minimal input required. Based on the work by
Matt Zucker: https://mzucker.github.io/2016/09/20/noteshrink.html"""
if background_value < 1:
background_value = 1
elif background_value > 100:
background_value = 100
if background_saturation < 1:
background_saturation = 1
elif background_saturation > 100:
background_saturation = 100
if sample_fraction < 1:
sample_fraction = 1
elif sample_fraction > 100:
sample_fraction = 100
if colors < 2:
colors = 2
elif colors > 128:
colors = 128
Options = namedtuple(
'options',
['quiet', 'sample_fraction', 'value_threshold', 'sat_threshold']
)
options = Options(
quiet=True,
sample_fraction=sample_fraction / 100.0,
value_threshold=background_value / 100.0,
sat_threshold=background_saturation / 100.0,
)
rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
if palette is None:
samples = noteshrink.sample_pixels(rgb_image, options)
palette = get_palette(samples, colors, background_value,
background_saturation)
labels = noteshrink.apply_palette(rgb_image, palette, options)
if saturate:
palette = palette.astype(np.float32)
pmin = palette.min()
pmax = palette.max()
palette = 255 * (palette - pmin) / ((pmax - pmin) or 1)
palette = palette.astype(np.uint8)
if white_background:
palette = palette.copy()
palette[0] = (255, 255, 255)
    return palette[labels][:, :, ::-1]  # RGB -> BGR (swap R and B channels)
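def _example_auto_clean():
    """Illustrative usage sketch: clean a scanned page down to 6 colors on a
    forced white background. 'scan.png' is a hypothetical input file."""
    img = cv2.imread('scan.png')
    cleaned = auto_clean(img, colors=6, white_background=True, saturate=True)
    cv2.imwrite('scan_clean.png', cleaned)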
@image_as_array
def match_templates(image, templates, overlap=0.15):
"""Look for templates in image and return the matches.
Each entry in the templates list is a dictionary with keys 'image',
'threshold', 'flip', 'mask' and its matching
'method' (None, 'laplacian', 'canny')."""
default_threshold = 80
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
rectangles = np.empty([0, 2, 2], dtype=int)
for template in templates:
threshold = template.get('threshold', default_threshold)
if threshold > 100:
threshold = 100
elif threshold < 0:
threshold = 0
threshold /= 100.0
template_image = template.get('image')
template_flip = template.get('flip')
template_mask = template.get('mask')
template_method = template.get('method', 'canny') # defaults to canny
gray_template = cv2.cvtColor(template_image, cv2.COLOR_BGR2GRAY)
transformations = [lambda im: im]
if template_flip:
if template_flip[0] in ('h', 'a'):
transformations.append(lambda im: cv2.flip(im, 1))
if template_flip[0] in ('v', 'a'):
transformations.append(lambda im: cv2.flip(im, 0))
if template_flip[0] in ('b', 'a'):
transformations.append(lambda im: cv2.flip(cv2.flip(im, 1), 0))
for transformation in transformations:
transformed_template = transformation(gray_template)
height, width = transformed_template.shape
if template_mask is not None:
transformed_mask = transformation(template_mask)
else:
transformed_mask = None
results = match_template_mask(gray_image, transformed_template,
transformed_mask, template_method)
index = results >= threshold
y1, x1 = np.where(index)
y2, x2 = y1 + height, x1 + width
coords = np.array([x1, y1, x2, y2], dtype=int).T
probs = results[index]
boxes = np.array(
object_detection.non_max_suppression(coords, probs, overlap)
)
xyboxes = boxes.reshape(boxes.shape[0], 2, 2) # list of x,y points
            rectangles = np.vstack([rectangles, xyboxes])
    return rectangles
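def _example_match_templates():
    """Illustrative sketch of the expected `templates` structure: each entry is
    a dict with an 'image' (BGR array) plus optional 'threshold' (0-100),
    'flip' ('h'/'v'/'b'/'a'), 'mask' and 'method'. File names are hypothetical."""
    page = cv2.imread('page.png')
    stamp = cv2.imread('stamp.png')
    templates = [{'image': stamp, 'threshold': 85, 'flip': 'h',
                  'mask': None, 'method': 'canny'}]
    matches = match_templates(page, templates, overlap=0.15)
    print(matches)  # (N, 2, 2) array of [[x1, y1], [x2, y2]] rectangles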
from PIL import Image
import numpy as np
import pandas as pd
from PIL import Image
import h5py
import argparse
def rotate_by_channel(data, sita, length=2):
newdata = []
chanel_num = data.shape[3]
height = data.shape[1]
if length > 1:
for index, singal in enumerate(data):
new_sam = np.array([])
for i in range(chanel_num):
channel = singal[:,:,i]
img = Image.fromarray(channel)
new_img = img.rotate(sita[index])
new_channel = np.asarray(new_img)
if i==0:
new_sam = new_channel
else:
new_sam = np.concatenate((new_sam, new_channel), axis = 1)
new_sam = new_sam.reshape((height,height,chanel_num),order='F')
newdata.append(new_sam)
else:
print("Error! data length = 1...")
return np.array(newdata)
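def _example_rotate_by_channel():
    """Illustrative sketch: rotate each 4-channel sample of a toy batch by its
    own angle (in degrees), following the (N, H, H, C) convention used above."""
    toy = np.random.rand(3, 64, 64, 4).astype('float32')
    angles = [0, 90, 180]
    rotated = rotate_by_channel(toy, angles, length=len(toy))
    print(rotated.shape)  # (3, 64, 64, 4)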
def load_data(data_path):
#data_path = "../data/TCIR-ALL_2017.h5"
print("begin read data from "+ data_path +" ...")
#load "info" as pandas dataframe
data_info = pd.read_hdf(data_path, key="info", mode='r')
#load "matrix" as numpy ndarray, this could take longer time
with h5py.File(data_path, 'r') as hf:
data_matrix = hf['matrix'][:]
#print(data_matrix.shape)
return data_matrix, data_info
def pre_processing(data_path):
#data_path1 = "../data/TCIR-ALL_2017.h5"
data_x, data_info = load_data(data_path)
data_info = data_info.values
data_y = data_info[:,5] # Vmax
#return X, Y, The data type of both are np.ndarray.
# X:(None, 201, 201, 4) = (None, 64, 64, 4) [IR WV VIS PMW]
# Y:(None, 1)
#data_x = data_x[:, 68:133, 68:133, :] # for the 65 * 65
data_x = np.nan_to_num(data_x)
data_x[data_x>1000] = 0
return data_x, data_y
def pre_processing2(first_time, second_time, data_path):
data_x, data_info = load_data(data_path)
data_info = data_info.values
# data_set ID lon lat time Vmax R35_4qAVG MSLP
data_time = data_info[:,4] # time
data_time = data_time.astype('int')
data_y = data_info[:,5] # Vmax
#return X, Y, The data type of both are np.ndarray.
# X:(None, 201, 201, 4) = (None, 64, 64, 4)
# Y:(None, 1)
new_data_x = []
new_data_y = []
    data_x = data_x[:, 50:151, 50:151, [0,3]] # crop to 101 x 101 and keep the IR and PMW channels
data_x = np.nan_to_num(data_x)
data_x[data_x>1000] = 0
for i in range(len(data_time)):
if (data_time[i] >= first_time) & (data_time[i]< second_time):
new_data_x.append(data_x[i,:,:,:])
new_data_y.append(data_y[i])
return np.array(new_data_x), np.array(new_data_y)
def normalize_data(x_test, chanel_num):
result=[]
height = x_test.shape[1]
for each_sam in x_test:
new_sam = [];
for i in range(chanel_num):
chanel = each_sam[:,:,i]
chanel = (chanel - np.mean(chanel)) / (np.std(chanel)+0.01)
if i==0:
new_sam = chanel
else:
new_sam = np.concatenate((new_sam, chanel), axis =1)
new_sam = new_sam.reshape((height,height,chanel_num),order='F')
result.append(new_sam)
result = np.array(result)
return result
def rotated_evaltion(test_data, test_Y, model, BATCH_SIZE):
#model = load_model('./saved_models/keras_5_3best_400trained_model.h5')
predict_Y = model.predict(test_data, batch_size=BATCH_SIZE, verbose=0)
predict_Y = predict_Y.reshape(-1)
length = len(test_Y)
print("no rotated mae:" + str(np.mean(np.abs(predict_Y[:length] - test_Y))))
rotated_num = int(len(predict_Y)/length)
result = np.zeros(length)
tmp_predict_Y = predict_Y
for i in range(rotated_num):
print(np.mean(np.abs(tmp_predict_Y[:length] - test_Y)))
result = result + (tmp_predict_Y[:length] - test_Y)
tmp_predict_Y = tmp_predict_Y[length:]
result = result/rotated_num
mae = np.mean(np.abs(result)) # MAE
print(str(rotated_num) + " rotated mae: " + str(mae))
return mae
def Separate(train_data_path_ATLN, train_x_save_path, train_y_save_path, test_x_save_path, test_y_save_path):
#train_data_path_CPAC = "../data/TCIR-CPAC_IO_SH.h5" # CPAC,IO,SH 14.6GB data
#train_data_path_ATLN = "../data/TCIR-ATLN_EPAC_WPAC.h5" # ATLN,EPAC,WPAC 30GB data
#x_train, y_train = pre_processing(train_data_path_CPAC)
x_train, y_train = pre_processing2(2000000000, 2015000000, train_data_path_ATLN)
print(x_train.shape)
print(y_train.shape)
np.save(train_x_save_path, x_train)
np.save(train_y_save_path, y_train)
#np.save("../rotated_data/ATLN_2003_2014_data_x_201.npy", x_train)
#np.save("../rotated_data/ATLN_2003_2014_data_y_201.npy", y_train)
x_test, y_test = pre_processing2(2015000000, 2017000000, train_data_path_ATLN)
print(x_test.shape)
print(y_test.shape)
    np.save(test_x_save_path, x_test)
    np.save(test_y_save_path, y_test)
import sys
import os
from glob import glob
import torch
import time
import numpy as np
class TestDeepNet():
'''
Perform forward for a net and loader
'''
def __init__(self, model, model_dir, domain, epoch_to_load=None, model_name="", device='cuda'):
sys.path.append(model_dir)
self.MODEL_NAME=model_name
print('model dir: %s' % (model_dir))
if not os.path.exists(model_dir):
print('NOT Found ERROR: model dir: %s' % (model_dir))
raise FileNotFoundError
'''
=====================================================================================
Network setup: load the parameters
=====================================================================================
'''
if epoch_to_load is None:
ckpt_filename = sorted(glob(os.path.join(model_dir, 'log', '.%s.model.*.ckpt' % (model_name))))[-1]
else:
ckpt_filename = os.path.join(model_dir, 'log', '.%s.model.%s.ckpt' % (model_name, epoch_to_load))
model_loader = TorchModelLoader(model, device).load_torch_model(ckpt_filename)
self.model = model_loader.net
self.model.eval()
self.device = model_loader.device
self.epoch = model_loader.epoch
def map_data_to_device(self, data, is_training):
'''
map dataloader data to torch device (cpu, gpu)
data: list or dict
'''
if type(data) is list:
data = [d.to(self.device) for d in data]
if type(data) is dict:
for key in data.keys():
if type(data[key]) is torch.Tensor:
data[key] = data[key].to(self.device)
if is_training:
data[key].requires_grad = True
if data[key].dtype is torch.float64:
data[key] = data[key].type(torch.float32)
else: # string, fname
data[key] = data[key]
return data
def forward(self, loader, callbacks):
if type(loader) == torch.utils.data.dataloader.DataLoader:
self.forward_dataset(loader, callbacks)
elif type(loader) == dict:
self.forward_images(loader, callbacks)
else:
print('[ERROR] Not implemented with this type of loader: %s' % (type(loader)))
raise NotImplementedError
def forward_dataset(self, loader, callbacks):
# In test phase, no gradients (for memory efficiency)
self.model.eval()
with torch.no_grad():
_avg_batch_time = []
_avg_callback_time = []
for i, samples in enumerate(loader):
_avg_batch_time_start = time.time()
target = self.map_data_to_device(samples['target'], is_training=False)
data = self.map_data_to_device(samples['data'], is_training=False)
network_output = self.model(data, epoch=self.epoch, max_epochs=500)
_avg_batch_time.append(time.time() - _avg_batch_time_start)
_avg_callback_time_start = time.time()
for callback in callbacks:
callback.batch(network_output, target, data)
_avg_callback_time.append(time.time() - _avg_callback_time_start)
if (i) % np.maximum(1, int(len(loader)/5)) == 0:
print('Inference [%03d/%03d], avg time: inference[%.1f]s, callbacks[%.1f]s' %
                          (i, len(loader), np.mean(_avg_batch_time), np.mean(_avg_callback_time)))
import numpy
import numpy as np
import scipy
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import lal
import lalsimulation
from lal.lal import PC_SI as LAL_PC_SI
import h5py
import warnings
import random
# Calculating the projection of complex vector v on complex vector u
def proj(u, v):
    # note: this algorithm assumes the denominator is not zero
return u * numpy.vdot(v,u) / numpy.vdot(u,u)
# Calculating the normalized residual (= a new basis) of a vector vec from known bases
def gram_schmidt(bases, vec):
for i in numpy.arange(0,len(bases)):
vec = vec - proj(bases[i], vec)
return vec/numpy.sqrt(numpy.vdot(vec,vec)) # normalized new basis
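def _example_gram_schmidt():
    """Illustrative sketch: orthonormalize a random complex vector against an
    existing unit vector and check orthogonality/normalization."""
    numpy.random.seed(0)
    v1 = numpy.random.randn(8) + 1j * numpy.random.randn(8)
    v2 = numpy.random.randn(8) + 1j * numpy.random.randn(8)
    e1 = v1 / numpy.sqrt(numpy.vdot(v1, v1))
    e2 = gram_schmidt(numpy.array([e1]), v2)
    print(numpy.abs(numpy.vdot(e1, e2)))  # ~0 (orthogonal)
    print(numpy.abs(numpy.vdot(e2, e2)))  # ~1 (normalized)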
# Calculating overlap of two waveforms
def overlap_of_two_waveforms(wf1, wf2):
wf1norm = wf1/numpy.sqrt(numpy.vdot(wf1,wf1)) # normalize the first waveform
wf2norm = wf2/numpy.sqrt(numpy.vdot(wf2,wf2)) # normalize the second waveform
diff = wf1norm - wf2norm
#overlap = 1 - 0.5*(numpy.vdot(diff,diff))
overlap = numpy.real(numpy.vdot(wf1norm, wf2norm))
return overlap
def spherical_to_cartesian(sph):
x = sph[0]*numpy.sin(sph[1])*numpy.cos(sph[2])
y = sph[0]*numpy.sin(sph[1])*numpy.sin(sph[2])
z = sph[0]*numpy.cos(sph[1])
car = [x,y,z]
return car
def get_m1m2_from_mcq(mc, q):
m2 = mc * q ** (-0.6) * (1+q)**0.2
m1 = m2 * q
return numpy.array([m1,m2])
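def _example_mcq_roundtrip():
    """Illustrative check of the conversion above: with q = m1/m2, the chirp
    mass Mc = (m1*m2)**(3/5) / (m1+m2)**(1/5) should be recovered exactly."""
    mc, q = 1.4, 1.5
    m1, m2 = get_m1m2_from_mcq(mc, q)
    mc_check = (m1 * m2) ** 0.6 / (m1 + m2) ** 0.2
    print(m1, m2, mc_check)  # mc_check ~= 1.4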
def generate_a_waveform(m1, m2, spin1, spin2, ecc, lambda1, lambda2, iota, phiRef, distance, deltaF, f_min, f_max, waveFlags, approximant):
test_mass1 = m1 * lal.lal.MSUN_SI
test_mass2 = m2 * lal.lal.MSUN_SI
lalsimulation.SimInspiralWaveformParamsInsertTidalLambda1(waveFlags, lambda1)
lalsimulation.SimInspiralWaveformParamsInsertTidalLambda2(waveFlags, lambda2)
[plus_test, cross_test]=lalsimulation.SimInspiralChooseFDWaveform(test_mass1, test_mass2, spin1[0], spin1[1], spin1[2], spin2[0], spin2[1], spin2[2], distance, iota, phiRef, 0, ecc, 0, deltaF, f_min, f_max, 0, waveFlags, approximant)
hp = plus_test.data.data
hp_test = hp[numpy.int(f_min/deltaF):numpy.int(f_max/deltaF)]
return hp_test
def generate_a_waveform_from_mcq(mc, q, spin1, spin2, ecc, lambda1, lambda2, iota, phiRef, distance, deltaF, f_min, f_max, waveFlags, approximant):
m1,m2 = get_m1m2_from_mcq(mc,q)
test_mass1 = m1 * lal.lal.MSUN_SI
test_mass2 = m2 * lal.lal.MSUN_SI
lalsimulation.SimInspiralWaveformParamsInsertTidalLambda1(waveFlags, lambda1)
lalsimulation.SimInspiralWaveformParamsInsertTidalLambda2(waveFlags, lambda2)
[plus_test, cross_test]=lalsimulation.SimInspiralChooseFDWaveform(test_mass1, test_mass2, spin1[0], spin1[1], spin1[2], spin2[0], spin2[1], spin2[2], distance, iota, phiRef, 0, ecc, 0, deltaF, f_min, f_max, 0, waveFlags, approximant)
hp = plus_test.data.data
hp_test = hp[numpy.int(f_min/deltaF):numpy.int(f_max/deltaF)]
return hp_test
def generate_params_points(npts, nparams, params_low, params_high):
paramspoints = numpy.random.uniform(params_low, params_high, size=(npts,nparams))
paramspoints = paramspoints.round(decimals=6)
return paramspoints
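def _example_generate_params_points():
    """Illustrative sketch: draw 5 random points in the 10-parameter space
    (Mc, q, s1 sphere, s2 sphere, iota, phiRef) using toy bounds."""
    params_low = [1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    params_high = [2.0, 2.0, 0.9, numpy.pi, 2 * numpy.pi,
                   0.9, numpy.pi, 2 * numpy.pi, numpy.pi, 2 * numpy.pi]
    pts = generate_params_points(5, 10, params_low, params_high)
    print(pts.shape)  # (5, 10)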
# now generating N=npts waveforms at points that are
# randomly uniformly distributed in parameter space
# and calculate their inner products with the 1st waveform
# so as to find the best waveform as the new basis
def least_match_waveform_unnormalized(paramspoints, known_bases, npts, distance, deltaF, f_min, f_max, waveFlags, approximant):
overlaps = numpy.zeros(npts)
modula = numpy.zeros(npts)
for i in numpy.arange(0,len(paramspoints)):
paramspoint = paramspoints[i]
m1, m2 = get_m1m2_from_mcq(paramspoint[0],paramspoint[1])
s1x, s1y, s1z = spherical_to_cartesian(paramspoint[2:5])
s2x, s2y, s2z = spherical_to_cartesian(paramspoint[5:8])
iota = paramspoint[8]
phiRef = paramspoint[9]
ecc = 0
if len(paramspoint)==11:
ecc = paramspoint[10]
if len(paramspoint)==12:
lambda1 = paramspoint[10]
lambda2 = paramspoint[11]
lalsimulation.SimInspiralWaveformParamsInsertTidalLambda1(waveFlags, lambda1)
lalsimulation.SimInspiralWaveformParamsInsertTidalLambda2(waveFlags, lambda2)
f_ref = 0
RA=0
DEC=0
psi=0
phi=0
m1 *= lal.lal.MSUN_SI
m2 *= lal.lal.MSUN_SI
[plus,cross]=lalsimulation.SimInspiralChooseFDWaveform(m1, m2, s1x, s1y, s1z, s2x, s2y, s2z, distance, iota, phiRef, 0, ecc, 0, deltaF, f_min, f_max, f_ref, waveFlags, approximant)
hp_tmp = plus.data.data[numpy.int(f_min/deltaF):numpy.int(f_max/deltaF)] # data_tmp is hplus and is a complex vector
residual = hp_tmp
for k in numpy.arange(0,len(known_bases)):
residual -= proj(known_bases[k],hp_tmp)
modula[i] = numpy.sqrt(numpy.vdot(residual, residual))
arg_newbasis = numpy.argmax(modula)
mass1, mass2 = get_m1m2_from_mcq(paramspoints[arg_newbasis][0],paramspoints[arg_newbasis][1])
mass1 *= lal.lal.MSUN_SI
mass2 *= lal.lal.MSUN_SI
sp1x, sp1y, sp1z = spherical_to_cartesian(paramspoints[arg_newbasis,2:5])
sp2x, sp2y, sp2z = spherical_to_cartesian(paramspoints[arg_newbasis,5:8])
inclination = paramspoints[arg_newbasis][8]
phi_ref = paramspoints[arg_newbasis][9]
ecc = 0
if len(paramspoint)==11:
ecc = paramspoints[arg_newbasis][10]
if len(paramspoint)==12:
lambda1 = paramspoints[arg_newbasis][10]
lambda2 = paramspoints[arg_newbasis][11]
lalsimulation.SimInspiralWaveformParamsInsertTidalLambda1(waveFlags, lambda1)
lalsimulation.SimInspiralWaveformParamsInsertTidalLambda2(waveFlags, lambda2)
[plus_new, cross_new]=lalsimulation.SimInspiralChooseFDWaveform(mass1, mass2, sp1x, sp1y, sp1z, sp2x, sp2y, sp2z, distance, inclination, phi_ref, 0, ecc, 0, deltaF, f_min, f_max, 0, waveFlags, approximant)
hp_new = plus_new.data.data
hp_new = hp_new[numpy.int(f_min/deltaF):numpy.int(f_max/deltaF)]
basis_new = gram_schmidt(known_bases, hp_new)
return numpy.array([basis_new, paramspoints[arg_newbasis], modula[arg_newbasis]]) # elements, masses&spins, residual mod
def least_match_quadratic_waveform_unnormalized(paramspoints, known_quad_bases, npts, distance, deltaF, f_min, f_max, waveFlags, approximant):
overlaps = numpy.zeros(npts)
modula = numpy.zeros(npts)
for i in numpy.arange(0,len(paramspoints)):
paramspoint = paramspoints[i]
m1, m2 = get_m1m2_from_mcq(paramspoint[0],paramspoint[1])
s1x, s1y, s1z = spherical_to_cartesian(paramspoint[2:5])
s2x, s2y, s2z = spherical_to_cartesian(paramspoint[5:8])
iota=paramspoint[8]
phiRef=paramspoint[9]
ecc = 0
if len(paramspoint)==11:
ecc = paramspoint[10]
if len(paramspoint)==12:
lambda1 = paramspoint[10]
lambda2 = paramspoint[11]
lalsimulation.SimInspiralWaveformParamsInsertTidalLambda1(waveFlags, lambda1)
lalsimulation.SimInspiralWaveformParamsInsertTidalLambda2(waveFlags, lambda2)
f_ref = 0
RA=0
DEC=0
psi=0
phi=0
m1 *= lal.lal.MSUN_SI
m2 *= lal.lal.MSUN_SI
[plus,cross]=lalsimulation.SimInspiralChooseFDWaveform(m1, m2, s1x, s1y, s1z, s2x, s2y, s2z, distance, iota, phiRef, 0, ecc, 0, deltaF, f_min, f_max, f_ref, waveFlags, approximant)
hp_tmp = plus.data.data[numpy.int(f_min/deltaF):numpy.int(f_max/deltaF)] # data_tmp is hplus and is a complex vector
hp_quad_tmp = (numpy.absolute(hp_tmp))**2
residual = hp_quad_tmp
for k in numpy.arange(0,len(known_quad_bases)):
residual -= proj(known_quad_bases[k],hp_quad_tmp)
modula[i] = numpy.sqrt(numpy.vdot(residual, residual))
arg_newbasis = numpy.argmax(modula)
mass1, mass2 = get_m1m2_from_mcq(paramspoints[arg_newbasis][0],paramspoints[arg_newbasis][1])
mass1 *= lal.lal.MSUN_SI
mass2 *= lal.lal.MSUN_SI
sp1x, sp1y, sp1z = spherical_to_cartesian(paramspoints[arg_newbasis,2:5])
sp2x, sp2y, sp2z = spherical_to_cartesian(paramspoints[arg_newbasis,5:8])
inclination = paramspoints[arg_newbasis][8]
phi_ref = paramspoints[arg_newbasis][9]
ecc = 0
if len(paramspoint)==11:
ecc = paramspoints[arg_newbasis][10]
if len(paramspoint)==12:
lambda1 = paramspoints[arg_newbasis][10]
lambda2 = paramspoints[arg_newbasis][11]
lalsimulation.SimInspiralWaveformParamsInsertTidalLambda1(waveFlags, lambda1)
lalsimulation.SimInspiralWaveformParamsInsertTidalLambda2(waveFlags, lambda2)
[plus_new, cross_new]=lalsimulation.SimInspiralChooseFDWaveform(mass1, mass2, sp1x, sp1y, sp1z, sp2x, sp2y, sp2z, distance, inclination, phi_ref, 0, ecc, 0, deltaF, f_min, f_max, 0, waveFlags, approximant)
hp_new = plus_new.data.data
hp_new = hp_new[numpy.int(f_min/deltaF):numpy.int(f_max/deltaF)]
hp_quad_new = (numpy.absolute(hp_new))**2
basis_quad_new = gram_schmidt(known_quad_bases, hp_quad_new)
return numpy.array([basis_quad_new, paramspoints[arg_newbasis], modula[arg_newbasis]]) # elements, masses&spins, residual mod
def bases_searching_results_unnormalized(npts, nparams, nbases, known_bases, basis_waveforms, params, residual_modula, params_low, params_high, distance, deltaF, f_min, f_max, waveFlags, approximant):
if nparams == 10: print("The parameters are Mc, q, s1(mag, theta, phi), s2(mag, theta, phi), iota, and phiRef\n")
if nparams == 11: print("The parameters are Mc, q, s1(mag, theta, phi), s2(mag, theta, phi), iota, phiRef, and eccentricity\n")
if nparams == 12: print("The parameters are Mc, q, s1(mag, theta, phi), s2(mag, theta, phi), iota, phiRef, lambda1, and lambda2\n")
for k in numpy.arange(0,nbases-1):
params_points = generate_params_points(npts, nparams, params_low, params_high)
basis_new, params_new, rm_new= least_match_waveform_unnormalized(params_points, known_bases, npts, distance, deltaF, f_min, f_max, waveFlags, approximant)
print("Linear Iter: ", k, params_new)
known_bases= numpy.append(known_bases, numpy.array([basis_new]), axis=0)
params = numpy.append(params, numpy.array([params_new]), axis = 0)
residual_modula = numpy.append(residual_modula, rm_new)
return known_bases, params, residual_modula
def bases_searching_quadratic_results_unnormalized(npts, nparams, nbases_quad, known_quad_bases, basis_waveforms, params_quad, residual_modula, params_low, params_high, distance, deltaF, f_min, f_max, waveFlags, approximant):
for k in numpy.arange(0,nbases_quad-1):
print("Quadratic Iter: ", k)
params_points = generate_params_points(npts, nparams, params_low, params_high)
basis_new, params_new, rm_new= least_match_quadratic_waveform_unnormalized(params_points, known_quad_bases, npts, distance, deltaF, f_min, f_max, waveFlags, approximant)
known_quad_bases= numpy.append(known_quad_bases, numpy.array([basis_new]), axis=0)
params_quad = numpy.append(params_quad, numpy.array([params_new]), axis = 0)
residual_modula = numpy.append(residual_modula, rm_new)
return known_quad_bases, params_quad, residual_modula
def massrange(mc_low, mc_high, q_low, q_high):
mmin = get_m1m2_from_mcq(mc_low,q_high)[1]
mmax = get_m1m2_from_mcq(mc_high,q_high)[0]
return [mmin, mmax]
def initial_basis(mc_low, mc_high, q_low, q_high, s1sphere_low, s1sphere_high, s2sphere_low, s2sphere_high, ecc_low, ecc_high, lambda1_low, lambda1_high, lambda2_low, lambda2_high, iota_low, iota_high, phiref_low, phiref_high, distance, deltaF, f_min, f_max, waveFlags, approximant):
try:
if approximant==lalsimulation.IMRPhenomPv2:
nparams = 10
params_low = [mc_low, q_low, s1sphere_low[0], s1sphere_low[1], s1sphere_low[2], s2sphere_low[0], s2sphere_low[1], s2sphere_low[2], iota_low, phiref_low]
params_high = [mc_high, q_high, s1sphere_high[0], s1sphere_high[1], s1sphere_high[2], s2sphere_high[0], s2sphere_high[1], s2sphere_high[2], iota_high, phiref_high]
params_start = numpy.array([[mc_low, q_low, s1sphere_low[0], s1sphere_low[1], s1sphere_low[2], s2sphere_low[0], s2sphere_low[1], s2sphere_low[2], 0.33333*np.pi, 1.5*np.pi]])
hp1 = generate_a_waveform_from_mcq(mc_low, q_low, spherical_to_cartesian(s1sphere_low), spherical_to_cartesian(s2sphere_low), 0, 0, 0, iota_low, phiref_low, distance, deltaF, f_min, f_max, waveFlags, approximant)
except AttributeError:
pass
try:
if approximant==lalsimulation.IMRPhenomPv3:
nparams = 10
params_low = [mc_low, q_low, s1sphere_low[0], s1sphere_low[1], s1sphere_low[2], s2sphere_low[0], s2sphere_low[1], s2sphere_low[2], iota_low, phiref_low]
params_high = [mc_high, q_high, s1sphere_high[0], s1sphere_high[1], s1sphere_high[2], s2sphere_high[0], s2sphere_high[1], s2sphere_high[2], iota_high, phiref_high]
params_start = numpy.array([[mc_low, q_low, s1sphere_low[0], s1sphere_low[1], s1sphere_low[2], s2sphere_low[0], s2sphere_low[1], s2sphere_low[2], 0.33333*np.pi, 1.5*np.pi]])
hp1 = generate_a_waveform_from_mcq(mc_low, q_low, spherical_to_cartesian(s1sphere_low), spherical_to_cartesian(s2sphere_low), 0, 0, 0, iota_low, phiref_low, distance, deltaF, f_min, f_max, waveFlags, approximant)
except AttributeError:
pass
try:
if approximant==lalsimulation.IMRPhenomPv3HM:
nparams = 10
params_low = [mc_low, q_low, s1sphere_low[0], s1sphere_low[1], s1sphere_low[2], s2sphere_low[0], s2sphere_low[1], s2sphere_low[2], iota_low, phiref_low]
params_high = [mc_high, q_high, s1sphere_high[0], s1sphere_high[1], s1sphere_high[2], s2sphere_high[0], s2sphere_high[1], s2sphere_high[2], iota_high, phiref_high]
params_start = numpy.array([[mc_low, q_low, s1sphere_low[0], s1sphere_low[1], s1sphere_low[2], s2sphere_low[0], s2sphere_low[1], s2sphere_low[2], 0.33333*np.pi, 1.5*np.pi]])
hp1 = generate_a_waveform_from_mcq(mc_low, q_low, spherical_to_cartesian(s1sphere_low), spherical_to_cartesian(s2sphere_low), 0, 0, 0, iota_low, phiref_low, distance, deltaF, f_min, f_max, waveFlags, approximant)
except AttributeError:
pass
try:
if approximant==lalsimulation.IMRPhenomXHM:
nparams = 10
params_low = [mc_low, q_low, s1sphere_low[0], s1sphere_low[1], s1sphere_low[2], s2sphere_low[0], s2sphere_low[1], s2sphere_low[2], iota_low, phiref_low]
params_high = [mc_high, q_high, s1sphere_high[0], s1sphere_high[1], s1sphere_high[2], s2sphere_high[0], s2sphere_high[1], s2sphere_high[2], iota_high, phiref_high]
params_start = numpy.array([[mc_low, q_low, s1sphere_low[0], s1sphere_low[1], s1sphere_low[2], s2sphere_low[0], s2sphere_low[1], s2sphere_low[2], 0.33333*np.pi, 1.5*np.pi]])
hp1 = generate_a_waveform_from_mcq(mc_low, q_low, spherical_to_cartesian(s1sphere_low), spherical_to_cartesian(s2sphere_low), 0, 0, 0, iota_low, phiref_low, distance, deltaF, f_min, f_max, waveFlags, approximant)
except AttributeError:
pass
try:
if approximant==lalsimulation.TaylorF2Ecc:
nparams = 11
params_low = [mc_low, q_low, s1sphere_low[0], s1sphere_low[1], s1sphere_low[2], s2sphere_low[0], s2sphere_low[1], s2sphere_low[2], iota_low, phiref_low, ecc_low]
params_high = [mc_high, q_high, s1sphere_high[0], s1sphere_high[1], s1sphere_high[2], s2sphere_high[0], s2sphere_high[1], s2sphere_high[2], iota_high, phiref_high, ecc_high]
params_start = numpy.array([[mc_low, q_low, s1sphere_low[0], s1sphere_low[1], s1sphere_low[2], s2sphere_low[0], s2sphere_low[1], s2sphere_low[2], 0.33333*np.pi, 1.5*np.pi, ecc_low]])
hp1 = generate_a_waveform_from_mcq(mc_low, q_low, spherical_to_cartesian(s1sphere_low), spherical_to_cartesian(s2sphere_low), ecc_low, 0, 0, iota_low, phiref_low, distance, deltaF, f_min, f_max, waveFlags, approximant)
except AttributeError:
pass
try:
if approximant==lalsimulation.IMRPhenomPv2_NRTidal:
nparams = 12
params_low = [mc_low, q_low, s1sphere_low[0], s1sphere_low[1], s1sphere_low[2], s2sphere_low[0], s2sphere_low[1], s2sphere_low[2], lambda1_low, lambda2_low, iota_low, phiref_low]
params_high = [mc_high, q_high, s1sphere_high[0], s1sphere_high[1], s1sphere_high[2], s2sphere_high[0], s2sphere_high[1], s2sphere_high[2], lambda1_high, lambda2_high, iota_high, phiref_high]
params_start = numpy.array([[mc_low, q_low, s1sphere_low[0], s1sphere_low[1], s1sphere_low[2], s2sphere_low[0], s2sphere_low[1], s2sphere_low[2], lambda1_low, lambda2_low, 0.33333*np.pi, 1.5*np.pi]])
hp1 = generate_a_waveform_from_mcq(mc_low, q_low, spherical_to_cartesian(s1sphere_low), spherical_to_cartesian(s2sphere_low), 0, lambda1_low, lambda2_low, iota_low, phiref_low, distance, deltaF, f_min, f_max, waveFlags, approximant)
except AttributeError:
pass
try:
if approximant==lalsimulation.IMRPhenomNSBH:
nparams = 12
params_low = [mc_low, q_low, s1sphere_low[0], s1sphere_low[1], s1sphere_low[2], s2sphere_low[0], s2sphere_low[1], s2sphere_low[2], lambda1_low, lambda2_low, iota_low, phiref_low]
params_high = [mc_high, q_high, s1sphere_high[0], s1sphere_high[1], s1sphere_high[2], s2sphere_high[0], s2sphere_high[1], s2sphere_high[2], lambda1_high, lambda2_high, iota_high, phiref_high]
params_start = numpy.array([[mc_low, q_low, s1sphere_low[0], s1sphere_low[1], s1sphere_low[2], s2sphere_low[0], s2sphere_low[1], s2sphere_low[2], lambda1_low, lambda2_low, 0.33333*np.pi, 1.5*np.pi]])
hp1 = generate_a_waveform_from_mcq(mc_low, q_low, spherical_to_cartesian(s1sphere_low), spherical_to_cartesian(s2sphere_low), 0, lambda1_low, lambda2_low, iota_low, phiref_low, distance, deltaF, f_min, f_max, waveFlags, approximant)
except AttributeError:
pass
return numpy.array([nparams, params_low, params_high, params_start, hp1])
def empnodes(ndim, known_bases): # Here known_bases is the full copy known_bases_copy. Its length is equal to or longer than ndim.
emp_nodes = numpy.arange(0,ndim)*100000000
emp_nodes[0] = numpy.argmax(numpy.absolute(known_bases[0]))
c1 = known_bases[1,emp_nodes[0]]/known_bases[0,1]
interp1 = numpy.multiply(c1,known_bases[0])
diff1 = interp1 - known_bases[1]
r1 = numpy.absolute(diff1)
emp_nodes[1] = numpy.argmax(r1)
for k in numpy.arange(2,ndim):
emp_tmp = emp_nodes[0:k]
Vtmp = numpy.transpose(known_bases[0:k,emp_tmp])
inverse_Vtmp = numpy.linalg.pinv(Vtmp)
e_to_interp = known_bases[k]
Ci = numpy.dot(inverse_Vtmp, e_to_interp[emp_tmp])
interpolantA = numpy.zeros(len(known_bases[k]))+numpy.zeros(len(known_bases[k]))*1j
for j in numpy.arange(0, k):
tmp = numpy.multiply(Ci[j], known_bases[j])
interpolantA += tmp
diff = interpolantA - known_bases[k]
r = numpy.absolute(diff)
emp_nodes[k] = numpy.argmax(r)
emp_nodes = sorted(emp_nodes)
u, c = numpy.unique(emp_nodes, return_counts=True)
dup = u[c > 1]
#print(len(emp_nodes), "\nDuplicates indices:", dup)
emp_nodes = numpy.unique(emp_nodes)
ndim = len(emp_nodes)
#print(len(emp_nodes), "\n", emp_nodes)
V = numpy.transpose(known_bases[0:ndim, emp_nodes])
inverse_V = numpy.linalg.pinv(V)
return numpy.array([ndim, inverse_V, emp_nodes])
def surroerror(ndim, inverse_V, emp_nodes, known_bases, test_mc, test_q, test_s1, test_s2, test_ecc, test_lambda1, test_lambda2, test_iota, test_phiref, distance, deltaF, f_min, f_max, waveFlags, approximant):
hp_test = generate_a_waveform_from_mcq(test_mc, test_q, test_s1, test_s2, test_ecc, test_lambda1, test_lambda2, test_iota, test_phiref, distance, deltaF, f_min, f_max, waveFlags, approximant)
Ci = numpy.dot(inverse_V, hp_test[emp_nodes])
interpolantA = numpy.zeros(len(hp_test))+numpy.zeros(len(hp_test))*1j
#ndim = len(known_bases)
for j in numpy.arange(0, ndim):
tmp = numpy.multiply(Ci[j], known_bases[j])
interpolantA += tmp
surro = (1-overlap_of_two_waveforms(hp_test, interpolantA))*deltaF
return surro
def surros(tolerance, ndim, inverse_V, emp_nodes, known_bases, nts, nparams, params_low, params_high, distance, deltaF, f_min, f_max, waveFlags, approximant): # Here known_bases is known_bases_copy
test_points = generate_params_points(nts, nparams, params_low, params_high)
surros = numpy.zeros(nts)
count = 0
for i in numpy.arange(0,nts):
test_mc = test_points[i,0]
test_q = test_points[i,1]
test_s1 = spherical_to_cartesian(test_points[i,2:5])
test_s2 = spherical_to_cartesian(test_points[i,5:8])
test_iota = test_points[i,8]
test_phiref = test_points[i,9]
test_ecc = 0
test_lambda1 = 0
test_lambda2 = 0
if nparams == 11: test_ecc = test_points[i,10]
if nparams == 12:
test_lambda1 = test_points[i,10]
test_lambda2 = test_points[i,11]
surros[i] = surroerror(ndim, inverse_V, emp_nodes, known_bases[0:ndim], test_mc, test_q, test_s1, test_s2, test_ecc, test_lambda1, test_lambda2, test_iota, test_phiref, distance, deltaF, f_min, f_max, waveFlags, approximant)
if (surros[i] > tolerance):
count = count+1
print(ndim, "basis elements gave", count, "bad points of surrogate error > ", tolerance)
if count == 0: val =0
else: val = 1
return val
def roqs(tolerance, freq, ndimlow, ndimhigh, ndimstepsize, known_bases_copy, nts, nparams, params_low, params_high, distance, deltaF, f_min, f_max, waveFlags, approximant):
for num in np.arange(ndimlow, ndimhigh, ndimstepsize):
ndim, inverse_V, emp_nodes = empnodes(num, known_bases_copy)
if surros(tolerance, ndim, inverse_V, emp_nodes, known_bases_copy, nts, nparams, params_low, params_high, distance, deltaF, f_min, f_max, waveFlags, approximant)==0:
b_linear = numpy.dot(numpy.transpose(known_bases_copy[0:ndim]),inverse_V)
f_linear = freq[emp_nodes]
numpy.save('./B_linear.npy',numpy.transpose(b_linear))
numpy.save('./fnodes_linear.npy',f_linear)
print("Number of linear basis elements is ", ndim, "and the linear ROQ data are saved in B_linear.npy")
break
return
def testrep(b_linear, emp_nodes, test_mc, test_q, test_s1, test_s2, test_ecc, test_lambda1, test_lambda2, test_iota, test_phiref, distance, deltaF, f_min, f_max, waveFlags, approximant):
hp_test = generate_a_waveform_from_mcq(test_mc, test_q, test_s1, test_s2, test_ecc, test_lambda1, test_lambda2, test_iota, test_phiref, distance, deltaF, f_min, f_max, waveFlags, approximant)
hp_test_emp = hp_test[emp_nodes]
hp_rep = numpy.dot(b_linear,hp_test_emp)
diff = hp_rep - hp_test
rep_error = diff/numpy.sqrt(numpy.vdot(hp_test,hp_test))
plt.plot(numpy.real(rep_error), label='Real part of h+')
plt.plot(numpy.imag(rep_error), label='Imaginary part of h+')
plt.xlabel('Waveform Node Number')
plt.ylabel('Fractional Representation Error')
plt.title('Rep Error with numpy.linalg.pinv()')
plt.legend(loc=0)
plt.show()
return
def empnodes_quad(ndim_quad, known_quad_bases):
emp_nodes_quad = numpy.arange(0,ndim_quad)*100000000
emp_nodes_quad[0] = numpy.argmax(numpy.absolute(known_quad_bases[0]))
c1_quad = known_quad_bases[1,emp_nodes_quad[0]]/known_quad_bases[0,1]
interp1_quad = numpy.multiply(c1_quad,known_quad_bases[0])
diff1_quad = interp1_quad - known_quad_bases[1]
r1_quad = numpy.absolute(diff1_quad)
    emp_nodes_quad[1] = numpy.argmax(r1_quad)
"""Module retrieving Fourier coefficients computation from lonlat grid.
Computation of the Fourier coefficients from lonlat grids
on pressure levels at every timestep.
The spectral truncation is determined by the number of longitudinal
gridsteps. The outputs are given as (time,level,wave,lat) where wave stands
for the zonal wavenumber. In the context of the thermodynamic diagnostic tool,
this is used for the computation of the Lorenz Energy Cycle.
@author: <EMAIL>, <NAME>, Hamburg University, 2018.
"""
import numpy as np
from netCDF4 import Dataset
GP_RES = np.array([16, 32, 48, 64, 96, 128, 256, 384, 512, 1024, 2048, 4096])
FC_RES = np.array([5, 10, 15, 21, 31, 43, 85, 127, 171, 341, 683, 1365])
G_0 = 9.81 # Gravity acceleration
GAM = 0.0065 # Standard atmosphere lapse rate
GAS_CON = 287.0 # Gas constant
P_0 = 10000 # Reference tropospheric pressure
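def _example_truncation():
    """Illustrative sketch: the zonal truncation used in fourier_coeff is taken
    from the GP_RES/FC_RES lookup based on the grid size (the code uses
    2 * nlat); e.g. nlat = 96 gives trunc = 86."""
    nlat = 96
    i = np.min(np.where(2 * nlat <= GP_RES))
    trunc = FC_RES[i] + 1
    print(trunc)  # 86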
def fourier_coeff(tadiagfile, outfile, ta_input, tas_input):
"""Compute Fourier coefficients in lon direction.
Receive as input:
- tadiagfile: the name of a file to store modified t fields;
- outfile: the name of a file to store the Fourier coefficients;
- ta_input: the name of a file containing t,u,v,w fields;
- tas_input: the name of a file containing t2m field.
"""
with Dataset(ta_input) as dataset:
lon = dataset.variables['lon'][:]
lat = dataset.variables['lat'][:]
lev = dataset.variables['plev'][:]
time = dataset.variables['time'][:]
t_a = dataset.variables['ta'][:, :, :, :]
u_a = dataset.variables['ua'][:, :, :, :]
v_a = dataset.variables['va'][:, :, :, :]
wap = dataset.variables['wap'][:, :, :, :]
nlon = len(lon)
nlat = len(lat)
nlev = len(lev)
ntime = len(time)
i = np.min(np.where(2 * nlat <= GP_RES))
trunc = FC_RES[i] + 1
wave2 = np.linspace(0, trunc - 1, trunc)
with Dataset(tas_input) as dataset:
tas = dataset.variables['tas'][:, :, :]
tas = tas[:, ::-1, :]
ta1_fx = np.array(t_a)
deltat = np.zeros([ntime, nlev, nlat, nlon])
p_s = np.full([ntime, nlat, nlon], P_0)
for i in np.arange(nlev - 1, 0, -1):
h_1 = np.ma.masked_where(ta1_fx[:, i, :, :] != 0, ta1_fx[:, i, :, :])
if np.any(h_1.mask > 0):
deltat[:, i - 1, :, :] = np.where(ta1_fx[:, i - 1, :, :] != 0,
deltat[:, i - 1, :, :],
(ta1_fx[:, i, :, :] - tas))
deltat[:, i - 1, :, :] = (
(1 * np.array(h_1.mask)) * np.array(deltat[:, i - 1, :, :]))
d_p = -(
(P_0 * G_0 / (GAM * GAS_CON)) * deltat[:, i - 1, :, :] / tas)
p_s = np.where(ta1_fx[:, i - 1, :, :] != 0, p_s, lev[i - 1] + d_p)
for k in np.arange(0, nlev - i - 1, 1):
h_3 = np.ma.masked_where(ta1_fx[:, i + k, :, :] != 0,
ta1_fx[:, i + k, :, :])
if np.any(h_3.mask > 0):
deltat[:, i - 1, :, :] = np.where(
ta1_fx[:, i + k, :, :] != 0, deltat[:, i - 1, :, :],
(ta1_fx[:, i + k + 1, :, :] - tas))
d_p = -((P_0 * G_0 /
(GAM * GAS_CON)) * deltat[:, i - 1, :, :] / tas)
p_s = np.where(ta1_fx[:, i + k, :, :] != 0, p_s,
lev[i + k] + d_p)
ta2_fx = np.array(t_a)
mask = np.zeros([nlev, ntime, nlat, nlon])
dat = np.zeros([nlev, ntime, nlat, nlon])
    tafr_bar = np.zeros([nlev, ntime, nlat, nlon])
import copy
import numpy as np
class GameBoard(object):
def __init__(self, shape=(3, 3), array=None):
if array is not None:
self.array = array
else:
self._array = -np.ones(shape=shape, dtype=int)
def __str__(self):
return str(self._array)
@property
def array(self):
return self._array
@array.setter
def array(self, x):
if isinstance(x, np.ndarray):
if len(x.shape) == 2:
self._array = x
else:
raise TypeError('x must be a 2D numpy.ndarray.')
else:
raise TypeError('x must be a 2D numpy.ndarray.')
@property
def shape(self):
return self._array.shape
@property
def m(self):
return self.shape[0]
@property
def n(self):
return self.shape[1]
@property
def is_empty(self):
return np.all(self._array == -1)
@property
def is_full(self):
return np.all(self._array != -1)
@property
def available_coords(self):
return [(r, c) for r, c in zip(*np.where(self._array == -1))]
@property
def equivalent_coords_dict(self):
b_0 = self._array
m, n = b_0.shape
coords_dict = dict()
for coord in self.available_coords:
if len(coords_dict) == 0:
coords_dict.update({coord: {coord}})
else:
found_equivalent = False
# Reflective symmetry
# left-right
b_r = b_0[::-1, :]
if np.all(b_0 == b_r):
eq_coord = (m - 1 - coord[0], coord[1])
if eq_coord in coords_dict.keys():
found_equivalent = True
coords_dict[eq_coord].update([coord])
# top-bottom
b_r = b_0[:, ::-1]
if np.all(b_0 == b_r):
eq_coord = (coord[0], n - 1 - coord[1])
if eq_coord in coords_dict.keys():
found_equivalent = True
coords_dict[eq_coord].update([coord])
if m == n:
# Diagonal
b_r = np.transpose(b_0)
if np.all(b_0 == b_r):
eq_coord = (coord[1], coord[0])
if eq_coord in coords_dict.keys():
found_equivalent = True
coords_dict[eq_coord].update([coord])
# Anti-diagonal
b_r = np.transpose(b_0[:, ::-1])[:, ::-1]
if np.all(b_0 == b_r):
eq_coord = (n - 1 - coord[1], m - 1 - coord[0])
if eq_coord in coords_dict.keys():
found_equivalent = True
coords_dict[eq_coord].update([coord])
# Rotational symmetry
if m == n:
# 90 degree
if (np.all(b_0 == np.rot90(b_0, k=1)) and
np.all(b_0 == np.rot90(b_0, k=2)) and
np.all(b_0 == np.rot90(b_0, k=3))):
eq_coord = (coord[1], n - 1 - coord[0])
if eq_coord in coords_dict.keys():
found_equivalent = True
coords_dict[eq_coord].update([coord])
eq_coord = (m - 1 - coord[1], coord[0])
if eq_coord in coords_dict.keys():
found_equivalent = True
coords_dict[eq_coord].update([coord])
eq_coord = (m - 1 - coord[0], n - 1 - coord[1])
if eq_coord in coords_dict.keys():
found_equivalent = True
coords_dict[eq_coord].update([coord])
else:
# 180 degree
if np.all(b_0 == np.rot90(b_0, k=2)):
eq_coord = (m - 1 - coord[0], n - 1 - coord[1])
if eq_coord in coords_dict.keys():
found_equivalent = True
coords_dict[eq_coord].update([coord])
if not found_equivalent:
coords_dict.update({coord: {coord}})
# Uniqueness
coords_dict = {k: list(x) for k, x in coords_dict.items()}
# Return dictionary
return coords_dict
def place_stone(self, row, col, stone):
self._array[row, col] = stone
def get_lines(self, row=None, col=None):
board_array = self.array
m, n = board_array.shape
if row is None or col is None:
# All horizontal lines
for i in range(m):
yield board_array[i, :]
# All vertical lines
for j in range(n):
yield board_array[:, j]
# All diagonal & anti-diagonal lines
for i in range(-m + 1, n):
                yield np.diagonal(board_array, offset=i)
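def _example_symmetry_classes():
    """Illustrative sketch: on an empty 3x3 board the nine cells collapse into
    three equivalence classes (corners, edges, center) under the symmetry
    checks implemented in equivalent_coords_dict."""
    board = GameBoard(shape=(3, 3))
    classes = board.equivalent_coords_dict
    print(len(classes))  # 3
    for representative, coords in classes.items():
        print(representative, sorted(coords))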
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '4'
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.contrib import layers
import scipy.io as sio
from scipy.sparse.linalg import svds
#from numpy.linalg import svd
from sklearn import cluster
from sklearn.preprocessing import normalize
from munkres import Munkres
# from sklearn.manifold import TSNE
#import matlab.engine
# tf.train.Saver(max_to_keep=None)
def next_batch(data, _index_in_epoch ,batch_size , _epochs_completed):
_num_examples = data.shape[0]
# print(_num_examples)
start = _index_in_epoch
_index_in_epoch += batch_size
if _index_in_epoch > _num_examples:
# Finished epoch
_epochs_completed += 1
# Shuffle the data
perm = np.arange(_num_examples)
np.random.shuffle(perm)
data = data[perm]
#label = label[perm]
# Start next epoch
start = 0
_index_in_epoch = batch_size
assert batch_size <= _num_examples
end = _index_in_epoch
return data[start:end], _index_in_epoch, _epochs_completed
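def _example_next_batch():
    """Illustrative sketch: iterate over a toy array with next_batch; the index
    and epoch counters are threaded through explicitly by the caller."""
    data = np.arange(10).reshape(10, 1)
    index_in_epoch, epochs_completed = 0, 0
    for _ in range(4):
        batch, index_in_epoch, epochs_completed = next_batch(
            data, index_in_epoch, 3, epochs_completed)
        print(batch.ravel(), index_in_epoch, epochs_completed)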
class ODSC(object):
def __init__(self, n_input, kernel_size, n_hidden, reg_const1 = 1.0, reg_const2 = 1.0, learning_rate = 0.001,batch_size = 200, reg = None, \
denoise = False, model_path = None, restore_path = None, \
logs_path = './mymodels/logs', no=0):
self.n_input = n_input
self.kernel_size = kernel_size
self.n_hidden = n_hidden
self.batch_size = batch_size
self.reg = reg
self.model_path = model_path
self.restore_path = restore_path
self.iter = 0
self.no = 0
#input required to be fed
self.x = tf.placeholder(tf.float32, [None, n_input[0], n_input[1], 1])
self.learning_rate = tf.placeholder(tf.float32, [])
weights = self._initialize_weights()
if denoise == False:
x_input = self.x
latent, pool1, shape = self.encoder(x_input, weights)
# olatent, oshape = self.encoderover(x_input, weights)
else:
x_input = tf.add(self.x, tf.random_normal(shape=tf.shape(self.x),
mean = 0,
stddev = 0.2,
dtype=tf.float32))
latent, shape = self.encoder(x_input, weights)
print(latent.shape,pool1.shape)
latent = tf.add(latent,pool1)
z = tf.reshape(latent, [batch_size, -1])
# z2 = tf.reshape(laten2, [batch_size, -1])
Coef = weights['Coef']
# Coef2 = weights['oCoef']
z_c = tf.matmul(Coef,z)
# z_c2 = tf.matmul(Coef2,z2)
self.Coef = Coef
latent_c = tf.reshape(z_c, tf.shape(latent))
# latent_c2 = tf.reshape(z_c2, tf.shape(laten2))
# print(z.shape)
self.z = z
# print(z.shape)
self.x_r = self.decoder(latent_c, weights, shape)
# l_2 reconstruction loss
self.reconst_cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.x_r, self.x), 2.0))
tf.summary.scalar("recons_loss", self.reconst_cost)
self.reg_losses = tf.reduce_sum(tf.pow(self.Coef,2.0))
tf.summary.scalar("reg_loss", reg_const1 * self.reg_losses )
self.selfexpress_losses = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(z_c, z), 2.0))
tf.summary.scalar("selfexpress_loss", reg_const2 * self.selfexpress_losses )
self.loss = self.reconst_cost + reg_const1 * self.reg_losses + reg_const2 * self.selfexpress_losses
self.merged_summary_op = tf.summary.merge_all()
self.optimizer = tf.train.AdamOptimizer(learning_rate = self.learning_rate).minimize(self.loss) #GradientDescentOptimizer #AdamOptimizer
self.init = tf.global_variables_initializer()
self.sess = tf.InteractiveSession()
self.sess.run(self.init)
self.saver = tf.train.Saver([v for v in tf.trainable_variables() if not (v.name.startswith("Coef"))],max_to_keep = None)
self.summary_writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())
def _initialize_weights(self):
all_weights = dict()
all_weights['enc_w0'] = tf.get_variable("enc_w0", shape=[self.kernel_size[0], self.kernel_size[0], 1, self.n_hidden[0]],
initializer=layers.xavier_initializer_conv2d(),regularizer = self.reg)
all_weights['enc_b0'] = tf.Variable(tf.zeros([self.n_hidden[0]], dtype = tf.float32))
all_weights['enc_w1'] = tf.get_variable("enc_w1", shape=[self.kernel_size[1], self.kernel_size[1], self.n_hidden[0],self.n_hidden[1]],
initializer=layers.xavier_initializer_conv2d(),regularizer = self.reg)
all_weights['enc_b1'] = tf.Variable(tf.zeros([self.n_hidden[1]], dtype = tf.float32))
all_weights['enc_w2'] = tf.get_variable("enc_w2", shape=[self.kernel_size[2], self.kernel_size[2], self.n_hidden[1],self.n_hidden[2]],
initializer=layers.xavier_initializer_conv2d(),regularizer = self.reg)
all_weights['enc_b2'] = tf.Variable(tf.zeros([self.n_hidden[2]], dtype = tf.float32))
all_weights['Coef'] = tf.Variable(1.0e-4 * tf.ones([self.batch_size, self.batch_size],tf.float32), name = 'Coef')
all_weights['dec_w0'] = tf.get_variable("dec_w0", shape=[self.kernel_size[2], self.kernel_size[2], self.n_hidden[1],self.n_hidden[2]],
initializer=layers.xavier_initializer_conv2d(),regularizer = self.reg)
all_weights['dec_b0'] = tf.Variable(tf.zeros([self.n_hidden[1]], dtype = tf.float32))
all_weights['dec_w1'] = tf.get_variable("dec_w1", shape=[self.kernel_size[1], self.kernel_size[1], self.n_hidden[0],self.n_hidden[1]],
initializer=layers.xavier_initializer_conv2d(),regularizer = self.reg)
all_weights['dec_b1'] = tf.Variable(tf.zeros([self.n_hidden[0]], dtype = tf.float32))
all_weights['dec_w2'] = tf.get_variable("dec_w2", shape=[self.kernel_size[0], self.kernel_size[0],1, self.n_hidden[0]],
initializer=layers.xavier_initializer_conv2d(),regularizer = self.reg)
all_weights['dec_b2'] = tf.Variable(tf.zeros([1], dtype = tf.float32))
all_weights['oenc_w0'] = tf.get_variable("oenc_w0", shape=[self.kernel_size[0], self.kernel_size[0], self.n_hidden[0],1],
initializer=layers.xavier_initializer_conv2d(),regularizer = self.reg)
all_weights['oenc_b0'] = tf.Variable(tf.zeros([1], dtype = tf.float32))
all_weights['oenc_w1'] = tf.get_variable("oenc_w1", shape=[self.kernel_size[1], self.kernel_size[1], self.n_hidden[2],self.n_hidden[0]],
initializer=layers.xavier_initializer_conv2d(),regularizer = self.reg)
all_weights['oenc_b1'] = tf.Variable(tf.zeros([self.n_hidden[2]], dtype = tf.float32))
all_weights['oenc_w2'] = tf.get_variable("oenc_w2", shape=[self.kernel_size[2], self.kernel_size[2], self.n_hidden[2],self.n_hidden[1]],
initializer=layers.xavier_initializer_conv2d(),regularizer = self.reg)
all_weights['oenc_b2'] = tf.Variable(tf.zeros([self.n_hidden[2]], dtype = tf.float32))
all_weights['oCoef'] = tf.Variable(1.0e-4 * tf.ones([self.batch_size, self.batch_size],tf.float32), name = 'Coef')
return all_weights
# Building the encoder
def encoder(self,x, weights):
shapes = []
# Encoder Hidden layer with relu activation #1
shapes.append(x.get_shape().as_list())
layer1 = tf.nn.bias_add(tf.nn.conv2d(x, weights['enc_w0'], strides=[1,2,2,1],padding='SAME'),weights['enc_b0'])
layer1 = tf.nn.relu(layer1)
shapes_en = shapes[0]
olayer1 = tf.add(tf.nn.conv2d_transpose(x, weights['oenc_w0'],tf.stack([tf.shape(self.x)[0],shapes_en[1]*2,56,self.n_hidden[0]]),\
strides=[1,2,2,1],padding='SAME'),weights['oenc_b0'])
olayer1 = tf.nn.relu(olayer1)
# shapes.append(layer1.get_shape().as_list())
olayer2 = tf.add(tf.nn.conv2d_transpose(olayer1, weights['oenc_w1'],tf.stack([tf.shape(self.x)[0],shapes_en[1]*4,shapes_en[2]*4,self.n_hidden[2]]),\
strides=[1,2,2,1],padding='SAME'),weights['oenc_b1'])
olayer2 = tf.nn.relu(olayer2)
pool1 = tf.layers.max_pooling2d(inputs=olayer2, pool_size=[2,2], strides=32)
shapes.append(layer1.get_shape().as_list())
layer2 = tf.nn.bias_add(tf.nn.conv2d(layer1, weights['enc_w1'], strides=[1,2,2,1],padding='SAME'),weights['enc_b1'])
layer2 = tf.nn.relu(layer2)
shapes.append(layer2.get_shape().as_list())
layer3 = tf.nn.bias_add(tf.nn.conv2d(layer2, weights['enc_w2'], strides=[1,2,2,1],padding='SAME'),weights['enc_b2'])
layer3 = tf.nn.relu(layer3)
return layer3, pool1, shapes #add olayer2 as 2nd arg
# Building the decoder
def decoder(self,z, weights, shapes):
# Encoder Hidden layer with relu activation #1
shape_de1 = shapes[2]
layer1 = tf.add(tf.nn.conv2d_transpose(z, weights['dec_w0'], tf.stack([tf.shape(self.x)[0],shape_de1[1],shape_de1[2],shape_de1[3]]),\
strides=[1,2,2,1],padding='SAME'),weights['dec_b0'])
layer1 = tf.nn.relu(layer1)
shape_de2 = shapes[1]
layer2 = tf.add(tf.nn.conv2d_transpose(layer1, weights['dec_w1'], tf.stack([tf.shape(self.x)[0],shape_de2[1],shape_de2[2],shape_de2[3]]),\
strides=[1,2,2,1],padding='SAME'),weights['dec_b1'])
layer2 = tf.nn.relu(layer2)
shape_de3= shapes[0]
layer3 = tf.add(tf.nn.conv2d_transpose(layer2, weights['dec_w2'], tf.stack([tf.shape(self.x)[0],shape_de3[1],shape_de3[2],shape_de3[3]]),\
strides=[1,2,2,1],padding='SAME'),weights['dec_b2'])
layer3 = tf.nn.relu(layer3)
return layer3
def finetune_fit(self, X, lr): #
cost, summary, _, Coef = self.sess.run((self.reconst_cost, self.merged_summary_op, self.optimizer, self.Coef), feed_dict = {self.x: X, self.learning_rate: lr})#
self.summary_writer.add_summary(summary, self.iter)
self.iter = self.iter + 1
# tr = TSNE(perplexity=50).fit_transform(self.z.eval(feed_dict = {self.x: X, self.learning_rate: lr}))
# plt.scatter(tr[:, 0], tr[:, 1])
# plt.show()
return Coef,cost
def initlization(self):
self.sess.run(self.init)
def reconstruct(self,X):
print(self.x_r.shape)
print(self.x.shape)
return self.sess.run(self.x_r, feed_dict = {self.x:X})
def transform(self, X):
return self.sess.run(self.z, feed_dict = {self.x:X})
def save_model(self):
self.no = self.no+1
savetmp = self.model_path + "%d.ckpt"%(self.no)
# save_path = self.saver.save(self.sess,self.model_path)
save_path = self.saver.save(self.sess, savetmp)
print ("model saved in file: %s" % save_path)
def restore(self):
self.saver.restore(self.sess, self.restore_path)
print ("model restored")
def best_map(L1,L2):
#L1 should be the groundtruth labels and L2 should be the clustering labels we got
Label1 = np.unique(L1)
nClass1 = len(Label1)
Label2 = np.unique(L2)
nClass2 = len(Label2)
    nClass = np.maximum(nClass1, nClass2)
#!/usr/bin/env python
'''
In this scipt, SVM's SVR model is used, which can be tuned with different parameters
@Author : <NAME>
'''
# We'll use the pandas library to read CSV files into dataframes
import pandas as pd
import numpy as np
from sklearn import svm
from sklearn import preprocessing
from sklearn.cross_validation import train_test_split
import xgboost as xgb
def gini(expected, predicted):
assert expected.shape[0] == predicted.shape[0], 'unequal number of rows'
_all = np.asarray(np.c_[
expected,
predicted,
        np.arange(expected.shape[0])], dtype=float)
_EXPECTED = 0
_PREDICTED = 1
_INDEX = 2
# sort by predicted descending, then by index ascending
sort_order = np.lexsort((_all[:, _INDEX], -1 * _all[:, _PREDICTED]))
_all = _all[sort_order]
total_losses = _all[:, _EXPECTED].sum()
gini_sum = _all[:, _EXPECTED].cumsum().sum() / total_losses
gini_sum -= (expected.shape[0] + 1.0) / 2.0
return gini_sum / expected.shape[0]
def gini_normalized(expected, predicted):
return gini(expected, predicted) / gini(expected, expected)
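def _example_gini():
    """Illustrative sketch: a perfect prediction scores a normalized Gini of 1,
    a partially shuffled prediction scores less."""
    expected = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    print(gini_normalized(expected, expected.copy()))            # 1.0
    print(gini_normalized(expected, np.array([2, 1, 3, 5, 4])))  # 0.8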
# The competition datafiles are in the directory ../input
# Read competition data files:
train = pd.read_csv("../data/train.csv")
test = pd.read_csv("../data/test.csv")
labels = train.Hazard
train.drop('Hazard', axis=1, inplace=True)
columns = train.columns
test_ind = test.Id
train = np.array(train)
test = np.array(test)
for i in range(train.shape[1]):
if type(train[1,i]) is str:
lbl = preprocessing.LabelEncoder()
lbl.fit(list(train[:,i]) + list(test[:,i]))
train[:,i] = lbl.transform(train[:,i])
test[:,i] = lbl.transform(test[:,i])
train = train.astype(float)
test = test.astype(float)
print(train.shape)
print(test.shape)
mn = np.mean(train,axis=0)
st = np.std(train, axis=0)
import json
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import csv
import time
import copy
import os
from datetime import datetime
import error_metrics
global gld_num
gld_num = '1'
os.chdir('/home/ankit/PFO-ADC-DER-Testbed/ADC-DER-Testbed/testbed/post_process')
# discard_time = 3600*4
## loading cosim_manager data
lp = open('./cosim_data.json').read()
cosim_data = json.loads(lp)
## Appending all cosim data with one more entry
for key, value in cosim_data.items():
for k, v in value.items():
if k == 'Timestamp':
# v.append(v[-1]+v[-1]-v[-2]) # adding one more timestamp
v.append(v[-1] + v[0])
else:
v.append(v[-1]) # repeating the last value again
cosim_data[key][k] = v
cosim_time = cosim_data[list(cosim_data)[0]]['Timestamp']
cosim_data['time'] = np.array([int(i) for i in cosim_time])
# create mapping of each node to its ADC
adc_nodes_map=[]
adc_file = "./../../../GLD/initial_scenario/ADC_Location/ADC_Placement_by_Voltage_Drop.csv"
with open(adc_file, mode='r') as csv_file:
for i in range(1):
next(csv_file)
csv_reader = csv.reader(csv_file)
for row in csv_reader:
adc_nodes_map.append([row[0], row[-1]])
adc_nodes_map = np.array(adc_nodes_map)
#function to return adc name of the input node
def find_adc(node, adc_nodes_map=adc_nodes_map):
ind = np.where(adc_nodes_map[:,0]==node)[0][0]
adc_name = 'M' + gld_num + '_ADC' + adc_nodes_map[ind,1]
return adc_name
# Loading gld_data.json
lp = open('GLD_' + gld_num + '_data.json').read()
gld_data = json.loads(lp)
# creating a dict to map each adc to the indexes of devices in gld_data for each der type
# adc['der']['adc name']=[indexes in the gld data]
# t=time.time()
# adc_ind = {}
# der_type=[['battInv', 'power'], ['solarInv','power'], ['hvac','power'], ['wh','power']]
# for der in der_type:
# adc_ind[der[0]] = {}
# obj = gld_data[der[0]][der[1]]['object_name']
# for a in obj:
# b = a.split('_')[-2][1:]
# # if 'l102_tm' in a:
# if find_adc(b) not in adc_ind[der[0]]:
# adc_ind[der[0]][find_adc(b)] = []
# adc_ind[der[0]][find_adc(b)].append(obj.index(a))
# print('elapsed time is ',time.time()-t)
# creating a dict to map each adc to the indexes of devices in gld_data for each der type
# adc_ind['adc name']['der']=[indexes in the gld data]
t=time.time()
adc_ind = {}
der_type=[['battInv', 'power'], ['solarInv','power'], ['hvac','power'], ['wh','power']]
for der in der_type:
obj = gld_data[der[0]][der[1]]['object_name']
for a in obj:
b = a.split('_')[-2][1:]
# if 'l102_tm' in a:
if find_adc(b) == 'M1_ADCNONE':
continue
if find_adc(b) not in adc_ind:
adc_ind[find_adc(b)] = {}
if der[0] not in adc_ind[find_adc(b)]:
adc_ind[find_adc(b)][der[0]]=[]
adc_ind[find_adc(b)][der[0]].append(obj.index(a))
# print('elapsed time is ',time.time()-t)
#Voltages
voltages = np.array(gld_data['hvac']['voltages']['values']).astype(np.cfloat)
# Actuation Signals
#hrs = gld_data['battInv']['P_Out']['time']
battInv_Pout = np.array(gld_data['battInv']['P_Out']['values']).astype(np.float)
battInv_Qout = np.array(gld_data['battInv']['Q_Out']['values']).astype(np.float)
solarInv_Pout = np.array(gld_data['solarInv']['P_Out']['values']).astype(np.float)
solarInv_Qout = np.array(gld_data['solarInv']['Q_Out']['values']).astype(np.float)
hvac_seth = np.array(gld_data['hvac']['heating_setpoint']['values']).astype(np.float)
hvac_setc = np.array(gld_data['hvac']['cooling_setpoint']['values']).astype(np.float)
hvac_cooling_demand = (np.array(gld_data['hvac']['cooling_demand']['values'])).astype(np.float)
hvac_fan_power = (np.array(gld_data['hvac']['fan_design_power']['values'])).astype(np.float)/1000
hvac_rating = hvac_cooling_demand+hvac_fan_power
hvac_c_thermal_capacity = (np.array(gld_data['hvac']['design_cooling_capacity']['values'])).astype(np.float)
hvac_c_cop = (np.array(gld_data['hvac']['cooling_COP']['values'])).astype(np.float)
hvac_rating1 = hvac_c_thermal_capacity/12000/hvac_c_cop*3.5168
wh_tanks = np.array(gld_data['wh']['tank_setpoint']['values']).astype(np.float)
hvac_c_status = np.array(gld_data['hvac']['cooling_status']['values']).astype(np.float)
wh_rating = np.array(gld_data['wh']['heating_element_capacity']['values']).astype(np.float)
battInv_rated = (np.array(gld_data['battInv']['rated_power']['values'])).astype(np.float)
batt_rated = (np.array(gld_data['batt']['rated_power']['values'])).astype(np.float)
solar_rated = (np.array(gld_data['solar']['rated_power']['values'])).astype(np.float)
# Device Power Outputs
battInv_power = (np.array(gld_data['battInv']['power']['values'])).astype(np.cfloat)
solarInv_power = (np.array(gld_data['solarInv']['power']['values'])).astype(np.cfloat)
hvac_power = (np.array(gld_data['hvac']['power']['values'])).astype(np.cfloat)
wh_power = (np.array(gld_data['wh']['power']['values'])).astype(np.cfloat)
solar_VA = (np.array(gld_data['solar']['VA']['values'])).astype(np.cfloat)
#aggregating device outputs per adc in adc_agg dict
# adc_agg['adc name']['der type']=sum of all devices of der type
t=time.time()
adc_agg = copy.deepcopy(adc_ind)
adc_Prating = {}
num_der = {}
total_num_der = 0
for adc_num in adc_ind:
adc_Prating[adc_num] = {}
if "battInv" in adc_agg[adc_num]:
adc_agg[adc_num]["battInv"] = np.sum(battInv_power[:, adc_ind[adc_num]['battInv']], 1)/1000
adc_agg[adc_num]["batt_Pout"] = np.sum(battInv_Pout[:, adc_ind[adc_num]['battInv']], 1) / 1000
adc_agg[adc_num]["batt_Qout"] = np.sum(battInv_Qout[:, adc_ind[adc_num]['battInv']], 1) / 1000
adc_agg[adc_num]["total"] = adc_agg[adc_num]["battInv"]
adc_Prating[adc_num]["battInv"] = np.sum(battInv_rated[0, adc_ind[adc_num]['battInv']])/1000
adc_Prating[adc_num]["total"] = adc_Prating[adc_num]["battInv"]
if "solarInv" in adc_agg[adc_num]:
adc_agg[adc_num]["solarInv"] = np.sum(solarInv_power[:, adc_ind[adc_num]['solarInv']], 1) / 1000
adc_agg[adc_num]["solar_Pout"] = np.sum(solarInv_Pout[:, adc_ind[adc_num]['solarInv']], 1) / 1000
adc_agg[adc_num]["solar_Qout"] = np.sum(solarInv_Qout[:, adc_ind[adc_num]['solarInv']], 1) / 1000
adc_agg[adc_num]["total"] = adc_agg[adc_num]["total"] + adc_agg[adc_num]["solarInv"]
adc_Prating[adc_num]["solarInv"] = np.sum(solar_rated[0, adc_ind[adc_num]['solarInv']]) / 1000
adc_Prating[adc_num]["solarVA"] = np.sum(solar_VA[:, adc_ind[adc_num]['solarInv']], 1) / 1000
adc_Prating[adc_num]["total"] = adc_Prating[adc_num]["total"] + adc_Prating[adc_num]["solarInv"]
if "hvac" in adc_agg[adc_num]:
adc_agg[adc_num]["hvac"] = np.sum(hvac_power[:, adc_ind[adc_num]['hvac']], 1)
adc_agg[adc_num]["total"] = adc_agg[adc_num]["total"] + adc_agg[adc_num]["hvac"]
adc_Prating[adc_num]["hvac"] = np.sum(hvac_rating[0, adc_ind[adc_num]['hvac']])
adc_Prating[adc_num]["total"] = adc_Prating[adc_num]["total"] + adc_Prating[adc_num]["hvac"]
if "wh" in adc_agg[adc_num]:
adc_agg[adc_num]["wh"] = np.sum(wh_power[:, adc_ind[adc_num]['wh']], 1)
adc_agg[adc_num]["total"] = adc_agg[adc_num]["total"] + adc_agg[adc_num]["wh"]
adc_Prating[adc_num]["wh"] = np.sum(wh_rating[0, adc_ind[adc_num]['wh']])
adc_Prating[adc_num]["total"] = adc_Prating[adc_num]["total"] + adc_Prating[adc_num]["wh"]
error_metrics.calculate(adc_agg, adc_Prating, cosim_data)
#Plot aggregate devices output at given adc for each der type
time_format = '%H:%M:%S'
time_stamp = [t.split(' ')[1] for t in gld_data['wh']['power']['time']]
time_h = [datetime.strptime(t, time_format) for t in time_stamp]
hrs = [int((i-time_h[0]).total_seconds()) for i in time_h]
# start_time = 3600*4
adc_num = 'M1_ADC18'
# total_rating = sum(wh_rating[0, adc_ind[adc_num]['wh']]) + sum(hvac_rating[0, adc_ind[adc_num]['hvac']]) + sum(
# battInv_rated[0, adc_ind[adc_num]['battInv']]) / 1000 + sum(solar_rated[0, adc_ind[adc_num]['solarInv']]) / 1000
fig1, ax1 = plt.subplots(2, 2, sharex='col')
# ax1[0,0].plot(hrs, np.real(adc_agg[adc_num]['batt_Pout']), label='Battery', color='C0')
# ax1[0,0].plot(hrs, np.real(adc_agg[adc_num]['solar_Pout']), label='Solar', color='C1')
ax1[0,0].plot(hrs, np.real(adc_agg[adc_num]['batt_Pout'] + adc_agg[adc_num]['solar_Pout']))
import numpy as np
import copy
from .ParticleGroupExtension import ParticleGroupExtension, divide_particles
import numpy.polynomial.polynomial as poly
from random import shuffle
def postprocess_screen(screen, **params):
need_copy_params = ['take_slice', 'take_range', 'cylindrical_copies', 'remove_correlation', 'kill_zero_weight',
'radial_aperture', 'remove_spinning', 'include_ids', 'random_N', 'first_N', 'clip_to_charge']
need_copy = any([p in params for p in need_copy_params])
if ('need_copy' in params):
need_copy = params['need_copy']
if (need_copy):
screen = copy.deepcopy(screen)
if ('kill_zero_weight' in params):
if (params['kill_zero_weight']):
screen = kill_zero_weight(screen, make_copy=False)
if ('include_ids' in params):
ids = params['include_ids']
if (len(ids) > 0):
screen = include_ids(screen, ids, make_copy=False)
if ('take_range' in params):
(take_range_var, range_min, range_max) = params['take_range']
if (range_min < range_max):
screen = take_range(screen, take_range_var, range_min, range_max, make_copy=False)
if ('take_slice' in params):
(take_slice_var, slice_index, n_slices) = params['take_slice']
if (n_slices > 1):
screen = take_slice(screen, take_slice_var, slice_index, n_slices, make_copy=False)
if ('clip_to_charge' in params):
target_charge = params['clip_to_charge']
if (target_charge > 0):
screen = clip_to_charge(screen, target_charge, verbose=False, make_copy=False)
if ('cylindrical_copies' in params):
cylindrical_copies_n = params['cylindrical_copies']
if (cylindrical_copies_n > 0):
screen = add_cylindrical_copies(screen, params['cylindrical_copies'], make_copy=False)
if ('remove_spinning' in params):
if (params['remove_spinning']):
screen = remove_spinning(screen, make_copy=False)
if ('remove_correlation' in params):
(remove_correlation_var1, remove_correlation_var2, remove_correlation_n) = params['remove_correlation']
if (remove_correlation_n >= 0):
screen = remove_correlation(screen, remove_correlation_var1, remove_correlation_var2, remove_correlation_n, make_copy=False)
if ('random_N' in params):
N = params['random_N']
if (N > 0):
screen = random_N(screen, N, random=True, make_copy=False)
else:
if ('first_N' in params):
N = params['first_N']
if (N > 0):
screen = random_N(screen, N, random=False, make_copy=False)
return screen
# Returns IDs of the N nearest particles to center_particle_id in the ndim dimensional phase space
# "Nearest" is determined by changing coordinates to ones with sigma_matrix = identity_matrix
def id_of_nearest_N(screen_input, center_particle_id, N, ndim=4):
screen = copy.deepcopy(screen_input)
if (ndim == 6):
screen.drift_to_t()
x = screen.x
px = screen.px
w = screen.weight
pid = screen.id
if (center_particle_id not in pid):
print('Cannot find center particle')
return np.array([])
if (ndim == 2):
x = x - np.sum(x*w)/np.sum(w)
px = px - np.sum(px*w)/np.sum(w)
u0 = np.vstack((x, px))
if (ndim == 4):
y = screen.y
py = screen.py
x = x - np.sum(x*w)/np.sum(w)
px = px - np.sum(px*w)/np.sum(w)
y = y - np.sum(y*w)/np.sum(w)
py = py - np.sum(py*w)/np.sum(w)
u0 = np.vstack((x, px, y, py))
if (ndim == 6):
y = screen.y
py = screen.py
z = screen.z
pz = screen.pz
x = x - np.sum(x*w)/np.sum(w)
px = px - np.sum(px*w)/np.sum(w)
y = y - np.sum(y*w)/np.sum(w)
        py = py - np.sum(py*w)/np.sum(w)
# Copyright 2019-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#-------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Description: generate inputs and targets for the dlrm benchmark
# The inputs and outputs are generated according to one of the following three options
# 1) random distribution
# 2) synthetic distribution, based on unique accesses and distances between them
# i) <NAME>, <NAME>, <NAME> and <NAME> "Synthetic Trace-Driven
# Simulation of Cache Memory", IEEE AINAM'07
# 3) public data set
# i) Criteo Kaggle Display Advertising Challenge Dataset
# https://labs.criteo.com/2014/02/kaggle-display-advertising-challenge-dataset
# ii) Criteo Terabyte Dataset
# https://labs.criteo.com/2013/12/download-terabyte-click-logs
from __future__ import absolute_import, division, print_function, unicode_literals
# others
from os import path
import bisect
import collections
import sys
import data_utils
# numpy
import numpy as np
from numpy import random as ra
# pytorch
import torch
from torch.utils.data import Dataset, RandomSampler
import data_loader_terabyte
import mlperf_logger
import extend_distributed as ext_dist
# Kaggle Display Advertising Challenge Dataset
# dataset (str): name of dataset (Kaggle or Terabyte)
# randomize (str): determines randomization scheme
# "none": no randomization
# "day": randomizes each day"s data (only works if split = True)
# "total": randomizes total dataset
# split (bool) : to split into train, test, validation data-sets
class CriteoDataset(Dataset):
def __init__(
self,
dataset,
max_ind_range,
sub_sample_rate,
randomize,
split="train",
raw_path="",
pro_data="",
memory_map=False
):
# dataset
# tar_fea = 1 # single target
den_fea = 13 # 13 dense features
# spa_fea = 26 # 26 sparse features
# tad_fea = tar_fea + den_fea
# tot_fea = tad_fea + spa_fea
if dataset == "kaggle":
days = 7
out_file = "kaggleAdDisplayChallenge_processed"
elif dataset == "terabyte":
days = 24
out_file = "terabyte_processed"
else:
raise(ValueError("Data set option is not supported"))
self.max_ind_range = max_ind_range
self.memory_map = memory_map
# split the datafile into path and filename
lstr = raw_path.split("/")
self.d_path = "/".join(lstr[0:-1]) + "/"
self.d_file = lstr[-1].split(".")[0] if dataset == "kaggle" else lstr[-1]
self.npzfile = self.d_path + (
(self.d_file + "_day") if dataset == "kaggle" else self.d_file
)
self.trafile = self.d_path + (
(self.d_file + "_fea") if dataset == "kaggle" else "fea"
)
# check if pre-processed data is available
data_ready = True
if memory_map:
for i in range(days):
reo_data = self.npzfile + "_{0}_reordered.npz".format(i)
if not path.exists(str(reo_data)):
data_ready = False
else:
if not path.exists(str(pro_data)):
data_ready = False
# pre-process data if needed
        # WARNING: when memory mapping is used we get a collection of files
if data_ready:
print("Reading pre-processed data=%s" % (str(pro_data)))
file = str(pro_data)
else:
print("Reading raw data=%s" % (str(raw_path)))
file = data_utils.getCriteoAdData(
raw_path,
out_file,
max_ind_range,
sub_sample_rate,
days,
split,
randomize,
dataset == "kaggle",
memory_map
)
# get a number of samples per day
total_file = self.d_path + self.d_file + "_day_count.npz"
with np.load(total_file) as data:
total_per_file = data["total_per_file"]
# compute offsets per file
self.offset_per_file = np.array([0] + [x for x in total_per_file])
for i in range(days):
self.offset_per_file[i + 1] += self.offset_per_file[i]
# print(self.offset_per_file)
# setup data
if memory_map:
# setup the training/testing split
self.split = split
if split == 'none' or split == 'train':
self.day = 0
self.max_day_range = days if split == 'none' else days - 1
elif split == 'test' or split == 'val':
self.day = days - 1
num_samples = self.offset_per_file[days] - \
self.offset_per_file[days - 1]
self.test_size = int(np.ceil(num_samples / 2.))
self.val_size = num_samples - self.test_size
else:
sys.exit("ERROR: dataset split is neither none, nor train or test.")
'''
# text
print("text")
for i in range(days):
fi = self.npzfile + "_{0}".format(i)
with open(fi) as data:
ttt = 0; nnn = 0
for _j, line in enumerate(data):
ttt +=1
if np.int32(line[0]) > 0:
nnn +=1
print("day=" + str(i) + " total=" + str(ttt) + " non-zeros="
+ str(nnn) + " ratio=" +str((nnn * 100.) / ttt) + "%")
# processed
print("processed")
for i in range(days):
fi = self.npzfile + "_{0}_processed.npz".format(i)
with np.load(fi) as data:
yyy = data["y"]
ttt = len(yyy)
nnn = np.count_nonzero(yyy)
print("day=" + str(i) + " total=" + str(ttt) + " non-zeros="
+ str(nnn) + " ratio=" +str((nnn * 100.) / ttt) + "%")
# reordered
print("reordered")
for i in range(days):
fi = self.npzfile + "_{0}_reordered.npz".format(i)
with np.load(fi) as data:
yyy = data["y"]
ttt = len(yyy)
nnn = np.count_nonzero(yyy)
print("day=" + str(i) + " total=" + str(ttt) + " non-zeros="
+ str(nnn) + " ratio=" +str((nnn * 100.) / ttt) + "%")
'''
# load unique counts
with np.load(self.d_path + self.d_file + "_fea_count.npz") as data:
self.counts = data["counts"]
self.m_den = den_fea # X_int.shape[1]
self.n_emb = len(self.counts)
print("Sparse features= %d, Dense features= %d" % (self.n_emb, self.m_den))
# Load the test data
# Only a single day is used for testing
if self.split == 'test' or self.split == 'val':
# only a single day is used for testing
fi = self.npzfile + "_{0}_reordered.npz".format(
self.day
)
with np.load(fi) as data:
self.X_int = data["X_int"] # continuous feature
self.X_cat = data["X_cat"] # categorical feature
self.y = data["y"] # target
else:
# load and preprocess data
with np.load(file) as data:
X_int = data["X_int"] # continuous feature
X_cat = data["X_cat"] # categorical feature
y = data["y"] # target
self.counts = data["counts"]
self.m_den = X_int.shape[1] # den_fea
self.n_emb = len(self.counts)
print("Sparse fea = %d, Dense fea = %d" % (self.n_emb, self.m_den))
# create reordering
indices = np.arange(len(y))
if split == "none":
# randomize all data
if randomize == "total":
indices = np.random.permutation(indices)
print("Randomized indices...")
X_int[indices] = X_int
X_cat[indices] = X_cat
y[indices] = y
else:
indices = np.array_split(indices, self.offset_per_file[1:-1])
# randomize train data (per day)
if randomize == "day": # or randomize == "total":
for i in range(len(indices) - 1):
indices[i] = np.random.permutation(indices[i])
print("Randomized indices per day ...")
train_indices = np.concatenate(indices[:-1])
test_indices = indices[-1]
test_indices, val_indices = np.array_split(test_indices, 2)
print("Defined %s indices..." % (split))
# randomize train data (across days)
if randomize == "total":
train_indices = np.random.permutation(train_indices)
print("Randomized indices across days ...")
# create training, validation, and test sets
if split == 'train':
self.X_int = [X_int[i] for i in train_indices]
self.X_cat = [X_cat[i] for i in train_indices]
self.y = [y[i] for i in train_indices]
elif split == 'val':
self.X_int = [X_int[i] for i in val_indices]
self.X_cat = [X_cat[i] for i in val_indices]
self.y = [y[i] for i in val_indices]
elif split == 'test':
self.X_int = [X_int[i] for i in test_indices]
self.X_cat = [X_cat[i] for i in test_indices]
self.y = [y[i] for i in test_indices]
print("Split data according to indices...")
def __getitem__(self, index):
if isinstance(index, slice):
return [
self[idx] for idx in range(
index.start or 0, index.stop or len(self), index.step or 1
)
]
if self.memory_map:
if self.split == 'none' or self.split == 'train':
                # check if we need to switch to the next day and load its data
if index == self.offset_per_file[self.day]:
# print("day_boundary switch", index)
self.day_boundary = self.offset_per_file[self.day]
fi = self.npzfile + "_{0}_reordered.npz".format(
self.day
)
# print('Loading file: ', fi)
with np.load(fi) as data:
self.X_int = data["X_int"] # continuous feature
self.X_cat = data["X_cat"] # categorical feature
self.y = data["y"] # target
self.day = (self.day + 1) % self.max_day_range
i = index - self.day_boundary
elif self.split == 'test' or self.split == 'val':
# only a single day is used for testing
i = index + (0 if self.split == 'test' else self.test_size)
else:
sys.exit("ERROR: dataset split is neither none, nor train or test.")
else:
i = index
if self.max_ind_range > 0:
return self.X_int[i], self.X_cat[i] % self.max_ind_range, self.y[i]
else:
return self.X_int[i], self.X_cat[i], self.y[i]
def _default_preprocess(self, X_int, X_cat, y):
X_int = torch.log(torch.tensor(X_int, dtype=torch.float) + 1)
if self.max_ind_range > 0:
X_cat = torch.tensor(X_cat % self.max_ind_range, dtype=torch.long)
else:
X_cat = torch.tensor(X_cat, dtype=torch.long)
y = torch.tensor(y.astype(np.float32))
return X_int, X_cat, y
def __len__(self):
if self.memory_map:
if self.split == 'none':
return self.offset_per_file[-1]
elif self.split == 'train':
return self.offset_per_file[-2]
elif self.split == 'test':
return self.test_size
elif self.split == 'val':
return self.val_size
else:
sys.exit("ERROR: dataset split is neither none, nor train nor test.")
else:
return len(self.y)
def collate_wrapper_criteo(list_of_tuples):
# where each tuple is (X_int, X_cat, y)
transposed_data = list(zip(*list_of_tuples))
X_int = torch.log(torch.tensor(transposed_data[0], dtype=torch.float) + 1)
X_cat = torch.tensor(transposed_data[1], dtype=torch.long)
T = torch.tensor(transposed_data[2], dtype=torch.float32).view(-1, 1)
batchSize = X_cat.shape[0]
featureCnt = X_cat.shape[1]
lS_i = [X_cat[:, i] for i in range(featureCnt)]
lS_o = [torch.tensor(range(batchSize)) for _ in range(featureCnt)]
return X_int, torch.stack(lS_o), torch.stack(lS_i), T
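# Shape sketch for the batch assembled above (B samples, 13 dense and 26 categorical
# features as in the Criteo Kaggle layout):
#   X_int              -> (B, 13)  log(1 + x) transformed dense features
#   torch.stack(lS_o)  -> (26, B)  per-feature offsets, one lookup per sample
#   torch.stack(lS_i)  -> (26, B)  per-feature sparse indices
#   T                  -> (B, 1)   click labels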
def ensure_dataset_preprocessed(args, d_path):
_ = CriteoDataset(
args.data_set,
args.max_ind_range,
args.data_sub_sample_rate,
args.data_randomize,
"train",
args.raw_data_file,
args.processed_data_file,
args.memory_map
)
_ = CriteoDataset(
args.data_set,
args.max_ind_range,
args.data_sub_sample_rate,
args.data_randomize,
"test",
args.raw_data_file,
args.processed_data_file,
args.memory_map
)
for split in ['train', 'val', 'test']:
print('Running preprocessing for split =', split)
train_files = ['{}_{}_reordered.npz'.format(args.raw_data_file, day)
for
day in range(0, 23)]
test_valid_file = args.raw_data_file + '_23_reordered.npz'
output_file = d_path + '_{}.bin'.format(split)
input_files = train_files if split == 'train' else [test_valid_file]
data_loader_terabyte.numpy_to_binary(input_files=input_files,
output_file_path=output_file,
split=split)
def make_criteo_data_and_loaders(args):
if args.mlperf_logging and args.memory_map and args.data_set == "terabyte":
# more efficient for larger batches
data_directory = path.dirname(args.raw_data_file)
if args.mlperf_bin_loader:
lstr = args.processed_data_file.split("/")
d_path = "/".join(lstr[0:-1]) + "/" + lstr[-1].split(".")[0]
train_file = d_path + "_train.bin"
test_file = d_path + "_test.bin"
# val_file = d_path + "_val.bin"
counts_file = args.raw_data_file + '_fea_count.npz'
if any(not path.exists(p) for p in [train_file,
test_file,
counts_file]):
ensure_dataset_preprocessed(args, d_path)
train_data = data_loader_terabyte.CriteoBinDataset(
data_file=train_file,
counts_file=counts_file,
batch_size=args.mini_batch_size,
max_ind_range=args.max_ind_range
)
mlperf_logger.log_event(key=mlperf_logger.constants.TRAIN_SAMPLES,
value=train_data.num_samples)
train_loader = torch.utils.data.DataLoader(
train_data,
batch_size=None,
batch_sampler=None,
shuffle=False,
num_workers=0,
collate_fn=None,
pin_memory=False,
drop_last=False,
sampler=RandomSampler(train_data) if args.mlperf_bin_shuffle else None
)
test_data = data_loader_terabyte.CriteoBinDataset(
data_file=test_file,
counts_file=counts_file,
batch_size=args.test_mini_batch_size,
max_ind_range=args.max_ind_range
)
mlperf_logger.log_event(key=mlperf_logger.constants.EVAL_SAMPLES,
value=test_data.num_samples)
test_loader = torch.utils.data.DataLoader(
test_data,
batch_size=None,
batch_sampler=None,
shuffle=False,
num_workers=0,
collate_fn=None,
pin_memory=False,
drop_last=False,
)
else:
data_filename = args.raw_data_file.split("/")[-1]
train_data = CriteoDataset(
args.data_set,
args.max_ind_range,
args.data_sub_sample_rate,
args.data_randomize,
"train",
args.raw_data_file,
args.processed_data_file,
args.memory_map
)
test_data = CriteoDataset(
args.data_set,
args.max_ind_range,
args.data_sub_sample_rate,
args.data_randomize,
"test",
args.raw_data_file,
args.processed_data_file,
args.memory_map
)
train_loader = data_loader_terabyte.DataLoader(
data_directory=data_directory,
data_filename=data_filename,
days=list(range(23)),
batch_size=args.mini_batch_size,
max_ind_range=args.max_ind_range,
split="train"
)
test_loader = data_loader_terabyte.DataLoader(
data_directory=data_directory,
data_filename=data_filename,
days=[23],
batch_size=args.test_mini_batch_size,
max_ind_range=args.max_ind_range,
split="test"
)
else:
train_data = CriteoDataset(
args.data_set,
args.max_ind_range,
args.data_sub_sample_rate,
args.data_randomize,
"train",
args.raw_data_file,
args.processed_data_file,
args.memory_map
)
test_data = CriteoDataset(
args.data_set,
args.max_ind_range,
args.data_sub_sample_rate,
args.data_randomize,
"test",
args.raw_data_file,
args.processed_data_file,
args.memory_map
)
train_loader = torch.utils.data.DataLoader(
train_data,
batch_size=args.mini_batch_size,
shuffle=False,
num_workers=args.num_workers,
collate_fn=collate_wrapper_criteo,
pin_memory=False,
drop_last=False, # True
)
test_loader = torch.utils.data.DataLoader(
test_data,
batch_size=args.test_mini_batch_size,
shuffle=False,
num_workers=args.test_num_workers,
collate_fn=collate_wrapper_criteo,
pin_memory=False,
drop_last=False, # True
)
return train_data, train_loader, test_data, test_loader
# uniform distribution (input data)
class RandomDataset(Dataset):
def __init__(
self,
m_den,
ln_emb,
data_size,
num_batches,
mini_batch_size,
num_indices_per_lookup,
num_indices_per_lookup_fixed,
num_targets=1,
round_targets=False,
data_generation="random",
trace_file="",
enable_padding=False,
reset_seed_on_access=False,
rand_seed=0
):
# compute batch size
nbatches = int(np.ceil((data_size * 1.0) / mini_batch_size))
if num_batches != 0:
nbatches = num_batches
data_size = nbatches * mini_batch_size
# print("Total number of batches %d" % nbatches)
# save args (recompute data_size if needed)
self.m_den = m_den
self.ln_emb = ln_emb
self.data_size = data_size
self.num_batches = nbatches
self.mini_batch_size = mini_batch_size
self.num_indices_per_lookup = num_indices_per_lookup
self.num_indices_per_lookup_fixed = num_indices_per_lookup_fixed
self.num_targets = num_targets
self.round_targets = round_targets
self.data_generation = data_generation
self.trace_file = trace_file
self.enable_padding = enable_padding
self.reset_seed_on_access = reset_seed_on_access
self.rand_seed = rand_seed
def reset_numpy_seed(self, numpy_rand_seed):
np.random.seed(numpy_rand_seed)
# torch.manual_seed(numpy_rand_seed)
def __getitem__(self, index):
if isinstance(index, slice):
return [
self[idx] for idx in range(
index.start or 0, index.stop or len(self), index.step or 1
)
]
# WARNING: reset seed on access to first element
# (e.g. if same random samples needed across epochs)
if self.reset_seed_on_access and index == 0:
self.reset_numpy_seed(self.rand_seed)
# number of data points in a batch
n = min(self.mini_batch_size, self.data_size - (index * self.mini_batch_size))
if ext_dist.my_size > 1:
n = n // ext_dist.my_size
# generate a batch of dense and sparse features
if self.data_generation == "random":
(X, lS_o, lS_i) = generate_uniform_input_batch(
self.m_den,
self.ln_emb,
n,
self.num_indices_per_lookup,
self.num_indices_per_lookup_fixed
)
elif self.data_generation == "synthetic":
(X, lS_o, lS_i) = generate_synthetic_input_batch(
self.m_den,
self.ln_emb,
n,
self.num_indices_per_lookup,
self.num_indices_per_lookup_fixed,
self.trace_file,
self.enable_padding
)
else:
sys.exit(
"ERROR: --data-generation=" + self.data_generation + " is not supported"
)
# generate a batch of target (probability of a click)
T = generate_random_output_batch(n, self.num_targets, self.round_targets)
return (X, lS_o, lS_i, T)
def __len__(self):
        # WARNING: note that we produce batches of outputs in __getitem__
# therefore we should use num_batches rather than data_size below
return self.num_batches
def collate_wrapper_random(list_of_tuples):
# where each tuple is (X, lS_o, lS_i, T)
(X, lS_o, lS_i, T) = list_of_tuples[0]
return (X,
torch.stack(lS_o),
lS_i,
T)
def make_random_data_and_loader(args, ln_emb, m_den):
train_data = RandomDataset(
m_den,
ln_emb,
args.data_size,
args.num_batches,
args.mini_batch_size,
args.num_indices_per_lookup,
args.num_indices_per_lookup_fixed,
1, # num_targets
args.round_targets,
args.data_generation,
args.data_trace_file,
args.data_trace_enable_padding,
reset_seed_on_access=True,
rand_seed=args.numpy_rand_seed
) # WARNING: generates a batch of lookups at once
train_loader = torch.utils.data.DataLoader(
train_data,
batch_size=1,
shuffle=False,
num_workers=args.num_workers,
collate_fn=collate_wrapper_random,
pin_memory=False,
drop_last=False, # True
)
return train_data, train_loader
def generate_random_data(
m_den,
ln_emb,
data_size,
num_batches,
mini_batch_size,
num_indices_per_lookup,
num_indices_per_lookup_fixed,
num_targets=1,
round_targets=False,
data_generation="random",
trace_file="",
enable_padding=False,
):
nbatches = int(np.ceil((data_size * 1.0) / mini_batch_size))
if num_batches != 0:
nbatches = num_batches
data_size = nbatches * mini_batch_size
# print("Total number of batches %d" % nbatches)
# inputs
lT = []
lX = []
lS_offsets = []
lS_indices = []
for j in range(0, nbatches):
# number of data points in a batch
n = min(mini_batch_size, data_size - (j * mini_batch_size))
# generate a batch of dense and sparse features
if data_generation == "random":
(Xt, lS_emb_offsets, lS_emb_indices) = generate_uniform_input_batch(
m_den,
ln_emb,
n,
num_indices_per_lookup,
num_indices_per_lookup_fixed
)
elif data_generation == "synthetic":
(Xt, lS_emb_offsets, lS_emb_indices) = generate_synthetic_input_batch(
m_den,
ln_emb,
n,
num_indices_per_lookup,
num_indices_per_lookup_fixed,
trace_file,
enable_padding
)
else:
sys.exit(
"ERROR: --data-generation=" + data_generation + " is not supported"
)
# dense feature
lX.append(Xt)
# sparse feature (sparse indices)
lS_offsets.append(lS_emb_offsets)
lS_indices.append(lS_emb_indices)
# generate a batch of target (probability of a click)
P = generate_random_output_batch(n, num_targets, round_targets)
lT.append(P)
return (nbatches, lX, lS_offsets, lS_indices, lT)
def generate_random_output_batch(n, num_targets, round_targets=False):
# target (probability of a click)
if round_targets:
P = np.round(ra.rand(n, num_targets).astype(np.float32)).astype(np.float32)
else:
P = ra.rand(n, num_targets).astype(np.float32)
return torch.tensor(P)
# uniform distribution (input data)
def generate_uniform_input_batch(
m_den,
ln_emb,
n,
num_indices_per_lookup,
num_indices_per_lookup_fixed,
):
# dense feature
Xt = torch.tensor(ra.rand(n, m_den).astype(np.float32))
# sparse feature (sparse indices)
lS_emb_offsets = []
lS_emb_indices = []
# for each embedding generate a list of n lookups,
# where each lookup is composed of multiple sparse indices
for size in ln_emb:
lS_batch_offsets = []
lS_batch_indices = []
offset = 0
for _ in range(n):
            # num of sparse indices to be used per embedding (between 1 and num_indices_per_lookup)
if num_indices_per_lookup_fixed:
sparse_group_size = np.int64(num_indices_per_lookup)
else:
# random between [1,num_indices_per_lookup])
r = ra.random(1)
sparse_group_size = np.int64(
np.round(max([1.0], r * min(size, num_indices_per_lookup)))
)
# sparse indices to be used per embedding
r = ra.random(sparse_group_size)
sparse_group = np.unique(np.round(r * (size - 1)).astype(np.int64))
# reset sparse_group_size in case some index duplicates were removed
sparse_group_size = np.int64(sparse_group.size)
# store lengths and indices
lS_batch_offsets += [offset]
lS_batch_indices += sparse_group.tolist()
# update offset for next iteration
offset += sparse_group_size
lS_emb_offsets.append(torch.tensor(lS_batch_offsets))
lS_emb_indices.append(torch.tensor(lS_batch_indices))
return (Xt, lS_emb_offsets, lS_emb_indices)
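# Hedged illustration of the offsets/indices layout produced above (hypothetical
# numbers, one embedding table, batch of n = 3 lookups): if lookup 0 uses indices
# [4, 7], lookup 1 uses [1] and lookup 2 uses [0, 2, 5], then
#   lS_batch_offsets = [0, 2, 3]            # start of each lookup in the flat index list
#   lS_batch_indices = [4, 7, 1, 0, 2, 5]   # all sparse indices concatenated
# so an EmbeddingBag-style consumer recovers lookup k as indices[offsets[k]:offsets[k + 1]],
# with the last lookup running to the end of the list.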
# synthetic distribution (input data)
def generate_synthetic_input_batch(
m_den,
ln_emb,
n,
num_indices_per_lookup,
num_indices_per_lookup_fixed,
trace_file,
enable_padding=False,
):
# dense feature
Xt = torch.tensor(ra.rand(n, m_den).astype(np.float32))
# sparse feature (sparse indices)
lS_emb_offsets = []
lS_emb_indices = []
# for each embedding generate a list of n lookups,
# where each lookup is composed of multiple sparse indices
for i, size in enumerate(ln_emb):
lS_batch_offsets = []
lS_batch_indices = []
offset = 0
for _ in range(n):
            # num of sparse indices to be used per embedding (between 1 and num_indices_per_lookup)
            if num_indices_per_lookup_fixed:
                sparse_group_size = np.int64(num_indices_per_lookup)
import coopihc
from coopihc.space import StateElement, State, StateNotContainedError, Space
import gym
import numpy
import sys
import copy
_str = sys.argv[1]
# -------- Correct assigment
if _str == "correct" or _str == "all":
x = StateElement(
values=None,
spaces=[
coopihc.space.Space(
[
numpy.array([-1], dtype=numpy.float32),
numpy.array([1], dtype=numpy.float32),
]
),
coopihc.space.Space([numpy.array([1, 2, 3], dtype=numpy.int16)]),
coopihc.space.Space(
[numpy.array([-6, -5, -4, -3, -2, -1], dtype=numpy.int16)]
),
],
)
gridsize = (11, 11)
number_of_targets = 3
y = StateElement(
values=None,
spaces=[
Space(
[
numpy.array([i for i in range(gridsize[0])], dtype=numpy.int16),
numpy.array([i for i in range(gridsize[1])], dtype=numpy.int16),
]
)
for j in range(number_of_targets)
],
clipping_mode="error",
)
x = StateElement(
values=None,
spaces=[
coopihc.space.Space(
[
numpy.array([-1], dtype=numpy.float32),
numpy.array([1], dtype=numpy.float32),
]
),
coopihc.space.Space([numpy.array([1, 2, 3], dtype=numpy.int16)]),
coopihc.space.Space([numpy.array([-6, -5, -4, -3, -2, -1], dtype=numpy.int16)]),
],
)
gridsize = (11, 11)
number_of_targets = 3
y = StateElement(
values=None,
spaces=[
Space(
[
numpy.array([i for i in range(gridsize[0])], dtype=numpy.int16),
numpy.array([i for i in range(gridsize[1])], dtype=numpy.int16),
]
)
for j in range(number_of_targets)
],
clipping_mode="error",
)
if _str == "action-state":
a = StateElement(values=None, spaces=Space([numpy.array([None], dtype=object)]))
# -------------- accessing values
if _str == "access" or _str == "all":
x["values"]
x["spaces"]
x.values
x["values"] = [
numpy.array([[0.335]]),
numpy.array([[2]]),
numpy.array([[-4]]),
]
x["values"] = [0.2, 2, -5]
y["values"]
y["spaces"]
y["values"] = [
numpy.array([1, 1]),
numpy.array([0, 0]),
numpy.array([2, 2]),
]
y["values"] = [
numpy.array([15, 15]),
numpy.array([0, 0]),
numpy.array([2, 2]),
]
# ------ normal reset
if _str == "reset" or _str == "all":
x.reset()
y.reset()
# -------- forced reset
if _str == "forced-reset" or _str == "all":
reset_dic = {"values": [-1 / 2, 2, -2]}
x.reset(dic=reset_dic)
reset_dic = {"values": [[0, 0], [10, 10], [5, 5]]}
y.reset(dic=reset_dic)
# ------ iterate on StateElement
if _str == "iter" or _str == "all":
for _x in x:
print(_x)
for _y in y:
print(_y)
if _str == "cartesianproduct" or _str == "all":
x.reset()
for n, _x in enumerate(x.cartesian_product()):
# print(n, _x.values)
print(n, _x)
y.reset()
# There are a million elements in y
for n, _y in enumerate(y[0].cartesian_product()):
print(n, _y.values)
if _str == "comp" or _str == "all":
x.reset()
a = x[0]
print(x < numpy.array([2, -2, 4]))
if _str == "len" or _str == "all":
len(x)
len(y)
if _str == "cast" or _str == "all":
y.reset()
targetdomain = StateElement(
values=None,
spaces=[
coopihc.space.Space(
[
-numpy.ones((2, 1), dtype=numpy.float32),
numpy.ones((2, 1), dtype=numpy.float32),
]
)
for j in range(3)
],
)
res = y.cast(targetdomain)
b = StateElement(
values=5,
spaces=coopihc.space.Space(
[numpy.array([-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5], dtype=numpy.int16)]
),
)
a = StateElement(
values=0,
spaces=coopihc.space.Space(
[
numpy.array([-1], dtype=numpy.float32),
numpy.array([1], dtype=numpy.float32),
]
),
)
# C2D
continuous = []
discrete = []
for elem in numpy.linspace(-1, 1, 200):
a["values"] = elem
continuous.append(a["values"][0].squeeze().tolist())
discrete.append(a.cast(b, mode="center")["values"][0].squeeze().tolist())
import matplotlib.pyplot as plt
plt.plot(continuous, discrete, "b*")
plt.show()
# D2C
continuous = []
discrete = []
for elem in [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5]:
b["values"] = elem
discrete.append(elem)
continuous.append(b.cast(a, mode="edges")["values"][0].squeeze().tolist())
import matplotlib.pyplot as plt
plt.plot(discrete, continuous, "b*")
plt.show()
# C2C
a = StateElement(
values=0,
spaces=coopihc.space.Space(
[
numpy.array([-2], dtype=numpy.float32),
numpy.array([1], dtype=numpy.float32),
]
),
)
b = StateElement(
values=3.5,
spaces=coopihc.space.Space(
[
numpy.array([3], dtype=numpy.float32),
numpy.array([4], dtype=numpy.float32),
]
),
)
c1 = []
c2 = []
for elem in numpy.linspace(-2, 1, 100):
a["values"] = elem
c1.append(a["values"][0].squeeze().tolist())
c2.append(a.cast(b)["values"][0].squeeze().tolist())
import matplotlib.pyplot as plt
plt.plot(c1, c2, "b*")
plt.show()
# D2D
a = StateElement(
values=5,
spaces=coopihc.space.Space(
[numpy.array([i for i in range(11)], dtype=numpy.int16)]
),
)
b = StateElement(
values=5,
spaces=coopihc.space.Space(
[numpy.array([-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5], dtype=numpy.int16)]
),
)
d1 = []
d2 = []
for i in range(11):
a["values"] = i
d1.append(i)
d2.append(a.cast(b)["values"][0].squeeze().tolist())
import matplotlib.pyplot as plt
plt.plot(d1, d2, "b*")
plt.show()
if _str == "neg" or _str == "all":
x = StateElement(
values=numpy.array([[-0.237]], dtype=numpy.float32),
spaces=coopihc.space.Space(
[
numpy.array([-1], dtype=numpy.float32),
                numpy.array([1], dtype=numpy.float32),
            ]
        ),
    )
import numpy as np
def length(x, axis=-1, keepdims=True):
"""
Computes vector norm along a tensor axis(axes)
:param x: tensor
:param axis: axis(axes) along which to compute the norm
:param keepdims: indicates if the dimension(s) on axis should be kept
:return: The length or vector of lengths.
"""
lgth = np.sqrt(np.sum(x * x, axis=axis, keepdims=keepdims))
return lgth
def normalize(x, axis=-1, eps=1e-8):
"""
Normalizes a tensor over some axis (axes)
:param x: data tensor
:param axis: axis(axes) along which to compute the norm
:param eps: epsilon to prevent numerical instabilities
:return: The normalized tensor
"""
res = x / (length(x, axis=axis) + eps)
return res
def quat_normalize(x, eps=1e-8):
"""
Normalizes a quaternion tensor
:param x: data tensor
:param eps: epsilon to prevent numerical instabilities
:return: The normalized quaternions tensor
"""
res = normalize(x, eps=eps)
return res
def quat_getDif(x, y, eps=1e-8):
"""
    Computes the normalized difference quaternion between two quaternion tensors
    :param x: data tensor 1
    :param y: data tensor 2
    :param eps: epsilon to prevent numerical instabilities (currently unused)
    :return: The difference quaternion between both quaternions
"""
return quat_normalize(quat_mul(quat_inv(x),y))
def angle_axis_to_quat(angle, axis):
"""
Converts from and angle-axis representation to a quaternion representation
:param angle: angles tensor
:param axis: axis tensor
:return: quaternion tensor
"""
c = np.cos(angle / 2.0)[..., np.newaxis]
s = np.sin(angle / 2.0)[..., np.newaxis]
q = np.concatenate([c, s * axis], axis=-1)
return q
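# Worked check (illustrative, not part of the original module): angle = pi/2 about
# axis = [0, 0, 1] gives q = (cos(pi/4), 0, 0, sin(pi/4)) ~ (0.7071, 0, 0, 0.7071)
# in the (w, x, y, z) convention used throughout this file.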
def euler_to_quat(e, order='zyx'):
"""
Converts from an euler representation to a quaternion representation
:param e: euler tensor
:param order: order of euler rotations
:return: quaternion tensor
"""
axis = {
'x': np.asarray([1, 0, 0], dtype=np.float32),
'y': np.asarray([0, 1, 0], dtype=np.float32),
'z': np.asarray([0, 0, 1], dtype=np.float32)}
q0 = angle_axis_to_quat(e[..., 0], axis[order[0]])
q1 = angle_axis_to_quat(e[..., 1], axis[order[1]])
q2 = angle_axis_to_quat(e[..., 2], axis[order[2]])
return quat_mul(q0, quat_mul(q1, q2))
def quat_to_euler(q):
"""
Converts from an quaternion representation to a euler representation
:param q: quaterion tensor
:param order: order of euler rotations
:return: euler tensor (x-y-z order)
"""
phi = np.arctan2(2 * (q[..., 0] * q[..., 1] + q[..., 2] * q[..., 3]), 1 - 2 * (q[..., 1]**2 + q[..., 2]**2))
theta = np.arcsin(2 * (q[..., 0] * q[..., 2] + q[..., 3] * q[..., 1]))
psi = np.arctan2(2 * (q[..., 0] * q[..., 3] + q[..., 1] * q[..., 2]), 1 - 2 * (q[..., 2]**2 + q[..., 3]**2))
return np.stack([phi, theta, psi], axis = -1)
def quat_inv(q):
"""
    Inverts a tensor of quaternions by conjugation (assumes unit quaternions)
:param q: quaternion tensor
:return: tensor of inverted quaternions
"""
res = np.asarray([1, -1, -1, -1], dtype=np.float32) * q
return res
def quat_fk(lrot, lpos, parents):
"""
Performs Forward Kinematics (FK) on local quaternions and local positions to retrieve global representations
:param lrot: tensor of local quaternions with shape (..., Nb of joints, 4)
:param lpos: tensor of local positions with shape (..., Nb of joints, 3)
:param parents: list of parents indices
:return: tuple of tensors of global quaternion, global positions
"""
gp, gr = [lpos[..., :1, :]], [lrot[..., :1, :]]
for i in range(1, len(parents)):
gp.append(quat_mul_vec(gr[parents[i]], lpos[..., i:i+1, :]) + gp[parents[i]])
gr.append(quat_mul (gr[parents[i]], lrot[..., i:i+1, :]))
    res = np.concatenate(gr, axis=-2), np.concatenate(gp, axis=-2)
    return res
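# Hedged usage sketch (hypothetical names and shapes; relies on quat_mul and
# quat_mul_vec defined elsewhere in this module): a 3-joint chain where joint 0 is
# the root and each child sits 1 unit along x from its parent, with identity rotations.
#     parents = [0, 0, 1]
#     lrot = np.tile(np.array([1.0, 0.0, 0.0, 0.0], dtype=np.float32), (1, 3, 1))
#     lpos = np.zeros((1, 3, 3), dtype=np.float32); lpos[:, 1:, 0] = 1.0
#     grot, gpos = quat_fk(lrot, lpos, parents)
#     # with identity rotations, gpos[0, :, 0] comes out as [0., 1., 2.]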
"""
Main script: Train and test DeepVO on the KITTI odometry benchmark
"""
# The following two lines are needed because conda on Mila SLURM sets
# 'Qt5Agg' as the default backend for matplotlib.use(). The interpreter
# throws a warning that says matplotlib.use('Agg') needs to be called
# before importing pyplot. If the warning is ignored, this results in
# an error and the code crashes while storing plots (after validation).
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import time
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from tqdm import tqdm, trange
import datetime
# Other project files with definitions
import args
from KITTIDataset import KITTIDataset
from losses import MahalanobisLoss
from Model import DeepVO
from plotTrajectories import plotSequenceRelative, plotSequenceAbsolute
from Trainer import Trainer
# Parse commandline arguements
cmd = args.arguments
# Seed the RNGs (ensure deterministic outputs), if specified via commandline
if cmd.isDeterministic:
# rn.seed(cmd.randomseed)
np.random.seed(cmd.randomseed)
torch.manual_seed(cmd.randomseed)
torch.cuda.manual_seed(cmd.randomseed)
torch.backends.cudnn.deterministic = True
# Debug parameters. This is to run in 'debug' mode, which runs a very quick pass
# through major segments of code, to ensure nothing awful happens when we deploy
# on GPU clusters for instance, as a batch script. It is sometimes very annoying
# when code crashes after a few epochs of training, while attempting to write a
# checkpoint to a directory that does not exist.
if cmd.debug is True:
cmd.debugIters = 3
cmd.nepochs = 2
# Set default tensor type to cuda.FloatTensor, for GPU execution
torch.set_default_tensor_type(torch.cuda.FloatTensor)
# Create directory structure, to store results
cmd.basedir = os.path.dirname(os.path.realpath(__file__))
if not os.path.exists(os.path.join(cmd.basedir, cmd.cachedir, cmd.dataset)):
os.makedirs(os.path.join(cmd.basedir, cmd.cachedir, cmd.dataset))
write_dir = './runs/deepvo/deepvo_' + str(datetime.datetime.now())
cmd.expDir = os.path.join(cmd.basedir, cmd.cachedir, cmd.dataset, cmd.expID)
if not os.path.exists(write_dir):
os.makedirs(write_dir)
print('Created dir: ', write_dir)
if not os.path.exists(os.path.join(write_dir, 'models')):
os.makedirs(os.path.join(write_dir, 'models'))
print('Created dir: ', os.path.join(write_dir, 'models'))
if not os.path.exists(os.path.join(write_dir, 'plots', 'traj')):
os.makedirs(os.path.join(write_dir, 'plots', 'traj'))
print('Created dir: ', os.path.join(write_dir, 'plots', 'traj'))
if not os.path.exists(os.path.join(write_dir, 'plots', 'loss')):
os.makedirs(os.path.join(write_dir, 'plots', 'loss'))
print('Created dir: ', os.path.join(write_dir, 'plots', 'loss'))
for seq in range(11):
if not os.path.exists(os.path.join(write_dir, 'plots', 'traj', str(seq).zfill(2))):
os.makedirs(os.path.join(write_dir, 'plots', 'traj', str(seq).zfill(2)))
print('Created dir: ', os.path.join(write_dir, 'plots', 'traj', str(seq).zfill(2)))
# Save all the command line arguements in a text file in the experiment directory.
cmdFile = open(os.path.join(write_dir, 'args.txt'), 'w')
for arg in vars(cmd):
cmdFile.write(arg + ' ' + str(getattr(cmd, arg)) + '\n')
cmdFile.close()
# TensorboardX visualization support
if cmd.tensorboardX is True:
from tensorboardX import SummaryWriter
writer = SummaryWriter(log_dir = write_dir)
########################################################################
### Model Definition + Weight init + FlowNet weight loading ###
########################################################################
# Get the definition of the model
if cmd.modelType == 'flownet' or cmd.modelType is None:
# Model definition without batchnorm
deepVO = DeepVO(cmd.imageWidth, cmd.imageHeight, activation = cmd.activation, parameterization = cmd.outputParameterization, \
numLSTMCells = cmd.numLSTMCells, hidden_units_LSTM = [1024, 1024], flownet_weights_path=cmd.loadModel)
elif cmd.modelType == 'flownet_batchnorm':
# Model definition with batchnorm
deepVO = DeepVO(cmd.imageWidth, cmd.imageHeight, activation = cmd.activation, parameterization = cmd.outputParameterization, \
batchnorm = True, flownet_weights_path = cmd.loadModel)
# Load a pretrained DeepVO model
if cmd.modelType == 'deepvo' or cmd.modelType == 'flownet':
deepVO.load_state_dict(torch.load(cmd.loadModel), strict=False)
else:
# Initialize weights for fully connected layers and for LSTMCells
deepVO.init_weights()
# CUDAfy
deepVO.cuda()
print('Loaded! Good to launch!')
########################################################################
### Criterion, optimizer, and scheduler ###
########################################################################
if cmd.outputParameterization == 'mahalanobis':
criterion = MahalanobisLoss
else:
criterion = nn.MSELoss(reduction = 'sum')
if cmd.freezeCNN is True:
n = 0
for p in deepVO.parameters():
if p.requires_grad is True:
# The first 18 trainable parameters correspond to the CNN (FlowNetS)
if n <= 17:
p.requires_grad = False
n += 1
if cmd.optMethod == 'adam':
optimizer = optim.Adam(deepVO.parameters(), lr = cmd.lr, betas = (cmd.beta1, cmd.beta2), weight_decay = cmd.weightDecay, amsgrad = False)
elif cmd.optMethod == 'sgd':
optimizer = optim.SGD(deepVO.parameters(), lr = cmd.lr, momentum = cmd.momentum, weight_decay = cmd.weightDecay, nesterov = False)
else:
optimizer = optim.Adagrad(deepVO.parameters(), lr = cmd.lr, lr_decay = cmd.lrDecay , weight_decay = cmd.weightDecay)
# Initialize scheduler, if specified
if cmd.lrScheduler is not None:
if cmd.lrScheduler == 'cosine':
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max = cmd.nepochs)
elif cmd.lrScheduler == 'plateau':
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer)
########################################################################
### Main loop ###
########################################################################
rotLosses_train = []
transLosses_train = []
totalLosses_train = []
rotLosses_val = []
transLosses_val = []
totalLosses_val = []
bestValLoss = np.inf
# Create datasets for the current epoch
# train_seq = [0, 1, 2, 8, 9]
train_seq = [0, 2]
# train_startFrames = [0, 0, 0, 0, 0]
# train_endFrames = [4443, 1100, 4660, 4070, 1590]
train_startFrames = [0, 0]
train_endFrames = [3399, 6999]
val_seq = [1,3]
val_startFrames = [0,0]
val_endFrames = [1043,1420]
# val_seq = [3, 4, 5, 6, 7, 10]
# val_startFrames = [0, 0, 0, 0, 0, 0]
# val_endFrames = [800, 270, 2760, 1100, 1100, 1200]
for epoch in range(cmd.nepochs):
print('================> Starting epoch: ' + str(epoch+1) + '/' + str(cmd.nepochs))
train_seq_cur_epoch = []
train_startFrames_cur_epoch = []
train_endFrames_cur_epoch = []
# Take each sequence and split it into chunks
for s in range(len(train_seq)):
MAX_NUM_CHUNKS = 1
num_chunks = 0
if (train_endFrames[s] - train_startFrames[s]) // cmd.trainBatch != 0:
num_chunks = np.random.randint(0, min((MAX_NUM_CHUNKS, (train_endFrames[s] - train_startFrames[s]) // cmd.trainBatch)))
        # Fall back to a single chunk when the draw above returns zero
if num_chunks == 0:
num_chunks = 1
cur_seq = [idx for idx in range(train_startFrames[s], train_endFrames[s], (train_endFrames[s] - train_startFrames[s]) // num_chunks)]
print ("cur_seq {}".format(cur_seq))
for j in range(len(cur_seq)-1):
train_seq_cur_epoch.append(train_seq[s])
train_startFrames_cur_epoch.append(cur_seq[j])
train_endFrames_cur_epoch.append(cur_seq[j+1]-1)
if len(cur_seq) == 1: # Corner case
train_seq_cur_epoch.append(train_seq[s])
train_startFrames_cur_epoch.append(train_startFrames[s])
train_endFrames_cur_epoch.append(train_endFrames[s])
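    # Illustrative example of the chunking above (hypothetical numbers): for a sequence
    # running from frame 0 to 3399 with num_chunks = 3, cur_seq becomes [0, 1133, 2266],
    # contributing two sub-sequences (0-1132 and 1133-2265) this epoch; frames past the
    # last boundary are skipped for this epoch.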
permutation = np.random.permutation(len(train_seq_cur_epoch))
train_seq_cur_epoch = [train_seq_cur_epoch[p] for p in permutation]
train_startFrames_cur_epoch = [train_startFrames_cur_epoch[p] for p in permutation]
train_endFrames_cur_epoch = [train_endFrames_cur_epoch[p] for p in permutation]
kitti_train = KITTIDataset(cmd.datadir, train_seq_cur_epoch, train_startFrames_cur_epoch, \
train_endFrames_cur_epoch, width = cmd.imageWidth, height = cmd.imageHeight, \
parameterization = cmd.outputParameterization, outputFrame = cmd.outputFrame)
kitti_val = KITTIDataset(cmd.datadir, val_seq, val_startFrames, val_endFrames, \
width = cmd.imageWidth, height = cmd.imageHeight, parameterization = cmd.outputParameterization, \
outputFrame = cmd.outputFrame)
# print (cmd.outputFrame)
# Initialize a trainer (Note that any accumulated gradients on the model are flushed
# upon creation of this Trainer object)
trainer = Trainer(cmd, epoch, deepVO, kitti_train, kitti_val, criterion, write_dir, optimizer, \
scheduler = None)
# Training loop
print('===> Training: ' + str(epoch+1) + '/' + str(cmd.nepochs))
startTime = time.time()
rotLosses_train_cur, transLosses_train_cur, totalLosses_train_cur = trainer.train()
print('Train time: ', time.time() - startTime)
rotLosses_train += rotLosses_train_cur
transLosses_train += transLosses_train_cur
totalLosses_train += totalLosses_train_cur
# Learning rate scheduler, if specified
if cmd.lrScheduler is not None:
scheduler.step()
# Snapshot
if cmd.snapshotStrategy == 'default':
if epoch % cmd.snapshot == 0 or epoch == cmd.nepochs - 1 and epoch > 0:
print('Saving model after epoch', epoch, '...')
torch.save(deepVO, os.path.join(cmd.expDir, 'models', 'model' + str(epoch).zfill(3) + '.pt'))
elif cmd.snapshotStrategy == 'recent':
# Save the most recent model
print('Saving model after epoch', epoch, '...')
torch.save(deepVO, os.path.join(cmd.expDir, 'models', 'recent.pt'))
elif cmd.snapshotStrategy == 'best' or 'none':
# If we only want to save the best model, defer the decision
pass
# Validation loop
print('===> Validation: ' + str(epoch+1) + '/' + str(cmd.nepochs))
startTime = time.time()
rotLosses_val_cur, transLosses_val_cur, totalLosses_val_cur = trainer.validate()
print('Val time: ', time.time() - startTime)
rotLosses_val += rotLosses_val_cur
transLosses_val += transLosses_val_cur
totalLosses_val += totalLosses_val_cur
# Snapshot (if using 'best' strategy)
if cmd.snapshotStrategy == 'best':
if np.mean(totalLosses_val_cur) <= bestValLoss:
bestValLoss = np.mean(totalLosses_val_cur)
print('Saving model after epoch', epoch, '...')
torch.save(deepVO, os.path.join(cmd.expDir, 'models', 'best' + '.pt'))
if cmd.tensorboardX is True:
writer.add_scalar('loss/train/rot_loss_train', np.mean(rotLosses_train), epoch)
writer.add_scalar('loss/train/trans_loss_train', np.mean(transLosses_train), epoch)
writer.add_scalar('loss/train/total_loss_train', np.mean(totalLosses_train), epoch)
writer.add_scalar('loss/train/rot_loss_val', np.mean(rotLosses_val), epoch)
        writer.add_scalar('loss/train/trans_loss_val', np.mean(transLosses_val), epoch)
# -*- coding: utf-8 -*-
"""
Implementation of the modified Gromov–Hausdorff (mGH) distance
between compact metric spaces induced by unweighted graphs. This
code complements the results from "Efficient estimation of a
Gromov–Hausdorff distance between unweighted graphs" by <NAME> et
al. (https://arxiv.org/pdf/1909.09772). The mGH distance was first
defined in "Some properties of Gromov–Hausdorff distances" by F.
Mémoli (Discrete & Computational Geometry, 2012).
Author: <NAME>
===================================================================
Usage examples:
1) Estimating the mGH distance between 4-clique and single-vertex
graph from their adjacency matrices. Note that it suffices to fill
only the upper triangle of an adjacency matrix.
>>> AG = [[0, 1, 1, 1], [0, 0, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0]]
>>> AH = [[0]]
>>> lb, ub = gromov_hausdorff(AG, AH)
>>> lb, ub
(0.5, 0.5)
2) Estimating the mGH distance between cycle graphs of length 2 and
5 from their adjacency matrices. Note that the adjacency matrices
can be given in both dense and sparse SciPy formats.
>>> AI = np.array([[0, 1], [0, 0]])
>>> AJ = sps.csr_matrix(([1] * 5, ([0, 0, 1, 2, 3], [1, 4, 2, 3, 4])), shape=(5, 5))
>>> lb, ub = gromov_hausdorff(AI, AJ)
>>> lb, ub
(0.5, 1.0)
3) Estimating all pairwise mGH distances between multiple graphs
from their adjacency matrices as an iterable.
>>> As = [AG, AH, AI, AJ]
>>> lbs, ubs = gromov_hausdorff(As)
>>> lbs
array([[0. , 0.5, 0.5, 0.5],
[0.5, 0. , 0.5, 1. ],
[0.5, 0.5, 0. , 0.5],
[0.5, 1. , 0.5, 0. ]])
>>> ubs
array([[0. , 0.5, 0.5, 0.5],
[0.5, 0. , 0.5, 1. ],
[0.5, 0.5, 0. , 1. ],
[0.5, 1. , 1. , 0. ]])
===================================================================
Notations:
|X| denotes the number of elements in set X.
X → Y denotes the set of all mappings of set X into set Y.
V(G) denotes vertex set of graph G.
mGH(X, Y) denotes the modified Gromov–Hausdorff distance between
compact metric spaces X and Y.
row_i(A) denotes the i-th row of matrix A.
PSPS^n(A) denotes the set of all permutation similarities of
all n×n principal submatrices of square matrix A.
PSPS^n_{i←j}(A) denotes the set of all permutation similarities of
all n×n principal submatrices of square matrix A whose i-th row is
comprised of the entries in row_j(A).
===================================================================
Glossary:
Distance matrix of metric space X is a |X|×|X| matrix whose
(i, j)-th entry holds the distance between i-th and j-th points of
X. By the properties of a metric, distance matrices are symmetric
and non-negative, their diagonal entries are 0 and off-diagonal
entries are positive.
Curvature is a generalization of distance matrix that allows
repetitions in the underlying points of a metric space. Curvature
of an n-tuple of points from metric space X is an n×n matrix whose
(i, j)-th entry holds the distance between the points from i-th and
j-th positions of the tuple. Since these points need not be
distinct, the off-diagonal entries of a curvature can equal 0.
n-th curvature set of metric space X is the set of all curvatures
of X that are of size n×n.
d-bounded curvature for some d > 0 is a curvature whose
off-diagonal entries are all ≥ d.
Positive-bounded curvature is a curvature whose off-diagonal
entries are all positive, i.e. the points in the underlying tuple
are distinct. Equivalently, positive-bounded curvatures are
distance matrices on the subsets of a metric space.
"""
import numpy as np
import warnings
import scipy.sparse as sps
from scipy.sparse.csgraph import shortest_path, connected_components
__all__ = ["gromov_hausdorff"]
# To sample √|X| * log (|X| + 1) mappings from X → Y by default.
DEFAULT_MAPPING_SAMPLE_SIZE_ORDER = np.array([.5, 1])
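# Illustrative reading of the default order (an assumption about how the two entries
# are consumed by the sampling helpers later in this module): with |X| = 100 points,
# [.5, 1] corresponds to sampling on the order of 100**0.5 * log(100 + 1)**1 ≈ 46
# mappings from X → Y.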
def gromov_hausdorff(
AG, AH=None, mapping_sample_size_order=DEFAULT_MAPPING_SAMPLE_SIZE_ORDER):
"""
Estimate the mGH distance between simple unweighted graphs,
represented as compact metric spaces based on their shortest
path lengths.
Parameters
-----------
AG: (N,N) np.array
(Sparse) adjacency matrix of graph G with N vertices, or an iterable of
adjacency matrices if AH=None.
AH: (M,M) np.array
(Sparse) adjacency matrix of graph H with M vertices, or None.
mapping_sample_size_order: (2,) np.array
Parameter that regulates the number of mappings to sample when
tightening upper bound of the mGH distance.
Returns
--------
lb: float
Lower bound of the mGH distance, or a square matrix holding
lower bounds of pairwise mGH distances if AH=None.
ub: float
Upper bound of the mGH distance, or a square matrix holding
upper bounds of pairwise mGH distances if AH=None.
"""
# Form iterable with adjacency matrices.
if AH is None:
if len(AG) < 2:
raise ValueError("'estimate_between_unweighted_graphs' needs at least"
"2 graphs to discriminate")
As = AG
else:
As = (AG, AH)
N = len(As)
# Find lower and upper bounds of each pairwise mGH distance between
# the graphs.
lbs = np.zeros((N, N))
ubs = np.zeros((N, N))
for i in range(N):
for j in range(i + 1, N):
# Transform adjacency matrices of a pair of graphs to
# distance matrices.
DX = make_distance_matrix_from_adjacency_matrix(As[i])
DY = make_distance_matrix_from_adjacency_matrix(As[j])
# Find lower and upper bounds of the mGH distance between
# the pair of graphs.
lbs[i, j], ubs[i, j] = estimate(
DX, DY, mapping_sample_size_order=mapping_sample_size_order)
if AH is None:
# Symmetrize matrices with lower and upper bounds of pairwise
# mGH distances between the graphs.
lower_triangle_indices = np.tril_indices(N, -1)
lbs[lower_triangle_indices] = lbs.T[lower_triangle_indices]
ubs[lower_triangle_indices] = ubs.T[lower_triangle_indices]
return lbs, ubs
else:
return lbs[0, 1], ubs[0, 1]
def make_distance_matrix_from_adjacency_matrix(AG):
"""
Represent simple unweighted graph as compact metric space (with
integer distances) based on its shortest path lengths.
Parameters
-----------
AG: (N,N) np.array
(Sparse) adjacency matrix of simple unweighted graph G with N vertices.
Returns
--------
DG: (N,N) np.array
(Dense) distance matrix of the compact metric space
representation of G based on its shortest path lengths.
"""
# Convert adjacency matrix to SciPy format if needed.
if not sps.issparse(AG) and not isinstance(AG, np.ndarray):
        AG = np.asarray(AG)
#!/usr/bin/env python
# -*- coding: utf-8 -*
import unittest
import numpy as np
from pyscf import gto, scf, dft
import decodense
# decimal tolerance
TOL = 9
# settings
LOC = ('', 'fb', 'pm', 'ibo-2', 'ibo-4')
POP = ('mulliken', 'iao')
PART = ('orbitals', 'eda', 'atoms')
# init molecule
mol = gto.M(verbose = 0, output = None,
basis = 'pcseg1', symmetry = True,
atom = 'geom/h2o.xyz')
# mf calc
mf = dft.RKS(mol)
mf.xc = 'wb97m_v'
mf.nlc = 'vv10'
mf.nlcgrids.atom_grid = (50, 194)
mf.nlcgrids.prune = dft.gen_grid.sg1_prune
mf.kernel()
def tearDownModule():
global mol, mf
mol.stdout.close()
del mol, mf
class KnownValues(unittest.TestCase):
def test(self):
mf_e_tot = mf.e_tot
for loc in LOC:
for pop in POP:
for part in PART:
with self.subTest(loc=loc, pop=pop, part=part):
decomp = decodense.DecompCls(loc=loc, pop=pop, part=part)
res = decodense.main(mol, decomp, mf)
if part == 'orbitals':
                            e_tot = np.sum(res['struct'])
import shlex
import subprocess
import cv2
import pybullet_data
from gym import error
import numpy as np
from inspect import currentframe, getframeinfo
import gibson
from gibson.core.physics.robot_locomotors import Fetch
from gibson.core.render.pcrender import PCRenderer
from gibson.data.datasets import ViewDataSet3D
from gibson.envs.env_bases import *
from gibson.envs.env_modalities import CameraRobotEnv, OneViewUI, TwoViewUI, ThreeViewUI, FourViewUI
CALC_OBSTACLE_PENALTY = 1
tracking_camera = {
'yaw': 20,
'z_offset': 0.5,
'distance': 1,
'pitch': -20
}
def quaternion_multiply(quaternion1, quaternion0):
w0, x0, y0, z0 = quaternion0
w1, x1, y1, z1 = quaternion1
return np.array([-x1 * x0 - y1 * y0 - z1 * z0 + w1 * w0,
x1 * w0 + y1 * z0 - z1 * y0 + w1 * x0,
-x1 * z0 + y1 * w0 + z1 * x0 + w1 * y0,
x1 * y0 - y1 * x0 + z1 * w0 + w1 * z0], dtype=np.float64)
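# Quick sanity sketch (illustrative, not part of the original file): multiplying by the
# identity quaternion (w, x, y, z) = (1, 0, 0, 0) leaves the other operand unchanged, e.g.
#     quaternion_multiply([1, 0, 0, 0], [0.5, 0.5, 0.5, 0.5]) -> array([0.5, 0.5, 0.5, 0.5])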
class FetchNavigateEnv(CameraRobotEnv):
def _reward(self, action):
raise NotImplementedError()
def __init__(self, config, gpu_idx=0, depth_render_port=5556, use_filler=None):
self.config = config
assert (self.config["envname"] == self.__class__.__name__ or self.config["envname"] == "TestEnv")
if isinstance(use_filler, bool):
self._use_filler = use_filler
else:
self._use_filler = config["use_filler"]
CameraRobotEnv.__init__(self, self.config, gpu_idx,
scene_type="stadium" if self.config["model_id"] == "stadium" else "building",
tracking_camera=tracking_camera, start_port=depth_render_port, use_filler=use_filler)
# print("Finished setting up camera_env")
self.window_width = self.config['window_width']
self.window_height = self.config['window_height']
self._render_width = self.config['window_width']
self._render_height = self.config['window_height']
self._target_labels = self.config['target_labels']
self.keys_to_action = {
(ord('s'),): [-0.05, 0] + [0] * 13, # backward
(ord('w'),): [0.05, 0] + [0] * 13, # forward
(ord('d'),): [0, 0.05] + [0] * 13, # turn right
(ord('a'),): [0, -0.05] + [0] * 13, # turn left
(): [0] * 15
}
# print("[{} {}] Fetch init'd".format(getframeinfo(currentframe()).filename, getframeinfo(currentframe()).lineno))
fetch = Fetch(self.config, env=self)
# print("[{} {}] Introducing robot".format(getframeinfo(currentframe()).filename, getframeinfo(currentframe()).lineno))
self.robot_introduce(fetch)
# print("[{} {}] Introducing scene".format(getframeinfo(currentframe()).filename,
# getframeinfo(currentframe()).lineno))
self.scene_introduce()
# print("[{} {}] Scene Introduced".format(getframeinfo(currentframe()).filename,
# getframeinfo(currentframe()).lineno))
self.total_reward = 0
self.total_frame = 0
self.goal_img = None
self.initial_pos = config['initial_pos']
self.initial_orn = config['initial_orn']
self.step = self._step
self.reset = self._reset
self.nonWheelJoints = [j for j in self.robot.ordered_joints if 'wheel' not in j.joint_name]
self.markers = []
self.marker_ids = []
# Initialize camera to point top down
if self.gui:
pos = self.robot._get_scaled_position()
# orn = self.robot.get_orientation()
pos = (pos[0], pos[1], pos[2] + self.tracking_camera['z_offset'])
pos = np.array(pos)
# dist = self.tracking_camera['distance'] / self.robot.mjcf_scaling
# [yaw, pitch, dist] = p.getDebugVisualizerCamera()[8:11]
p.resetDebugVisualizerCamera(3, 0, 269, pos)
def robot_introduce(self, robot):
self.robot = robot
self.robot.env = self
self.action_space = self.robot.action_space
# Robot's eye observation, in sensor mode black pixels are returned
self.observation_space = self.robot.observation_space
self.sensor_space = self.robot.sensor_space
# seed for robot
self.robot.np_random = self.np_random
self._robot_introduced = True
# assert (512 >= self.robot.resolution >= 64), "Robot resolution must in [64, 512]"
self.window_width = self.config['window_width']
self.window_height = self.config['window_height']
self.scale_up = 1 # int(512 / self.window_width)
self.window_dim = self.robot.resolution
if "fast_lq_render" in self.config and self.config["fast_lq_render"]:
self.scale_up *= 2
self.setup_rendering_camera()
def reset_observations(self):
# Initialize blank render image
self.render_rgb_filled = np.zeros((self._render_width, self._render_height, 3))
self.render_rgb_prefilled = np.zeros((self._render_width, self._render_height, 3))
self.render_depth = np.zeros((self._render_width, self._render_height, 1))
self.render_normal = np.zeros((self._render_width, self._render_height, 3))
self.render_semantics = np.zeros((self._render_width, self._render_height, 3))
def setup_rendering_camera(self):
if self.test_env:
return
self.r_camera_rgb = None # Rendering engine
self.r_camera_mul = None # Multi channel rendering engine
self.r_camera_dep = None
# self.check_port_available()
ui_map = {
1: OneViewUI,
2: TwoViewUI,
3: ThreeViewUI,
4: FourViewUI,
}
assert self.config["ui_num"] == len(
self.config['ui_components']), "In configuration, ui_num is not equal to the number of ui components"
if self.config["display_ui"]:
ui_num = self.config["ui_num"]
self.UI = ui_map[ui_num](self.window_width, self.window_height, self, self.port_ui)
if self._require_camera_input:
self.setup_camera_multi()
self.setup_camera_pc()
if self.config["mode"] == "web_ui":
ui_num = self.config["ui_num"]
self.webUI = ui_map[ui_num](self.window_width, self.window_height, self, self.port_ui, use_pygame=False)
def setup_camera_pc(self):
# Camera specific
assert self._require_camera_input
if self.scene_type == "building":
self.dataset = ViewDataSet3D(
transform=np.array,
mist_transform=np.array,
seqlen=2,
off_3d=False,
train=False,
overwrite_fofn=True, env=self, only_load=self.config["model_id"])
scene_dict = dict(zip(self.dataset.scenes, range(len(self.dataset.scenes))))
        # TODO(hzyjerry): more error handling
        if self.model_id not in scene_dict:
raise error.Error("Dataset not found: models {} cannot be loaded".format(self.model_id))
else:
scene_id = scene_dict[self.model_id]
uuids, rts = self.dataset.get_scene_info(scene_id)
targets, sources, source_depths, poses = [], [], [], []
source_semantics = []
if not self.multiprocessing or self.config["envname"] == "TestEnv":
all_data = self.dataset.get_multi_index([v for k, v in uuids])
for i, data in enumerate(all_data):
target, target_depth = data[1], data[3]
if not self._require_rgb:
continue
ww = target.shape[0] // 8 + 2
target[:ww, :, :] = target[ww, :, :]
target[-ww:, :, :] = target[-ww, :, :]
if self.scale_up != 1:
target = cv2.resize(
target, None,
fx=1.0 / self.scale_up,
fy=1.0 / self.scale_up,
interpolation=cv2.INTER_CUBIC)
target_depth = cv2.resize(
target_depth, None,
fx=1.0 / self.scale_up,
fy=1.0 / self.scale_up,
interpolation=cv2.INTER_CUBIC)
pose = data[-1][0].numpy()
targets.append(target)
poses.append(pose)
sources.append(target)
source_depths.append(target_depth)
else:
all_data = self.dataset.get_multi_index([v for k, v in uuids])
for i, data in enumerate(all_data):
target, target_depth = data[1], data[3]
if not self._require_rgb:
continue
ww = target.shape[0] // 8 + 2
target[:ww, :, :] = target[ww, :, :]
target[-ww:, :, :] = target[-ww, :, :]
if self.scale_up != 1:
target = cv2.resize(
target, None,
fx=1.0 / self.scale_up,
fy=1.0 / self.scale_up,
interpolation=cv2.INTER_CUBIC)
target_depth = cv2.resize(
target_depth, None,
fx=1.0 / self.scale_up,
fy=1.0 / self.scale_up,
interpolation=cv2.INTER_CUBIC)
pose = data[-1][0].numpy()
targets.append(target)
poses.append(pose)
sources.append(target)
source_depths.append(target_depth)
self.r_camera_rgb = PCRenderer(self.port_rgb, sources, source_depths, target, rts,
scale_up=self.scale_up,
semantics=source_semantics,
gui=self.gui,
use_filler=self._use_filler,
gpu_idx=self.gpu_idx,
window_width=self._render_width,
window_height=self._render_height,
env=self)
def zero_joints(self):
for j in self.ordered_joints:
j.reset_joint_state(0, 0)
def setup_camera_multi(self):
assert self._require_camera_input
def camera_multi_excepthook(exctype, value, tb):
print("killing", self.r_camera_mul)
self.r_camera_mul.terminate()
if self.r_camera_dep is not None:
self.r_camera_dep.terminate()
if self._require_normal:
self.r_camera_norm.terminate()
if self._require_semantics:
self.r_camera_semt.terminate()
while tb:
if exctype == KeyboardInterrupt:
print("Exiting Gibson...")
return
filename = tb.tb_frame.f_code.co_filename
name = tb.tb_frame.f_code.co_name
lineno = tb.tb_lineno
print(' File "%.500s", line %d, in %.500s' % (filename, lineno, name))
tb = tb.tb_next
print(' %s: %s' % (exctype.__name__, value))
sys.excepthook = camera_multi_excepthook
enable_render_smooth = 0
dr_path = os.path.join(os.path.dirname(os.path.abspath(gibson.__file__)), 'core', 'channels', 'depth_render')
cur_path = os.getcwd()
os.chdir(dr_path)
render_main = "./depth_render --GPU {} --modelpath {} -w {} -h {} -f {} -p {}".format(self.gpu_idx,
self.model_path,
self._render_width,
self._render_height,
self.config[
"fov"] / np.pi * 180,
self.port_depth)
render_norm = "./depth_render --GPU {} --modelpath {} -n 1 -w {} -h {} -f {} -p {}".format(self.gpu_idx,
self.model_path,
self._render_width,
self._render_height,
self.config[
"fov"] / np.pi * 180,
self.port_normal)
render_semt = "./depth_render --GPU {} --modelpath {} -t 1 -r {} -c {} -w {} -h {} -f {} -p {}".format(
self.gpu_idx, self.model_path, self._semantic_source, self._semantic_color, self._render_width,
self._render_height,
self.config["fov"] / np.pi * 180, self.port_sem)
self.r_camera_mul = subprocess.Popen(shlex.split(render_main), shell=False)
# self.r_camera_dep = subprocess.Popen(shlex.split(render_depth), shell=False)
if self._require_normal:
self.r_camera_norm = subprocess.Popen(shlex.split(render_norm), shell=False)
if self._require_semantics:
self.r_camera_semt = subprocess.Popen(shlex.split(render_semt), shell=False)
os.chdir(cur_path)
def get_eye_pos_orientation(self):
"""Used in CameraEnv.setup"""
eye_pos = self.robot.eyes.get_position()
x, y, z, w = self.robot.eyes.get_orientation()
eye_quat = quaternion_multiply(quaternion_multiply([w, x, y, z], [0.7071, 0.7071, 0, 0]),
[0.7071, 0, -0.7071, 0]).tolist()
return eye_pos, eye_quat
def get_odom(self):
return np.array(self.robot.body_xyz) - np.array(self.config["initial_pos"]), np.array(self.robot.body_rpy)
def add_text(self, img):
font = cv2.FONT_HERSHEY_SIMPLEX
x, y, z = self.robot.get_position()
r, p, ya = self.robot.get_rpy()
cv2.putText(img, 'x:{0:.4f} y:{1:.4f} z:{2:.4f}'.format(x, y, z), (10, 20), font, 0.5, (255, 255, 255), 1,
cv2.LINE_AA)
cv2.putText(img, 'ro:{0:.4f} pth:{1:.4f} ya:{2:.4f}'.format(r, p, ya), (10, 40), font, 0.5, (255, 255, 255), 1,
cv2.LINE_AA)
cv2.putText(img, 'potential:{0:.4f}'.format(self.potential), (10, 60), font, 0.5, (255, 255, 255), 1,
cv2.LINE_AA)
cv2.putText(img, 'fps:{0:.4f}'.format(self.fps), (10, 80), font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
return img
def _step(self, action):
observations, reward, done, other = super()._step(action)
# p.resetDebugVisualizerCamera(6, 0, 269, self.robot.body_xyz)
return observations, reward, done, other
def update_sim(self, a):
t = time.time()
base_obs, sensor_reward, done, sensor_meta = self._pre_update_sim(a)
dt = time.time() - t
# Speed bottleneck
observations = base_obs
self.fps = 0.9 * self.fps + 0.1 * 1 / dt
if self.gui:
if self.config["display_ui"]:
self.render_to_UI()
# print('render to ui')
self.save_frame += 1
elif self._require_camera_input:
# Use non-pygame GUI
self.r_camera_rgb.renderToScreen()
if self.config["mode"] == 'web_ui':
self.render_to_webUI()
if not self._require_camera_input or self.test_env:
return base_obs, sensor_reward, done, sensor_meta
else:
if self.config["show_diagnostics"] and self._require_rgb:
self.render_rgb_filled = self.add_text(self.render_rgb_filled)
return observations, sensor_reward, done, sensor_meta
def _pre_update_sim(self, a):
self.nframe += 1
# if not self.scene.multiplayer: # if multiplayer, action first applied to all robots, then global step() called, then _step() for all robots with the same actions
# self.robot.apply_action(a)
# self.scene.global_step()
p.stepSimulation()
self.rewards = [-1] # self._rewards(a)
# done = self._termination()
self.reward = 0
self.eps_reward = 0
if self.gui:
pos = self.robot._get_scaled_position()
# orn = self.robot.get_orientation()
pos = (pos[0], pos[1], pos[2] + self.tracking_camera['z_offset'])
pos = np.array(pos)
# dist = self.tracking_camera['distance'] / self.robot.mjcf_scaling
[yaw, pitch, dist] = p.getDebugVisualizerCamera()[8:11]
p.resetDebugVisualizerCamera(dist, yaw, pitch, pos)
eye_pos, eye_quat = self.get_eye_pos_orientation()
pose = [eye_pos, eye_quat]
# laser_scan_pos, laser_scan_quat = self.get_laser_scan_pos_orientation()
# laser_scan_pose = [laser_scan_pos, eye_quat]
# createPoseMarker(eye_pos, self.robot.eyes.get_orientation())
# createPoseMarker(laser_scan_pos, laser_xyzw)
# laser_observations = self.render_observations(laser_scan_pose)
observations = self.render_observations(pose)
# observations['laser'] = laser_observations['depth']
return observations, sum(self.rewards), False, dict(eye_pos=eye_pos, eye_quat=eye_quat, episode={})
def get_laser_scan_pos_orientation(self):
"""Used in CameraEnv.setup"""
lpx, lpy, lpz, lrx, lry, lrz, lrw = self.robot.parts['laser_link'].get_pose()
laser_scan_pos = np.array([lpx, lpy, lpz])
laser_scan_quat = np.array([lrw, lrx, lry, lrz])
# laser_scan_quat = quaternion_multiply(quaternion_multiply([lrw, lrx, lry, lrz], [0.7071, 0.7071, 0, 0]),
# [0.7071, 0, -0.7071, 0]).tolist()
return laser_scan_pos, laser_scan_quat
def getDebugVisualizerCamera(self):
w, h, viewMatrix, projMatrix, *_ = p.getDebugVisualizerCamera()
w, h, rgb, d, _ = p.getCameraImage(w, h, viewMatrix, projMatrix, renderer=p.ER_BULLET_HARDWARE_OPENGL)
return rgb
def _rewards(self, action=None, debugmode=False):
action_key = np.argmax(action)
a = self.robot.action_list[action_key]
realaction = []
for j in self.robot.ordered_joints:
if j.joint_name in self.robot.foot_joints.keys():
realaction.append(self.robot.action_list[action_key][self.robot.foot_joints[j.joint_name]])
else:
realaction.append(0.)
potential_old = self.potential
self.potential = self.robot.calc_potential()
progress = float(self.potential - potential_old)
feet_collision_cost = 0.0
for i, f in enumerate(
self.robot.feet): # TODO: Maybe calculating feet contacts could be done within the robot code
# print(f.contact_list())
contact_ids = set((x[2], x[4]) for x in f.contact_list())
# print("CONTACT OF '%d' WITH %d" % (contact_ids, ",".join(contact_names)) )
if self.ground_ids & contact_ids:
# see Issue 63: https://github.com/openai/roboschool/issues/63
# feet_collision_cost += self.foot_collision_cost
self.robot.feet_contact[i] = 1.0
else:
self.robot.feet_contact[i] = 0.0
# print(self.robot.feet_contact)
electricity_cost = self.electricity_cost * float(
            np.abs(realaction * self.robot.joint_speeds).mean())  # '.mean())' is an assumed completion; the source line is truncated here
import numpy as np
import cv2
import torch
from .basic_dataset import BasicDataset
from PIL import Image
TOWNS = ['Town01', 'Town02', 'Town03', 'Town04', 'Town05', 'Town06', 'Town07', 'Town10HD']
class BEVDataset(BasicDataset):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
w = h = self.crop_size
world_w = world_h = self.crop_size//self.pixels_per_meter
self.margin = 32
def __getitem__(self, idx):
lmdb_txn = self.txn_map[idx]
index = self.idx_map[idx]
# Vehicle locations/orientations
ego_id, ego_locs, ego_oris, ego_bbox, msks, locs, oris, bbox, typs = self.__class__.filter(
lmdb_txn, index,
max_pedestrian_radius=self.max_pedestrian_radius,
max_vehicle_radius=self.max_vehicle_radius,
T=self.num_plan)
# Normalize coordinates to ego frame
ego_locs, locs, oris, bbox, typs = transform_ego(ego_locs, locs, oris, bbox, typs, ego_oris[0], self.num_plan+1)
# Random jitter
offset = int((torch.rand(1)*2-1)*self.x_jitter)
offset = np.clip(offset, -self.margin, self.margin)
angle = float(torch.rand(1)*2-1)*self.angle_jitter
# BEV images
bev = self.__class__.load_bev(lmdb_txn, index, channels=[0,1,2,9,10])
bev = rotate_image(bev, angle)
bev = (bev>0).astype(np.uint8).transpose(2,0,1)
bev = np.pad(bev, [[0,0],[self.margin,self.margin],[self.margin,self.margin]])
bev = bev[:,self.margin:self.margin+320,self.margin+offset:self.margin+offset+320]
nxp = self.__class__.access('nxp', lmdb_txn, index, 1).reshape(2)
ego_locs = rotate_points(ego_locs, -angle, ego_locs[0]) + [offset/self.pixels_per_meter, 0]
nxp = rotate_points(nxp, -angle, ego_locs[0]) + [offset/self.pixels_per_meter, 0]
cmd = int(self.__class__.access('cmd', lmdb_txn, index, 1, dtype=np.uint8))
bra = int(self.__class__.access('bra', lmdb_txn, index, 1, dtype=np.uint8))
# Overwrite cmd with the additional STOP command.
spd = np.mean(np.linalg.norm(ego_locs[1:]-ego_locs[:-1],axis=-1))
cmd = self.num_cmds-1 if spd < self.brake_speed else cmd
locs = rotate_points(locs, -angle, ego_locs[0]) + [offset/self.pixels_per_meter, 0]
oris[1:] = oris[1:] - np.deg2rad(angle) # Ego vehicle not affected
# Pad tensors
num_objs = min(len(locs), self.max_objs)
padded_locs = np.zeros((self.max_objs,self.num_plan+1,2), dtype=np.float32)
padded_oris = np.zeros((self.max_objs,), dtype=np.float32)
padded_typs = np.zeros((self.max_objs,), dtype=np.int32)
padded_locs[:num_objs] = locs[:num_objs]
padded_oris[:num_objs] = oris[:num_objs,0]
padded_typs[:num_objs] = typs[:num_objs,0]
return (
bev, # Segmentation targets
-ego_locs, cmd, -nxp, bra, # Planning targets
-padded_locs, padded_oris, padded_typs, num_objs # Motion forecast targets
)
def rotate_image(image, angle, image_center=(160,280)):
rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
result = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)
return result
def rotate_points(points, angle, ego_loc):
radian = np.deg2rad(angle)
return (points-ego_loc) @ [
[ np.cos(radian), np.sin(radian)],
[-np.sin(radian), np.cos(radian)]
] + ego_loc
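

# Hedged usage sketch (added; not part of the original dataset code): it shows
# rotate_image and rotate_points applied with the same angle so the BEV raster
# and the waypoints stay geometrically consistent, mirroring the jitter logic
# in BEVDataset.__getitem__. All values below are illustrative.
def _demo_bev_jitter(angle=10.0):
    """Rotate a dummy BEV raster and a few waypoints by the same angle."""
    bev = np.zeros((320, 320, 3), dtype=np.uint8)                # dummy raster
    ego_loc = np.array([0.0, 0.0])                               # ego at the origin
    waypoints = np.array([[0.0, 0.0], [0.0, 5.0], [1.0, 10.0]])
    rotated_bev = rotate_image(bev, angle)
    # Sign convention used by __getitem__: points are rotated by -angle when
    # the image is rotated by +angle.
    rotated_wps = rotate_points(waypoints, -angle, ego_loc)
    return rotated_bev, rotated_wps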
def transform_ego(ego_locs, locs, oris, bbox, typs, ego_ori, T=11):
ego_loc = ego_locs[0]
keys = sorted(list(locs.keys()))
locs = np.array([locs[k] for k in keys]).reshape(-1,T,2)
oris = np.array([oris[k] for k in keys]).reshape(-1,T)
bbox = np.array([bbox[k] for k in keys]).reshape(-1,T,2)
typs = np.array([typs[k] for k in keys]).reshape(-1,T)
R = [[ | np.sin(ego_ori) | numpy.sin |
# -*- coding: utf-8 -*-
'''
<NAME>
Again, I sampled the PDF using the CDF.
1. Variances are given on the plots. For Chi2, I used 10 bins. KS is the least precise and MLE the most. This is a little surprising; I expected Chi2 to be the least precise, since it is binned. However, perhaps the underlying similarity between MLE and Chi2 has something to do with this. I suppose it is a bit strange to use KS to find the value of a parameter like this: usually it is just used to compare relative frequencies.
2. MLE has the highest power, according to the Neyman-Pearson lemma, and KS has the least (again somewhat surprising).
In blue is P=0.5 and in green P=0.0.
'''
import random
import math
import matplotlib.pyplot as plt
import numpy as np
import pickle
import scipy.stats as stat
import scipy.special as sp
import statistics as st
#Samples from the PDF and computes the mean.
#Maps random reals in (0,1) to the Poisson distribution using a Poisson lookup table
class theDistro:
lookup_x=[]
lookup_y=[]
gencdf=[]
normcdf=[]
maxcdf=0
thetabins=np.linspace(0,2*math.pi,10, endpoint=False)
def GenerateSample(self):
randomNumber = random.uniform(theDistro.gencdf[0],theDistro.maxcdf)
index=-1
if randomNumber < theDistro.gencdf[0]:
index=0
else:
for i in range(0,len(theDistro.gencdf)-1):
if randomNumber > theDistro.gencdf[i] and randomNumber < theDistro.gencdf[i+1]:
index=i+1
if index != -1:
self.samples.append(theDistro.lookup_x[index])
def GenerateNSamples(self, numSamples):
for i in range(0, numSamples):
self.GenerateSample()
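    # Added note (not in the original submission): GenerateSample is an
    # inverse-CDF lookup. A uniform draw in [gencdf[0], maxcdf) is located
    # between two consecutive tabulated CDF values and the matching lookup_x
    # bin is recorded, so the collected samples follow the tabulated PDF.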
def __init__(self, nSamples):
self.samples=[]
self.logbins=[]
self.ksP=0;
self.chiP=0;
self.mlP=0;
self.GenerateNSamples(nSamples)
self.trialCDF=np.zeros(nSamples)
self.ksthetaspace=np.linspace(0,2*math.pi,200)
runningTotal=0
        self.sortedsamples = np.sort(self.samples)
"""
@author: <NAME>
@date: 2020.03
"""
import cv2
import json
import numpy as np
import os
from scipy.io import loadmat
from collections import OrderedDict
from dataset.JointsDataset import JointsDataset
class MPIIDataset(JointsDataset):
def __init__(self, DATASET, stage, transform=None):
super().__init__(DATASET, stage, transform)
self.cur_dir = os.path.split(os.path.realpath(__file__))[0]
self.train_gt_file = 'train.json'
self.train_gt_path = os.path.join(self.cur_dir, 'gt_json',
self.train_gt_file)
self.val_gt_file = 'valid.json'
self.val_gt_path = os.path.join(self.cur_dir, 'gt_json',
self.val_gt_file)
self.val_gt_mat = os.path.join(self.cur_dir, 'gt_json', 'valid.mat')
self.test_det_file = 'test.json'
self.test_det_path = os.path.join(self.cur_dir, 'det_json',
self.test_det_file)
self.data = self._get_data()
self.data_num = len(self.data)
def _get_data(self):
data = list()
if self.stage == 'train':
mpii = json.load(open(self.train_gt_path))
elif self.stage == 'val':
mpii = json.load(open(self.val_gt_path))
else:
mpii = json.load(open(self.test_det_path))
for d in mpii:
img_name = d['image']
img_id = img_name.split('.')[0]
img_path = os.path.join(self.cur_dir, 'images', img_name)
center = np.array(d['center'], dtype=np.float32)
scale = np.array([d['scale'], d['scale']], dtype=np.float32)
if center[0] != -1:
center[1] = center[1] + 15 * scale[1]
center -= 1
if self.stage == 'test':
joints = np.zeros((self.keypoint_num, 3), dtype=np.float32)
else:
joints = np.array(d['joints'], dtype=np.float32)
joints -= 1
joints_vis = np.array(d['joints_vis'], dtype=np.float32)
joints_vis = joints_vis.reshape(-1, 1) * 2
joints = np.concatenate((joints, joints_vis), axis=1)
data.append(dict(center=center,
img_id=img_id,
img_path=img_path,
img_name=img_name,
joints=joints,
scale=scale))
return data
# referring msra high resolution
def evaluate(self, preds):
preds = preds[:, :, 0:2] + 1.0
SC_BIAS = 0.6
threshold = 0.5
gt_file = os.path.join(self.val_gt_mat)
gt_dict = loadmat(gt_file)
dataset_joints = gt_dict['dataset_joints']
jnt_missing = gt_dict['jnt_missing']
pos_gt_src = gt_dict['pos_gt_src']
headboxes_src = gt_dict['headboxes_src']
pos_pred_src = np.transpose(preds, [1, 2, 0])
head = | np.where(dataset_joints == 'head') | numpy.where |
# Created by YongHua
# 05 November 2020
# Python 3.8
import numpy as np
"Sequence as black, silver, red, blue, green, white."
a = | np.array([0.248, 0.217, 0.133, 0.211, 0.024, 0.167]) | numpy.array |
import numpy as np
import pandas as pd
import itertools
from functools import partial
from scipy.linalg import block_diag
def get_diag_index(d_, l):
idx = d_[d_.Class == d_.Class.value_counts().index[l]].index
return idx
def row_feature_rep(rows_, features_):
r_1 = rows_.mean(axis=1).values
f_1 = features_.mean(axis=0).values
r_0 = 1 - r_1
f_0 = 1 - f_1
f = np.array([f_0, f_1])
r = | np.array([r_0, r_1]) | numpy.array |
import numpy as np
import scipy.stats
import os
import logging
from astropy.tests.helper import pytest, catch_warnings
from astropy.modeling import models
from astropy.modeling.fitting import _fitter_to_model_params
from stingray import Powerspectrum
from stingray.modeling import ParameterEstimation, PSDParEst, \
OptimizationResults, SamplingResults
from stingray.modeling import PSDPosterior, set_logprior, PSDLogLikelihood, \
LogLikelihood
try:
from statsmodels.tools.numdiff import approx_hess
comp_hessian = True
except ImportError:
comp_hessian = False
try:
import emcee
can_sample = True
except ImportError:
can_sample = False
try:
import matplotlib.pyplot as plt
can_plot = True
except ImportError:
can_plot = False
class LogLikelihoodDummy(LogLikelihood):
def __init__(self, x, y, model):
LogLikelihood.__init__(self, x, y, model)
def evaluate(self, parse, neg=False):
return np.nan
class OptimizationResultsSubclassDummy(OptimizationResults):
def __init__(self, lpost, res, neg, log=None):
if log is None:
self.log = logging.getLogger('Fitting summary')
self.log.setLevel(logging.DEBUG)
if not self.log.handlers:
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
self.log.addHandler(ch)
self.neg = neg
if res is not None:
self.result = res.fun
self.p_opt = res.x
else:
self.result = None
self.p_opt = None
self.model = lpost.model
class TestParameterEstimation(object):
@classmethod
def setup_class(cls):
np.random.seed(100)
m = 1
nfreq = 100
freq = np.arange(nfreq)
noise = np.random.exponential(size=nfreq)
power = noise * 2.0
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
cls.ps = ps
cls.a_mean, cls.a_var = 2.0, 1.0
cls.model = models.Const1D()
p_amplitude = lambda amplitude: \
scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)
cls.priors = {"amplitude": p_amplitude}
cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power, cls.model,
m=cls.ps.m)
cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)
def test_par_est_initializes(self):
pe = ParameterEstimation()
def test_parest_stores_max_post_correctly(self):
"""
Make sure the keyword for Maximum A Posteriori fits is stored correctly
as a default.
"""
pe = ParameterEstimation()
assert pe.max_post is True, "max_post should be set to True as a default."
def test_object_works_with_loglikelihood_object(self):
llike = PSDLogLikelihood(self.ps.freq, self.ps.power,
self.model, m=self.ps.m)
pe = ParameterEstimation()
res = pe.fit(llike, [2.0])
assert isinstance(res,
OptimizationResults), "res must be of " \
"type OptimizationResults"
def test_fit_fails_when_object_is_not_posterior_or_likelihood(self):
x = np.ones(10)
y = np.ones(10)
pe = ParameterEstimation()
with pytest.raises(TypeError):
res = pe.fit(x, y)
def test_fit_fails_without_lpost_or_t0(self):
pe = ParameterEstimation()
with pytest.raises(TypeError):
res = pe.fit()
def test_fit_fails_without_t0(self):
pe = ParameterEstimation()
with pytest.raises(TypeError):
res = pe.fit(np.ones(10))
def test_fit_fails_with_incorrect_number_of_parameters(self):
pe = ParameterEstimation()
t0 = [1, 2]
with pytest.raises(ValueError):
res = pe.fit(self.lpost, t0)
def test_fit_method_works_with_correct_parameter(self):
pe = ParameterEstimation()
t0 = [2.0]
res = pe.fit(self.lpost, t0)
def test_fit_method_fails_with_too_many_tries(self):
lpost = LogLikelihoodDummy(self.ps.freq, self.ps.power, self.model)
pe = ParameterEstimation()
t0 = [2.0]
with pytest.raises(Exception):
res = pe.fit(lpost, t0, neg=True)
def test_compute_lrt_fails_when_garbage_goes_in(self):
pe = ParameterEstimation()
t0 = [2.0]
with pytest.raises(TypeError):
pe.compute_lrt(self.lpost, t0, None, t0)
with pytest.raises(ValueError):
pe.compute_lrt(self.lpost, t0[:-1], self.lpost, t0)
def test_compute_lrt_sets_max_post_to_false(self):
t0 = [2.0]
pe = ParameterEstimation(max_post=True)
assert pe.max_post is True
delta_deviance, opt1, opt2 = pe.compute_lrt(self.lpost, t0,
self.lpost, t0)
assert pe.max_post is False
assert delta_deviance < 1e-7
@pytest.mark.skipif("not can_sample", "not can_plot")
def test_sampler_runs(self):
pe = ParameterEstimation()
if os.path.exists("test_corner.pdf"):
os.unlink("test_corner.pdf")
with catch_warnings(RuntimeWarning):
sample_res = pe.sample(self.lpost, [2.0], nwalkers=50, niter=10,
burnin=50, print_results=True, plot=True)
assert os.path.exists("test_corner.pdf")
assert sample_res.acceptance > 0.25
assert isinstance(sample_res, SamplingResults)
# TODO: Fix pooling with the current setup of logprior
# @pytest.mark.skipif("not can_sample", "not can_plot")
# def test_sampler_pooling(self):
# pe = ParameterEstimation()
# if os.path.exists("test_corner.pdf"):
# os.unlink("test_corner.pdf")
# with catch_warnings(RuntimeWarning):
# sample_res = pe.sample(self.lpost, [2.0], nwalkers=50, niter=10,
# burnin=50, print_results=True, plot=True,
# pool=True)
@pytest.mark.skipif("can_sample")
def test_sample_raises_error_without_emcee(self):
pe = ParameterEstimation()
with pytest.raises(ImportError):
sample_res = pe.sample(self.lpost, [2.0])
def test_simulate_lrt_fails_in_superclass(self):
pe = ParameterEstimation()
with pytest.raises(NotImplementedError):
pe.simulate_lrts(None, None, None, None, None)
class TestOptimizationResults(object):
@classmethod
def setup_class(cls):
np.random.seed(1000)
m = 1
nfreq = 100
freq = np.arange(nfreq)
noise = np.random.exponential(size=nfreq)
power = noise * 2.0
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.n = freq.shape[0]
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
cls.ps = ps
cls.a_mean, cls.a_var = 2.0, 1.0
cls.model = models.Const1D()
p_amplitude = lambda amplitude: \
scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)
cls.priors = {"amplitude": p_amplitude}
cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
cls.model, m=cls.ps.m)
cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)
cls.fitmethod = "powell"
cls.max_post = True
cls.t0 = np.array([2.0])
cls.neg = True
cls.opt = scipy.optimize.minimize(cls.lpost, cls.t0,
method=cls.fitmethod,
args=cls.neg, tol=1.e-10)
cls.opt.x = np.atleast_1d(cls.opt.x)
cls.optres = OptimizationResultsSubclassDummy(cls.lpost,
cls.opt,
neg=True)
def test_object_initializes_correctly(self):
res = OptimizationResults(self.lpost, self.opt, neg=self.neg)
assert hasattr(res, "p_opt")
assert hasattr(res, "result")
assert hasattr(res, "deviance")
assert hasattr(res, "aic")
assert hasattr(res, "bic")
assert hasattr(res, "model")
assert isinstance(res.model, models.Const1D)
assert res.p_opt == self.opt.x, "res.p_opt must be the same as opt.x!"
assert np.isclose(res.p_opt[0], 2.0, atol=0.1, rtol=0.1)
assert res.model == self.lpost.model
assert res.result == self.opt.fun
mean_model = np.ones_like(self.lpost.x) * self.opt.x[0]
assert np.allclose(res.mfit, mean_model), "res.model should be exactly " \
"the model for the data."
def test_compute_criteria_works_correctly(self):
res = OptimizationResults(self.lpost, self.opt, neg = self.neg)
test_aic = res.result+ 2.0*res.p_opt.shape[0]
test_bic = res.result + res.p_opt.shape[0] * \
np.log(self.lpost.x.shape[0])
test_deviance = -2 * self.lpost.loglikelihood(res.p_opt,
neg=False)
assert np.isclose(res.aic, test_aic, atol=0.1, rtol=0.1)
assert np.isclose(res.bic, test_bic, atol=0.1, rtol=0.1)
assert np.isclose(res.deviance, test_deviance, atol=0.1, rtol=0.1)
def test_merit_calculated_correctly(self):
res = OptimizationResults(self.lpost, self.opt, neg=self.neg)
test_merit = np.sum(((self.ps.power - 2.0)/2.0)**2.)
assert np.isclose(res.merit, test_merit, rtol=0.2)
def test_compute_statistics_computes_mfit(self):
assert hasattr(self.optres, "mfit") is False
self.optres._compute_statistics(self.lpost)
assert hasattr(self.optres, "mfit")
def test_compute_model(self):
self.optres._compute_model(self.lpost)
assert hasattr(self.optres,
"mfit"), "OptimizationResult object should have mfit " \
"attribute at this point!"
_fitter_to_model_params(self.model, self.opt.x)
mfit_test = self.model(self.lpost.x)
assert np.allclose(self.optres.mfit, mfit_test)
def test_compute_statistics_computes_all_statistics(self):
self.optres._compute_statistics(self.lpost)
assert hasattr(self.optres, "merit")
assert hasattr(self.optres, "dof")
assert hasattr(self.optres, "sexp")
assert hasattr(self.optres, "ssd")
assert hasattr(self.optres, "sobs")
test_merit = np.sum(((self.ps.power - 2.0)/2.0)**2.)
test_dof = self.ps.n - self.lpost.npar
test_sexp = 2.0 * self.lpost.x.shape[0] * len(self.optres.p_opt)
test_ssd = np.sqrt(2.0*test_sexp)
test_sobs = np.sum(self.ps.power - self.optres.p_opt[0])
assert np.isclose(test_merit, self.optres.merit, rtol=0.2)
assert test_dof == self.optres.dof
assert test_sexp == self.optres.sexp
assert test_ssd == self.optres.ssd
assert np.isclose(test_sobs, self.optres.sobs, atol=0.01, rtol=0.01)
def test_compute_criteria_returns_correct_attributes(self):
self.optres._compute_criteria(self.lpost)
assert hasattr(self.optres, "aic")
assert hasattr(self.optres, "bic")
assert hasattr(self.optres, "deviance")
npar = self.optres.p_opt.shape[0]
test_aic = self.optres.result + 2. * npar
test_bic = self.optres.result + npar * np.log(self.ps.freq.shape[0])
test_deviance = -2 * self.lpost.loglikelihood(self.optres.p_opt,
neg=False)
assert np.isclose(test_aic, self.optres.aic)
assert np.isclose(test_bic, self.optres.bic)
assert np.isclose(test_deviance, self.optres.deviance)
def test_compute_covariance_with_hess_inverse(self):
self.optres._compute_covariance(self.lpost, self.opt)
assert np.allclose(self.optres.cov, np.asarray(self.opt.hess_inv))
assert np.allclose(self.optres.err, np.sqrt(np.diag(self.opt.hess_inv)))
@pytest.mark.skipif("comp_hessian")
def test_compute_covariance_without_comp_hessian(self):
self.optres._compute_covariance(self.lpost, None)
assert self.optres.cov is None
assert self.optres.err is None
@pytest.mark.skipif("not comp_hessian")
def test_compute_covariance_with_hess_inverse(self):
optres = OptimizationResultsSubclassDummy(self.lpost, self.opt,
neg=True)
optres._compute_covariance(self.lpost, self.opt)
if comp_hessian:
phess = approx_hess(self.opt.x, self.lpost)
hess_inv = np.linalg.inv(phess)
assert np.allclose(optres.cov, hess_inv)
assert np.allclose(optres.err, np.sqrt(np.diag(np.abs(hess_inv))))
def test_print_summary_works(self, logger, caplog):
self.optres._compute_covariance(self.lpost, None)
self.optres.print_summary(self.lpost)
assert 'Parameter amplitude' in caplog.text
assert "Fitting statistics" in caplog.text
assert "number of data points" in caplog.text
assert "Deviance [-2 log L] D =" in caplog.text
assert "The Akaike Information Criterion of " \
"the model is" in caplog.text
assert "The Bayesian Information Criterion of " \
"the model is" in caplog.text
assert "The figure-of-merit function for this model" in caplog.text
assert "Summed Residuals S =" in caplog.text
assert "Expected S" in caplog.text
assert "merit function" in caplog.text
if can_sample:
class SamplingResultsDummy(SamplingResults):
def __init__(self, sampler, ci_min=0.05, ci_max=0.95, log=None):
if log is None:
self.log = logging.getLogger('Fitting summary')
self.log.setLevel(logging.DEBUG)
if not self.log.handlers:
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
self.log.addHandler(ch)
# store all the samples
self.samples = sampler.get_chain(flat=True)
chain_ndims = sampler.get_chain().shape
self.nwalkers = float(chain_ndims[0])
self.niter = float(chain_ndims[1])
# store number of dimensions
self.ndim = chain_ndims[2]
# compute and store acceptance fraction
self.acceptance = np.nanmean(sampler.acceptance_fraction)
self.L = self.acceptance * self.samples.shape[0]
class TestSamplingResults(object):
@classmethod
def setup_class(cls):
m = 1
nfreq = 100
freq = np.arange(nfreq)
noise = np.random.exponential(size=nfreq)
power = noise * 2.0
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
cls.ps = ps
cls.a_mean, cls.a_var = 2.0, 1.0
cls.model = models.Const1D()
p_amplitude = lambda amplitude: \
scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(
amplitude)
cls.priors = {"amplitude": p_amplitude}
cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
cls.model, m=cls.ps.m)
cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)
cls.fitmethod = "BFGS"
cls.max_post = True
cls.t0 = [2.0]
cls.neg = True
pe = ParameterEstimation()
res = pe.fit(cls.lpost, cls.t0)
cls.nwalkers = 50
cls.niter = 100
np.random.seed(200)
p0 = np.array(
[np.random.multivariate_normal(res.p_opt, res.cov) for
i in range(cls.nwalkers)])
cls.sampler = emcee.EnsembleSampler(cls.nwalkers,
len(res.p_opt), cls.lpost,
args=[False])
with catch_warnings(RuntimeWarning):
_, _, _ = cls.sampler.run_mcmc(p0, cls.niter)
def test_can_sample_is_true(self):
assert can_sample
def test_sample_results_object_initializes(self):
s = SamplingResults(self.sampler)
assert s.samples.shape[0] == self.nwalkers * self.niter
assert s.acceptance > 0.25
assert np.isclose(s.L,
s.acceptance * self.nwalkers * self.niter)
def test_check_convergence_works(self):
s = SamplingResultsDummy(self.sampler)
s._check_convergence(self.sampler)
assert hasattr(s, "rhat")
rhat_test = 0.038688
assert np.isclose(rhat_test, s.rhat[0], atol=0.02, rtol=0.1)
s._infer()
assert hasattr(s, "mean")
assert hasattr(s, "std")
assert hasattr(s, "ci")
test_mean = 2.0
test_std = 0.2
assert np.isclose(test_mean, s.mean[0], rtol=0.1)
assert np.isclose(test_std, s.std[0], atol=0.01, rtol=0.01)
assert s.ci.size == 2
def test_infer_computes_correct_values(self):
s = SamplingResults(self.sampler)
@pytest.fixture()
def logger():
logger = logging.getLogger('Some.Logger')
logger.setLevel(logging.INFO)
return logger
class TestPSDParEst(object):
@classmethod
def setup_class(cls):
m = 1
nfreq = 100
freq = np.linspace(1, 10.0, nfreq)
rng = np.random.RandomState(100) # set the seed for the random number generator
noise = rng.exponential(size=nfreq)
cls.model = models.Lorentz1D() + models.Const1D()
cls.x_0_0 = 2.0
cls.fwhm_0 = 0.05
cls.amplitude_0 = 1000.0
cls.amplitude_1 = 2.0
cls.model.x_0_0 = cls.x_0_0
cls.model.fwhm_0 = cls.fwhm_0
cls.model.amplitude_0 = cls.amplitude_0
cls.model.amplitude_1 = cls.amplitude_1
p = cls.model(freq)
np.random.seed(400)
power = noise*p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1]-freq[0]
ps.norm = "leahy"
cls.ps = ps
cls.a_mean, cls.a_var = 2.0, 1.0
cls.a2_mean, cls.a2_var = 100.0, 10.0
p_amplitude_1 = lambda amplitude: \
scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)
p_x_0_0 = lambda alpha: \
scipy.stats.uniform(0.0, 5.0).pdf(alpha)
p_fwhm_0 = lambda alpha: \
scipy.stats.uniform(0.0, 0.5).pdf(alpha)
p_amplitude_0 = lambda amplitude: \
scipy.stats.norm(loc=cls.a2_mean, scale=cls.a2_var).pdf(amplitude)
cls.priors = {"amplitude_1": p_amplitude_1,
"amplitude_0": p_amplitude_0,
"x_0_0": p_x_0_0,
"fwhm_0": p_fwhm_0}
cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
cls.model, m=cls.ps.m)
cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)
cls.fitmethod = "powell"
cls.max_post = True
cls.t0 = [cls.x_0_0, cls.fwhm_0, cls.amplitude_0, cls.amplitude_1]
cls.neg = True
def test_fitting_with_ties_and_bounds(self, capsys):
double_f = lambda model : model.x_0_0 * 2
model = self.model.copy()
model += models.Lorentz1D(amplitude=model.amplitude_0,
x_0 = model.x_0_0 * 2,
fwhm = model.fwhm_0)
model.x_0_0 = self.model.x_0_0
model.amplitude_0 = self.model.amplitude_0
model.amplitude_1 = self.model.amplitude_1
model.fwhm_0 = self.model.fwhm_0
model.x_0_2.tied = double_f
model.fwhm_0.bounds = [0, 10]
model.amplitude_0.fixed = True
p = model(self.ps.freq)
noise = np.random.exponential(size=len(p))
power = noise*p
ps = Powerspectrum()
ps.freq = self.ps.freq
ps.power = power
ps.m = self.ps.m
ps.df = self.ps.df
ps.norm = "leahy"
pe = PSDParEst(ps, fitmethod="TNC")
llike = PSDLogLikelihood(ps.freq, ps.power, model)
true_pars = [self.x_0_0, self.fwhm_0,
self.amplitude_1,
model.amplitude_2.value,
model.fwhm_2.value]
res = pe.fit(llike, true_pars, neg=True)
compare_pars = [self.x_0_0, self.fwhm_0,
self.amplitude_1,
model.amplitude_2.value,
model.fwhm_2.value]
assert np.allclose(compare_pars, res.p_opt, rtol=0.5)
def test_par_est_initializes(self):
pe = PSDParEst(self.ps)
assert pe.max_post is True, "max_post should be set to True as a default."
def test_fit_fails_when_object_is_not_posterior_or_likelihood(self):
x = np.ones(10)
y = np.ones(10)
pe = PSDParEst(self.ps)
with pytest.raises(TypeError):
res = pe.fit(x, y)
def test_fit_fails_without_lpost_or_t0(self):
pe = PSDParEst(self.ps)
with pytest.raises(TypeError):
res = pe.fit()
def test_fit_fails_without_t0(self):
pe = PSDParEst(self.ps)
with pytest.raises(TypeError):
res = pe.fit(np.ones(10))
def test_fit_fails_with_incorrect_number_of_parameters(self):
pe = PSDParEst(self.ps)
t0 = [1,2]
with pytest.raises(ValueError):
res = pe.fit(self.lpost, t0)
@pytest.mark.skipif("not can_plot")
def test_fit_method_works_with_correct_parameter(self):
pe = PSDParEst(self.ps)
lpost = PSDPosterior(self.ps.freq, self.ps.power,
self.model, self.priors, m=self.ps.m)
t0 = [2.0, 1, 1, 1]
res = pe.fit(lpost, t0)
assert isinstance(res, OptimizationResults), "res must be of type " \
"OptimizationResults"
pe.plotfits(res, save_plot=True)
assert os.path.exists("test_ps_fit.png")
os.unlink("test_ps_fit.png")
pe.plotfits(res, save_plot=True, log=True)
assert os.path.exists("test_ps_fit.png")
os.unlink("test_ps_fit.png")
pe.plotfits(res, res2=res, save_plot=True)
assert os.path.exists("test_ps_fit.png")
os.unlink("test_ps_fit.png")
def test_compute_lrt_fails_when_garbage_goes_in(self):
pe = PSDParEst(self.ps)
t0 = [2.0, 1, 1, 1]
with pytest.raises(TypeError):
pe.compute_lrt(self.lpost, t0, None, t0)
with pytest.raises(ValueError):
pe.compute_lrt(self.lpost, t0[:-1], self.lpost, t0)
def test_compute_lrt_works(self):
t0 = [2.0, 1, 1, 1]
pe = PSDParEst(self.ps, max_post=True)
assert pe.max_post is True
delta_deviance, _, _ = pe.compute_lrt(self.lpost, t0, self.lpost, t0)
assert pe.max_post is False
assert np.absolute(delta_deviance) < 1.5e-4
def test_simulate_lrts_works(self):
m = 1
nfreq = 100
freq = np.linspace(1, 10, nfreq)
rng = np.random.RandomState(100)
noise = rng.exponential(size=nfreq)
model = models.Const1D()
model.amplitude = 2.0
p = model(freq)
power = noise * p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)
s_all = np.atleast_2d(np.ones(5) * 2.0).T
model2 = models.PowerLaw1D() + models.Const1D()
model2.x_0_0.fixed = True
loglike2 = PSDLogLikelihood(ps.freq, ps.power, model2, 1)
pe = PSDParEst(ps)
lrt_obs, res1, res2 = pe.compute_lrt(loglike, [2.0], loglike2,
[2.0, 1.0, 2.0], neg=True)
lrt_sim = pe.simulate_lrts(s_all, loglike, [2.0], loglike2,
[2.0, 1.0, 2.0],
seed=100)
assert (lrt_obs > 0.4) and (lrt_obs < 0.6)
assert np.all(lrt_sim < 10.0) and np.all(lrt_sim > 0.01)
def test_compute_lrt_fails_with_wrong_input(self):
pe = PSDParEst(self.ps)
with pytest.raises(AssertionError):
lrt_sim = pe.simulate_lrts(np.arange(5), self.lpost, [1, 2, 3, 4],
[1, 2, 3, 4], [1, 2, 3, 4])
def test_generate_model_data(self):
pe = PSDParEst(self.ps)
m = self.model
_fitter_to_model_params(m, self.t0)
model = m(self.ps.freq)
pe_model = pe._generate_model(self.lpost, [self.x_0_0, self.fwhm_0,
self.amplitude_0,
self.amplitude_1])
assert np.allclose(model, pe_model)
def generate_data_rng_object_works(self):
pe = PSDParEst(self.ps)
sim_data1 = pe._generate_data(self.lpost,
[self.x_0_0, self.fwhm_0,
self.amplitude_0,
self.amplitude_1],
seed=1)
sim_data2 = pe._generate_data(self.lpost,
[self.x_0_0, self.fwhm_0,
self.amplitude_0,
self.amplitude_1],
seed=1)
assert np.allclose(sim_data1.power, sim_data2.power)
def test_generate_data_produces_correct_distribution(self):
model = models.Const1D()
model.amplitude = 2.0
p = model(self.ps.freq)
seed = 100
rng = np.random.RandomState(seed)
noise = rng.exponential(size=len(p))
power = noise*p
ps = Powerspectrum()
ps.freq = self.ps.freq
ps.power = power
ps.m = 1
ps.df = self.ps.freq[1]-self.ps.freq[0]
ps.norm = "leahy"
lpost = PSDLogLikelihood(ps.freq, ps.power, model, m=1)
pe = PSDParEst(ps)
rng2 = np.random.RandomState(seed)
sim_data = pe._generate_data(lpost, [2.0], rng2)
assert np.allclose(ps.power, sim_data.power)
def test_generate_model_breaks_with_wrong_input(self):
pe = PSDParEst(self.ps)
with pytest.raises(AssertionError):
pe_model = pe._generate_model([1, 2, 3, 4], [1, 2, 3, 4])
def test_generate_model_breaks_for_wrong_number_of_parameters(self):
pe = PSDParEst(self.ps)
with pytest.raises(AssertionError):
pe_model = pe._generate_model(self.lpost, [1, 2, 3])
def test_pvalue_calculated_correctly(self):
a = [1, 1, 1, 2]
obs_val = 1.5
pe = PSDParEst(self.ps)
pval = pe._compute_pvalue(obs_val, a)
assert np.isclose(pval, 1./len(a))
def test_calibrate_lrt_fails_without_lpost_objects(self):
pe = PSDParEst(self.ps)
with pytest.raises(TypeError):
pval = pe.calibrate_lrt(self.lpost, [1, 2, 3, 4],
np.arange(10), np.arange(4))
def test_calibrate_lrt_fails_with_wrong_parameters(self):
pe = PSDParEst(self.ps)
with pytest.raises(ValueError):
pval = pe.calibrate_lrt(self.lpost, [1, 2, 3, 4],
self.lpost, [1, 2, 3])
def test_calibrate_lrt_works_as_expected(self):
m = 1
nfreq = 100
freq = np.linspace(1, 10, nfreq)
rng = np.random.RandomState(100)
noise = rng.exponential(size=nfreq)
model = models.Const1D()
model.amplitude = 2.0
p = model(freq)
power = noise * p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)
s_all = np.atleast_2d(np.ones(10) * 2.0).T
model2 = models.PowerLaw1D() + models.Const1D()
model2.x_0_0.fixed = True
loglike2 = PSDLogLikelihood(ps.freq, ps.power, model2, 1)
pe = PSDParEst(ps)
pval = pe.calibrate_lrt(loglike, [2.0], loglike2,
[2.0, 1.0, 2.0], sample=s_all,
max_post=False, nsim=5,
seed=100)
assert pval > 0.001
@pytest.mark.skipif("not can_sample")
def test_calibrate_lrt_works_with_sampling(self):
m = 1
nfreq = 100
freq = np.linspace(1, 10, nfreq)
rng = np.random.RandomState(100)
noise = rng.exponential(size=nfreq)
model = models.Const1D()
model.amplitude = 2.0
p = model(freq)
power = noise * p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
lpost = PSDPosterior(ps.freq, ps.power, model, m=1)
p_amplitude_1 = lambda amplitude: \
scipy.stats.norm(loc=2.0, scale=1.0).pdf(amplitude)
p_alpha_0 = lambda alpha: \
scipy.stats.uniform(0.0, 5.0).pdf(alpha)
p_amplitude_0 = lambda amplitude: \
scipy.stats.norm(loc=self.a2_mean, scale=self.a2_var).pdf(
amplitude)
priors = {"amplitude": p_amplitude_1}
priors2 = {"amplitude_1": p_amplitude_1,
"amplitude_0": p_amplitude_0,
"alpha_0": p_alpha_0}
lpost.logprior = set_logprior(lpost, priors)
model2 = models.PowerLaw1D() + models.Const1D()
model2.x_0_0.fixed = True
lpost2 = PSDPosterior(ps.freq, ps.power, model2, 1)
lpost2.logprior = set_logprior(lpost2, priors2)
pe = PSDParEst(ps)
with catch_warnings(RuntimeWarning):
pval = pe.calibrate_lrt(lpost, [2.0], lpost2,
[2.0, 1.0, 2.0], sample=None,
max_post=True, nsim=10, nwalkers=10,
burnin=10, niter=10,
seed=100)
assert pval > 0.001
def test_find_highest_outlier_works_as_expected(self):
mp_ind = 5
max_power = 1000.0
ps = Powerspectrum()
ps.freq = | np.arange(10) | numpy.arange |
import pandas as pd
import numpy as np
import json, re, pickle, os
import jieba
import tensorflow as tf
from collections import Iterable
from sklearn.feature_extraction.text import TfidfVectorizer
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib.patheffects as PathEffects
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.preprocessing import OneHotEncoder
from scipy import sparse
import gensim
import thulac
import pickle as pk
from stanfordcorenlp import StanfordCoreNLP
from string import punctuation
add_punc=',。、【 】 “”:;()《》‘’{}?!⑦()、%^>℃:.”“^-——=&#@¥'
all_punc = punctuation + add_punc
def punc_delete(fact_list):
fact_filtered = []
for word in fact_list:
fact_filtered.append(word)
if word in all_punc:
fact_filtered.remove(word)
return fact_filtered
def law_to_list(path, remain_new_line=False):
with open(path, 'r', encoding='utf-8') as f:
law = []
for line in f:
if line == '\n' or re.compile(r'第.*[节|章]').search(line[:10]) is not None:
continue
try:
tmp = re.compile(r'第.*条').search(line.strip()[:8]).group(0)
if remain_new_line:
law.append(line)
else:
law.append(line.strip())
except (TypeError, AttributeError):
if remain_new_line:
law[-1] += line
else:
law[-1] += line.strip()
return law
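

# Hedged usage sketch (added for illustration; the two articles below are
# invented): law_to_list groups the raw statute text into one string per
# '第...条' article, appending continuation lines to the previous article.
def _demo_law_to_list():
    import os
    import tempfile
    text = ('第一条 【盗窃罪】盗窃公私财物,\n'
            '数额较大的,处三年以下有期徒刑。\n'
            '第二条 【抢劫罪】以暴力方法抢劫公私财物的。\n')
    with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False,
                                     encoding='utf-8') as f:
        f.write(text)
        path = f.name
    try:
        articles = law_to_list(path)
        # articles == ['第一条 【盗窃罪】盗窃公私财物,数额较大的,处三年以下有期徒刑。',
        #              '第二条 【抢劫罪】以暴力方法抢劫公私财物的。']
        return articles
    finally:
        os.remove(path)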
def stopwordslist(filepath):
stopwords = [line.strip() for line in open(filepath, 'r', encoding='utf-8').readlines()]
return stopwords
def hanzi_to_num(hanzi_1):
    # convert a Chinese numeral to int; units 十/百/千 plus 万/亿 are handled
hanzi = hanzi_1.strip().replace('零', '')
if hanzi == '':
return str(int(0))
d = {'一': 1, '二': 2, '三': 3, '四': 4, '五': 5, '六': 6, '七': 7, '八': 8, '九': 9, '': 0}
m = {'十': 1e1, '百': 1e2, '千': 1e3, }
w = {'万': 1e4, '亿': 1e8}
res = 0
tmp = 0
thou = 0
for i in hanzi:
if i not in d.keys() and i not in m.keys() and i not in w.keys():
return hanzi
if (hanzi[0]) == '十': hanzi = '一' + hanzi
for i in range(len(hanzi)):
if hanzi[i] in d:
tmp += d[hanzi[i]]
elif hanzi[i] in m:
tmp *= m[hanzi[i]]
res += tmp
tmp = 0
else:
thou += (res + tmp) * w[hanzi[i]]
tmp = 0
res = 0
return int(thou + res + tmp)
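

# Hedged usage sketch (added; the expected values are worked out by hand from
# the implementation above and are not part of the original file).
def _demo_hanzi_to_num():
    examples = {
        '三百二十五': 325,    # 3*100 + 2*10 + 5
        '一千零二十': 1020,   # '零' is stripped first, then 1*1000 + 2*10
        '十一': 11,           # a leading '十' is promoted to '一十'
    }
    assert all(hanzi_to_num(h) == v for h, v in examples.items())
    return examples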
def get_cutter(dict_path="law_processed/Thuocl_seg.txt", mode='thulac', stop_words_filtered=False):
if stop_words_filtered:
stopwords = stopwordslist('law_processed/stop_word.txt') # 这里加载停用词的路径
else:
stopwords = []
if mode == 'jieba':
jieba.load_userdict(dict_path)
return lambda x: [a for a in list(jieba.cut(x)) if a not in stopwords]
elif mode == 'thulac':
thu = thulac.thulac(user_dict=dict_path, seg_only=True)
return lambda x: [a for a in thu.cut(x, text=True).split(' ') if a not in stopwords]
def process_law(law, cut):
# single article
# cut=get_cutter()
condition_list = []
for each in law.split('。')[:-1]:
suffix = None
if ':' in each:
each, suffix = each.split(':')
suffix = cut(suffix)
words = cut(each)
seg_point = [-1]
conditions = []
for i in range(len(words)):
if words[i] == ';' or words[i] == ';':
seg_point.append(i)
seg_point.append(len(words))
for i in range(len(seg_point) - 1):
for j in range(seg_point[i + 1] - 1, seg_point[i], -1):
if j + 1 < len(words) and words[j] == '的' and words[j + 1] == ',':
conditions.append(words[seg_point[i] + 1:j + 1])
break
# context=law.split('。')[:-1]
for i in range(1, len(conditions)):
conditions[i] = conditions[0] + conditions[i]
# if len(condition_list)==0 and len(conditions)==0:
# conditions.append([])
if suffix is not None:
conditions = [x + suffix for x in conditions]
condition_list += conditions
if condition_list == []:
condition_list.append(cut(law[:-1]))
n_word = [len(i) for i in condition_list]
return condition_list, n_word
def cut_law(law_list, order=None, cut_sentence=True, cut_penalty=False, stop_words_filtered=True):
res = []
cut = get_cutter(stop_words_filtered=stop_words_filtered)
if order is not None:
key_list = [int(i) for i in order.keys()]
filter = key_list
for each in law_list:
index, content = each.split(' ')
index = hanzi_to_num(index[1:-1])
charge, content = content[1:].split('】')
# if charge[-1]!='罪':
# continue
if order is not None and index not in filter:
continue
if cut_penalty:
context, n_words = process_law(content, cut)
elif cut_sentence:
context, n_words = [], []
for i in content.split('。'):
if i != '':
context.append(cut(i))
n_words.append(len(context[-1]))
else:
context = cut(content)
n_words = len(context)
res.append([index, charge, context, n_words])
if order is not None:
res = sorted(res, key=lambda x: order[str(x[0])])
return res
def flatten(x):
return [y for l in x for y in flatten(l)] if type(x) is list else [x]
def cos_similarity(a, b):
return np.sum(a * b) / (np.linalg.norm(a) * np.linalg.norm(b))
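

# Hedged sketch (added for illustration): cosine similarity of toy vectors;
# parallel vectors give 1.0 and orthogonal vectors give 0.0.
def _demo_cos_similarity():
    a = np.array([1.0, 2.0, 3.0])
    assert np.isclose(cos_similarity(a, 2 * a), 1.0)
    assert np.isclose(cos_similarity(np.array([1.0, 0.0]),
                                     np.array([0.0, 1.0])), 0.0)
    return cos_similarity(a, np.array([3.0, 2.0, 1.0]))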
def lookup_index(x, word2id, doc_len):
res = []
for each in x:
tmp = [word2id['BLANK']] * doc_len
for i in range(len(each)):
if i >= doc_len:
break
try:
tmp[i] = word2id[each[i]]
except KeyError:
tmp[i] = word2id['UNK']
res.append(tmp)
return | np.array(res) | numpy.array |
# Copyright (c) OpenMMLab. All rights reserved.
import sys
from abc import ABCMeta
from collections import defaultdict
from copy import deepcopy
from functools import partial
from typing import OrderedDict
import mmcv
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.nn.parallel.distributed import _find_tensors
from ..architectures.common import get_module_device
from ..builder import MODELS, build_module
from .utils import _get_label_batch, _get_noise_batch, var_to_tensor
@MODELS.register_module()
class BasicGaussianDiffusion(nn.Module, metaclass=ABCMeta):
"""Basic module for gaussian Diffusion Denoising Probabilistic Models. A
diffusion probabilistic model (which we will call a 'diffusion model' for
brevity) is a parameterized Markov chain trained using variational
inference to produce samples matching the data after finite time.
    The design of this module implements DDPM and improved DDPM according to
"Denoising Diffusion Probabilistic Models" (2020) and "Improved Denoising
Diffusion Probabilistic Models" (2021).
Args:
denoising (dict): Config for denoising model.
ddpm_loss (dict): Config for losses of DDPM.
betas_cfg (dict): Config for betas in diffusion process.
num_timesteps (int, optional): The number of timesteps of the diffusion
process. Defaults to 1000.
num_classes (int | None, optional): The number of conditional classes.
Defaults to None.
        sample_method (string, optional): Sampling method for the denoising
            process. Supports 'DDPM' and 'DDIM'. Defaults to 'DDPM'.
        timestep_sampler (string, optional): How to sample timesteps in the
            training process. Defaults to `UniformTimeStepSampler`.
train_cfg (dict | None, optional): Config for training schedule.
Defaults to None.
test_cfg (dict | None, optional): Config for testing schedule. Defaults
to None.
"""
def __init__(self,
denoising,
ddpm_loss,
betas_cfg,
num_timesteps=1000,
num_classes=0,
sample_method='DDPM',
timestep_sampler='UniformTimeStepSampler',
train_cfg=None,
test_cfg=None):
super().__init__()
self.fp16_enable = False
# build denoising module in this function
self.num_classes = num_classes
self.num_timesteps = num_timesteps
self.sample_method = sample_method
self._denoising_cfg = deepcopy(denoising)
self.denoising = build_module(
denoising,
default_args=dict(
num_classes=num_classes, num_timesteps=num_timesteps))
# get output-related configs from denoising
self.denoising_var_mode = self.denoising.var_mode
self.denoising_mean_mode = self.denoising.mean_mode
# output_channels in denoising may be double, therefore we
# get number of channels from config
image_channels = self._denoising_cfg['in_channels']
# image_size should be the attribute of denoising network
image_size = self.denoising.image_size
image_shape = torch.Size([image_channels, image_size, image_size])
self.image_shape = image_shape
self.get_noise = partial(
_get_noise_batch,
image_shape=image_shape,
num_timesteps=self.num_timesteps)
self.get_label = partial(
_get_label_batch, num_timesteps=self.num_timesteps)
# build sampler
if timestep_sampler is not None:
self.sampler = build_module(
timestep_sampler,
default_args=dict(num_timesteps=num_timesteps))
else:
self.sampler = None
# build losses
if ddpm_loss is not None:
self.ddpm_loss = build_module(
ddpm_loss, default_args=dict(sampler=self.sampler))
if not isinstance(self.ddpm_loss, nn.ModuleList):
self.ddpm_loss = nn.ModuleList([self.ddpm_loss])
else:
self.ddpm_loss = None
self.betas_cfg = deepcopy(betas_cfg)
self.train_cfg = deepcopy(train_cfg) if train_cfg else None
self.test_cfg = deepcopy(test_cfg) if test_cfg else None
self._parse_train_cfg()
if test_cfg is not None:
self._parse_test_cfg()
self.prepare_diffusion_vars()
def _parse_train_cfg(self):
"""Parsing train config and set some attributes for training."""
if self.train_cfg is None:
self.train_cfg = dict()
self.use_ema = self.train_cfg.get('use_ema', False)
if self.use_ema:
self.denoising_ema = deepcopy(self.denoising)
self.real_img_key = self.train_cfg.get('real_img_key', 'real_img')
def _parse_test_cfg(self):
"""Parsing test config and set some attributes for testing."""
if self.test_cfg is None:
self.test_cfg = dict()
# whether to use exponential moving average for testing
self.use_ema = self.test_cfg.get('use_ema', False)
if self.use_ema:
self.denoising_ema = deepcopy(self.denoising)
def _get_loss(self, outputs_dict):
losses_dict = {}
# forward losses
for loss_fn in self.ddpm_loss:
losses_dict[loss_fn.loss_name()] = loss_fn(outputs_dict)
loss, log_vars = self._parse_losses(losses_dict)
# update collected log_var from loss_fn
for loss_fn in self.ddpm_loss:
if hasattr(loss_fn, 'log_vars'):
log_vars.update(loss_fn.log_vars)
return loss, log_vars
def _parse_losses(self, losses):
"""Parse the raw outputs (losses) of the network.
Args:
losses (dict): Raw output of the network, which usually contain
losses and other necessary information.
Returns:
tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor \
which may be a weighted sum of all losses, log_vars contains \
all the variables to be sent to the logger.
"""
log_vars = OrderedDict()
for loss_name, loss_value in losses.items():
if isinstance(loss_value, torch.Tensor):
log_vars[loss_name] = loss_value.mean()
elif isinstance(loss_value, list):
log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
else:
raise TypeError(
f'{loss_name} is not a tensor or list of tensor')
loss = sum(_value for _key, _value in log_vars.items()
if 'loss' in _key)
log_vars['loss'] = loss
for loss_name, loss_value in log_vars.items():
if dist.is_available() and dist.is_initialized():
loss_value = loss_value.data.clone()
dist.all_reduce(loss_value.div_(dist.get_world_size()))
log_vars[loss_name] = loss_value.item()
return loss, log_vars
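    # Hedged example (added; the numbers are invented) of what
    # ``_parse_losses`` does:
    #   losses = {'vlb_loss': tensor(0.12), 'mse_loss': tensor(0.34)}
    #   loss, log_vars = self._parse_losses(losses)
    #   # loss sums every entry whose key contains 'loss' -> tensor(0.46)
    #   # log_vars also gains a 'loss' entry; in distributed training each
    #   # value is all-reduced across ranks and converted to a Python float.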
def train_step(self,
data,
optimizer,
ddp_reducer=None,
loss_scaler=None,
use_apex_amp=False,
running_status=None):
"""The iteration step during training.
        This method defines an iteration step during training. Unlike other
        repos in the **MM** series, we allow the back propagation and the
        optimizer update to directly follow the iterative training schedule
        of DDPMs.
        Of course, you can also move the back propagation outside of this
        method and optimize the parameters in the optimizer hook, but this
        costs extra GPU memory because the computational graph has to be
        retained; in that case the training schedule has to be adapted in
        the concrete implementation.
Args:
            optimizer (dict): Dict containing the optimizer for the denoising
                network.
running_status (dict | None, optional): Contains necessary basic
information for training, e.g., iteration number. Defaults to
None.
"""
# get running status
if running_status is not None:
curr_iter = running_status['iteration']
else:
            # dirty workaround for when running status is not provided
if not hasattr(self, 'iteration'):
self.iteration = 0
curr_iter = self.iteration
real_imgs = data[self.real_img_key]
# denoising training
optimizer['denoising'].zero_grad()
denoising_dict_ = self.reconstruction_step(
data,
timesteps=self.sampler,
sample_model='orig',
return_noise=True)
denoising_dict_['iteration'] = curr_iter
denoising_dict_['real_imgs'] = real_imgs
denoising_dict_['loss_scaler'] = loss_scaler
loss, log_vars = self._get_loss(denoising_dict_)
# prepare for backward in ddp. If you do not call this function before
# back propagation, the ddp will not dynamically find the used params
# in current computation.
if ddp_reducer is not None:
ddp_reducer.prepare_for_backward(_find_tensors(loss))
if loss_scaler:
# add support for fp16
loss_scaler.scale(loss).backward()
elif use_apex_amp:
from apex import amp
with amp.scale_loss(
loss, optimizer['denoising'],
loss_id=0) as scaled_loss_disc:
scaled_loss_disc.backward()
else:
loss.backward()
if loss_scaler:
loss_scaler.unscale_(optimizer['denoising'])
# note that we do not contain clip_grad procedure
loss_scaler.step(optimizer['denoising'])
# loss_scaler.update will be called in runner.train()
else:
optimizer['denoising'].step()
# image used for vislization
results = dict(
real_imgs=real_imgs,
x_0_pred=denoising_dict_['x_0_pred'],
x_t=denoising_dict_['diffusion_batches'],
x_t_1=denoising_dict_['fake_img'])
outputs = dict(
log_vars=log_vars, num_samples=real_imgs.shape[0], results=results)
if hasattr(self, 'iteration'):
self.iteration += 1
return outputs
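    # Hedged usage sketch (added; variable names are illustrative): an
    # MMCV-style runner would drive one iteration roughly as
    #
    #   outputs = model.train_step(
    #       data={'real_img': real_img_batch},
    #       optimizer={'denoising': denoising_optimizer},
    #       running_status={'iteration': curr_iter})
    #   log_vars = outputs['log_vars']        # scalar losses for the logger
    #   vis = outputs['results']['x_0_pred']  # denoised prediction for vis
    #
    # where 'real_img' matches the default ``real_img_key`` set in
    # ``_parse_train_cfg``.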
def reconstruction_step(self,
data_batch,
noise=None,
label=None,
timesteps=None,
sample_model='orig',
return_noise=False,
**kwargs):
"""Reconstruction step at corresponding `timestep`. To be noted that,
denoisint target ``x_t`` for each timestep are all generated from real
images, but not the denoising result from denoising network.
``sample_from_noise`` focus on generate samples start from **random
(or given) noise**. Therefore, we design this function to realize a
reconstruction process for the given images.
If `timestep` is None, automatically perform reconstruction at all
timesteps.
Args:
data_batch (dict): Input data from dataloader.
noise (torch.Tensor | callable | None): Noise used in diffusion
process. You can directly give a batch of noise through a
``torch.Tensor`` or offer a callable function to sample a
batch of noise data. Otherwise, the ``None`` indicates to use
the default noise sampler. Defaults to None.
label (torch.Tensor | None , optional): The conditional label of
the input image. Defaults to None.
            timesteps (int | list | torch.Tensor | callable | None): Target
                timesteps at which to perform reconstruction.
            sample_model (str, optional): Which model to use to sample fake
                images. Defaults to `'orig'`.
            return_noise (bool, optional): If True, ``noise_batch``, ``label``
                and all other intermediate variables will be returned together
                with ``fake_img`` in a dict. Defaults to False.
Returns:
            torch.Tensor | dict: The output may be the directly synthesized
                images in ``torch.Tensor``. Otherwise, a dict with the
                required data, including the generated images, will be
                returned.
"""
assert sample_model in [
'orig', 'ema'
], ('We only support \'orig\' and \'ema\' for '
f'\'reconstruction_step\', but receive \'{sample_model}\'.')
denoising_model = self.denoising if sample_model == 'orig' \
else self.denoising_ema
# 0. prepare for timestep, noise and label
device = get_module_device(self)
real_imgs = data_batch[self.real_img_key]
num_batches = real_imgs.shape[0]
if timesteps is None:
# default to performing the whole reconstruction process
timesteps = torch.LongTensor([
t for t in range(self.num_timesteps)
]).view(self.num_timesteps, 1)
timesteps = timesteps.repeat([1, num_batches])
if isinstance(timesteps, (int, list)):
timesteps = torch.LongTensor(timesteps)
elif callable(timesteps):
timestep_generator = timesteps
timesteps = timestep_generator(num_batches)
else:
assert isinstance(timesteps, torch.Tensor), (
                'we only support int, list, torch.Tensor or a callable function')
if timesteps.ndim == 1:
timesteps = timesteps.unsqueeze(0)
timesteps = timesteps.to(get_module_device(self))
if noise is not None:
assert 'noise' not in data_batch, (
'Receive \'noise\' in both data_batch and passed arguments.')
if noise is None:
noise = data_batch['noise'] if 'noise' in data_batch else None
if self.num_classes > 0:
if label is not None:
assert 'label' not in data_batch, (
'Receive \'label\' in both data_batch '
'and passed arguments.')
if label is None:
label = data_batch['label'] if 'label' in data_batch else None
label_batches = self.get_label(
label, num_batches=num_batches).to(device)
else:
label_batches = None
output_dict = defaultdict(list)
# loop all timesteps
for timestep in timesteps:
# 1. get diffusion results and parameters
noise_batches = self.get_noise(
noise, num_batches=num_batches).to(device)
diffusion_batches = self.q_sample(real_imgs, timestep,
noise_batches)
# 2. get denoising results.
denoising_batches = self.denoising_step(
denoising_model,
diffusion_batches,
timestep,
label=label_batches,
return_noise=return_noise,
clip_denoised=not self.training)
# 3. get ground truth by q_posterior
target_batches = self.q_posterior_mean_variance(
real_imgs, diffusion_batches, timestep, logvar=True)
if return_noise:
output_dict_ = dict(
timesteps=timestep,
noise=noise_batches,
diffusion_batches=diffusion_batches)
if self.num_classes > 0:
output_dict_['label'] = label_batches
output_dict_.update(denoising_batches)
output_dict_.update(target_batches)
else:
output_dict_ = dict(fake_img=denoising_batches)
# update output of `timestep` to output_dict
for k, v in output_dict_.items():
if k in output_dict:
output_dict[k].append(v)
else:
output_dict[k] = [v]
        # 4. concatenate lists into tensors
for k, v in output_dict.items():
output_dict[k] = torch.cat(v, dim=0)
# 5. return results
if return_noise:
return output_dict
return output_dict['fake_img']
def sample_from_noise(self,
noise,
num_batches=0,
sample_model='ema/orig',
label=None,
**kwargs):
"""Sample images from noises by using Denoising model.
Args:
noise (torch.Tensor | callable | None): You can directly give a
batch of noise through a ``torch.Tensor`` or offer a callable
function to sample a batch of noise data. Otherwise, the
``None`` indicates to use the default noise sampler.
num_batches (int, optional): The number of batch size.
Defaults to 0.
sample_model (str, optional): The model to sample. If ``ema/orig``
is passed, this method will try to sample from ema (if
``self.use_ema == True``) and orig model. Defaults to
'ema/orig'.
label (torch.Tensor | None , optional): The conditional label.
Defaults to None.
Returns:
torch.Tensor | dict: The output may be the direct synthesized
images in ``torch.Tensor``. Otherwise, a dict with queried
data, including generated images, will be returned.
"""
# get sample function by name
sample_fn_name = f'{self.sample_method.upper()}_sample'
if not hasattr(self, sample_fn_name):
raise AttributeError(
f'Cannot find sample method [{sample_fn_name}] correspond '
f'to [{self.sample_method}].')
sample_fn = getattr(self, sample_fn_name)
if sample_model == 'ema':
assert self.use_ema
_model = self.denoising_ema
elif sample_model == 'ema/orig' and self.use_ema:
_model = self.denoising_ema
else:
_model = self.denoising
outputs = sample_fn(
_model,
noise=noise,
num_batches=num_batches,
label=label,
**kwargs)
if isinstance(outputs, dict) and 'noise_batch' in outputs:
# return_noise is True
noise = outputs['x_t']
label = outputs['label']
kwargs['timesteps_noise'] = outputs['noise_batch']
fake_img = outputs['fake_img']
else:
fake_img = outputs
if sample_model == 'ema/orig' and self.use_ema:
_model = self.denoising
outputs_ = sample_fn(
_model, noise=noise, num_batches=num_batches, **kwargs)
if isinstance(outputs_, dict) and 'noise_batch' in outputs_:
# return_noise is True
fake_img_ = outputs_['fake_img']
else:
fake_img_ = outputs_
if isinstance(fake_img, dict):
# save_intermedia is True
fake_img = {
k: torch.cat([fake_img[k], fake_img_[k]], dim=0)
for k in fake_img.keys()
}
else:
fake_img = torch.cat([fake_img, fake_img_], dim=0)
return fake_img
@torch.no_grad()
def DDPM_sample(self,
model,
noise=None,
num_batches=0,
label=None,
save_intermedia=False,
timesteps_noise=None,
return_noise=False,
show_pbar=False,
**kwargs):
"""DDPM sample from random noise.
Args:
model (torch.nn.Module): Denoising model used to sample images.
noise (torch.Tensor | callable | None): You can directly give a
batch of noise through a ``torch.Tensor`` or offer a callable
function to sample a batch of noise data. Otherwise, the
``None`` indicates to use the default noise sampler.
num_batches (int, optional): The number of batch size.
Defaults to 0.
label (torch.Tensor | None , optional): The conditional label.
Defaults to None.
            save_intermedia (bool, optional): Whether to save the denoising
                results of intermediate timesteps. If set as True, a dict whose
                keys and values are the denoising timesteps and denoising
                results will be returned. Otherwise, only the final denoising
                result will be returned. Defaults to False.
            timesteps_noise (torch.Tensor, optional): Noise term used at each
                denoising timestep. If given, the input noise will be shaped to
                [num_timesteps, b, c, h, w]. If set as None, the noise of each
                denoising timestep will be randomly sampled. Defaults to None.
            return_noise (bool, optional): If True, a dict containing
                ``noise_batch``, ``x_t`` and ``label`` will be returned
                together with the denoising results, and the key of the
                denoising results is ``fake_img``. Note that ``noise_batch``
                will be shaped as [num_timesteps, b, c, h, w]. Defaults to
                False.
show_pbar (bool, optional): If True, a progress bar will be
displayed. Defaults to False.
Returns:
            torch.Tensor | dict: If ``save_intermedia`` is True, a dict
                containing the denoising results of each timestep will be
                returned. Otherwise, only the final denoising result will be
                returned.
"""
device = get_module_device(self)
noise = self.get_noise(noise, num_batches=num_batches).to(device)
x_t = noise.clone()
if save_intermedia:
# save input
intermedia = {self.num_timesteps: x_t.clone()}
# use timesteps noise if defined
if timesteps_noise is not None:
timesteps_noise = self.get_noise(
timesteps_noise, num_batches=num_batches,
timesteps_noise=True).to(device)
batched_timesteps = torch.arange(self.num_timesteps - 1, -1,
-1).long().to(device)
if show_pbar:
pbar = mmcv.ProgressBar(self.num_timesteps)
for t in batched_timesteps:
batched_t = t.expand(x_t.shape[0])
step_noise = timesteps_noise[t, ...] \
if timesteps_noise is not None else None
x_t = self.denoising_step(
model, x_t, batched_t, noise=step_noise, label=label, **kwargs)
if save_intermedia:
intermedia[int(t)] = x_t.cpu().clone()
if show_pbar:
pbar.update()
denoising_results = intermedia if save_intermedia else x_t
if show_pbar:
sys.stdout.write('\n')
if return_noise:
return dict(
noise_batch=timesteps_noise,
x_t=noise,
label=label,
fake_img=denoising_results)
return denoising_results
def prepare_diffusion_vars(self):
"""Prepare for variables used in the diffusion process."""
self.betas = self.get_betas()
self.alphas = 1.0 - self.betas
        self.alphas_bar = np.cumprod(self.alphas, axis=0)
self.alphas_bar_prev = np.append(1.0, self.alphas_bar[:-1])
self.alphas_bar_next = np.append(self.alphas_bar[1:], 0.0)
# calculations for diffusion q(x_t | x_0) and others
self.sqrt_alphas_bar = np.sqrt(self.alphas_bar)
self.sqrt_one_minus_alphas_bar = np.sqrt(1.0 - self.alphas_bar)
self.log_one_minus_alphas_bar = np.log(1.0 - self.alphas_bar)
        self.sqrt_recip_alplas_bar = np.sqrt(1.0 / self.alphas_bar)
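# --- Hedged standalone sketch (not part of the class above): how the precomputed
# schedule terms give the closed-form forward diffusion
#     q(x_t | x_0) = N(sqrt(alphas_bar_t) * x_0, (1 - alphas_bar_t) * I),
# sampled as x_t = sqrt(alphas_bar_t) * x_0 + sqrt(1 - alphas_bar_t) * noise.
# The linear beta schedule below is an assumption for illustration only.
def _diffusion_schedule_demo(num_timesteps=1000, t=500):
    import numpy as np
    betas = np.linspace(1e-4, 2e-2, num_timesteps)      # assumed linear schedule
    alphas = 1.0 - betas
    alphas_bar = np.cumprod(alphas, axis=0)
    sqrt_alphas_bar = np.sqrt(alphas_bar)
    sqrt_one_minus_alphas_bar = np.sqrt(1.0 - alphas_bar)
    x_0 = np.random.randn(4, 3, 8, 8)                   # toy batch of "images"
    noise = np.random.randn(*x_0.shape)
    x_t = sqrt_alphas_bar[t] * x_0 + sqrt_one_minus_alphas_bar[t] * noise
    return x_t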
"""Implementation of unuspervised and supervised Fourier feature selection algorithms
"""
from sklearn.base import BaseEstimator, ClassifierMixin
import numpy as np
from itertools import chain, combinations
import sys
import compute_fourier_coeff_supervised
import compute_norms_features_unsupervised
import math
# Generates all subsets of the iterable, with each subset of size at most k
def powerset(iterable, k):
"powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(1, k + 1))
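# Hedged illustration (not in the original source): a quick check of the
# size-limited subset enumeration above.
def _powerset_demo():
    # Subsets of size 1 and 2 only; the full set (1, 2, 3) is excluded when k == 2.
    # -> [(1,), (2,), (3,), (1, 2), (1, 3), (2, 3)]
    return list(powerset([1, 2, 3], 2))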
class OptionsUnsupervisedFourierFS:
def __init__(self, max_depth, cluster_sizes, selection_thresholds, norm_epsilon, shuffle, preranking):
self.max_depth = max_depth
self.cluster_sizes = cluster_sizes
self.selection_thresholds = selection_thresholds # same as n_redundant_threshold
self.norm_epsilon = norm_epsilon
self.shuffle = shuffle
self.preranking = preranking
def UnsupervisedFourierFS_helper(X_nmlzd, depth, input_features, options):
X_nmlzd_depth = X_nmlzd[:, input_features]
d = len(input_features)
n_clusters = math.ceil(d / options.cluster_sizes[depth])
if n_clusters == 0:
print("Error : n_clusters is zero!")
sys.exit(2)
    clusters = np.linspace(0, d, n_clusters + 1, dtype=int)
nonredundant_Features = []
for i in range(1, len(clusters)):
features_cluster = np.arange(clusters[i - 1], clusters[i])
X_cluster = X_nmlzd_depth[:, features_cluster]
sel_feats_norm2 = compute_norms_features_unsupervised.estimate_A(X_cluster,
depth+1,
options.norm_epsilon[depth])
# import pdb; pdb.set_trace()
        sel_feats_norm2 = np.array(sel_feats_norm2)
from abc import ABCMeta, abstractmethod
from mct import MC_node, MC_edge, MCFE_tree
from state import State
import util
import copy
import numpy as np
import pandas as pd
import random
import logging
logging.basicConfig(level=logging.ERROR, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
max_dist = 50
class Agent(metaclass=ABCMeta):
def run(self, eps=0.000001, max_episodes=10000):
"""Runs the episodes of a MCTS.
Keyword arguments:
eps -- constant to define when distribution is stable
episodes -- number of episodes per step
"""
depth = 0
curr_root = self.root
while depth < len(self.game.available_actions):
if depth == 0:
distrb = []
else:
distrb = curr_root.get_distribution()
for i in range(max_episodes):
self.episode(curr_root)
if i % 1000 == 0:
logger.info('X'*70)
logger.info('Episode:\t%d'%(i))
logger.info('X'*70)
if i % max_dist == 0 and i != 0:
curr_distrb = curr_root.get_distribution()
                    if len(distrb) > 0 and abs(util.kl_divergence(np.array(distrb), np.array(curr_distrb)
""" Runs the alignment test generated by elicitation.py on a set of test rewards and reports
performance. """
import logging
import pickle as pkl
from functools import partial
from itertools import product
from pathlib import Path
from typing import (
Dict,
Generator,
List,
Literal,
NamedTuple,
Optional,
Sequence,
Set,
Tuple,
Union,
cast,
)
import argh # type: ignore
import numpy as np
import tensorflow as tf # type: ignore
from driver.gym_env.legacy_env import LegacyEnv
from gym.spaces import flatten # type: ignore
from search import GeometricSearch, TestRewardSearch
tf.config.set_visible_devices([], "GPU") # Car simulation stuff is faster on cpu
from argh import arg
from driver.legacy.models import Driver
from gym.core import Env # type: ignore
from joblib import Parallel, delayed # type: ignore
from sklearn.metrics import confusion_matrix # type: ignore
from active.simulation_utils import TrajOptimizer, assert_normals, make_normals, orient_normals
from equiv_utils import add_equiv_constraints, remove_equiv
from random_baseline import make_random_questions
from testing_factory import TestFactory
from utils import (
assert_nonempty,
assert_reward,
assert_rewards,
get_mean_reward,
load,
make_gaussian_rewards,
parse_replications,
rollout,
setup_logging,
shape_compat,
)
Experiment = Tuple[float, Optional[float], int]
input_features_name = Path("input_features.npy")
normals_name = Path("normals.npy")
preferences_name = Path("preferences.npy")
true_reward_name = Path("true_reward.npy")
flags_name = Path("flags.pkl")
use_equiv = False
# Top level functions callable from fire
@arg("--epsilons", nargs="+", type=float)
def premake_test_rewards(
epsilons: List[float] = [0.0],
n_rewards: int = 100,
n_test_states: Optional[int] = None,
n_gt_test_questions: int = 10000,
true_reward_name: Path = Path("true_reward.npy"),
datadir: Path = Path(),
outdir: Path = Path(),
replications: Optional[Union[str, Tuple[int, ...]]] = None,
n_cpus: int = 1,
overwrite: bool = False,
verbosity: Literal["INFO", "DEBUG"] = "INFO",
):
""" Finds test rewards for each experiment. """
outdir.mkdir(parents=True, exist_ok=True)
# TODO(joschnei): I'm making some dangerous logging decisions. Do I want to append to logs, or
# give logs unique names? I really need to pick at least one.
setup_logging(verbosity, log_path=outdir / "log.txt")
if replications is not None:
replication_indices = parse_replications(replications)
for replication in replication_indices:
if not (datadir / str(replication)).exists():
logging.warning(f"Replication {replication} does not exist, skipping")
continue
premake_test_rewards(
epsilons=epsilons,
n_rewards=n_rewards,
n_test_states=n_test_states,
n_gt_test_questions=n_gt_test_questions,
true_reward_name=true_reward_name,
datadir=datadir / str(replication),
outdir=outdir / str(replication),
use_equiv=use_equiv,
n_cpus=n_cpus,
overwrite=overwrite,
verbosity=verbosity,
)
logging.info(f"Done with replication {replication}")
exit()
true_reward = np.load(datadir / true_reward_name)
assert_reward(true_reward, False, 4)
with Parallel(n_jobs=n_cpus) as parallel:
make_test_rewards(
epsilons=epsilons,
true_reward=true_reward,
n_rewards=n_rewards,
n_test_states=n_test_states,
n_gt_test_questions=int(n_gt_test_questions),
outdir=outdir,
parallel=parallel,
use_equiv=use_equiv,
overwrite=overwrite,
)
@arg("--epsilons", nargs="+", type=float)
@arg("--deltas", nargs="+", type=float)
@arg("--human-samples", nargs="+", type=int)
def simulated(
epsilons: List[float] = [0.0],
n_rewards: int = 100,
human_samples: List[int] = [1],
n_reward_samples: int = 1000,
n_test_states: Optional[int] = None,
n_gt_test_questions: int = 10000,
traj_opt: bool = False,
datadir: Path = Path(),
outdir: Path = Path(),
deltas: List[Optional[float]] = [None],
use_mean_reward: bool = False,
use_random_test_questions: bool = False,
n_random_test_questions: Optional[int] = None,
use_cheating_questions: bool = False,
skip_remove_duplicates: bool = False,
skip_epsilon_filtering: bool = False,
skip_redundancy_filtering: bool = False,
use_true_epsilon: bool = False,
legacy_test_rewards: bool = False,
replications: Optional[Union[str, Tuple[int, ...]]] = None,
n_cpus: int = 1,
overwrite_test_rewards: bool = False,
overwrite_results: bool = False,
verbosity: Literal["INFO", "DEBUG"] = "INFO",
) -> None:
""" Evaluates alignment test generated by ground-truth rewards. """
logging.basicConfig(level=verbosity, format="%(levelname)s:%(asctime)s:%(message)s")
if replications is not None:
replication_indices = parse_replications(replications)
for replication in replication_indices:
if not (datadir / str(replication)).exists():
logging.warning(f"Replication {replication} does not exist, skipping")
continue
logging.info(f"Starting replication {replication}")
simulated(
epsilons=epsilons,
deltas=deltas,
n_rewards=n_rewards,
human_samples=human_samples,
n_reward_samples=n_reward_samples,
n_test_states=n_test_states,
n_gt_test_questions=n_gt_test_questions,
datadir=datadir / str(replication),
outdir=outdir / str(replication),
use_mean_reward=use_mean_reward,
use_random_test_questions=use_random_test_questions,
use_cheating_questions=use_cheating_questions,
n_random_test_questions=n_random_test_questions,
skip_remove_duplicates=skip_remove_duplicates,
skip_epsilon_filtering=skip_epsilon_filtering,
skip_redundancy_filtering=skip_redundancy_filtering,
use_true_epsilon=use_true_epsilon,
legacy_test_rewards=legacy_test_rewards,
n_cpus=n_cpus,
overwrite_test_rewards=overwrite_test_rewards,
overwrite_results=overwrite_results,
verbosity=verbosity,
)
exit()
logging.info(f"Using {n_cpus} cpus.")
parallel = Parallel(n_jobs=n_cpus)
outdir.mkdir(parents=True, exist_ok=True)
if n_random_test_questions is not None:
        # Argh defaults to parsing something as a string if it's optional
n_random_test_questions = int(n_random_test_questions)
flags = pkl.load(open(datadir / flags_name, "rb"))
query_type = flags["query_type"]
equiv_probability = flags["equiv_size"]
env = Driver()
n_reward_features = env.num_of_features
logging.info("Loading elicitation results")
elicited_normals, elicited_preferences, elicited_input_features = load_elicitation(
datadir=datadir,
normals_name=normals_name,
preferences_name=preferences_name,
input_features_name=input_features_name,
n_reward_features=n_reward_features,
use_equiv=use_equiv,
query_type=query_type,
equiv_probability=equiv_probability,
)
true_reward = np.load(datadir / true_reward_name)
assert_reward(true_reward, False, n_reward_features)
if use_equiv:
true_reward = np.append(true_reward, [1])
else:
assert not np.any(elicited_preferences == 0)
factory = TestFactory(
query_type=query_type,
reward_dimension=elicited_normals.shape[1],
equiv_probability=equiv_probability,
n_reward_samples=n_reward_samples,
use_mean_reward=use_mean_reward,
skip_dedup=skip_remove_duplicates,
skip_noise_filtering=True,
skip_epsilon_filtering=skip_epsilon_filtering,
skip_redundancy_filtering=skip_redundancy_filtering,
use_true_epsilon=use_true_epsilon,
true_reward=true_reward,
)
logging.info(
f"""Filtering settings:
# reward samples={n_reward_samples},
use mean reward={use_mean_reward},
skip duplicates={skip_remove_duplicates}
skip noise={True}
skip epsilon={skip_epsilon_filtering}
skip redundancy={skip_redundancy_filtering}
use true epsilon={use_true_epsilon}
"""
)
confusion_path, test_path = make_outnames(
outdir,
skip_remove_duplicates,
True,
skip_epsilon_filtering,
skip_redundancy_filtering,
)
confusions: Dict[Experiment, np.ndarray] = load(confusion_path, overwrite_results, default={})
minimal_tests: Dict[Experiment, np.ndarray] = load(test_path, overwrite_results, default={})
experiments = make_experiments(
epsilons, deltas, human_samples, overwrite_results, experiments=set(minimal_tests.keys())
)
if use_random_test_questions:
logging.info("Making random test")
logging.info(f"True reward: {true_reward}")
normals, preferences, input_features = make_random_test(
n_random_test_questions,
elicited_input_features,
elicited_preferences,
reward_iterations=flags["reward_iterations"],
query_type=query_type,
equiv_size=flags["equiv_size"],
sim=env,
use_equiv=use_equiv,
)
good_indices = (true_reward @ normals.T) > 0
logging.info(f"{np.mean(good_indices)*100:2f}% of new test questions agree with gt reward.")
if use_cheating_questions:
logging.info(f"Selecting only questions consistent with gt reward")
normals = normals[good_indices]
preferences = preferences[good_indices]
input_features = input_features[good_indices]
assert_normals(normals, use_equiv)
else:
max_n = max(human_samples)
preferences = elicited_preferences[:max_n]
input_features = elicited_input_features[:max_n]
logging.debug(f"elicited_normals={elicited_normals[:10]}")
normals = orient_normals(
elicited_normals[:max_n], preferences, use_equiv, n_reward_features
)
logging.debug(f"normals={normals[:10]}")
assert np.all(true_reward @ normals.T >= 0)
if not legacy_test_rewards:
test_rewards = make_test_rewards(
epsilons=epsilons,
true_reward=true_reward,
n_rewards=n_rewards,
n_test_states=n_test_states,
n_gt_test_questions=int(n_gt_test_questions),
traj_opt=traj_opt,
outdir=outdir,
parallel=parallel,
use_equiv=use_equiv,
overwrite=overwrite_test_rewards,
)
else:
test_rewards = legacy_make_test_rewards(1000, n_rewards, true_reward, epsilons, use_equiv)
for indices, confusion, experiment in parallel(
delayed(run_gt_experiment)(
normals=normals,
test_rewards=test_rewards[epsilon][0],
test_reward_alignment=test_rewards[epsilon][1],
epsilon=epsilon,
delta=delta,
use_equiv=use_equiv,
n_human_samples=n,
factory=factory,
input_features=input_features,
preferences=preferences,
outdir=outdir,
verbosity=verbosity,
)
for epsilon, delta, n in experiments
):
minimal_tests[experiment] = indices
confusions[experiment] = confusion
pkl.dump(confusions, open(confusion_path, "wb"))
pkl.dump(minimal_tests, open(test_path, "wb"))
@arg("--epsilons", nargs="+", type=float)
@arg("--deltas", nargs="+", type=float)
@arg("--human-samples", nargs="+", type=int)
def human(
epsilons: List[float] = [0.0],
deltas: List[float] = [0.05],
n_rewards: int = 10000,
human_samples: List[int] = [1],
n_model_samples: int = 1000,
input_features_name: Path = Path("input_features.npy"),
normals_name: Path = Path("normals.npy"),
preferences_name: Path = Path("preferences.npy"),
flags_name: Path = Path("flags.pkl"),
datadir: Path = Path("questions"),
outdir: Path = Path("questions"),
rewards_path: Optional[Path] = None,
use_mean_reward: bool = False,
skip_remove_duplicates: bool = False,
skip_epsilon_filtering: bool = False,
skip_redundancy_filtering: bool = False,
n_cpus: int = 1,
overwrite: bool = False,
):
""" Evaluates alignment test elicited from a human. """
outdir.mkdir(parents=True, exist_ok=True)
parallel = Parallel(n_jobs=n_cpus)
flags = pkl.load(open(datadir / flags_name, "rb"))
query_type = flags["query_type"]
equiv_probability = flags["equiv_size"]
sim = Driver()
n_reward_features = sim.num_of_features
elicited_normals, elicited_preferences, elicited_input_features = load_elicitation(
datadir=datadir,
normals_name=normals_name,
preferences_name=preferences_name,
input_features_name=input_features_name,
n_reward_features=n_reward_features,
use_equiv=use_equiv,
query_type=query_type,
equiv_probability=equiv_probability,
)
assert elicited_preferences.shape[0] > 0
factory = TestFactory(
query_type=query_type,
reward_dimension=elicited_normals.shape[1],
equiv_probability=equiv_probability,
n_reward_samples=n_model_samples,
use_mean_reward=use_mean_reward,
skip_dedup=skip_remove_duplicates,
skip_noise_filtering=True,
skip_epsilon_filtering=skip_epsilon_filtering,
skip_redundancy_filtering=skip_redundancy_filtering,
)
test_path = outdir / make_outname(
skip_remove_duplicates,
True,
skip_epsilon_filtering,
skip_redundancy_filtering,
base="indices",
)
test_results_path = outdir / make_outname(
skip_remove_duplicates,
True,
skip_epsilon_filtering,
skip_redundancy_filtering,
base="test_results",
)
minimal_tests: Dict[Experiment, np.ndarray] = load(test_path, overwrite)
results: Dict[Experiment, np.ndarray] = load(test_results_path, overwrite)
test_rewards = (
np.load(open(rewards_path, "rb"))
if rewards_path is not None
else make_gaussian_rewards(n_rewards, use_equiv)
)
np.save(outdir / "test_rewards.npy", test_rewards)
experiments = make_experiments(
epsilons, deltas, human_samples, overwrite, experiments=set(minimal_tests.keys())
)
for indices, result, experiment in parallel(
delayed(run_human_experiment)(
test_rewards,
elicited_normals,
elicited_input_features,
elicited_preferences,
epsilon,
delta,
n,
factory,
use_equiv,
)
for epsilon, delta, n in experiments
):
minimal_tests[experiment] = indices
results[experiment] = result
pkl.dump(minimal_tests, open(test_path, "wb"))
pkl.dump(results, open(test_results_path, "wb"))
def compare_test_labels(
test_rewards_path: Path,
true_reward_path: Path,
traj_opt: bool = False,
elicitation: bool = False,
replications: Optional[str] = None,
normals_path: Optional[Path] = None,
):
if replications is not None:
raise NotImplementedError("Replications not yet implemented")
starting_tests: Dict[float, Tuple[np.ndarray, np.ndarray]] = pkl.load(
open(test_rewards_path, "rb")
)
assert not (traj_opt == elicitation), "Provided labels must come from exactly one source"
class Test(NamedTuple):
rewards: np.ndarray
q_labels: np.ndarray
elicitation_labels: np.ndarray
test_rewards: Dict[float, Test] = {}
true_reward = np.load(true_reward_path)
if traj_opt:
normals = np.load(normals_path)
for epsilon, (rewards, q_labels) in starting_tests.items():
normals = normals[true_reward @ normals.T > epsilon]
elicitation_labels = run_test(normals, rewards, use_equiv=False)
test_rewards[epsilon] = Test(
rewards=rewards, q_labels=q_labels, elicitation_labels=elicitation_labels
)
elif elicitation:
        parallel = Parallel(n_jobs=-4)
env = LegacyEnv(reward=true_reward, random_start=True)
traj_optimizer = TrajOptimizer(10)
for epsilon, (rewards, elicitation_labels) in starting_tests.items():
q_labels = rewards_aligned(
traj_optimizer=traj_optimizer,
env=env,
true_reward=true_reward,
test_rewards=rewards,
epsilon=epsilon,
parallel=parallel,
)
test_rewards[epsilon] = Test(
rewards=rewards, q_labels=q_labels, elicitation_labels=elicitation_labels
)
total_agree = 0
total_rewards = 0
for epsilon, test in test_rewards.items():
total_agree += np.sum(test.q_labels == test.elicitation_labels)
total_rewards += len(test.rewards)
print(
f"Critic and superset labels agree on {total_agree / total_rewards * 100 :.1f}% of rewards"
)
# Test reward generation
def make_test_rewards(
epsilons: Sequence[float],
true_reward: np.ndarray,
n_rewards: int,
outdir: Path,
parallel: Parallel,
n_test_states: Optional[int] = None,
traj_opt: bool = False,
max_attempts: int = 10,
n_gt_test_questions: Optional[int] = None,
use_equiv: bool = False,
overwrite: bool = False,
) -> Dict[float, Tuple[np.ndarray, np.ndarray]]:
""" Makes test rewards sets for every epsilon and saves them to a file. """
traj_optimizer = (
TrajOptimizer(n_planner_iters=100, optim=tf.keras.optimizers.Adam(0.2))
if traj_opt
else None
)
reward_path = outdir / "test_rewards.pkl"
test_rewards: Dict[float, Tuple[np.ndarray, np.ndarray]] = load(
reward_path, overwrite=overwrite
)
if test_rewards is None:
test_rewards = {}
else:
logging.info(f"Loading test rewards from {reward_path}")
new_epsilons = set(epsilons) - test_rewards.keys()
if len(new_epsilons) > 0:
logging.info(f"Creating new test rewards for epsilons: {new_epsilons}")
if (n_test_states is not None and n_test_states > 1) or len(new_epsilons) == 1:
# Parallelize internally
test_rewards.update(
{
epsilon: find_reward_boundary(
true_reward=true_reward,
traj_optimizer=traj_optimizer,
n_rewards=n_rewards,
use_equiv=use_equiv,
epsilon=epsilon,
n_test_states=n_test_states,
max_attempts=max_attempts,
outdir=outdir,
n_gt_test_questions=n_gt_test_questions,
overwrite=overwrite,
parallel=parallel,
)[:2]
for epsilon in new_epsilons
}
)
else:
for rewards, alignment, epsilon in parallel(
delayed(find_reward_boundary)(
true_reward=true_reward,
traj_optimizer=traj_optimizer,
n_rewards=n_rewards,
use_equiv=use_equiv,
epsilon=epsilon,
n_test_states=n_test_states,
max_attempts=max_attempts,
n_gt_test_questions=n_gt_test_questions,
outdir=outdir,
overwrite=overwrite,
parallel=None,
)
for epsilon in new_epsilons
):
test_rewards[epsilon] = (rewards, alignment)
logging.info(f"Writing generated test rewards to {reward_path}")
pkl.dump(test_rewards, open(reward_path, "wb"))
return test_rewards
def find_reward_boundary(
true_reward: np.ndarray,
traj_optimizer: Optional[TrajOptimizer],
n_rewards: int,
use_equiv: bool,
epsilon: float,
max_attempts: int,
outdir: Path,
parallel: Parallel,
n_test_states: Optional[int] = None,
n_gt_test_questions: Optional[int] = None,
overwrite: bool = False,
) -> Tuple[np.ndarray, np.ndarray, float]:
""" Finds a ballanced set of test rewards according to a critic and epsilon. """
env = LegacyEnv(reward=true_reward)
# Don't parallelize here if we're only testing at one state
logging.debug(f"# test states={n_test_states}")
parallel = None if n_test_states is None or n_test_states <= 1 else parallel
new_rewards = partial(
make_gaussian_rewards, n_rewards=n_rewards, use_equiv=use_equiv, mean=true_reward
)
get_alignment = partial(
rewards_aligned,
traj_optimizer=traj_optimizer,
env=env,
true_reward=true_reward,
epsilon=epsilon,
parallel=parallel,
n_test_states=n_test_states,
n_questions=n_gt_test_questions,
)
search = TestRewardSearch.load(epsilon=epsilon, path=outdir / "search.pkl", overwrite=overwrite)
if search is None:
search = TestRewardSearch(
epsilon,
cov_search=GeometricSearch(start=1.0),
max_attempts=max_attempts,
outdir=outdir,
new_rewards=new_rewards,
get_alignment=get_alignment,
)
else:
search.new_rewards = new_rewards
search.get_alignment = get_alignment
best_test = search.run()
return best_test.rewards, best_test.alignment, epsilon
def rewards_aligned(
traj_optimizer: Optional[TrajOptimizer],
env: Env,
true_reward: np.ndarray,
test_rewards: np.ndarray,
epsilon: float,
parallel: Optional[Parallel] = None,
n_test_states: Optional[int] = None,
n_questions: int = 100000,
use_equiv: bool = False,
) -> np.ndarray:
""" Determines the epsilon-alignment of a set of test rewards relative to a critic and epsilon. """
# This test can produce both false positives and false negatives
# This test is prone to false positives, but a negative is always a true negative
gt_test = make_gt_test_align(test_rewards, n_questions, true_reward, epsilon, use_equiv)
if traj_optimizer is not None:
traj_opt_alignment = make_traj_opt_align(
traj_optimizer, env, true_reward, test_rewards, epsilon, parallel, n_test_states
)
# Start with traj opt alignment, then mask out all of the rewards that failed the gt test
# x y z
# 0 0 0
# 0 1 0 don't trust y when it says something is aligned if you failed the traj opt
# 1 0 0 if y says it's misaligned, then it is
# 1 1 1
# This is just the & function
alignment = traj_opt_alignment & gt_test
        n_masked = np.sum(traj_opt_alignment & np.logical_not(gt_test))
logging.info(
f"Trajectory optimization labelling produced at least {n_masked} false positives"
)
else:
alignment = gt_test
return alignment
def make_gt_test_align(
test_rewards: np.ndarray,
n_questions: int,
true_reward: np.ndarray,
epsilon: float,
use_equiv: bool = False,
) -> np.ndarray:
env = Driver()
trajs = make_random_questions(n_questions, env)
_, normals = make_normals(trajs, env, use_equiv)
value_diff = true_reward @ normals.T
eps_questions = np.abs(value_diff) > epsilon
normals = normals[eps_questions]
gt_pref = value_diff[eps_questions] > 0
normals = orient_normals(normals, gt_pref, use_equiv)
alignment = cast(np.ndarray, np.all(test_rewards @ normals.T > 0, axis=1))
assert alignment.shape == (
test_rewards.shape[0],
), f"alignment shape={alignment.shape} is not expected {test_rewards.shape[0]}"
return alignment
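# Hedged toy example (not from the original source): with normals oriented so that
# true_reward @ n > 0 for every kept question, a test reward counts as aligned
# exactly when it falls on the same side of every strict half-plane.
def _halfplane_alignment_demo():
    true_reward = np.array([1.0, 0.0])
    normals = np.array([[1.0, 0.5], [0.8, -0.2]])        # already oriented: true_reward @ n > 0
    test_rewards = np.array([[0.9, 0.1], [-1.0, 0.3]])   # first agrees everywhere, second does not
    return np.all(test_rewards @ normals.T > 0, axis=1)  # -> array([ True, False])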
def make_traj_opt_align(
traj_optimizer: TrajOptimizer,
env: Env,
true_reward: np.ndarray,
test_rewards: np.ndarray,
epsilon: float,
parallel: Optional[Parallel] = None,
n_test_states: Optional[int] = None,
) -> np.ndarray:
state_shape = env.observation_space.sample().shape
action_shape = env.action_space.sample().shape
if n_test_states is not None:
raw_states = np.array(
[
flatten(env.observation_space, env.observation_space.sample())
for _ in range(n_test_states)
]
)
else:
n_test_states = 1
raw_states = np.array([env.state])
assert raw_states.shape == (n_test_states, *state_shape)
opt_plans = make_plans(
true_reward.reshape(1, 4),
raw_states,
traj_optimizer,
parallel,
action_shape,
memorize=True,
)
assert opt_plans.shape == (
1,
n_test_states,
50,
*action_shape,
), f"opt_plans shape={opt_plans.shape} is not expected {(1,n_test_states,50,*action_shape)}"
opt_values: np.ndarray = rollout_plans(env, opt_plans, raw_states)
plans = make_plans(test_rewards, raw_states, traj_optimizer, parallel, action_shape)
assert plans.shape == (
len(test_rewards),
n_test_states,
50,
*action_shape,
), f"plans shape={plans.shape} is not expected {(len(test_rewards),n_test_states,50,*action_shape)}"
values = rollout_plans(env, plans, raw_states)
assert values.shape == (
len(test_rewards),
n_test_states,
), f"Values shape={values.shape} is not expected {(len(test_rewards), n_test_states)}"
alignment = cast(np.ndarray, np.all(opt_values - values < epsilon, axis=1))
return alignment
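# Hedged toy example (not from the original source) of the epsilon-alignment rule
# above: a candidate reward is labelled aligned when the return of its plan stays
# within epsilon of the optimal return at every test state.
def _epsilon_alignment_demo():
    opt_values = np.array([[10.0, 8.0]])                  # optimal return per test state
    values = np.array([[9.9, 7.9], [9.0, 7.9]])           # returns for two candidate rewards
    epsilon = 0.5
    return np.all(opt_values - values < epsilon, axis=1)  # -> array([ True, False])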
def rollout_plans(env: LegacyEnv, plans: np.ndarray, states: np.ndarray):
returns = np.empty((plans.shape[0], plans.shape[1]))
assert len(returns.shape) == 2
assert len(plans.shape) == 4
for i in range(plans.shape[0]):
for j in range(plans.shape[1]):
returns[i, j] = rollout(plans[i, j], env, states[j])
return returns
def legacy_make_test_rewards(
n_questions: int,
n_rewards: int,
true_reward: np.ndarray,
epsilons: List[float],
use_equiv: bool,
) -> Dict[float, Tuple[np.ndarray, np.ndarray]]:
""" Generates n_rewards reward vectors and determines which are aligned. """
assert n_rewards > 0
assert_reward(true_reward, use_equiv)
trajs = make_random_questions(n_questions, Driver())
_, normals = make_normals(trajs, Driver(), use_equiv)
gt_pref = true_reward @ normals.T > 0
normals = orient_normals(normals, gt_pref, use_equiv)
assert_normals(normals, use_equiv)
n_reward_features = normals.shape[1]
test_rewards: Dict[float, Tuple[np.ndarray, np.ndarray]] = {}
for epsilon in epsilons:
assert epsilon >= 0.0
cov = 1.0
rewards = make_gaussian_rewards(n_rewards, use_equiv, mean=true_reward, cov=cov)
normals = normals[true_reward @ normals.T > epsilon]
ground_truth_alignment = cast(np.ndarray, np.all(rewards @ normals.T > 0, axis=1))
mean_agree = np.mean(ground_truth_alignment)
while mean_agree > 0.55 or mean_agree < 0.45:
if mean_agree > 0.55:
cov *= 1.1
else:
cov /= 1.1
if not np.isfinite(cov) or cov <= 0.0 or cov >= 100.0:
# TODO(joschnei): Break is a code smell
logging.warning(f"cov={cov}, using last good batch of rewards.")
break
rewards = make_gaussian_rewards(n_rewards, use_equiv, mean=true_reward, cov=cov)
normals = normals[true_reward @ normals.T > epsilon]
ground_truth_alignment = cast(np.ndarray, np.all(rewards @ normals.T > 0, axis=1))
mean_agree = np.mean(ground_truth_alignment)
assert ground_truth_alignment.shape == (n_rewards,)
assert rewards.shape == (n_rewards, n_reward_features)
test_rewards[epsilon] = (rewards, ground_truth_alignment)
return test_rewards
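# Hedged sketch (not from the original source): the covariance balancing loop above,
# reduced to its core idea -- multiplicatively adjust a scale parameter until a
# stochastic acceptance rate falls inside a target band.
def _balance_scale(sample_rate, scale=1.0, low=0.45, high=0.55, factor=1.1, max_iters=100):
    """sample_rate(scale) -> float in [0, 1]; returns a scale whose rate lands in [low, high]."""
    for _ in range(max_iters):
        rate = sample_rate(scale)
        if low <= rate <= high:
            break
        scale = scale * factor if rate > high else scale / factor
    return scale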
def make_plans(
rewards: np.ndarray,
states: np.ndarray,
optim: TrajOptimizer,
parallel: Optional[Parallel] = None,
action_shape: Tuple[int, ...] = (2,),
memorize: bool = False,
) -> np.ndarray:
assert shape_compat(
rewards, (-1, 4)
), f"rewards shape={rewards.shape} is wrong, expected (-1, 4)"
if parallel is not None:
input_batches = np.array_split(list(product(rewards, states)), parallel.n_jobs)
logging.debug("Branching")
return np.concatenate(
parallel(
delayed(align_worker)(
rewards=batch[:, 0],
states=batch[:, 1],
optim=optim,
action_shape=action_shape,
)
for batch in input_batches
)
).reshape(len(rewards), len(states), 50, *action_shape)
else:
plans = np.empty((len(rewards), len(states), 50, *action_shape))
for i, reward in enumerate(rewards):
assert reward.shape == (4,)
for j, state in enumerate(states):
traj, _ = optim.make_opt_traj(reward, state, memorize=memorize)
plans[i, j] = traj.reshape(-1, *action_shape)
return plans
def align_worker(
rewards: np.ndarray,
states: np.ndarray,
optim: TrajOptimizer,
action_shape: Tuple[int, ...] = (2,),
):
batch_size = rewards.shape[0]
assert states.shape[0] == batch_size
plans = np.empty((batch_size, 50, *action_shape))
for i, (reward, state) in enumerate(zip(rewards, states)):
traj, _ = optim.make_opt_traj(reward, state)
plans[i] = traj.reshape(-1, *action_shape)
return plans
# Simulated Experiment
def run_gt_experiment(
normals: np.ndarray,
test_rewards: np.ndarray,
test_reward_alignment: np.ndarray,
epsilon: float,
delta: Optional[float],
use_equiv: bool,
n_human_samples: int,
factory: TestFactory,
input_features: np.ndarray,
preferences: np.ndarray,
outdir: Path,
verbosity: Literal["INFO", "DEBUG"] = "INFO",
) -> Tuple[np.ndarray, np.ndarray, Experiment]:
""" Executes an alignment test on a set of test rewards and records the performance of the test."""
experiment = (epsilon, delta, n_human_samples)
logdir = outdir / "logs"
logdir.mkdir(parents=True, exist_ok=True)
logging.basicConfig(
filename=logdir / f"{epsilon}.{delta}.{n_human_samples}.log",
filemode="w",
level=verbosity,
force=True,
format="%(levelname)s:%(asctime)s:%(message)s",
)
logging.info(f"Working on epsilon={epsilon}, delta={delta}, n={n_human_samples}")
# TODO(joschnei): Really need to make this a fixed set common between comparisons.
filtered_normals = normals[:n_human_samples]
input_features = input_features[:n_human_samples]
preferences = preferences[:n_human_samples]
filtered_normals, indices = factory.filter_halfplanes(
inputs_features=input_features,
normals=filtered_normals,
epsilon=epsilon,
preferences=preferences,
delta=delta,
)
confusion = eval_test(
normals=filtered_normals,
rewards=test_rewards,
aligned=test_reward_alignment,
use_equiv=use_equiv,
)
assert confusion.shape == (2, 2)
return indices, confusion, experiment
def eval_test(
normals: np.ndarray, rewards: np.ndarray, aligned: np.ndarray, use_equiv: bool
) -> np.ndarray:
""" Evaluates an alignment test on a set of test rewards and reports confusion wrt ground truth. """
assert rewards.shape[0] == aligned.shape[0]
assert_rewards(rewards, use_equiv)
if normals.shape[0] > 0:
results = run_test(normals, rewards, use_equiv)
logging.info(
f"predicted true={np.sum(results)}, predicted false={results.shape[0] - np.sum(results)}"
)
return confusion_matrix(y_true=aligned, y_pred=results, labels=[False, True])
else:
return confusion_matrix(
y_true=aligned,
            y_pred=np.ones(aligned.shape, dtype=bool),
            labels=[False, True],
        )
import unittest
import numpy as np
from numpy import sinh as sh
import green
class GreenForLaplaceOperatorTest(unittest.TestCase):
def test_line_segment_laplace(self):
def line_segment(a, b):
def grin_function(x, s):
x_, s_ = x, s
result = np.zeros_like(s_)
idx = np.logical_and(x_ >= a, x_ <= s_)
result[idx] = ((x_[idx] - a) * (b - s_[idx])) / (b - a)
idx = np.logical_and(x_ >= s_, x_ <= b)
result[idx] = ((s_[idx] - a) * (b - x_[idx])) / (b - a)
return result
return grin_function
a, b = -7.56, 6.35
gr = line_segment(a, b)
x = np.random.rand(100) * 10 - 5
s = np.random.rand(100) * 10 - 5
is_equal = np.allclose(gr(x, s), green.line_segment(x, s, ab=(a, b), operator="laplace"))
self.assertTrue(is_equal)
def test_line_segment_gelmgols(self):
def line_segment(a, b, kappa):
def grin_function(x, s):
x_, s_ = x, s
result = np.zeros_like(s_)
idx = np.logical_and(x_ >= a, x_ <= s_)
result[idx] = (sh(kappa * (x_[idx] - a)) * sh(kappa * (b - s_[idx]))) / (kappa * sh(kappa * (b - a)))
idx = np.logical_and(x_ >= s_, x_ <= b)
result[idx] = (sh(kappa * (s_[idx] - a)) * sh(kappa * (b - x_[idx]))) / (kappa * sh(kappa * (b - a)))
return result
return grin_function
a, b, kappa = -7.56, 6.35, 1.2
gr = line_segment(a, b, kappa)
x = np.random.rand(100) * 10 - 5
s = np.random.rand(100) * 10 - 5
is_equal = np.allclose(gr(x, s), green.line_segment(x, s, ab=(a, b), kappa=kappa, operator="gelmgols"))
self.assertTrue(is_equal)
def test_square_laplace(self):
def square(a, b, n):
def grin_function(x, s):
P = (np.pi * np.arange(1, n + 1)) / a
Q = (np.pi * np.arange(1, n + 1)) / b
Pm, Qm = np.meshgrid(P, Q)
Pm = np.tile(Pm[:, :, np.newaxis, np.newaxis], (1, 1, s.shape[1], s.shape[2]))
Qm = np.tile(Qm[:, :, np.newaxis, np.newaxis], (1, 1, s.shape[1], s.shape[2]))
return (4 / (a * b)) * np.sum(np.sum(
(np.sin(Pm * x[0]) * np.sin(Qm * x[1]) * np.sin(Pm * s[0]) * np.sin(Qm * s[1])) / (
                        np.power(Pm, 2) + np.power(Qm, 2)
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Generate the mindir for bprop"""
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor, Parameter
from mindspore.ops import operations as P
import mindspore.ops.functional as F
import mindspore.ops as ops
from mindspore.ops.operations import _inner_ops as inner
import mindspore.common.dtype as mstype
from mindspore.common.initializer import initializer
from mindspore.ops.bprop_mindir import serializable_bprop_ops
from mindspore._c_expression import load_mindir
import mindspore.ops._grad as g
class Net(nn.Cell):
def __init__(self, op):
super(Net, self).__init__()
self.op = op
def construct(self, *inputs):
return self.op(*inputs)
class TupleInputNet(nn.Cell):
def __init__(self, op):
super(TupleInputNet, self).__init__()
self.op = op
def construct(self, x):
return self.op((x,))
class GradNet(nn.Cell):
def __init__(self, network):
super(GradNet, self).__init__()
self.grad = ops.GradOperation(get_all=True)
self.network = network
def construct(self, *inputs):
gout = self.grad(self.network)(*inputs)
return gout
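# Hedged illustration (not one of the original test cases): the pattern every test
# below follows -- wrap an operator in Net, wrap that in GradNet, and call compile
# so the bprop mindir of the operator is loaded and the backward graph is built.
# P.Abs is only an illustrative choice of operator.
def _compile_bprop_example():
    x = Tensor(np.array([[1.0, -2.0], [3.0, -4.0]]).astype(np.float32))
    grad = GradNet(Net(P.Abs()))
    grad.compile(x)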
def test_load_mindir_dir():
"""
Feature: Bprop pre-compilation.
Description: Load all the mindir files of serializable bprop.
Expectation: All are loaded successfully.
"""
bprop_path = g.__file__
bprop_installed_dir = bprop_path[: bprop_path.rindex('/')]
bprop_mindir_export_dir = bprop_installed_dir + "/../bprop_mindir"
for op in serializable_bprop_ops:
if isinstance(op, str):
op_name = op
else:
op_name = op.__name__
file_name = bprop_mindir_export_dir + "/" + op_name + "_bprop.mindir"
graph = load_mindir(file_name)
assert not graph is None
def test_relu():
x = Tensor(np.array([[[[-1, 1, 10],
[1, -1, 1],
[10, 1, -1]]]]).astype(np.float32))
relu = Net(P.ReLU())
grad = GradNet(relu)
grad.compile(x)
def test_identity():
x = Tensor(np.array([1, 2, 3, 4]).astype(np.int64))
identity = Net(P.Identity())
grad = GradNet(identity)
grad.compile(x)
def test_range():
x = Tensor(np.array([1, 2, 3, 2]).astype(np.int64))
range_net = Net(inner.Range(1.0, 8.0, 2.0))
grad = GradNet(range_net)
grad.compile(x)
def test_ones_like():
x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
ones_like = Net(P.OnesLike())
grad = GradNet(ones_like)
grad.compile(x)
def test_zeros_like():
x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
zeros_like = Net(P.ZerosLike())
grad = GradNet(zeros_like)
grad.compile(x)
def test_argmax():
x = Tensor(np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]]).astype(np.float32))
argmax = Net(P.Argmax())
grad = GradNet(argmax)
grad.compile(x)
def test_argmin():
x = Tensor(np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]]).astype(np.float32))
argmin = Net(P.Argmin())
grad = GradNet(argmin)
grad.compile(x)
def test_broadcast():
x = Tensor(np.array([1, 2, 5, 2]).astype(np.float32))
broadcast = TupleInputNet(P.Broadcast(1))
grad = GradNet(broadcast)
grad.compile(x)
def test_is_finite():
x = Tensor(np.ones([2, 4]).astype(np.int32))
is_finite = Net(P.IsFinite())
grad = GradNet(is_finite)
grad.compile(x)
def test_approximate_equal():
x = Tensor(np.array([1, 2, 3]).astype(np.float32))
y = Tensor(np.array([2, 4, 6]).astype(np.float32))
approximate_equal = Net(P.ApproximateEqual(2.))
grad = GradNet(approximate_equal)
grad.compile(x, y)
def test_logical_not():
    x = Tensor(np.array([True, False, True]).astype(np.bool_))
logical_not = Net(P.LogicalNot())
grad = GradNet(logical_not)
grad.compile(x)
def test_sign():
x = Tensor(np.array([[2.0, 0.0, -1.0]]).astype(np.float32))
sign = Net(P.Sign())
grad = GradNet(sign)
grad.compile(x)
def test_round():
x = Tensor(np.array([0.8, 1.5, 2.3, 2.5, -4.5]).astype(np.float32))
round_net = Net(P.Round())
grad = GradNet(round_net)
grad.compile(x)
def test_lin_space():
start = Tensor(1, mstype.float32)
stop = Tensor(10, mstype.float32)
num = 5
lin_space = Net(P.LinSpace())
grad = GradNet(lin_space)
grad.compile(start, stop, num)
def test_dropout_gen_mask():
x = (2, 4, 2, 2)
keep_prob = Tensor(1.0, mstype.float32)
dropout_gen_mask = Net(P.DropoutGenMask(10, 28))
grad = GradNet(dropout_gen_mask)
grad.compile(x, keep_prob)
def test_onehot():
indices = Tensor(np.array([0, 1, 2]).astype(np.int32))
depth, on_value, off_value = 3, Tensor(1.0, mstype.float32), Tensor(0.0, mstype.float32)
one_hot = Net(P.OneHot())
grad = GradNet(one_hot)
grad.compile(indices, depth, on_value, off_value)
def test_assign():
class AssignNet(nn.Cell):
def __init__(self):
super(AssignNet, self).__init__()
self.assign = P.Assign()
self.variable = Parameter(Tensor([1.0], mstype.float32), name="variable")
def construct(self, x):
return self.assign(self.variable, x)
value = Tensor([2.0], mstype.float32)
assign = AssignNet()
grad = GradNet(assign)
grad.compile(value)
def test_assign_add():
class AssignAddNet(nn.Cell):
def __init__(self):
super(AssignAddNet, self).__init__()
self.assign_add = P.AssignAdd()
self.variable = Parameter(initializer(1, [1], mstype.int64), name="global_step")
def construct(self, x):
return self.assign_add(self.variable, x)
value = Tensor(np.ones([1]).astype(np.int64) * 100)
assign_add = AssignAddNet()
grad = GradNet(assign_add)
grad.compile(value)
def test_assign_sub():
class AssignSubNet(nn.Cell):
def __init__(self):
super(AssignSubNet, self).__init__()
self.assign = P.AssignSub()
self.variable = Parameter(initializer(1, [1], mstype.int32), name="global_step")
def construct(self, x):
return self.assign(self.variable, x)
value = Tensor(np.ones([1]).astype(np.int32) * 100)
assign_sub = AssignSubNet()
grad = GradNet(assign_sub)
grad.compile(value)
def test_iou():
anchor_boxes = Tensor(np.random.randint(1.0, 5.0, [3, 4]).astype(np.float16))
gt_boxes = Tensor(np.random.randint(1.0, 5.0, [3, 4]).astype(np.float16))
iou = Net(P.IOU())
grad = GradNet(iou)
grad.compile(anchor_boxes, gt_boxes)
def test_bn_training_reduce():
x = Tensor(np.ones([128, 3, 32, 3]).astype(np.float32))
bn_training_reduce = Net(P.BNTrainingReduce())
grad = GradNet(bn_training_reduce)
grad.compile(x)
def test_equal():
x = Tensor([2.0], mstype.float32)
y = Tensor([2.0], mstype.float32)
equal = Net(P.Equal())
grad = GradNet(equal)
grad.compile(x, y)
def test_not_equal():
x = Tensor([2.0], mstype.float32)
y = Tensor([2.0], mstype.float32)
not_equal = Net(P.NotEqual())
grad = GradNet(not_equal)
grad.compile(x, y)
def test_greater():
x = Tensor(np.array([1, 2, 3]), mstype.int32)
y = Tensor(np.array([1, 1, 4]), mstype.int32)
greater = Net(P.Greater())
grad = GradNet(greater)
grad.compile(x, y)
def test_greater_equal():
x = Tensor(np.array([1, 2, 3]), mstype.int32)
y = Tensor(np.array([1, 1, 4]), mstype.int32)
greater_equal = Net(P.GreaterEqual())
grad = GradNet(greater_equal)
grad.compile(x, y)
def test_less():
x = Tensor(np.array([1, 2, 3]), mstype.int32)
y = Tensor(np.array([1, 1, 4]), mstype.int32)
less = Net(P.Less())
grad = GradNet(less)
grad.compile(x, y)
def test_less_equal():
x = Tensor(np.array([1, 2, 3]), mstype.int32)
y = Tensor(np.array([1, 1, 4]), mstype.int32)
less_equal = Net(P.LessEqual())
grad = GradNet(less_equal)
grad.compile(x, y)
def test_logical_and():
x = Tensor(np.array([True, False, True]), mstype.bool_)
y = Tensor(np.array([True, True, False]), mstype.bool_)
logical_and = Net(P.LogicalAnd())
grad = GradNet(logical_and)
grad.compile(x, y)
def test_logical_or():
x = Tensor(np.array([True, False, True]), mstype.bool_)
y = Tensor(np.array([True, True, False]), mstype.bool_)
logical_or = Net(P.LogicalOr())
grad = GradNet(logical_or)
grad.compile(x, y)
def test_reduce_all():
x = Tensor(np.array([[True, False], [True, True]]))
reduce_all = Net(P.ReduceAll(keep_dims=True))
grad = GradNet(reduce_all)
grad.compile(x)
def test_reduce_any():
x = Tensor(np.array([[True, False], [True, True]]))
reduce_all = Net(P.ReduceAny(keep_dims=True))
grad = GradNet(reduce_all)
grad.compile(x)
def test_dropout_do_mask():
input_x = Tensor(np.ones([2, 2, 3]), mstype.float32)
keep_prob = Tensor(0.5, mstype.float32)
mask = Tensor(np.ones([2]), mstype.uint8)
dropout_do_mask = Net(P.DropoutDoMask())
grad = GradNet(dropout_do_mask)
grad.compile(input_x, mask, keep_prob)
def test_select():
"""
Feature: Bprop pre-compilation.
Description: Compile the backward graph for the select op.
Expectation: Load the bprop mindir successfully.
"""
input_cond = Tensor([True, False])
x = Tensor(np.array([1, 2]), mstype.int32)
y = Tensor(np.array([1, 1]), mstype.int32)
select = Net(P.Select())
grad = GradNet(select)
grad.compile(input_cond, x, y)
def test_scatter_max():
"""
Feature: Bprop pre-compilation.
Description: Compile the backward graph for the scatter_max op.
Expectation: Load the bprop mindir successfully.
"""
class ScatterMaxNet(nn.Cell):
def __init__(self):
super(ScatterMaxNet, self).__init__()
self.scatter_max = P.ScatterMax()
self.input_x = Parameter(Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), mstype.float32),
name="input_x")
def construct(self, indices, updates):
return self.scatter_max(self.input_x, indices, updates)
indices = Tensor(np.array([[0, 0], [1, 1]]), mstype.int32)
updates = Tensor(np.ones([2, 2, 3]) * 88, mstype.float32)
scatter_max = ScatterMaxNet()
grad = GradNet(scatter_max)
grad.compile(indices, updates)
def test_relu_grad():
"""
Feature: Bprop pre-compilation.
Description: Compile the backward graph for the relu_grad op.
Expectation: Load the bprop mindir successfully.
"""
x = Tensor(np.array([[[[-1, 1, 10],
[1, -1, 1],
[10, 1, -1]]]]).astype(np.float32))
relu = Net(P.ReLU())
grad1 = GradNet(relu)
grad2 = GradNet(grad1)
grad2.compile(x)
def test_tuple_getitem():
"""
Feature: Bprop pre-compilation.
Description: Compile the backward graph for the tuple_getitem op.
Expectation: Load the bprop mindir successfully.
"""
class TupleGetitemNet(nn.Cell):
def __init__(self):
super(TupleGetitemNet, self).__init__()
self.maxpool_arg = P.MaxPoolWithArgmax(pad_mode="VALID", kernel_size=2, strides=1)
def construct(self, x):
output = self.maxpool_arg(x)
return output[0]
x = Tensor(np.arange(1 * 3 * 3 * 4).reshape((1, 3, 3, 4)), mstype.float32)
tuple_getitem = TupleGetitemNet()
grad = GradNet(tuple_getitem)
grad.compile(x)
def test_depend():
"""
Feature: Bprop pre-compilation.
Description: Compile the backward graph for the depend op.
Expectation: Load the bprop mindir successfully.
"""
class DependNet(nn.Cell):
def __init__(self):
super(DependNet, self).__init__()
self.softmax = P.Softmax()
self.depend = ops.Depend()
def construct(self, x, y):
mul = x * y
y = self.depend(y, mul)
output = self.softmax(y)
return output
x = Tensor(np.ones([4, 5]), mstype.float32)
y = Tensor(np.ones([4, 5]), mstype.float32)
depend = DependNet()
grad = GradNet(depend)
grad.compile(x, y)
def test_stop_gradient():
"""
Feature: Bprop pre-compilation.
Description: Compile the backward graph for the stop_gradient op.
Expectation: Load the bprop mindir successfully.
"""
class StopGradientNet(nn.Cell):
def __init__(self):
super(StopGradientNet, self).__init__()
def construct(self, x, y):
c = x * y
c_s = F.stop_gradient(c)
return c_s
x = Tensor(np.ones([4, 5]), mstype.float32)
y = Tensor(np.ones([4, 5]), mstype.float32)
stop_gradient = StopGradientNet()
grad = GradNet(stop_gradient)
grad.compile(x, y)
def test_switch():
"""
Feature: Bprop pre-compilation.
Description: Compile the backward graph for the switch op.
Expectation: Load the bprop mindir successfully.
"""
class SwitchNet(nn.Cell):
def __init__(self):
super(SwitchNet, self).__init__()
def construct(self, x, y):
if x > y:
return x
return y
x = Tensor(np.array([3]), mstype.float32)
y = Tensor(np.array([2]), mstype.float32)
switch_net = SwitchNet()
grad = GradNet(switch_net)
grad.compile(x, y)
def test_update_state():
"""
Feature: Bprop pre-compilation.
Description: Compile the backward graph for the update_state op.
Expectation: Load the bprop mindir successfully.
"""
class UpdateStateNet(nn.Cell):
def __init__(self):
super(UpdateStateNet, self).__init__()
self.assign_add = P.AssignAdd()
self.variable = Parameter(initializer(1, [1], mstype.int64), name="global_step")
def construct(self, x):
return self.assign_add(self.variable, x)
value = Tensor(np.ones([1]).astype(np.int64) * 100)
update_state = UpdateStateNet()
grad = GradNet(update_state)
grad.compile(value)
def test_load():
"""
Feature: Bprop pre-compilation.
Description: Compile the backward graph for the load op.
Expectation: Load the bprop mindir successfully.
"""
class LoadNet(nn.Cell):
def __init__(self):
super(LoadNet, self).__init__()
self.add = P.Add()
self.variable = Parameter(initializer(1, [1], mstype.int64), name="global_step")
def construct(self, x):
return self.add(self.variable, x)
value = Tensor(np.ones([1]).astype(np.int64) * 100)
load = LoadNet()
grad = GradNet(load)
grad.compile(value)
def test_floor_div():
"""
Feature: Bprop pre-compilation.
Description: Compile the backward graph for the floor_div op.
Expectation: Load the bprop mindir successfully.
"""
x = Tensor(np.array([2, 4, -1]), mstype.int32)
y = Tensor(np.array([3, 3, 3]), mstype.int32)
floor_div = Net(P.FloorDiv())
grad = GradNet(floor_div)
grad.compile(x, y)
def test_truncate_div():
"""
Feature: Bprop pre-compilation.
Description: Compile the backward graph for the truncate_div op.
Expectation: Load the bprop mindir successfully.
"""
x = Tensor(np.array([2, 4, -1]), mstype.int32)
    y = Tensor(np.array([3, 3, 3]), mstype.int32)
    truncate_div = Net(P.TruncateDiv())
    grad = GradNet(truncate_div)
    grad.compile(x, y)
"""
Signals and Systems Function Module
Copyright (c) March 2017, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.
Notes
-----
The primary purpose of this function library is to support the book Signals and Systems for Dummies. Beyond that it should be useful to anyone who wants to use Pylab for general signals and systems modeling and simulation. There is a good collection of digital communication simulation primitives included in the library. More enhancements are planned over time.
The formatted docstrings for the library follow. Click index in the upper right to get an
alphabetical listing of the library functions. In all of the example code given it is assumed that ssd has been imported into your workspace. See the examples below for import options.
Examples
--------
>>> import sk_dsp_comm.sigsys as ssd
>>> # Commands then need to be prefixed with ssd., i.e.,
>>> ssd.tri(t,tau)
>>> # A full import of the module, to avoid the the need to prefix with ssd, is:
>>> from sk_dsp_comm.sigsys import *
Function Catalog
----------------
"""
from matplotlib import pylab
import numpy as np
from numpy import fft
import matplotlib.pyplot as plt
from scipy import signal
from scipy.io import wavfile
from logging import getLogger
log = getLogger(__name__)
import warnings
def cic(m, k):
"""
A functional form implementation of a cascade of integrator comb (CIC) filters.
Parameters
----------
m : Effective number of taps per section (typically the decimation factor).
k : The number of CIC sections cascaded (larger K gives the filter a wider image rejection bandwidth).
Returns
-------
b : FIR filter coefficients for a simple direct form implementation using the filter() function.
Notes
-----
Commonly used in multirate signal processing digital down-converters and digital up-converters. A true CIC filter
requires no multiplies, only add and subtract operations. The functional form created here is a simple FIR requiring
real coefficient multiplies via filter().
<NAME> July 2013
"""
if k == 1:
b = np.ones(m)
else:
h = np.ones(m)
b = h
for i in range(1, k):
b = signal.convolve(b, h) # cascade by convolving impulse responses
# Make filter have unity gain at DC
return b / np.sum(b)
def ten_band_eq_filt(x,GdB,Q=3.5):
"""
Filter the input signal x with a ten-band equalizer having octave gain values in ndarray GdB.
The signal x is filtered using octave-spaced peaking filters starting at 31.25 Hz and
stopping at 16 kHz. The Q of each filter is 3.5, but can be changed. The sampling rate
is assumed to be 44.1 kHz.
Parameters
----------
x : ndarray of the input signal samples
GdB : ndarray containing ten octave band gain values [G0dB,...,G9dB]
Q : Quality factor vector for each of the NB peaking filters
Returns
-------
y : ndarray of output signal samples
Examples
--------
>>> # Test with white noise
    >>> x = randn(100000)
    >>> y = ten_band_eq_filt(x,GdB)
>>> psd(y,2**10,44.1)
"""
fs = 44100.0 # Hz
NB = len(GdB)
if not NB == 10:
raise ValueError("GdB length not equal to ten")
Fc = 31.25*2**np.arange(NB)
B = np.zeros((NB,3))
A = np.zeros((NB,3))
# Create matrix of cascade coefficients
for k in range(NB):
        b, a = peaking(GdB[k], Fc[k], Q, fs)
B[k,:] = b
A[k,:] = a
# Pass signal x through the cascade of ten filters
y = np.zeros(len(x))
for k in range(NB):
if k == 0:
y = signal.lfilter(B[k,:],A[k,:],x)
else:
y = signal.lfilter(B[k,:],A[k,:],y)
return y
def ten_band_eq_resp(GdB,Q=3.5):
"""
Create a frequency response magnitude plot in dB of a ten band equalizer
using a semilogplot (semilogx()) type plot
Parameters
----------
GdB : Gain vector for 10 peaking filters [G0,...,G9]
Q : Quality factor for each peaking filter (default 3.5)
Returns
-------
Nothing : two plots are created
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sk_dsp_comm import sigsys as ss
>>> ss.ten_band_eq_resp([0,10.0,0,0,-1,0,5,0,-4,0])
>>> plt.show()
"""
fs = 44100.0 # Hz
NB = len(GdB)
if not NB == 10:
raise ValueError("GdB length not equal to ten")
Fc = 31.25*2**np.arange(NB)
B = np.zeros((NB,3));
A = np.zeros((NB,3));
# Create matrix of cascade coefficients
for k in range(NB):
b,a = peaking(GdB[k],Fc[k],Q,fs)
B[k,:] = b
A[k,:] = a
# Create the cascade frequency response
F = np.logspace(1,np.log10(20e3),1000)
    H = np.ones(len(F), dtype=complex)
for k in range(NB):
w,Htemp = signal.freqz(B[k,:],A[k,:],2*np.pi*F/fs)
H *= Htemp
plt.figure(figsize=(6,4))
plt.subplot(211)
plt.semilogx(F,20*np.log10(abs(H)))
plt.axis([10, fs/2, -12, 12])
plt.grid()
plt.title('Ten-Band Equalizer Frequency Response')
plt.xlabel('Frequency (Hz)')
plt.ylabel('Gain (dB)')
plt.subplot(212)
    plt.stem(np.arange(NB), GdB, linefmt='b', markerfmt='bs')
#plt.bar(np.arange(NB)-.1,GdB,0.2)
plt.axis([0, NB-1, -12, 12])
plt.xlabel('Equalizer Band Number')
plt.ylabel('Gain Set (dB)')
plt.grid()
def peaking(GdB, fc, Q=3.5, fs=44100.):
"""
    A second-order peaking filter having GdB gain at fc and approximately
    0 dB otherwise.
    The returned filter coefficients correspond to a biquadratic system function
    containing five parameters.
Parameters
----------
    GdB : Peaking filter gain in dB at the center frequency fc
    fc : Center frequency in Hz
    Q : Filter Q which is inversely proportional to bandwidth
    fs : Sampling frequency in Hz
Returns
-------
b : ndarray containing the numerator filter coefficients
a : ndarray containing the denominator filter coefficients
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from sk_dsp_comm.sigsys import peaking
>>> from scipy import signal
>>> b,a = peaking(2.0,500)
>>> f = np.logspace(1,5,400)
>>> w,H = signal.freqz(b,a,2*np.pi*f/44100)
>>> plt.semilogx(f,20*np.log10(abs(H)))
>>> plt.ylabel("Power Spectral Density (dB)")
>>> plt.xlabel("Frequency (Hz)")
>>> plt.show()
>>> b,a = peaking(-5.0,500,4)
>>> w,H = signal.freqz(b,a,2*np.pi*f/44100)
>>> plt.semilogx(f,20*np.log10(abs(H)))
>>> plt.ylabel("Power Spectral Density (dB)")
>>> plt.xlabel("Frequency (Hz)")
"""
mu = 10**(GdB/20.)
kq = 4/(1 + mu)*np.tan(2*np.pi*fc/fs/(2*Q))
Cpk = (1 + kq *mu)/(1 + kq)
b1 = -2*np.cos(2*np.pi*fc/fs)/(1 + kq*mu)
b2 = (1 - kq*mu)/(1 + kq*mu)
a1 = -2*np.cos(2*np.pi*fc/fs)/(1 + kq)
a2 = (1 - kq)/(1 + kq)
b = Cpk*np.array([1, b1, b2])
a = np.array([1, a1, a2])
return b,a
def ex6_2(n):
"""
Generate a triangle pulse as described in Example 6-2
of Chapter 6.
You need to supply an index array n that covers at least [-2, 5].
The function returns the hard-coded signal of the example.
Parameters
----------
n : time index ndarray covering at least -2 to +5.
Returns
-------
x : ndarray of signal samples in x
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from sk_dsp_comm import sigsys as ss
>>> n = np.arange(-5,8)
>>> x = ss.ex6_2(n)
>>> plt.stem(n,x) # creates a stem plot of x vs n
"""
x = np.zeros(len(n))
for k, nn in enumerate(n):
if nn >= -2 and nn <= 5:
x[k] = 8 - nn
return x
def position_cd(Ka, out_type ='fb_exact'):
"""
CD sled position control case study of Chapter 18.
The function returns the closed-loop and open-loop
system function for a CD/DVD sled position control
    system. The loop amplifier gain is the only variable
    that may be changed. The type of system function returned
    can, however, be selected via out_type.
Parameters
----------
Ka : loop amplifier gain, start with 50.
out_type : 'open_loop' for open loop system function
out_type : 'fb_approx' for closed-loop approximation
out_type : 'fb_exact' for closed-loop exact
Returns
-------
b : numerator coefficient ndarray
a : denominator coefficient ndarray
Notes
-----
With the exception of the loop amplifier gain, all
other parameters are hard-coded from Case Study example.
Examples
--------
    >>> Ka = 50
    >>> b,a = position_cd(Ka,'fb_approx')
    >>> b,a = position_cd(Ka,'fb_exact')
"""
rs = 10/(2*np.pi)
# Load b and a ndarrays with the coefficients
if out_type.lower() == 'open_loop':
b = np.array([Ka*4000*rs])
a = np.array([1,1275,31250,0])
elif out_type.lower() == 'fb_approx':
b = np.array([3.2*Ka*rs])
a = np.array([1, 25, 3.2*Ka*rs])
elif out_type.lower() == 'fb_exact':
b = np.array([4000*Ka*rs])
a = np.array([1, 1250+25, 25*1250, 4000*Ka*rs])
else:
        raise ValueError('out_type must be: open_loop, fb_approx, or fb_exact')
return b, a
def cruise_control(wn,zeta,T,vcruise,vmax,tf_mode='H'):
"""
Cruise control with PI controller and hill disturbance.
This function returns various system function configurations
    for the cruise control Case Study example found in
    the supplementary article. The plant model is obtained by
    linearizing the equations of motion and the controller contains a
    proportional and integral gain term set via the closed-loop parameters
natural frequency wn (rad/s) and damping zeta.
Parameters
----------
wn : closed-loop natural frequency in rad/s, nominally 0.1
zeta : closed-loop damping factor, nominally 1.0
T : vehicle time constant, nominally 10 s
vcruise : cruise velocity set point, nominally 75 mph
vmax : maximum vehicle velocity, nominally 120 mph
tf_mode : 'H', 'HE', 'HVW', or 'HED' controls the system function returned by the function
'H' : closed-loop system function V(s)/R(s)
'HE' : closed-loop system function E(s)/R(s)
'HVW' : closed-loop system function V(s)/W(s)
'HED' : closed-loop system function E(s)/D(s), where D is the hill disturbance input
Returns
-------
b : numerator coefficient ndarray
a : denominator coefficient ndarray
Examples
--------
    >>> wn, zeta, T, vcruise, vmax = 0.1, 1.0, 10, 75, 120  # nominal values
    >>> # return the closed-loop system function output/input velocity
    >>> b,a = cruise_control(wn,zeta,T,vcruise,vmax,tf_mode='H')
    >>> # return the closed-loop system function loop error/hill disturbance
    >>> b,a = cruise_control(wn,zeta,T,vcruise,vmax,tf_mode='HED')
"""
tau = T/2.*vmax/vcruise
g = 9.8
    g *= 3*60**2/5280. # convert m/s^2 to mph/s (1 m ~ 3 ft, 3600 s/hr, 5280 ft/mi)
Kp = T*(2*zeta*wn-1/tau)/vmax
Ki = T*wn**2./vmax
K = Kp*vmax/T
wn = np.sqrt(K/(Kp/Ki))
zeta = (K + 1/tau)/(2*wn)
log.info('wn = %s' % (wn))
log.info('zeta = %s' % (zeta))
a = np.array([1, 2*zeta*wn, wn**2])
if tf_mode == 'H':
b = np.array([K, wn**2])
elif tf_mode == 'HE':
b = np.array([1, 2*zeta*wn-K, 0.])
elif tf_mode == 'HVW':
b = np.array([ 1, wn**2/K+1/tau, wn**2/(K*tau)])
b *= Kp
elif tf_mode == 'HED':
b = np.array([g, 0])
else:
        raise ValueError('tf_mode must be: H, HE, HVW, or HED')
return b, a
def splane(b,a,auto_scale=True,size=[-1,1,-1,1]):
"""
Create an s-plane pole-zero plot.
As input the function uses the numerator and denominator
s-domain system function coefficient ndarrays b and a respectively.
Assumed to be stored in descending powers of s.
Parameters
----------
b : numerator coefficient ndarray.
a : denominator coefficient ndarray.
    auto_scale : if True the plot axes are auto scaled from the roots (default True)
    size : [xmin,xmax,ymin,ymax] plot scaling used when auto_scale = False
Returns
-------
(M,N) : tuple of zero and pole counts + plot window
Notes
-----
This function tries to identify repeated poles and zeros and will
place the multiplicity number above and to the right of the pole or zero.
The difficulty is setting the tolerance for this detection. Currently it
is set at 1e-3 via the function signal.unique_roots.
Examples
--------
>>> # Here the plot is generated using auto_scale
>>> splane(b,a)
>>> # Here the plot is generated using manual scaling
>>> splane(b,a,False,[-10,1,-10,10])
"""
if (isinstance(a,int) or isinstance(a,float)):
a = [a]
if (isinstance(b,int) or isinstance(b,float)):
b = [b]
M = len(b) - 1
N = len(a) - 1
plt.figure(figsize=(5,5))
#plt.axis('equal')
N_roots = np.array([0.0])
if M > 0:
N_roots = np.roots(b)
D_roots = np.array([0.0])
if N > 0:
D_roots = np.roots(a)
if auto_scale:
size[0] = min(np.min(np.real(N_roots)),np.min(np.real(D_roots)))-0.5
size[1] = max(np.max(np.real(N_roots)),np.max(np.real(D_roots)))+0.5
size[1] = max(size[1],0.5)
size[2] = min(np.min(np.imag(N_roots)),np.min(np.imag(D_roots)))-0.5
size[3] = max(np.max(np.imag(N_roots)),np.max(np.imag(D_roots)))+0.5
plt.plot([size[0],size[1]],[0,0],'k--')
plt.plot([0,0],[size[2],size[3]],'r--')
# Plot labels if multiplicity greater than 1
x_scale = size[1]-size[0]
y_scale = size[3]-size[2]
x_off = 0.03
y_off = 0.01
if M > 0:
#N_roots = np.roots(b)
N_uniq, N_mult=signal.unique_roots(N_roots,tol=1e-3, rtype='avg')
plt.plot(np.real(N_uniq),np.imag(N_uniq),'ko',mfc='None',ms=8)
idx_N_mult = np.nonzero(np.ravel(N_mult>1))[0]
for k in range(len(idx_N_mult)):
x_loc = np.real(N_uniq[idx_N_mult[k]]) + x_off*x_scale
y_loc =np.imag(N_uniq[idx_N_mult[k]]) + y_off*y_scale
plt.text(x_loc,y_loc,str(N_mult[idx_N_mult[k]]),ha='center',va='bottom',fontsize=10)
if N > 0:
#D_roots = np.roots(a)
D_uniq, D_mult=signal.unique_roots(D_roots,tol=1e-3, rtype='avg')
plt.plot(np.real(D_uniq),np.imag(D_uniq),'kx',ms=8)
idx_D_mult = np.nonzero(np.ravel(D_mult>1))[0]
for k in range(len(idx_D_mult)):
x_loc = np.real(D_uniq[idx_D_mult[k]]) + x_off*x_scale
y_loc =np.imag(D_uniq[idx_D_mult[k]]) + y_off*y_scale
plt.text(x_loc,y_loc,str(D_mult[idx_D_mult[k]]),ha='center',va='bottom',fontsize=10)
plt.xlabel('Real Part')
plt.ylabel('Imaginary Part')
plt.title('Pole-Zero Plot')
#plt.grid()
plt.axis(np.array(size))
return M,N
def os_filter(x, h, N, mode=0):
"""
Overlap and save transform domain FIR filtering.
This function implements the classical overlap and save method of
transform domain filtering using a length P FIR filter.
Parameters
----------
x : input signal to be filtered as an ndarray
h : FIR filter coefficients as an ndarray of length P
N : FFT size > P, typically a power of two
mode : 0 or 1, when 1 returns a diagnostic matrix
Returns
-------
y : the filtered output as an ndarray
y_mat : an ndarray whose rows are the individual overlap outputs.
Notes
-----
y_mat is used for diagnostics and to gain understanding of the algorithm.
Examples
--------
>>> from numpy import arange, cos, pi, ones
>>> n = arange(0,100)
>>> x = cos(2*pi*0.05*n)
    >>> h = ones(10)
    >>> N = 32
    >>> y = os_filter(x,h,N)
>>> # set mode = 1
>>> y, y_mat = os_filter(x,h,N,1)
"""
P = len(h)
# zero pad start of x so first frame can recover first true samples of x
x = np.hstack((np.zeros(P-1),x))
L = N - P + 1
Nx = len(x)
Nframe = int(np.ceil(Nx/float(L)))
# zero pad end of x to full number of frames needed
x = np.hstack((x,np.zeros(Nframe*L-Nx)))
y = np.zeros(int(Nframe*N))
# create an instrumentation matrix to observe the overlap and save behavior
y_mat = np.zeros((Nframe,int(Nframe*N)))
H = fft.fft(h,N)
# begin the filtering operation
for k in range(Nframe):
xk = x[k*L:k*L+N]
Xk = fft.fft(xk,N)
Yk = H*Xk
yk = np.real(fft.ifft(Yk)) # imag part should be zero
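        # overlap and save: discard the first P-1 (circularly wrapped) samples
        # of each block and keep only the last L valid output samples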
y[k*L+P-1:k*L+N] = yk[P-1:]
y_mat[k,k*L:k*L+N] = yk
if mode == 1:
return y[P-1:Nx], y_mat[:,P-1:Nx]
else:
return y[P-1:Nx]
def oa_filter(x, h, N, mode=0):
"""
Overlap and add transform domain FIR filtering.
This function implements the classical overlap and add method of
transform domain filtering using a length P FIR filter.
Parameters
----------
x : input signal to be filtered as an ndarray
h : FIR filter coefficients as an ndarray of length P
N : FFT size > P, typically a power of two
mode : 0 or 1, when 1 returns a diagnostic matrix
Returns
-------
y : the filtered output as an ndarray
y_mat : an ndarray whose rows are the individual overlap outputs.
Notes
-----
y_mat is used for diagnostics and to gain understanding of the algorithm.
Examples
--------
>>> import numpy as np
>>> from sk_dsp_comm.sigsys import oa_filter
>>> n = np.arange(0,100)
>>> x = np.cos(2*np.pi*0.05*n)
    >>> h = np.ones(10)
    >>> N = 32
    >>> y = oa_filter(x,h,N)
>>> # set mode = 1
>>> y, y_mat = oa_filter(x,h,N,1)
"""
P = len(h)
L = int(N) - P + 1 # need N >= L + P -1
Nx = len(x)
Nframe = int(np.ceil(Nx/float(L)))
# zero pad to full number of frames needed
x = np.hstack((x,np.zeros(Nframe*L-Nx)))
y = np.zeros(Nframe*N)
# create an instrumentation matrix to observe the overlap and add behavior
y_mat = np.zeros((Nframe,Nframe*N))
H = fft.fft(h,N)
# begin the filtering operation
for k in range(Nframe):
xk = x[k*L:(k+1)*L]
Xk = fft.fft(xk,N)
Yk = H*Xk
yk = np.real(fft.ifft(Yk))
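        # overlap and add: accumulate the full N-point block into the output,
        # letting the trailing P-1 samples overlap the next block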
y[k*L:k*L+N] += yk
y_mat[k,k*L:k*L+N] = yk
if mode == 1:
return y[0:Nx], y_mat[:,0:Nx]
else:
return y[0:Nx]
def lp_samp(fb,fs,fmax,N,shape='tri',fsize=(6,4)):
"""
Lowpass sampling theorem plotting function.
Display the spectrum of a sampled signal after setting the bandwidth,
sampling frequency, maximum display frequency, and spectral shape.
Parameters
----------
fb : spectrum lowpass bandwidth in Hz
fs : sampling frequency in Hz
fmax : plot over [-fmax,fmax]
shape : 'tri' or 'line'
N : number of translates, N positive and N negative
fsize : the size of the figure window, default (6,4)
Returns
-------
Nothing : A plot window opens containing the spectrum plot
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sk_dsp_comm.sigsys import lp_samp
    No aliasing since the bandwidth 10 Hz < 25/2 Hz, i.e., fs > 2*fb.
>>> lp_samp(10,25,50,10)
>>> plt.show()
    Aliasing occurs since the bandwidth 15 Hz > 25/2 Hz, i.e., fs < 2*fb.
>>> lp_samp(15,25,50,10)
"""
plt.figure(figsize=fsize)
# define the plot interval
f = np.arange(-fmax,fmax+fmax/200.,fmax/200.)
A = 1.0
line_ampl = A/2.*np.array([0, 1])
# plot the lowpass spectrum in black
shapes = ['tri', 'line']
if shape.lower() not in shapes:
raise ValueError('shape must be tri or line')
if shape.lower() == 'tri':
plt.plot(f,lp_tri(f,fb))
# overlay positive and negative frequency translates
for n in range(N):
plt.plot(f, lp_tri(f - (n + 1) * fs, fb), '--r')
plt.plot(f, lp_tri(f + (n + 1) * fs, fb), '--g')
elif shape.lower() == 'line':
plt.plot([fb, fb],line_ampl,'b', linewidth=2)
plt.plot([-fb, -fb],line_ampl,'b', linewidth=2)
# overlay positive and negative frequency translates
for n in range(N):
plt.plot([fb+(n+1)*fs, fb+(n+1)*fs],line_ampl,'--r', linewidth=2)
plt.plot([-fb+(n+1)*fs, -fb+(n+1)*fs],line_ampl,'--r', linewidth=2)
plt.plot([fb-(n+1)*fs, fb-(n+1)*fs],line_ampl,'--g', linewidth=2)
plt.plot([-fb-(n+1)*fs, -fb-(n+1)*fs],line_ampl,'--g', linewidth=2)
plt.ylabel('Spectrum Magnitude')
plt.xlabel('Frequency in Hz')
plt.axis([-fmax,fmax,0,1])
plt.grid()
def lp_tri(f, fb):
"""
Triangle spectral shape function used by :func:`lp_samp`.
Parameters
----------
f : ndarray containing frequency samples
fb : the bandwidth as a float constant
Returns
-------
x : ndarray of spectrum samples for a single triangle shape
Notes
-----
This is a support function for the lowpass spectrum plotting function
:func:`lp_samp`.
Examples
--------
    >>> import numpy as np
    >>> f = np.arange(-40, 40, 0.1)
    >>> x = lp_tri(f, 10)
"""
x = np.zeros(len(f))
for k in range(len(f)):
if abs(f[k]) <= fb:
x[k] = 1 - abs(f[k])/float(fb)
return x
def sinusoid_awgn(x, SNRdB):
"""
Add white Gaussian noise to a single real sinusoid.
Input a single sinusoid to this function and it returns a noisy
sinusoid at a specific SNR value in dB. Sinusoid power is calculated
using np.var.
Parameters
----------
x : Input signal as ndarray consisting of a single sinusoid
SNRdB : SNR in dB for output sinusoid
Returns
-------
y : Noisy sinusoid return vector
Examples
--------
    >>> import numpy as np
    >>> # set the SNR to 10 dB
    >>> n = np.arange(0,10000)
    >>> x = np.cos(2*np.pi*0.04*n)
    >>> y = sinusoid_awgn(x,10.0)
"""
# Estimate signal power
x_pwr = np.var(x)
# Create noise vector
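    # Scale the noise so that 10*log10(x_pwr/noise_pwr) = SNRdB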
noise = np.sqrt(x_pwr/10**(SNRdB/10.))*np.random.randn(len(x));
return x + noise
def simple_quant(x, b_tot, x_max, limit):
"""
A simple rounding quantizer for bipolar signals having Btot = B + 1 bits.
This function models a quantizer that employs Btot bits that has one of
three selectable limiting types: saturation, overflow, and none.
The quantizer is bipolar and implements rounding.
Parameters
----------
x : input signal ndarray to be quantized
b_tot : total number of bits in the quantizer, e.g. 16
x_max : quantizer full-scale dynamic range is [-Xmax, Xmax]
    limit : limiting of the form 'sat', 'over', 'none'
Returns
-------
xq : quantized output ndarray
Notes
-----
    The quantization error can be formed as e = xq - x
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from matplotlib.mlab import psd
>>> import numpy as np
>>> from sk_dsp_comm import sigsys as ss
>>> n = np.arange(0,10000)
>>> x = np.cos(2*np.pi*0.211*n)
>>> y = ss.sinusoid_awgn(x,90)
>>> Px, f = psd(y,2**10,Fs=1)
>>> plt.plot(f, 10*np.log10(Px))
>>> plt.ylim([-80, 25])
>>> plt.ylabel("Power Spectral Density (dB)")
>>> plt.xlabel(r'Normalized Frequency $\omega/2\pi$')
>>> plt.show()
>>> yq = ss.simple_quant(y,12,1,'sat')
>>> Px, f = psd(yq,2**10,Fs=1)
>>> plt.plot(f, 10*np.log10(Px))
>>> plt.ylim([-80, 25])
>>> plt.ylabel("Power Spectral Density (dB)")
>>> plt.xlabel(r'Normalized Frequency $\omega/2\pi$')
>>> plt.show()
"""
B = b_tot - 1
x = x / x_max
if limit.lower() == 'over':
xq = (np.mod(np.round(x*2**B) + 2 ** B, 2 ** b_tot) - 2 ** B) / 2 ** B
elif limit.lower() == 'sat':
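        # saturate: clip codes that fall outside the [0, 2**b_tot - 1] range
        # after offsetting by 2**B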
xq = np.round(x*2**B)+2**B
s1 = np.nonzero(np.ravel(xq >= 2 ** b_tot - 1))[0]
s2 = np.nonzero(np.ravel(xq < 0))[0]
xq[s1] = (2 ** b_tot - 1) * np.ones(len(s1))
xq[s2] = np.zeros(len(s2))
xq = (xq - 2**B)/2**B
elif limit.lower() == 'none':
xq = np.round(x*2**B)/2**B
else:
raise ValueError('limit must be the string over, sat, or none')
return xq * x_max
def prin_alias(f_in,fs):
"""
    Calculate the principal alias frequencies.
    Given an array of input frequencies the function returns an
    array of principal alias frequencies.
Parameters
----------
f_in : ndarray of input frequencies
fs : sampling frequency
Returns
-------
    f_out : ndarray of principal alias frequencies
Examples
--------
>>> # Linear frequency sweep from 0 to 50 Hz
>>> f_in = arange(0,50,0.1)
    >>> # Calculate principal alias with fs = 10 Hz
>>> f_out = prin_alias(f_in,10)
"""
return abs(np.rint(f_in/fs)*fs - f_in)
"""
    Principal alias via recursion
f_out = np.copy(f_in)
for k in range(len(f_out)):
while f_out[k] > fs/2.:
f_out[k] = abs(f_out[k] - fs)
return f_out
"""
def cascade_filters(b1,a1,b2,a2):
"""
Cascade two IIR digital filters into a single (b,a) coefficient set.
To cascade two digital filters (system functions) given their numerator
and denominator coefficients you simply convolve the coefficient arrays.
Parameters
----------
b1 : ndarray of numerator coefficients for filter 1
a1 : ndarray of denominator coefficients for filter 1
b2 : ndarray of numerator coefficients for filter 2
a2 : ndarray of denominator coefficients for filter 2
Returns
-------
b : ndarray of numerator coefficients for the cascade
a : ndarray of denominator coefficients for the cascade
Examples
--------
>>> from scipy import signal
>>> b1,a1 = signal.butter(3, 0.1)
>>> b2,a2 = signal.butter(3, 0.15)
>>> b,a = cascade_filters(b1,a1,b2,a2)
"""
return signal.convolve(b1,b2), signal.convolve(a1,a2)
def soi_snoi_gen(s,SIR_dB,N,fi,fs = 8000):
"""
    Add interfering sinusoidal tones to the input signal at a given SIR_dB.
    The input is the signal of interest (SOI) and a number of sinusoid signals
    not of interest (SNOI) are added to the SOI at a prescribed signal-to-
    interference (SIR) level in dB.
Parameters
----------
s : ndarray of signal of SOI
SIR_dB : interference level in dB
N : Trim input signal s to length N + 1 samples
    fi : ndarray of interference frequencies in Hz
fs : sampling rate in Hz, default is 8000 Hz
Returns
-------
    r : ndarray of combined signal plus interference of length N+1 samples
Examples
--------
>>> # load a speech ndarray and trim to 5*8000 + 1 samples
>>> fs,s = from_wav('OSR_us_000_0030_8k.wav')
>>> r = soi_snoi_gen(s,10,5*8000,[1000, 1500])
"""
n = np.arange(0,N+1)
K = len(fi)
si = np.zeros(N+1)
for k in range(K):
si += np.cos(2*np.pi*fi[k]/fs*n);
s = s[:N+1]
Ps = np.var(s)
Psi = np.var(si)
r = s + np.sqrt(Ps/Psi*10**(-SIR_dB/10))*si
return r
def lms_ic(r,M,mu,delta=1):
"""
Least mean square (LMS) interference canceller adaptive filter.
A complete LMS adaptive filter simulation function for the case of
interference cancellation. Used in the digital filtering case study.
Parameters
----------
    r : ndarray of the noisy (with interference) input signal
    M : FIR Filter length (order M-1)
    mu : LMS step-size
    delta : delay used to generate the decorrelated reference signal (default 1)
Returns
-------
n : ndarray Index vector
r : ndarray noisy (with interference) input signal
r_hat : ndarray filtered output (NB_hat[n])
e : ndarray error sequence (WB_hat[n])
ao : ndarray final value of weight vector
F : ndarray frequency response axis vector
Ao : ndarray frequency response of filter
Examples
----------
>>> # import a speech signal
>>> fs,s = from_wav('OSR_us_000_0030_8k.wav')
>>> # add interference at 1kHz and 1.5 kHz and
>>> # truncate to 5 seconds
>>> r = soi_snoi_gen(s,10,5*8000,[1000, 1500])
>>> # simulate with a 64 tap FIR and mu = 0.005
>>> n,r,r_hat,e,ao,F,Ao = lms_ic(r,64,0.005)
"""
N = len(r)-1;
# Form the reference signal y via delay delta
y = signal.lfilter(np.hstack((np.zeros(delta), np.array([1]))),1,r)
# Initialize output vector x_hat to zero
r_hat = np.zeros(N+1)
# Initialize error vector e to zero
e = np.zeros(N+1)
# Initialize weight vector to zero
ao = np.zeros(M+1)
# Initialize filter memory to zero
z = np.zeros(M)
# Initialize a vector for holding ym of length M+1
ym = np.zeros(M+1)
for k in range(N+1):
# Filter one sample at a time
r_hat[k],z = signal.lfilter(ao,np.array([1]),np.array([y[k]]),zi=z)
# Form the error sequence
e[k] = r[k] - r_hat[k]
# Update the weight vector
ao = ao + 2*mu*e[k]*ym
# Update vector used for correlation with e(k)
ym = np.hstack((np.array([y[k]]), ym[:-1]))
# Create filter frequency response
F, Ao = signal.freqz(ao,1,1024)
F/= (2*np.pi)
Ao = 20*np.log10(abs(Ao))
return np.arange(0,N+1), r, r_hat, e, ao, F, Ao
def fir_iir_notch(fi,fs,r=0.95):
"""
Design a second-order FIR or IIR notch filter.
A second-order FIR notch filter is created by placing conjugate
    zeros on the unit circle at an angle corresponding to the notch center
frequency. The IIR notch variation places a pair of conjugate poles
at the same angle, but with radius r < 1 (typically 0.9 to 0.95).
Parameters
----------
    fi : notch frequency in Hz relative to fs
fs : the sampling frequency in Hz, e.g. 8000
r : pole radius for IIR version, default = 0.95
Returns
-------
b : numerator coefficient ndarray
a : denominator coefficient ndarray
Notes
-----
If the pole radius is 0 then an FIR version is created, that is
there are no poles except at z = 0.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sk_dsp_comm import sigsys as ss
>>> b_FIR, a_FIR = ss.fir_iir_notch(1000,8000,0)
>>> ss.zplane(b_FIR, a_FIR)
>>> plt.show()
>>> b_IIR, a_IIR = ss.fir_iir_notch(1000,8000)
>>> ss.zplane(b_IIR, a_IIR)
"""
w0 = 2*np.pi*fi/float(fs)
if r >= 1:
raise ValueError('Poles on or outside unit circle.')
elif r == 0:
a = np.array([1.0])
else:
a = np.array([1, -2*r*np.cos(w0), r**2])
b = np.array([1, -2*np.cos(w0), 1])
return b, a
def simple_sa(x, NS, NFFT, fs, NAVG=1, window='boxcar'):
"""
Spectral estimation using windowing and averaging.
    This function implements averaged periodogram spectral
    estimation similar to matplotlib's psd() function, but more
    specialized for the windowing case study of Chapter 16.
Parameters
----------
x : ndarray containing the input signal
NS : The subrecord length less zero padding, e.g. NS < NFFT
NFFT : FFT length, e.g., 1024 = 2**10
fs : sampling rate in Hz
NAVG : the number of averages, e.g., 1 for deterministic signals
window : hardcoded window 'boxcar' (default) or 'hanning'
Returns
-------
f : ndarray frequency axis in Hz on [0, fs/2]
Sx : ndarray the power spectrum estimate
Notes
-----
The function also prints the maximum number of averages K possible
for the input data record.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from sk_dsp_comm import sigsys as ss
>>> n = np.arange(0,2048)
>>> x = np.cos(2*np.pi*1000/10000*n) + 0.01*np.cos(2*np.pi*3000/10000*n)
>>> f, Sx = ss.simple_sa(x,128,512,10000)
>>> plt.plot(f, 10*np.log10(Sx))
>>> plt.ylim([-80, 0])
>>> plt.xlabel("Frequency (Hz)")
>>> plt.ylabel("Power Spectral Density (dB)")
>>> plt.show()
With a hanning window.
>>> f, Sx = ss.simple_sa(x,256,1024,10000,window='hanning')
>>> plt.plot(f, 10*np.log10(Sx))
>>> plt.xlabel("Frequency (Hz)")
>>> plt.ylabel("Power Spectral Density (dB)")
>>> plt.ylim([-80, 0])
"""
Nx = len(x)
K = int(Nx/NS)
    log.info('K = %d' % K)
if NAVG > K:
warnings.warn('NAVG exceeds number of available subrecords')
return 0,0
if window.lower() == 'boxcar' or window.lower() == 'rectangle':
        w = signal.windows.boxcar(NS)
elif window.lower() == 'hanning':
        w = signal.windows.hann(NS)
xsw = np.zeros((K,NS)) + 1j*np.zeros((K,NS))
for k in range(NAVG):
xsw[k,] = w*x[k*NS:(k+1)*NS]
Sx = np.zeros(NFFT)
for k in range(NAVG):
X = fft.fft(xsw[k,],NFFT)
Sx += abs(X)**2
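    # Average the NAVG periodograms and normalize by NFFT**2 to form the estimate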
Sx /= float(NAVG)
Sx /= float(NFFT**2)
NFFTby2 = int(NFFT/2)
if x.dtype != 'complex128':
n = np.arange(NFFTby2)
f = fs*n/float(NFFT)
Sx = Sx[0:NFFTby2]
else:
n = np.arange(NFFTby2)
f = fs*np.hstack((np.arange(-NFFTby2,0),np.arange(NFFTby2)))/float(NFFT)
Sx = np.hstack((Sx[NFFTby2:],Sx[0:NFFTby2]))
return f, Sx
def line_spectra(fk,Xk,mode,sides=2,linetype='b',lwidth=2,floor_dB=-100,fsize=(6,4)):
"""
    Plot the Fourier series line spectra given the coefficients.
This function plots two-sided and one-sided line spectra of a periodic
signal given the complex exponential Fourier series coefficients and
the corresponding harmonic frequencies.
Parameters
----------
fk : vector of real sinusoid frequencies
Xk : magnitude and phase at each positive frequency in fk
mode : 'mag' => magnitude plot, 'magdB' => magnitude in dB plot,
mode cont : 'magdBn' => magnitude in dB normalized, 'phase' => a phase plot in radians
sides : 2; 2-sided or 1-sided
linetype : line type per Matplotlib definitions, e.g., 'b';
lwidth : 2; linewidth in points
fsize : optional figure size in inches, default = (6,4) inches
Returns
-------
Nothing : A plot window opens containing the line spectrum plot
Notes
-----
Since real signals are assumed the frequencies of fk are 0 and/or positive
numbers. The supplied Fourier coefficients correspond.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from sk_dsp_comm.sigsys import line_spectra
>>> n = np.arange(0,25)
>>> # a pulse train with 10 Hz fundamental and 20% duty cycle
>>> fk = n*10
>>> Xk = np.sinc(n*10*.02)*np.exp(-1j*2*np.pi*n*10*.01) # 1j = sqrt(-1)
>>> line_spectra(fk,Xk,'mag')
>>> plt.show()
>>> line_spectra(fk,Xk,'phase')
"""
plt.figure(figsize=fsize)
# Eliminate zero valued coefficients
idx = np.nonzero(Xk)[0]
Xk = Xk[idx]
fk = fk[idx]
if mode == 'mag':
for k in range(len(fk)):
if fk[k] == 0 and sides == 2:
plt.plot([fk[k], fk[k]],[0, np.abs(Xk[k])],linetype, linewidth=lwidth)
elif fk[k] == 0 and sides == 1:
plt.plot([fk[k], fk[k]],[0, np.abs(Xk[k])],linetype, linewidth=2*lwidth)
elif fk[k] > 0 and sides == 2:
plt.plot([fk[k], fk[k]],[0, np.abs(Xk[k])],linetype, linewidth=lwidth)
plt.plot([-fk[k], -fk[k]],[0, np.abs(Xk[k])],linetype, linewidth=lwidth)
elif fk[k] > 0 and sides == 1:
plt.plot([fk[k], fk[k]],[0, 2.*np.abs(Xk[k])],linetype, linewidth=lwidth)
else:
warnings.warn('Invalid sides type')
plt.grid()
if sides == 2:
plt.axis([-1.2*max(fk), 1.2*max(fk), 0, 1.05*max(abs(Xk))])
elif sides == 1:
plt.axis([0, 1.2*max(fk), 0, 1.05*2*max(abs(Xk))])
else:
warnings.warn('Invalid sides type')
plt.ylabel('Magnitude')
plt.xlabel('Frequency (Hz)')
elif mode == 'magdB':
Xk_dB = 20*np.log10(np.abs(Xk))
for k in range(len(fk)):
if fk[k] == 0 and sides == 2:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth)
elif fk[k] == 0 and sides == 1:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=2*lwidth)
elif fk[k] > 0 and sides == 2:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth)
plt.plot([-fk[k], -fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth)
elif fk[k] > 0 and sides == 1:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]+6.02],linetype, linewidth=lwidth)
else:
warnings.warn('Invalid sides type')
plt.grid()
max_dB = np.ceil(max(Xk_dB/10.))*10
min_dB = max(floor_dB,np.floor(min(Xk_dB/10.))*10)
if sides == 2:
plt.axis([-1.2*max(fk), 1.2*max(fk), min_dB, max_dB])
elif sides == 1:
plt.axis([0, 1.2*max(fk), min_dB, max_dB])
else:
warnings.warn('Invalid sides type')
plt.ylabel('Magnitude (dB)')
plt.xlabel('Frequency (Hz)')
elif mode == 'magdBn':
Xk_dB = 20*np.log10(np.abs(Xk)/max(np.abs(Xk)))
for k in range(len(fk)):
if fk[k] == 0 and sides == 2:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth)
elif fk[k] == 0 and sides == 1:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=2*lwidth)
elif fk[k] > 0 and sides == 2:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth)
plt.plot([-fk[k], -fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth)
elif fk[k] > 0 and sides == 1:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]+6.02],linetype, linewidth=lwidth)
else:
warnings.warn('Invalid sides type')
plt.grid()
max_dB = np.ceil(max(Xk_dB/10.))*10
min_dB = max(floor_dB,np.floor(min(Xk_dB/10.))*10)
if sides == 2:
plt.axis([-1.2*max(fk), 1.2*max(fk), min_dB, max_dB])
elif sides == 1:
plt.axis([0, 1.2*max(fk), min_dB, max_dB])
else:
warnings.warn('Invalid sides type')
plt.ylabel('Normalized Magnitude (dB)')
plt.xlabel('Frequency (Hz)')
elif mode == 'phase':
for k in range(len(fk)):
if fk[k] == 0 and sides == 2:
plt.plot([fk[k], fk[k]],[0, np.angle(Xk[k])],linetype, linewidth=lwidth)
elif fk[k] == 0 and sides == 1:
plt.plot([fk[k], fk[k]],[0, np.angle(Xk[k])],linetype, linewidth=2*lwidth)
elif fk[k] > 0 and sides == 2:
plt.plot([fk[k], fk[k]],[0, np.angle(Xk[k])],linetype, linewidth=lwidth)
plt.plot([-fk[k], -fk[k]],[0, -np.angle(Xk[k])],linetype, linewidth=lwidth)
elif fk[k] > 0 and sides == 1:
plt.plot([fk[k], fk[k]],[0, np.angle(Xk[k])],linetype, linewidth=lwidth)
else:
warnings.warn('Invalid sides type')
plt.grid()
if sides == 2:
plt.plot([-1.2*max(fk), 1.2*max(fk)], [0, 0],'k')
plt.axis([-1.2*max(fk), 1.2*max(fk), -1.1*max(np.abs(np.angle(Xk))), 1.1*max(np.abs(np.angle(Xk)))])
elif sides == 1:
plt.plot([0, 1.2*max(fk)], [0, 0],'k')
plt.axis([0, 1.2*max(fk), -1.1*max(np.abs(np.angle(Xk))), 1.1*max(np.abs(np.angle(Xk)))])
else:
warnings.warn('Invalid sides type')
plt.ylabel('Phase (rad)')
plt.xlabel('Frequency (Hz)')
else:
warnings.warn('Invalid mode type')
def fs_coeff(xp,N,f0,one_side=True):
"""
Numerically approximate the Fourier series coefficients given periodic x(t).
    The input is assumed to represent one period of the waveform
x(t) that has been uniformly sampled. The number of samples supplied
to represent one period of the waveform sets the sampling rate.
Parameters
----------
xp : ndarray of one period of the waveform x(t)
N : maximum Fourier series coefficient, [0,...,N]
f0 : fundamental frequency used to form fk.
Returns
-------
Xk : ndarray of the coefficients over indices [0,1,...,N]
fk : ndarray of the harmonic frequencies [0, f0,2f0,...,Nf0]
Notes
-----
len(xp) >= 2*N+1 as len(xp) is the fft length.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from numpy import arange
>>> import sk_dsp_comm.sigsys as ss
>>> t = arange(0,1,1/1024.)
>>> # a 20% duty cycle pulse starting at t = 0
>>> x_rect = ss.rect(t-.1,0.2)
>>> Xk, fk = ss.fs_coeff(x_rect,25,10)
>>> # plot the spectral lines
>>> ss.line_spectra(fk,Xk,'mag')
>>> plt.show()
"""
Nint = len(xp)
if Nint < 2*N+1:
raise ValueError('Number of samples in xp insufficient for requested N.')
Xp = fft.fft(xp,Nint)/float(Nint)
# To interface with the line_spectra function use one_side mode
if one_side:
Xk = Xp[0:N+1]
fk = f0*np.arange(0,N+1)
else:
Xk = np.hstack((Xp[-N:],Xp[0:N+1]))
fk = f0*np.arange(-N,N+1)
return Xk, fk
def fs_approx(Xk,fk,t):
"""
Synthesize periodic signal x(t) using Fourier series coefficients at harmonic frequencies
Assume the signal is real so coefficients Xk are supplied for nonnegative
indicies. The negative index coefficients are assumed to be complex
conjugates.
Parameters
----------
Xk : ndarray of complex Fourier series coefficients
fk : ndarray of harmonic frequencies in Hz
t : ndarray time axis corresponding to output signal array x_approx
Returns
-------
x_approx : ndarray of periodic waveform approximation over time span t
Examples
--------
    >>> import matplotlib.pyplot as plt
    >>> from numpy import arange
    >>> t = arange(0,2,.002)
    >>> # a 20% duty cycle pulse train with a 1 s period
    >>> n = arange(0,20,1) # 0 to 19th harmonic
    >>> fk = 1*n # harmonic frequencies for a 1 s period
    >>> # Xk holds the corresponding Fourier coefficients, e.g., from fs_coeff()
    >>> x_approx = fs_approx(Xk,fk,t)
    >>> plt.plot(t,x_approx)
"""
x_approx = np.zeros(len(t))
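    # DC term contributes Xk[0].real; each harmonic adds 2*|Xk|*cos(2*pi*fk*t + angle(Xk))
    # since the negative-frequency coefficients are the complex conjugates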
for k,Xkk in enumerate(Xk):
if fk[k] == 0:
x_approx += Xkk.real*np.ones(len(t))
else:
x_approx += 2*np.abs(Xkk)*np.cos(2*np.pi*fk[k]*t+np.angle(Xkk))
return x_approx
def ft_approx(x,t,Nfft):
'''
Approximate the Fourier transform of a finite duration signal using scipy.signal.freqz()
Parameters
----------
x : input signal array
t : time array used to create x(t)
    Nfft : the number of frequency domain points used to
           approximate X(f) on the interval [-fs/2,fs/2], where
           fs = 1/Dt, Dt being the time spacing in array t
Returns
-------
f : frequency axis array in Hz
X : the Fourier transform approximation (complex)
Notes
-----
    The transform is approximated over the frequency interval [-fs/2, fs/2),
    where fs = 1/Dt and Dt is the spacing of the time array t. The DTFT of x is
    evaluated with signal.freqz(), scaled by Dt to approximate the continuous-time
    integral, and phase corrected to account for the time origin of the array t.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> import sk_dsp_comm.sigsys as ss
>>> fs = 100 # sampling rate in Hz
>>> tau = 1
>>> t = np.arange(-5,5,1/fs)
>>> x0 = ss.rect(t-.5,tau)
>>> plt.figure(figsize=(6,5))
>>> plt.plot(t,x0)
>>> plt.grid()
>>> plt.ylim([-0.1,1.1])
>>> plt.xlim([-2,2])
>>> plt.title(r'Exact Waveform')
>>> plt.xlabel(r'Time (s)')
>>> plt.ylabel(r'$x_0(t)$')
>>> plt.show()
>>> # FT Exact Plot
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> import sk_dsp_comm.sigsys as ss
>>> fs = 100 # sampling rate in Hz
>>> tau = 1
>>> t = np.arange(-5,5,1/fs)
>>> x0 = ss.rect(t-.5,tau)
>>> fe = np.arange(-10,10,.01)
>>> X0e = tau*np.sinc(fe*tau)
>>> plt.plot(fe,abs(X0e))
>>> #plot(f,angle(X0))
>>> plt.grid()
>>> plt.xlim([-10,10])
>>> plt.title(r'Exact (Theory) Spectrum Magnitude')
>>> plt.xlabel(r'Frequency (Hz)')
>>> plt.ylabel(r'$|X_0e(f)|$')
>>> plt.show()
>>> # FT Approximation Plot
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> import sk_dsp_comm.sigsys as ss
>>> fs = 100 # sampling rate in Hz
>>> tau = 1
>>> t = np.arange(-5,5,1/fs)
>>> x0 = ss.rect(t-.5,tau)
>>> f,X0 = ss.ft_approx(x0,t,4096)
>>> plt.plot(f,abs(X0))
>>> #plt.plot(f,angle(X0))
>>> plt.grid()
>>> plt.xlim([-10,10])
>>> plt.title(r'Approximation Spectrum Magnitude')
>>> plt.xlabel(r'Frequency (Hz)')
>>> plt.ylabel(r'$|X_0(f)|$');
>>> plt.tight_layout()
>>> plt.show()
'''
fs = 1/(t[1] - t[0])
t0 = (t[-1]+t[0])/2 # time delay at center
N0 = len(t)/2 # FFT center in samples
f = np.arange(-1./2,1./2,1./Nfft)
w, X = signal.freqz(x,1,2*np.pi*f)
X /= fs # account for dt = 1/fs in integral
X *= np.exp(-1j*2*np.pi*f*fs*t0)# time interval correction
X *= np.exp(1j*2*np.pi*f*N0)# FFT time interval is [0,Nfft-1]
F = f*fs
return F, X
def conv_sum(x1,nx1,x2,nx2,extent=('f','f')):
"""
Discrete convolution of x1 and x2 with proper tracking of the output time axis.
    Convolve two discrete-time signals using the SciPy function :func:`scipy.signal.convolve`.
    The time (sequence) axes are managed from input to output. y[n] = x1[n]*x2[n].
Parameters
----------
x1 : ndarray of signal x1 corresponding to nx1
nx1 : ndarray time axis for x1
x2 : ndarray of signal x2 corresponding to nx2
nx2 : ndarray time axis for x2
extent : ('e1','e2') where 'e1', 'e2' may be 'f' finite, 'r' right-sided, or 'l' left-sided
Returns
-------
y : ndarray of output values y
ny : ndarray of the corresponding sequence index n
Notes
-----
The output time axis starts at the sum of the starting values in x1 and x2
and ends at the sum of the two ending values in x1 and x2. The default
extents of ('f','f') are used for signals that are active (have support)
on or within n1 and n2 respectively. A right-sided signal such as
a^n*u[n] is semi-infinite, so it has extent 'r' and the
convolution output will be truncated to display only the valid results.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> import sk_dsp_comm.sigsys as ss
>>> nx = np.arange(-5,10)
>>> x = ss.drect(nx,4)
>>> y,ny = ss.conv_sum(x,nx,x,nx)
>>> plt.stem(ny,y)
>>> plt.show()
Consider a pulse convolved with an exponential. ('r' type extent)
>>> h = 0.5**nx*ss.dstep(nx)
>>> y,ny = ss.conv_sum(x,nx,h,nx,('f','r')) # note extents set
>>> plt.stem(ny,y) # expect a pulse charge and discharge sequence
"""
nnx1 = np.arange(0,len(nx1))
nnx2 = np.arange(0,len(nx2))
n1 = nnx1[0]
n2 = nnx1[-1]
n3 = nnx2[0]
n4 = nnx2[-1]
    # Start by finding the valid output support or extent interval to ensure that
    # for non-finite extent signals ambiguous results are not returned.
# Valid extents are f (finite), r (right-sided), and l (left-sided)
if extent[0] == 'f' and extent[1] == 'f':
nny = np.arange(n1+n3,n2+1+n4+1-1)
ny = np.arange(0,len(x1)+len(x2)-1) + nx1[0]+nx2[0]
elif extent[0] == 'f' and extent[1] == 'r':
nny = np.arange(n1+n3,n1+1+n4+1-1)
ny = nny + nx1[0]+nx2[0]
elif extent[0] == 'r' and extent[1] == 'f':
nny = np.arange(n1+n3,n2+1+n3+1-1)
ny = nny + nx1[0]+nx2[0]
elif extent[0] == 'f' and extent[1] == 'l':
nny = np.arange(n2+n3,n2+1+n4+1-1)
ny = nny + nx1[-1]+nx2[0]
elif extent[0] == 'l' and extent[1] == 'f':
nny = np.arange(n1+n4,n2+1+n4+1-1)
ny = nny + nx1[0]+nx2[-1]
elif extent[0] == 'r' and extent[1] == 'r':
nny = np.arange(n1+n3,min(n1+1+n4+1,n2+1+n3+1)-1)
ny = nny + nx1[0]+nx2[0]
elif extent[0] == 'l' and extent[1] == 'l':
nny = np.arange(max(n1+n4,n2+n3),n2+1+n4+1-1)
ny = nny + max(nx1[0]+nx2[-1],nx1[-1]+nx2[0])
else:
raise ValueError('Invalid x1 x2 extents specified or valid extent not found!')
# Finally convolve the sequences
y = signal.convolve(x1, x2)
log.info('Output support: (%+d, %+d)' % (ny[0],ny[-1]))
return y[nny], ny
def conv_integral(x1,tx1,x2,tx2,extent=('f','f')):
"""
Continuous-time convolution of x1 and x2 with proper tracking of the output time axis.
    Approximate the convolution integral for the convolution of two continuous-time signals using the SciPy function :func:`scipy.signal.convolve`. The time axes are managed from input to output. y(t) = x1(t)*x2(t).
Parameters
----------
x1 : ndarray of signal x1 corresponding to tx1
tx1 : ndarray time axis for x1
x2 : ndarray of signal x2 corresponding to tx2
tx2 : ndarray time axis for x2
extent : ('e1','e2') where 'e1', 'e2' may be 'f' finite, 'r' right-sided, or 'l' left-sided
Returns
-------
y : ndarray of output values y
ty : ndarray of the corresponding time axis for y
Notes
-----
The output time axis starts at the sum of the starting values in x1 and x2
and ends at the sum of the two ending values in x1 and x2. The time steps used in
x1(t) and x2(t) must match. The default extents of ('f','f') are used for signals
that are active (have support) on or within t1 and t2 respectively. A right-sided
signal such as exp(-a*t)*u(t) is semi-infinite, so it has extent 'r' and the
convolution output will be truncated to display only the valid results.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> import sk_dsp_comm.sigsys as ss
>>> tx = np.arange(-5,10,.01)
>>> x = ss.rect(tx-2,4) # pulse starts at t = 0
>>> y,ty = ss.conv_integral(x,tx,x,tx)
>>> plt.plot(ty,y) # expect a triangle on [0,8]
>>> plt.show()
Now, consider a pulse convolved with an exponential.
>>> h = 4*np.exp(-4*tx)*ss.step(tx)
>>> y,ty = ss.conv_integral(x,tx,h,tx,extent=('f','r')) # note extents set
>>> plt.plot(ty,y) # expect a pulse charge and discharge waveform
"""
dt = tx1[1] - tx1[0]
nx1 = np.arange(0,len(tx1))
nx2 = np.arange(0,len(tx2))
n1 = nx1[0]
n2 = nx1[-1]
n3 = nx2[0]
n4 = nx2[-1]
    # Start by finding the valid output support or extent interval to ensure that
    # for non-finite extent signals ambiguous results are not returned.
# Valid extents are f (finite), r (right-sided), and l (left-sided)
if extent[0] == 'f' and extent[1] == 'f':
ny = np.arange(n1+n3,n2+1+n4+1-1)
ty = np.arange(0,len(x1)+len(x2)-1)*dt + tx1[0]+tx2[0]
elif extent[0] == 'f' and extent[1] == 'r':
ny = np.arange(n1+n3,n1+1+n4+1-1)
ty = ny*dt + tx1[0]+tx2[0]
elif extent[0] == 'r' and extent[1] == 'f':
ny = np.arange(n1+n3,n2+1+n3+1-1)
ty = ny*dt + tx1[0]+tx2[0]
elif extent[0] == 'f' and extent[1] == 'l':
ny = np.arange(n2+n3,n2+1+n4+1-1)
ty = ny*dt + tx1[-1]+tx2[0]
elif extent[0] == 'l' and extent[1] == 'f':
ny = np.arange(n1+n4,n2+1+n4+1-1)
ty = ny*dt + tx1[0]+tx2[-1]
elif extent[0] == 'r' and extent[1] == 'r':
ny = np.arange(n1+n3,min(n1+1+n4+1,n2+1+n3+1)-1)
ty = ny*dt + tx1[0]+tx2[0]
elif extent[0] == 'l' and extent[1] == 'l':
ny = np.arange(max(n1+n4,n2+n3),n2+1+n4+1-1)
ty = ny*dt + max(tx1[0]+tx2[-1],tx1[-1]+tx2[0])
else:
raise ValueError('Invalid x1 x2 extents specified or valid extent not found!')
# Finally convolve the sampled sequences and scale by dt
y = signal.convolve(x1, x2)*dt
log.info('Output support: (%+2.2f, %+2.2f)' % (ty[0],ty[-1]))
return y[ny], ty
def delta_eps(t,eps):
"""
Rectangular pulse approximation to impulse function.
Parameters
----------
t : ndarray of time axis
eps : pulse width
Returns
-------
d : ndarray containing the impulse approximation
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from numpy import arange
>>> from sk_dsp_comm.sigsys import delta_eps
    >>> t = arange(-2,2,.001)
>>> d = delta_eps(t,.1)
>>> plt.plot(t,d)
>>> plt.show()
"""
d = np.zeros(len(t))
for k,tt in enumerate(t):
if abs(tt) <= eps/2.:
d[k] = 1/float(eps)
return d
def step(t):
"""
Approximation to step function signal u(t).
In this numerical version of u(t) the step turns on at t = 0.
Parameters
----------
t : ndarray of the time axis
Returns
-------
x : ndarray of the step function signal u(t)
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from numpy import arange
>>> from sk_dsp_comm.sigsys import step
>>> t = arange(-1,5,.01)
>>> x = step(t)
>>> plt.plot(t,x)
>>> plt.ylim([-0.01, 1.01])
>>> plt.show()
To turn on at t = 1, shift t.
>>> x = step(t - 1.0)
>>> plt.ylim([-0.01, 1.01])
>>> plt.plot(t,x)
"""
x = np.zeros(len(t))
for k,tt in enumerate(t):
if tt >= 0:
x[k] = 1.0
return x
def rect(t,tau):
"""
Approximation to the rectangle pulse Pi(t/tau).
In this numerical version of Pi(t/tau) the pulse is active
over -tau/2 <= t <= tau/2.
Parameters
----------
t : ndarray of the time axis
tau : the pulse width
Returns
-------
x : ndarray of the signal Pi(t/tau)
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from numpy import arange
>>> from sk_dsp_comm.sigsys import rect
>>> t = arange(-1,5,.01)
>>> x = rect(t,1.0)
>>> plt.plot(t,x)
>>> plt.ylim([0, 1.01])
>>> plt.show()
To turn on the pulse at t = 1 shift t.
>>> x = rect(t - 1.0,1.0)
>>> plt.plot(t,x)
>>> plt.ylim([0, 1.01])
"""
x = np.zeros(len(t))
for k,tk in enumerate(t):
if np.abs(tk) > tau/2.:
x[k] = 0
else:
x[k] = 1
return x
def tri(t,tau):
"""
Approximation to the triangle pulse Lambda(t/tau).
In this numerical version of Lambda(t/tau) the pulse is active
over -tau <= t <= tau.
Parameters
----------
t : ndarray of the time axis
tau : one half the triangle base width
Returns
-------
x : ndarray of the signal Lambda(t/tau)
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from numpy import arange
>>> from sk_dsp_comm.sigsys import tri
>>> t = arange(-1,5,.01)
>>> x = tri(t,1.0)
>>> plt.plot(t,x)
>>> plt.show()
To turn on at t = 1, shift t.
>>> x = tri(t - 1.0,1.0)
>>> plt.plot(t,x)
"""
x = np.zeros(len(t))
for k,tk in enumerate(t):
if np.abs(tk) > tau/1.:
x[k] = 0
else:
x[k] = 1 - np.abs(tk)/tau
return x
def dimpulse(n):
"""
Discrete impulse function delta[n].
Parameters
----------
n : ndarray of the time axis
Returns
-------
x : ndarray of the signal delta[n]
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from numpy import arange
>>> from sk_dsp_comm.sigsys import dimpulse
>>> n = arange(-5,5)
>>> x = dimpulse(n)
>>> plt.stem(n,x)
>>> plt.show()
Shift the delta left by 2.
>>> x = dimpulse(n+2)
>>> plt.stem(n,x)
"""
x = np.zeros(len(n))
for k,nn in enumerate(n):
if nn == 0:
x[k] = 1.0
return x
def dstep(n):
"""
Discrete step function u[n].
Parameters
----------
n : ndarray of the time axis
Returns
-------
x : ndarray of the signal u[n]
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from numpy import arange
>>> from sk_dsp_comm.sigsys import dstep
>>> n = arange(-5,5)
>>> x = dstep(n)
>>> plt.stem(n,x)
>>> plt.show()
    Shift the step left by 2.
>>> x = dstep(n+2)
>>> plt.stem(n,x)
"""
x = np.zeros(len(n))
for k,nn in enumerate(n):
if nn >= 0:
x[k] = 1.0
return x
def drect(n,N):
"""
Discrete rectangle function of duration N samples.
The signal is active on the interval 0 <= n <= N-1. Also known
as the rectangular window function, which is available in
scipy.signal.
Parameters
----------
n : ndarray of the time axis
N : the pulse duration
Returns
-------
x : ndarray of the signal
Notes
-----
The discrete rectangle turns on at n = 0, off at n = N-1 and
has duration of exactly N samples.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from numpy import arange
>>> from sk_dsp_comm.sigsys import drect
>>> n = arange(-5,5)
>>> x = drect(n, N=3)
>>> plt.stem(n,x)
>>> plt.show()
    Shift the pulse left by 2.
>>> x = drect(n+2, N=3)
>>> plt.stem(n,x)
"""
x = np.zeros(len(n))
for k,nn in enumerate(n):
if nn >= 0 and nn < N:
x[k] = 1.0
return x
def rc_imp(Ns,alpha,M=6):
"""
A truncated raised cosine pulse used in digital communications.
The pulse shaping factor :math:`0< \\alpha < 1` is required as well as the
truncation factor M which sets the pulse duration to be 2*M*Tsymbol.
Parameters
----------
Ns : number of samples per symbol
alpha : excess bandwidth factor on (0, 1), e.g., 0.35
M : equals RC one-sided symbol truncation factor
Returns
-------
b : ndarray containing the pulse shape
Notes
-----
The pulse shape b is typically used as the FIR filter coefficients
when forming a pulse shaped digital communications waveform.
Examples
--------
Ten samples per symbol and alpha = 0.35.
>>> import matplotlib.pyplot as plt
>>> from numpy import arange
>>> from sk_dsp_comm.sigsys import rc_imp
>>> b = rc_imp(10,0.35)
>>> n = arange(-10*6,10*6+1)
>>> plt.stem(n,b)
>>> plt.show()
"""
# Design the filter
n = np.arange(-M*Ns,M*Ns+1)
b = np.zeros(len(n));
a = alpha;
Ns *= 1.0
for i in range(len(n)):
if (1 - 4*(a*n[i]/Ns)**2) == 0:
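            # use the limiting value at n = +/- Ns/(2*alpha) where the
            # raised cosine denominator goes to zero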
b[i] = np.pi/4*np.sinc(1/(2.*a))
else:
b[i] = np.sinc(n[i]/Ns)*np.cos(np.pi*a*n[i]/Ns)/(1 - 4*(a*n[i]/Ns)**2)
return b
def sqrt_rc_imp(Ns,alpha,M=6):
"""
A truncated square root raised cosine pulse used in digital communications.
The pulse shaping factor 0< alpha < 1 is required as well as the
truncation factor M which sets the pulse duration to be 2*M*Tsymbol.
Parameters
----------
Ns : number of samples per symbol
alpha : excess bandwidth factor on (0, 1), e.g., 0.35
M : equals RC one-sided symbol truncation factor
Returns
-------
b : ndarray containing the pulse shape
Notes
-----
    The pulse shape b is typically used as the FIR filter coefficients
    when forming a pulse shaped digital communications waveform. When the
    square root raised cosine (SRC) pulse is used to generate the Tx signal and
    again at the receiver as a matched filter (receiver FIR filter), the
    combined response is raised cosine shaped, giving zero
    intersymbol interference and optimum removal of additive white
    noise if present at the receiver input.
Examples
--------
>>> # ten samples per symbol and alpha = 0.35
>>> import matplotlib.pyplot as plt
>>> from numpy import arange
>>> from sk_dsp_comm.sigsys import sqrt_rc_imp
>>> b = sqrt_rc_imp(10,0.35)
>>> n = arange(-10*6,10*6+1)
>>> plt.stem(n,b)
>>> plt.show()
"""
# Design the filter
n = np.arange(-M*Ns,M*Ns+1)
b = np.zeros(len(n))
Ns *= 1.0
a = alpha
for i in range(len(n)):
        if abs(1 - 16*a**2*(n[i]/Ns)**2) <= np.finfo(float).eps/2:
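            # use the closed-form limiting value where the SRC denominator
            # 1 - 16*alpha**2*(n/Ns)**2 goes to zero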
b[i] = 1/2.*((1+a)*np.sin((1+a)*np.pi/(4.*a))-(1-a)*np.cos((1-a)*np.pi/(4.*a))+(4*a)/np.pi*np.sin((1-a)*np.pi/(4.*a)))
else:
b[i] = 4*a/(np.pi*(1 - 16*a**2*(n[i]/Ns)**2))
b[i] = b[i]*(np.cos((1+a)*np.pi*n[i]/Ns) + np.sinc((1-a)*n[i]/Ns)*(1-a)*np.pi/(4.*a))
return b
def pn_gen(n_bits, m=5):
"""
Maximal length sequence signal generator.
    Generates a sequence of 0/1 bits of n_bits duration. The bits themselves
    are obtained from an m-sequence generated using m shift registers. Available m-sequences
    (PN generators) include m = 2,3,...,12, & 16.
Parameters
----------
n_bits : the number of bits to generate
m : the number of shift registers. 2,3, .., 12, & 16
Returns
-------
PN : ndarray of the generator output over N_bits
Notes
-----
The sequence is periodic having period 2**m - 1 (2^m - 1).
Examples
--------
    >>> # A 15 bit period signal over 50 bits
>>> PN = pn_gen(50,4)
"""
c = m_seq(m)
Q = len(c)
max_periods = int(np.ceil(n_bits / float(Q)))
PN = np.zeros(max_periods*Q)
for k in range(max_periods):
PN[k*Q:(k+1)*Q] = c
PN = np.resize(PN, (1, n_bits))
return PN.flatten()
def m_seq(m):
"""
Generate an m-sequence ndarray using an all-ones initialization.
Available m-sequence (PN generators) include m = 2,3,...,12, & 16.
Parameters
----------
m : the number of shift registers. 2,3, .., 12, & 16
Returns
-------
c : ndarray of one period of the m-sequence
Notes
-----
The sequence period is 2**m - 1 (2^m - 1).
Examples
--------
>>> c = m_seq(5)
"""
if m == 2:
taps = np.array([1, 1, 1])
elif m == 3:
taps = np.array([1, 0, 1, 1])
elif m == 4:
taps = np.array([1, 0, 0, 1, 1])
elif m == 5:
taps = np.array([1, 0, 0, 1, 0, 1])
elif m == 6:
taps = np.array([1, 0, 0, 0, 0, 1, 1])
elif m == 7:
taps = np.array([1, 0, 0, 0, 1, 0, 0, 1])
elif m == 8:
taps = np.array([1, 0, 0, 0, 1, 1, 1, 0, 1])
elif m == 9:
taps = np.array([1, 0, 0, 0, 0, 1, 0, 0, 0, 1])
elif m == 10:
taps = np.array([1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1])
elif m == 11:
taps = np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1])
elif m == 12:
taps = np.array([1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1])
elif m == 16:
taps = np.array([1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1])
else:
raise ValueError('Invalid length specified')
# Load shift register with all ones to start
sr = np.ones(m)
# M-squence length is:
Q = 2**m - 1
c = np.zeros(Q)
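    # Shift register sequence: output each pass from the last stage, then
    # shift right and feed back the XOR of the tapped stages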
for n in range(Q):
tap_xor = 0
c[n] = sr[-1]
for k in range(1,m):
if taps[k] == 1:
tap_xor = np.bitwise_xor(tap_xor,np.bitwise_xor(int(sr[-1]),int(sr[m-1-k])))
sr[1:] = sr[:-1]
sr[0] = tap_xor
return c
def bpsk_tx(N_bits, Ns, ach_fc=2.0, ach_lvl_dB=-100, pulse='rect', alpha = 0.25, M=6):
"""
Generates biphase shift keyed (BPSK) transmitter with adjacent channel interference.
Generates three BPSK signals with rectangular or square root raised cosine (SRC)
pulse shaping of duration N_bits and Ns samples per bit. The desired signal is
    centered on f = 0, while the adjacent channel signals to the left and right
    are generated at a dB level relative to the desired signal. Used in the
digital communications Case Study supplement.
Parameters
----------
N_bits : the number of bits to simulate
Ns : the number of samples per bit
ach_fc : the frequency offset of the adjacent channel signals (default 2.0)
ach_lvl_dB : the level of the adjacent channel signals in dB (default -100)
pulse :the pulse shape 'rect' or 'src'
alpha : square root raised cosine pulse shape factor (default = 0.25)
M : square root raised cosine pulse truncation factor (default = 6)
Returns
-------
x : ndarray of the composite signal x0 + ach_lvl*(x1p + x1m)
b : the transmit pulse shape
data0 : the data bits used to form the desired signal; used for error checking
Notes
-----
Examples
--------
>>> x,b,data0 = bpsk_tx(1000,10,pulse='src')
"""
pulse_types = ['rect', 'src']
if pulse not in pulse_types:
        raise ValueError("Pulse shape must be 'rect' or 'src'")
x0,b,data0 = nrz_bits(N_bits, Ns, pulse, alpha, M)
x1p,b,data1p = nrz_bits(N_bits, Ns, pulse, alpha, M)
x1m,b,data1m = nrz_bits(N_bits, Ns, pulse, alpha, M)
n = np.arange(len(x0))
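    # Frequency translate the adjacent channel signals to +/- ach_fc, where
    # ach_fc is normalized to the bit rate (Ns samples per bit)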
x1p = x1p*np.exp(1j*2*np.pi*ach_fc/float(Ns)*n)
x1m = x1m*np.exp(-1j*2*np.pi*ach_fc/float(Ns)*n)
ach_lvl = 10**(ach_lvl_dB/20.)
return x0 + ach_lvl*(x1p + x1m), b, data0
def nrz_bits(n_bits, ns, pulse='rect', alpha=0.25, m=6):
"""
Generate non-return-to-zero (NRZ) data bits with pulse shaping.
A baseband digital data signal using +/-1 amplitude signal values
and including pulse shaping.
Parameters
----------
n_bits : number of NRZ +/-1 data bits to produce
ns : the number of samples per bit,
    pulse : 'rect', 'rc', or 'src' (default 'rect')
alpha : excess bandwidth factor(default 0.25)
m : single sided pulse duration (default = 6)
Returns
-------
x : ndarray of the NRZ signal values
b : ndarray of the pulse shape
data : ndarray of the underlying data bits
Notes
-----
Pulse shapes include 'rect' (rectangular), 'rc' (raised cosine),
'src' (root raised cosine). The actual pulse length is 2*M+1 samples.
This function is used by BPSK_tx in the Case Study article.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sk_dsp_comm.sigsys import nrz_bits
>>> from numpy import arange
>>> x,b,data = nrz_bits(100, 10)
>>> t = arange(len(x))
>>> plt.plot(t, x)
>>> plt.ylim([-1.01, 1.01])
>>> plt.show()
"""
data = np.random.randint(0, 2, n_bits)
n_zeros = np.zeros((n_bits, int(ns) - 1))
x = np.hstack((2 * data.reshape(n_bits, 1) - 1, n_zeros))
    x = x.flatten()
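    # x is now a +/-1 weighted impulse train with ns samples per bit;
    # the pulse shaping filter below fills in the pulse shape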
if pulse.lower() == 'rect':
b = np.ones(int(ns))
elif pulse.lower() == 'rc':
b = rc_imp(ns, alpha, m)
elif pulse.lower() == 'src':
b = sqrt_rc_imp(ns, alpha, m)
else:
        raise ValueError('pulse type must be rect, rc, or src')
x = signal.lfilter(b,1,x)
return x, b / float(ns), data
def nrz_bits2(data, Ns, pulse='rect', alpha = 0.25, M=6):
"""
Generate non-return-to-zero (NRZ) data bits with pulse shaping with user data
A baseband digital data signal using +/-1 amplitude signal values
and including pulse shaping. The data sequence is user supplied.
Parameters
----------
data : ndarray of the data bits as 0/1 values
Ns : the number of samples per bit,
    pulse : 'rect', 'rc', or 'src' (default 'rect')
alpha : excess bandwidth factor(default 0.25)
M : single sided pulse duration (default = 6)
Returns
-------
x : ndarray of the NRZ signal values
b : ndarray of the pulse shape
Notes
-----
Pulse shapes include 'rect' (rectangular), 'rc' (raised cosine),
'src' (root raised cosine). The actual pulse length is 2*M+1 samples.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sk_dsp_comm.sigsys import nrz_bits2
>>> from sk_dsp_comm.sigsys import m_seq
>>> from numpy import arange
>>> x,b = nrz_bits2(m_seq(5),10)
>>> t = arange(len(x))
>>> plt.ylim([-1.01, 1.01])
>>> plt.plot(t,x)
"""
N_bits = len(data)
n_zeros = np.zeros((N_bits,int(Ns)-1))
x = np.hstack((2*data.reshape(N_bits,1)-1,n_zeros))
x = x.flatten()
if pulse.lower() == 'rect':
b = np.ones(int(Ns))
elif pulse.lower() == 'rc':
b = rc_imp(Ns,alpha,M)
elif pulse.lower() == 'src':
b = sqrt_rc_imp(Ns,alpha,M)
else:
        raise ValueError('pulse type must be rect, rc, or src')
x = signal.lfilter(b,1,x)
return x,b/float(Ns)
def eye_plot(x, l, s=0):
"""
Eye pattern plot of a baseband digital communications waveform.
The signal must be real, but can be multivalued in terms of the underlying
modulation scheme. Used for BPSK eye plots in the Case Study article.
Parameters
----------
x : ndarray of the real input data vector/array
l : display length in samples (usually two symbols)
s : start index
Returns
-------
Nothing : A plot window opens containing the eye plot
Notes
-----
Increase S to eliminate filter transients.
Examples
--------
1000 bits at 10 samples per bit with 'rc' shaping.
>>> import matplotlib.pyplot as plt
>>> from sk_dsp_comm import sigsys as ss
>>> x,b, data = ss.nrz_bits(1000,10,'rc')
>>> ss.eye_plot(x,20,60)
"""
plt.figure(figsize=(6,4))
idx = np.arange(0, l + 1)
plt.plot(idx, x[s:s + l + 1], 'b')
k_max = int((len(x) - s) / l) - 1
for k in range(1,k_max):
plt.plot(idx, x[s + k * l:s + l + 1 + k * l], 'b')
plt.grid()
plt.xlabel('Time Index - n')
plt.ylabel('Amplitude')
plt.title('Eye Plot')
return 0
def scatter(x, ns, start):
"""
Sample a baseband digital communications waveform at the symbol spacing.
Parameters
----------
x : ndarray of the input digital comm signal
ns : number of samples per symbol (bit)
start : the array index to start the sampling
Returns
-------
xI : ndarray of the real part of x following sampling
xQ : ndarray of the imaginary part of x following sampling
Notes
-----
Normally the signal is complex, so the scatter plot contains
clusters at points in the complex plane. For a binary signal
such as BPSK, the point centers are nominally +/-1 on the real
axis. Start is used to eliminate transients from the FIR
pulse shaping filters from appearing in the scatter plot.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sk_dsp_comm import sigsys as ss
>>> x,b, data = ss.nrz_bits(1000,10,'rc')
>>> # Add some noise so points are now scattered about +/-1
>>> y = ss.cpx_awgn(x,20,10)
>>> yI,yQ = ss.scatter(y,10,60)
>>> plt.plot(yI,yQ,'.')
>>> plt.axis('equal')
>>> plt.ylabel("Quadrature")
>>> plt.xlabel("In-Phase")
>>> plt.grid()
>>> plt.show()
"""
xI = np.real(x[start::ns])
xQ = np.imag(x[start::ns])
return xI, xQ
def bit_errors(z, data, start, ns):
"""
A simple bit error counting function.
In its present form this function counts bit errors between
hard decision BPSK bits in +/-1 form and compares them with
0/1 binary data that was transmitted. Timing between the Tx
and Rx data is the responsibility of the user. An enhanced
version of this function, which features automatic synching
will be created in the future.
Parameters
----------
z : ndarray of hard decision BPSK data prior to symbol spaced sampling
data : ndarray of reference bits in 1/0 format
start : timing reference for the received
ns : the number of samples per symbol
Returns
-------
Pe_hat : the estimated probability of a bit error
Notes
-----
    The Tx and Rx data streams are exclusive-or'd, then the bit errors
are summed, and finally divided by the number of bits observed to form an
estimate of the bit error probability. This function needs to be
enhanced to be more useful.
Examples
--------
>>> from scipy import signal
>>> x,b, data = nrz_bits(1000,10)
>>> # set Eb/N0 to 8 dB
>>> y = cpx_awgn(x,8,10)
>>> # matched filter the signal
>>> z = signal.lfilter(b,1,y)
>>> # make bit decisions at 10 and Ns multiples thereafter
>>> Pe_hat = bit_errors(z,data,10,10)
"""
Pe_hat = np.sum(data[0:len(z[start::ns])] ^ np.int64((np.sign(np.real(z[start::ns])) + 1) / 2)) / float(len(z[start::ns]))
return Pe_hat
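# --- Added usage sketch (not part of the original module) --------------------
# A minimal noiseless loopback check for bit_errors(): with no channel noise
# and a matched filter, the estimated bit error probability should be 0.
# The helper name _bit_errors_selftest is introduced here purely for illustration.
def _bit_errors_selftest(n_bits=1000, ns=10):
    x, b, data = nrz_bits(n_bits, ns)
    z = signal.lfilter(b, 1, x)         # matched filter, no noise added
    return bit_errors(z, data, ns, ns)  # expected to be 0.0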
def cpx_awgn(x, es_n0, ns):
"""
Apply white Gaussian noise to a digital communications signal.
This function represents a complex baseband white Gaussian noise
digital communications channel. The input signal array may be real
or complex.
Parameters
----------
x : ndarray noise free complex baseband input signal.
    es_n0 : set the channel Es/N0 (Eb/N0 for binary) level in dB
ns : number of samples per symbol (bit)
Returns
-------
y : ndarray x with additive noise added.
Notes
-----
Set the channel energy per symbol-to-noise power spectral
density ratio (Es/N0) in dB.
Examples
--------
>>> x,b, data = nrz_bits(1000,10)
>>> # set Eb/N0 = 10 dB
>>> y = cpx_awgn(x,10,10)
"""
w = np.sqrt(ns * np.var(x) * 10 ** (-es_n0 / 10.) / 2.) * (np.random.randn(len(x)) + 1j * np.random.randn(len(x)))
return x+w
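# --- Added sanity sketch (illustrative; the helper name is an assumption) ----
# cpx_awgn() scales the complex noise so that the per-sample noise variance is
# Ns*var(x)*10**(-EsN0/10); this sketch compares that target with a measurement.
def _cpx_awgn_noise_check(es_n0=10, ns=10, n_bits=20000):
    x, b, data = nrz_bits(n_bits, ns)
    y = cpx_awgn(x, es_n0, ns)
    measured = np.var(y - x)
    expected = ns * np.var(x) * 10 ** (-es_n0 / 10.)
    return measured, expected  # the two values should agree closely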
def my_psd(x,NFFT=2**10,Fs=1):
"""
    A local version of matplotlib's psd function that returns the plot arrays.
A mlab.psd wrapper function that returns two ndarrays;
makes no attempt to auto plot anything.
Parameters
----------
x : ndarray input signal
NFFT : a power of two, e.g., 2**10 = 1024
Fs : the sampling rate in Hz
Returns
-------
Px : ndarray of the power spectrum estimate
f : ndarray of frequency values
Notes
-----
This function makes it easier to overlay spectrum plots because
you have better control over the axis scaling than when using psd()
in the autoscale mode.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from numpy import log10
>>> from sk_dsp_comm import sigsys as ss
>>> x,b, data = ss.nrz_bits(10000,10)
>>> Px,f = ss.my_psd(x,2**10,10)
>>> plt.plot(f, 10*log10(Px))
>>> plt.ylabel("Power Spectral Density (dB)")
>>> plt.xlabel("Frequency (Hz)")
>>> plt.show()
"""
Px,f = pylab.mlab.psd(x,NFFT,Fs)
return Px.flatten(), f
def am_tx(m,a_mod,fc=75e3):
"""
AM transmitter for Case Study of Chapter 17.
Assume input is sampled at 8 Ksps and upsampling
by 24 is performed to arrive at fs_out = 192 Ksps.
Parameters
----------
m : ndarray of the input message signal
a_mod : AM modulation index, between 0 and 1
fc : the carrier frequency in Hz
Returns
-------
x192 : ndarray of the upsampled by 24 and modulated carrier
t192 : ndarray of the upsampled by 24 time axis
m24 : ndarray of the upsampled by 24 message signal
Notes
-----
The sampling rate of the input signal is assumed to be 8 kHz.
Examples
--------
>>> n = arange(0,1000)
>>> # 1 kHz message signal
>>> m = cos(2*pi*1000/8000.*n)
    >>> x192, t192, m24 = am_tx(m,0.8,fc=75e3)
"""
m24 = interp24(m)
t192 = np.arange(len(m24))/192.0e3
#m24 = np.cos(2*np.pi*2.0e3*t192)
m_max = np.max(np.abs(m24))
x192 = (1 + a_mod*m24/m_max)*np.cos(2*np.pi*fc*t192)
return x192, t192, m24
def am_rx(x192):
"""
AM envelope detector receiver for the Chapter 17 Case Study
The receiver bandpass filter is not included in this function.
Parameters
----------
x192 : ndarray of the AM signal at sampling rate 192 ksps
Returns
-------
m_rx8 : ndarray of the demodulated message at 8 ksps
t8 : ndarray of the time axis at 8 ksps
m_rx192 : ndarray of the demodulated output at 192 ksps
x_edet192 : ndarray of the envelope detector output at 192 ksps
Notes
-----
The bandpass filter needed at the receiver front-end can be designed
using b_bpf,a_bpf = :func:`am_rx_BPF`.
Examples
--------
>>> import numpy as np
>>> n = np.arange(0,1000)
>>> # 1 kHz message signal
    >>> m = np.cos(2*np.pi*1000/8000.*n)
    >>> x192, t192, m24 = am_tx(m,0.8,fc=75e3)
    >>> m_rx8,t8,m_rx192,x_edet192 = am_rx(x192)
"""
x_edet192 = env_det(x192)
m_rx8 = deci24(x_edet192)
# remove DC offset from the env_det + LPF output
m_rx8 -= np.mean(m_rx8)
t8 = np.arange(len(m_rx8))/8.0e3
"""
For performance testing also filter x_env_det
192e3 using a Butterworth cascade.
The filter cutoff is 5kHz, the message BW.
"""
b192,a192 = signal.butter(5,2*5.0e3/192.0e3)
m_rx192 = signal.lfilter(b192,a192,x_edet192)
m_rx192 = signal.lfilter(b192,a192,m_rx192)
m_rx192 -= np.mean(m_rx192)
return m_rx8,t8,m_rx192,x_edet192
def am_rx_bpf(n_order=7, ripple_dB=1, b=10e3, fs=192e3):
"""
Bandpass filter design for the AM receiver Case Study of Chapter 17.
Design a 7th-order Chebyshev type 1 bandpass filter to remove/reduce
    adjacent channel interference at the envelope detector input.
Parameters
----------
n_order : the filter order (default = 7)
ripple_dB : the passband ripple in dB (default = 1)
b : the RF bandwidth (default = 10e3)
fs : the sampling frequency
Returns
-------
b_bpf : ndarray of the numerator filter coefficients
a_bpf : ndarray of the denominator filter coefficients
Examples
--------
>>> from scipy import signal
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> import sk_dsp_comm.sigsys as ss
>>> # Use the default values
>>> b_bpf,a_bpf = ss.am_rx_bpf()
Pole-zero plot of the filter.
>>> ss.zplane(b_bpf,a_bpf)
>>> plt.show()
Plot of the frequency response.
>>> f = np.arange(0,192/2.,.1)
>>> w, Hbpf = signal.freqz(b_bpf,a_bpf,2*np.pi*f/192)
>>> plt.plot(f*10,20*np.log10(abs(Hbpf)))
>>> plt.axis([0,1920/2.,-80,10])
>>> plt.ylabel("Power Spectral Density (dB)")
>>> plt.xlabel("Frequency (kHz)")
>>> plt.show()
"""
b_bpf,a_bpf = signal.cheby1(n_order, ripple_dB, 2 * np.array([75e3 - b / 2., 75e3 + b / 2.]) / fs, 'bandpass')
return b_bpf,a_bpf
def env_det(x):
"""
Ideal envelope detector.
This function retains the positive half cycles of the input signal.
Parameters
----------
    x : ndarray of the input signal
Returns
-------
y : ndarray of the output signal
Examples
--------
>>> n = arange(0,100)
>>> # 1 kHz message signal
>>> m = cos(2*pi*1000/8000.*n)
>>> x192, t192, m24 = am_tx(m,0.8,fc=75e3)
>>> y = env_det(x192)
"""
y = np.zeros(len(x))
for k,xx in enumerate(x):
if xx >= 0:
y[k] = xx
return y
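# --- Added note (illustrative alternative, not part of the original API) -----
# The sample-by-sample loop in env_det() is equivalent to a vectorized
# half-wave rectifier; a sketch:
def _env_det_vectorized(x):
    return np.where(x >= 0, x, 0.0)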
def interp24(x):
"""
Interpolate by L = 24 using Butterworth filters.
The interpolation is done using three stages. Upsample by
L = 2 and lowpass filter, upsample by 3 and lowpass filter, then
upsample by L = 4 and lowpass filter. In all cases the lowpass
filter is a 10th-order Butterworth lowpass.
Parameters
----------
x : ndarray of the input signal
Returns
-------
y : ndarray of the output signal
Notes
-----
The cutoff frequency of the lowpass filters is 1/2, 1/3, and 1/4 to
track the upsampling by 2, 3, and 4 respectively.
Examples
--------
>>> y = interp24(x)
"""
# Stage 1: L = 2
b2,a2 = signal.butter(10,1/2.)
y1 = upsample(x,2)
y1 = signal.lfilter(b2,a2,2*y1)
# Stage 2: L = 3
b3,a3 = signal.butter(10,1/3.)
y2 = upsample(y1,3)
y2 = signal.lfilter(b3,a3,3*y2)
# Stage 3: L = 4
b4,a4 = signal.butter(10,1/4.)
y3 = upsample(y2,4)
y3 = signal.lfilter(b4,a4,4*y3)
return y3
def deci24(x):
"""
    Decimate by M = 24 using Butterworth filters.
    The decimation is done in three stages. Lowpass filter and downsample by
    M = 2, lowpass filter and downsample by M = 3, then lowpass filter and
    downsample by M = 4. In all cases the lowpass filter is a 10th-order
    Butterworth lowpass.
Parameters
----------
x : ndarray of the input signal
Returns
-------
y : ndarray of the output signal
Notes
-----
    The cutoff frequency of the lowpass filters is 1/2, 1/3, and 1/4 to
    track the downsampling by 2, 3, and 4 respectively.
Examples
--------
>>> y = deci24(x)
"""
# Stage 1: M = 2
b2,a2 = signal.butter(10,1/2.)
y1 = signal.lfilter(b2,a2,x)
y1 = downsample(y1,2)
# Stage 2: M = 3
b3,a3 = signal.butter(10,1/3.)
y2 = signal.lfilter(b3,a3,y1)
y2 = downsample(y2,3)
    # Stage 3: M = 4
b4,a4 = signal.butter(10,1/4.)
y3 = signal.lfilter(b4,a4,y2)
y3 = downsample(y3,4)
return y3
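# --- Added usage sketch (function name chosen here for illustration) ---------
# interp24() followed by deci24() should reproduce the input signal up to the
# accumulated group delay of the six Butterworth stages.
def _rate24_roundtrip_demo(n=2000):
    t = np.arange(n)
    x = np.cos(2 * np.pi * 0.02 * t)
    y = deci24(interp24(x))
    return x, y  # compare after trimming the filter delay, e.g. with a plot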
def upsample(x,L):
"""
Upsample by factor L
Insert L - 1 zero samples in between each input sample.
Parameters
----------
x : ndarray of input signal values
L : upsample factor
Returns
-------
y : ndarray of the output signal values
Examples
--------
>>> y = upsample(x,3)
"""
N_input = len(x)
y = np.hstack((x.reshape(N_input,1),np.zeros((N_input, int(L-1)))))
y = y.flatten()
return y
def downsample(x,M,p=0):
"""
Downsample by factor M
Keep every Mth sample of the input. The phase of the input samples
kept can be selected.
Parameters
----------
x : ndarray of input signal values
M : downsample factor
p : phase of decimated value, 0 (default), 1, ..., M-1
Returns
-------
y : ndarray of the output signal values
Examples
--------
>>> y = downsample(x,3)
>>> y = downsample(x,3,1)
"""
if not isinstance(M, int):
raise TypeError("M must be an int")
x = x[0:int(np.floor(len(x)/M))*M]
x = x.reshape((int(np.floor(len(x)/M)),M))
y = x[:,p]
return y
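# --- Added sanity sketch (illustrative helper, not in the original module) ---
# downsample(upsample(x, L), L) returns x exactly, since upsample() inserts
# L - 1 zeros after each sample and downsample() keeps every Lth sample (p=0).
def _updown_identity_check(L=3):
    x = np.arange(12, dtype=float)
    return np.allclose(downsample(upsample(x, L), L), x)  # True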
def unique_cpx_roots(rlist,tol = 0.001):
"""
The average of the root values is used when multiplicity
is greater than one.
<NAME> October 2016
"""
uniq = [rlist[0]]
mult = [1]
for k in range(1,len(rlist)):
N_uniq = len(uniq)
for m in range(N_uniq):
if abs(rlist[k]-uniq[m]) <= tol:
mult[m] += 1
uniq[m] = (uniq[m]*(mult[m]-1) + rlist[k])/float(mult[m])
break
        else:
            # append rlist[k] as a new unique root only when no match was found
            uniq = np.hstack((uniq, rlist[k]))
            mult = np.hstack((mult, [1]))
return np.array(uniq), np.array(mult)
def zplane(b,a,auto_scale=True,size=2,detect_mult=True,tol=0.001):
"""
Create an z-plane pole-zero plot.
Create an z-plane pole-zero plot using the numerator
and denominator z-domain system function coefficient
ndarrays b and a respectively. Assume descending powers of z.
Parameters
----------
b : ndarray of the numerator coefficients
a : ndarray of the denominator coefficients
auto_scale : bool (default True)
size : plot radius maximum when scale = False
Returns
-------
(M,N) : tuple of zero and pole counts + plot window
Notes
-----
This function tries to identify repeated poles and zeros and will
place the multiplicity number above and to the right of the pole or zero.
The difficulty is setting the tolerance for this detection. Currently it
is set at 1e-3 via the function signal.unique_roots.
Examples
--------
>>> # Here the plot is generated using auto_scale
>>> zplane(b,a)
>>> # Here the plot is generated using manual scaling
>>> zplane(b,a,False,1.5)
"""
if (isinstance(a,int) or isinstance(a,float)):
a = [a]
if (isinstance(b,int) or isinstance(b,float)):
b = [b]
M = len(b) - 1
N = len(a) - 1
# Plot labels if multiplicity greater than 1
x_scale = 1.5*size
y_scale = 1.5*size
x_off = 0.02
y_off = 0.01
#N_roots = np.array([1.0])
if M > 0:
N_roots = np.roots(b)
#D_roots = np.array([1.0])
if N > 0:
        D_roots = np.roots(a)
# pylint: disable=protected-access
"""
Test the grid math functions
"""
import pandas as pd
import numpy as np
import numpy.testing as npt
import pytest
from ..coordinates import grid_coordinates, scatter_points
from ..blockreduce import BlockReduce, BlockMean
def test_block_reduce():
"Try reducing constant values in a regular grid"
region = (-5, 0, 5, 10)
east, north = grid_coordinates(region, spacing=0.1, pixel_register=True)
data = 20 * np.ones_like(east)
reducer = BlockReduce(np.mean, spacing=1)
block_coords, block_data = reducer.filter((east, north), data)
assert len(block_coords[0]) == 25
assert len(block_coords[1]) == 25
assert len(block_data) == 25
npt.assert_allclose(block_data, 20)
npt.assert_allclose(block_coords[0][:5], np.linspace(-4.5, -0.5, 5))
npt.assert_allclose(block_coords[1][::5], np.linspace(5.5, 9.5, 5))
def test_block_reduce_scatter():
"Try reducing constant values in a dense enough scatter"
region = (-5, 0, 5, 10)
coordinates = scatter_points(region, size=10000, random_state=0)
data = 20 * np.ones_like(coordinates[0])
block_coords, block_data = BlockReduce(
np.mean, 1, region=region, center_coordinates=True
).filter(coordinates, data)
assert len(block_coords[0]) == 25
assert len(block_coords[1]) == 25
assert len(block_data) == 25
npt.assert_allclose(block_data, 20)
npt.assert_allclose(block_coords[0][:5], np.linspace(-4.5, -0.5, 5))
npt.assert_allclose(block_coords[1][::5], np.linspace(5.5, 9.5, 5))
def test_block_reduce_weights():
"Average with an outlier and zero weight should ignore the outlier"
region = (-5, 0, 5, 10)
size = 10000
coords = scatter_points(region, size=size, random_state=0)
data = 20 * np.ones(size)
weights = np.ones_like(data)
outlier = 1000
data[outlier] = 10000
weights[outlier] = 0
block_coords, block_data = BlockReduce(np.average, 1, region=region).filter(
coords, data, weights
)
assert len(block_coords[0]) == 25
assert len(block_coords[1]) == 25
assert len(block_data) == 25
npt.assert_allclose(block_data, 20)
def test_block_reduce_multiple_components():
"Try reducing multiple components in a regular grid"
region = (-5, 0, 5, 10)
coords = grid_coordinates(region, spacing=0.1, pixel_register=True)
data = 20 * np.ones_like(coords[0]), -13 * np.ones_like(coords[0])
reducer = BlockReduce(np.mean, spacing=1)
block_coords, block_data = reducer.filter(coords, data)
assert len(block_coords[0]) == 25
assert len(block_coords[1]) == 25
npt.assert_allclose(block_coords[0][:5], np.linspace(-4.5, -0.5, 5))
npt.assert_allclose(block_coords[1][::5], np.linspace(5.5, 9.5, 5))
assert isinstance(block_data, tuple)
assert len(block_data) == 2
assert all(len(i) == 25 for i in block_data)
npt.assert_allclose(block_data[0], 20)
npt.assert_allclose(block_data[1], -13)
def test_block_reduce_multiple_weights():
"Try reducing multiple components with weights"
region = (-5, 0, 5, 10)
size = 10000
coords = scatter_points(region, size=size, random_state=10)
data = 20 * np.ones(size), -13 * np.ones(size)
outlier1 = 1000
outlier2 = 3000
data[0][outlier1] = 10000
data[1][outlier2] = -10000
weights = (np.ones(size), np.ones(size))
weights[0][outlier1] = 0
weights[1][outlier2] = 0
reducer = BlockReduce(np.average, spacing=1)
block_coords, block_data = reducer.filter(coords, data, weights)
assert len(block_coords[0]) == 25
assert len(block_coords[1]) == 25
assert isinstance(block_data, tuple)
assert len(block_data) == 2
assert all(len(i) == 25 for i in block_data)
npt.assert_allclose(block_data[0], 20)
npt.assert_allclose(block_data[1], -13)
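def test_block_reduce_median_sketch():
    "Added illustrative check: np.median reducer on constant data"
    # This mirrors test_block_reduce above and only uses API already exercised
    # in this file; it is an added sketch, not part of the upstream test suite.
    region = (-5, 0, 5, 10)
    east, north = grid_coordinates(region, spacing=0.1, pixel_register=True)
    data = -3.7 * np.ones_like(east)
    block_coords, block_data = BlockReduce(np.median, spacing=1).filter(
        (east, north), data
    )
    assert len(block_data) == 25
    npt.assert_allclose(block_data, -3.7)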
def test_blockmean_noweights():
"Try blockmean with no weights"
region = (-5, 0, 5, 10)
east, north = grid_coordinates(region, spacing=0.1, pixel_register=True)
data = 20 * np.ones_like(east)
reducer = BlockMean(spacing=1)
block_coords, block_data, block_weights = reducer.filter((east, north), data)
assert len(block_coords[0]) == 25
assert len(block_coords[1]) == 25
assert len(block_data) == 25
assert len(block_weights) == 25
npt.assert_allclose(block_data, 20)
npt.assert_allclose(block_weights, 1)
npt.assert_allclose(block_coords[0][:5], np.linspace(-4.5, -0.5, 5))
npt.assert_allclose(block_coords[1][::5], np.linspace(5.5, 9.5, 5))
def test_blockmean_noweights_multiple_components():
"Try blockmean with no weights and multiple data components"
region = (-5, 0, 5, 10)
east, north = grid_coordinates(region, spacing=0.1, pixel_register=True)
data = 20 * np.ones_like(east)
reducer = BlockMean(spacing=1)
block_coords, block_data, block_weights = reducer.filter(
(east, north), (data, data)
)
assert len(block_coords[0]) == 25
assert len(block_coords[1]) == 25
npt.assert_allclose(block_coords[0][:5], np.linspace(-4.5, -0.5, 5))
npt.assert_allclose(block_coords[1][::5], np.linspace(5.5, 9.5, 5))
for datai, weighti in zip(block_data, block_weights):
assert len(datai) == 25
assert len(weighti) == 25
npt.assert_allclose(datai, 20)
npt.assert_allclose(weighti, 1)
def test_blockmean_noweights_table():
"Try blockmean with no weights using a known blocked data table"
reducer = BlockMean(spacing=1)
table = pd.DataFrame(dict(data0=[1, 2, 10, 20, 5, 5], block=[1, 1, 2, 2, 3, 3]))
mean, variance = reducer._blocked_mean_variance(table, 1)
npt.assert_allclose(mean[0], [1.5, 15, 5])
# The variance is calculated with 1 degree-of-freedom so it's divided by
# N-1 instead of N because this is a sample variance, not a population
# variance.
npt.assert_allclose(variance[0], [0.5, 50, 0])
def test_blockmean_uncertainty_weights():
"Try blockmean with uncertainty weights"
region = (-2, 0, 6, 8)
# This will be a 4x4 data grid that will be split into 2x2 blocks
coords = grid_coordinates(region, spacing=0.5, pixel_register=True)
data = 102.4 * np.ones_like(coords[0])
uncertainty = np.ones_like(data)
# Set a higher uncertainty for the first block
uncertainty[:2, :2] = 2
weights = 1 / uncertainty ** 2
reducer = BlockMean(spacing=1, uncertainty=True)
# Uncertainty propagation can only work if weights are given
with pytest.raises(ValueError):
reducer.filter(coords, data)
block_coords, block_data, block_weights = reducer.filter(coords, data, weights)
assert len(block_coords[0]) == 4
assert len(block_coords[1]) == 4
assert len(block_data) == 4
assert len(block_weights) == 4
npt.assert_allclose(block_data, 102.4)
npt.assert_allclose(block_weights, [0.25, 1, 1, 1])
npt.assert_allclose(block_coords[0][:2], [-1.5, -0.5])
npt.assert_allclose(block_coords[1][::2], [6.5, 7.5])
def test_blockmean_variance_weights():
"Try blockmean with variance weights"
region = (-2, 0, 6, 8)
# This will be a 4x4 data grid that will be split into 2x2 blocks
coords = grid_coordinates(region, spacing=0.5, pixel_register=True)
data = 102.4 * np.ones_like(coords[0])
uncertainty = np.ones_like(data)
# Set a higher uncertainty for the first block
uncertainty[:2, :2] = 2
weights = 1 / uncertainty ** 2
reducer = BlockMean(spacing=1, uncertainty=False)
block_coords, block_data, block_weights = reducer.filter(coords, data, weights)
assert len(block_coords[0]) == 4
assert len(block_coords[1]) == 4
assert len(block_data) == 4
assert len(block_weights) == 4
npt.assert_allclose(block_data, 102.4)
# The uncertainty in the first block shouldn't matter because the variance
# is still zero, so the weights should be 1
npt.assert_allclose(block_weights, [1, 1, 1, 1])
    npt.assert_allclose(block_coords[0][:2], [-1.5, -0.5])
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
import numpy as np
import gym
import argparse
from parl.utils import logger, summary
from alg.BCreplay_buffer import BCReplayMemory
from model.mujoco_model import MujocoModel
from model.mujoco_agent import MujocoAgent
from alg.sac import SAC
from alg.BC import BC
from rlschool.quadrupedal.envs.env_wrappers.MonitorEnv import Param_Dict,Random_Param_Dict
from rlschool.quadrupedal.robots import robot_config
from rlschool.quadrupedal.envs.env_builder import SENSOR_MODE
import rlschool
from copy import copy
import pybullet as p
import cv2
import time
WARMUP_STEPS = 200
EVAL_EVERY_STEPS = 1e4
EVAL_EPISODES = 1
MEMORY_SIZE = int(1e7)
TRAIN_PER_STEPS = 1024
TRAIN_PER_TIME = 10
BATCH_SIZE = 1024
GAMMA = 0.99
TAU = 0.005
ALPHA = 0.2 # determines the relative importance of entropy term against the reward
ACTOR_LR = 3e-4
CRITIC_LR = 3e-4
param = copy(Param_Dict)
random_param = copy(Random_Param_Dict)
mode_map ={"pose":robot_config.MotorControlMode.POSITION,
"torque":robot_config.MotorControlMode.TORQUE,
"traj":robot_config.MotorControlMode.POSITION,}
def obs2noise(obs):
obs_noise = copy(obs)
obs_noise[7:10] += np.random.normal(0,6e-2,size=3)/0.1
obs_noise[10:13] += np.random.normal(0,1e-1,size=3)/0.5
obs_noise[13:25] += np.random.normal(0,1e-2,size=12)/0.1
obs_noise[25:37] += np.random.normal(0,0.5,size=12)
return obs_noise
def param2dynamic_dict(params):
param = copy(params)
param = np.clip(param,-1,1)
dynamic_param = {}
dynamic_param['control_latency'] = np.clip(40+10*param[0],0,80)
dynamic_param['footfriction'] = np.clip(0.2+10*param[1],0,20)
dynamic_param['basemass'] = np.clip(1.5+1*param[2],0.5,3)
dynamic_param['baseinertia'] = np.clip(np.ones(3)+1*param[3:6],np.array([0.1]*3),np.array([3]*3))
dynamic_param['legmass'] = np.clip(np.ones(3)+1*param[6:9],np.array([0.1]*3),np.array([3]*3))
dynamic_param['leginertia'] = np.clip(np.ones(12)+1*param[9:21],np.array([0.1]*12),np.array([3]*12))
dynamic_param['motor_kp'] = np.clip(80*np.ones(12)+40*param[21:33],np.array([20]*12),np.array([200]*12))
    dynamic_param['motor_kd'] = np.clip(np.array([1., 2., 2.]*4) + param[33:45]*np.array([1, 2, 2]*4),
# start with something simple
# list of clusters are mass > 2e14 M_solar; z > 0.5
# 0 < RA < 90; 0 < dec < 90
#
import ipdb
try:
import astropy.io.fits as fits
import utils.map_creation_utils as kutils
from importlib import reload
reload(kutils)
except:
pass
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
import random
try:
from ProgressBar import ProgressBar
except:
from utils.ProgressBar import ProgressBar
from global_settings import DATA_PATH, CACHE_CNNFAILURE, VARYING_DIST_DATA_PATH
CUR_DIR = os.path.dirname(os.path.realpath(__file__))
class HalosCounter:
def __init__(self, size=40, step=30, data_path = DATA_PATH, seed=0):
self.size = size
self.step = step
self.CUTOUT_SIZE = size / 60. #in degree
self.STEP_SIZE = step / 60. #In degree
self.centers = np.arange(self.CUTOUT_SIZE / 2, 90 - self.CUTOUT_SIZE / 2, self.STEP_SIZE)
self.N_STEPS = len(self.centers)
self.data_path = data_path
self.seed = seed
def _map_deg2idx(self, ra_or_dec):
ret = np.round((ra_or_dec - self.CUTOUT_SIZE / 2.) / self.STEP_SIZE).astype(int)
return min(max(ret, 0), len(self.centers) - 1)
#return np.floor(ra_or_dec / self.STEP_SIZE - 0.5).astype(int)
def _map_idx2deg(self, idx):
return idx * self.STEP_SIZE + self.CUTOUT_SIZE / 2.
def _get_rand_sample(self, nsamples):
np.random.seed(self.seed)
return np.random.choice(self.N_STEPS ** 2, self.N_STEPS ** 2 if nsamples is None else nsamples, replace=False)
def get_halos(self, z_min=None, mvir_min=None):
cache_path = os.path.join(self.data_path, "_cache", "complete_halos.pkl")
if os.path.isfile(cache_path):
df = pd.read_pickle(cache_path)
else:
xrad = np.genfromtxt(os.path.join(self.data_path, "raw/halo_sz.ascii"), dtype=None)
cols = {"redshift": 0, "tSZ": 13, "Mvir": 10, "rvir": 12, "CL_ra": 1, "CL_dec": 2}
df = pd.DataFrame()
for k in cols.keys():
df[k] = xrad[:, cols[k]]
df.to_pickle(cache_path)
if z_min is not None: df = df[df['redshift'] >= z_min]
if mvir_min is not None: df = df[df['Mvir'] >= mvir_min]
df.index.name = "halo_id"
return df
def get_cutout_info(self):
all_maps_idx = pd.MultiIndex.from_tuples([(i, j) for i in range(self.N_STEPS) for j in range(self.N_STEPS)],
names=['map_ra_idx', 'map_dec_idx'])
df = pd.DataFrame(index=all_maps_idx).reset_index()
df.index.name = "cutout_id"
for c in ['ra', 'dec']: df["map_%s"%c] = df["map_%s_idx"%c].map(self._map_idx2deg)
return df
def _recache_one(path, headers, cache_path):
if not os.path.isfile(cache_path):
with open(path, 'r') as f:
df = [l.split() for l in f]
df = pd.DataFrame(df, columns=headers).astype(float)
try:
df['halo_id'] = df['halo_id'].astype(int)
except:
pass
df.to_pickle(cache_path)
return cache_path
def recache_IR(path = os.path.join(DATA_PATH, 'raw', 'IRBlastPop.dat')):
headers = ['halo_id', 'ra', 'dec', 'redshift', 'flux_30', 'flux_90', 'flux_148', 'flux_219', 'flux_277', 'flux_350']
cache_path = os.path.join(DATA_PATH, "_cache", "high_flux_IR_galaxies.pkl")
return pd.read_pickle(_recache_one(path, headers, cache_path))
def recache_Radio(path = os.path.join(DATA_PATH, 'raw', 'radio.cat')):
headers = ['ra', 'dec', 'redshift', 'f_1.4', 'f_30', 'f_90', 'f_148', 'f_219', 'f_277', 'f_350']
cache_path = os.path.join(DATA_PATH, "_cache", "radio_galaxies.pkl")
return pd.read_pickle(_recache_one(path, headers, cache_path))
def match_IR():
cache_path = os.path.join(DATA_PATH, "_cache", "high_flux_IR_galaxies_with_info.pkl")
if not os.path.isfile(cache_path):
self = CutoutGen(use_big=False, MULTIPLE=None)
objs = recache_IR().rename(columns={"ra":"obj_ra", "dec":"obj_dec", 'halo_id':'ir_id'})
self.df = self.halocounter.get_cutout_info()
self.df2 = self.df.reset_index().set_index(['map_ra_idx', 'map_dec_idx'])
self.df['which'] = self.df.apply(self._get_cuotout_set, axis=1)
objs['map_ra_idx'] = objs['obj_ra'].map(self.halocounter._map_deg2idx).astype(int)
objs['map_dec_idx'] = objs['obj_dec'].map(self.halocounter._map_deg2idx).astype(int)
objs['cutout_id'] = objs.apply(lambda r: self.df2.loc[(r['map_ra_idx'], r['map_dec_idx'])]['cutout_id'], axis=1)
objs['map_ra'] = objs['map_ra_idx'].map(self.halocounter._map_idx2deg)
objs['map_dec'] = objs['map_dec_idx'].map(self.halocounter._map_idx2deg)
pd.to_pickle(objs, cache_path)
return pd.read_pickle(cache_path)
def match_radio():
cache_path = os.path.join(DATA_PATH, "_cache", "radio_galaxies_with_info.pkl")
if not os.path.isfile(cache_path):
self = CutoutGen(use_big=False, MULTIPLE=None)
objs = recache_Radio().rename(columns={"ra":"obj_ra", "dec":"obj_dec"})
self.df = self.halocounter.get_cutout_info()
self.df2 = self.df.reset_index().set_index(['map_ra_idx', 'map_dec_idx'])
self.df['which'] = self.df.apply(self._get_cuotout_set, axis=1)
objs['map_ra_idx'] = objs['obj_ra'].map(self.halocounter._map_deg2idx).astype(int)
objs['map_dec_idx'] = objs['obj_dec'].map(self.halocounter._map_deg2idx).astype(int)
objs['cutout_id'] = objs.apply(lambda r: self.df2.loc[(r['map_ra_idx'], r['map_dec_idx'])]['cutout_id'], axis=1)
objs['map_ra'] = objs['map_ra_idx'].map(self.halocounter._map_idx2deg)
objs['map_dec'] = objs['map_dec_idx'].map(self.halocounter._map_idx2deg)
pd.to_pickle(objs, cache_path)
return pd.read_pickle(cache_path)
class Annotator:
def __init__(self, CUTOUT_SIZE, RESOLUTION=0.5):
#CUTOUT_SIZE in degree
self.RESOLUTION = RESOLUTION
self.CUTOUT_SIZE = CUTOUT_SIZE
self.npix = np.ceil(self.CUTOUT_SIZE * 60 / self.RESOLUTION)
def ra_to_x(self, ra, cutout_ra):
return self.npix * ((cutout_ra - ra) / self.CUTOUT_SIZE + 0.5)
def dec_to_y(self, dec, cutout_dec):
return self.npix * ((dec - cutout_dec) / self.CUTOUT_SIZE + 0.5)
def x_to_ra(self, x, cutout_ra):
return (0.5 - x / float(self.npix)) * self.CUTOUT_SIZE + cutout_ra
def y_to_dec(self, y, cutout_dec):
return (y / float(self.npix) - 0.5) * self.CUTOUT_SIZE + cutout_dec
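# --- Added sanity sketch (illustrative; names and values are assumptions) ----
# ra_to_x/x_to_ra and dec_to_y/y_to_dec are mutual inverses for a fixed cutout
# center, which is what the annotation/bbox code below relies on.
def _annotator_roundtrip_check():
    ann = Annotator(CUTOUT_SIZE=8 / 60., RESOLUTION=0.25)
    ra, dec, c_ra, c_dec = 45.02, 30.01, 45.0, 30.0
    ok_ra = np.isclose(ann.x_to_ra(ann.ra_to_x(ra, c_ra), c_ra), ra)
    ok_dec = np.isclose(ann.y_to_dec(ann.dec_to_y(dec, c_dec), c_dec), dec)
    return ok_ra and ok_dec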
class CutoutGen:
CFAC = {90: 5.526540e8, 148: 1.072480e9, 219: 1.318837e9}
FREQS = [90, 148, 219]
RESOLUTION = 0.25 #arcmin
def __init__(self, use_big=False, data_path=DATA_PATH,
min_redshift=0.25, min_mvir=2e14, MULTIPLE=None,
remove_overlap_in_train=True, split_exp_id=2):
if use_big:
size = 40
step = 30
else:
size = 8
step = 6
self.big = use_big
self.MULTIPLE=MULTIPLE
suffix = "" if self.big else "_small"
self.halocounter = HalosCounter(size, step, data_path=data_path)
self.data_path = data_path
#self.df2['cutout_id'] = self.df2['cutout_id'].astype(int)
self.min_redshift = min_redshift
self.min_mvir = min_mvir
self.npix = int(np.ceil(self.halocounter.CUTOUT_SIZE * 60 / CutoutGen.RESOLUTION))
self.annotator = Annotator(self.halocounter.CUTOUT_SIZE, CutoutGen.RESOLUTION)
self._thres_str = "z{:.2f}_mvir{:.0e}".format(self.min_redshift, self.min_mvir)
self.cache_dir = os.path.join(self.data_path, "_cache",
"{:.2f}{}".format(CutoutGen.RESOLUTION, suffix),
self._thres_str)
self.cache_dir = self.cache_dir.replace("+", "")
if not os.path.isdir(self.cache_dir): os.makedirs(self.cache_dir)
self.annotation_dir = self.cache_dir.replace("_cache", "maps/annotations")
if not os.path.isdir(self.annotation_dir): os.makedirs(self.annotation_dir)
self.map_dir = os.path.join(self.data_path, "maps", "reso{:.2f}{}".format(CutoutGen.RESOLUTION, suffix))
if not os.path.isdir(self.map_dir): os.makedirs(self.map_dir)
self.label_path = os.path.join(self.map_dir, "%s_label.pkl"%self._thres_str)
self.split_exp_id = split_exp_id
_cutout_info_cache_path = os.path.join(self.cache_dir, "_cutout_info_df_split%d.pkl"%split_exp_id)
if os.path.isfile(_cutout_info_cache_path):
self.df = pd.read_pickle(_cutout_info_cache_path)
self.df2 = self.df.reset_index().set_index(['map_ra_idx', 'map_dec_idx'])
else:
self.df = self.halocounter.get_cutout_info()
self.df2 = self.df.reset_index().set_index(['map_ra_idx', 'map_dec_idx'])
self.df['which'] = self.df.apply(self._get_cuotout_set, axis=1)
self.df['y'] = False
halos = self.halocounter.get_halos(z_min=self.min_redshift, mvir_min=self.min_mvir).sort_values('Mvir', ascending=False)
#ipdb.set_trace()
_map_deg2idx2 = lambda x: np.round(
(x - self.halocounter.CUTOUT_SIZE / 2) / self.halocounter.STEP_SIZE).astype(int)
halos['map_ra_idx'] = halos['CL_ra'].map(self.halocounter._map_deg2idx).astype(int)#.clip(0, len(self.halocounter.centers) - 1)
halos['map_dec_idx'] = halos['CL_dec'].map(self.halocounter._map_deg2idx).astype(int)#.clip(0, len(self.halocounter.centers) - 1)
#halos['map_ra_idx'] = halos['map_ra_idx'].astype(int)
#halos['map_dec_idx'] = halos['map_dec_idx'].astype(int)
halos['cutout_id'] = halos.apply(lambda r: self.df2.loc[(r['map_ra_idx'], r['map_dec_idx'])]['cutout_id'],
axis=1)
halos = halos.reset_index().reindex(columns=['halo_id', 'redshift', 'tSZ', 'Mvir', 'rvir', 'CL_ra', 'CL_dec', 'cutout_id'])
halos = halos.sort_values('Mvir', ascending=False).drop_duplicates('cutout_id', keep='first')
self.df = self.df.reset_index().merge(halos, how='left',on='cutout_id').set_index('cutout_id', verify_integrity=True)
self.df['y'] = self.df['halo_id'].map(lambda x: not pd.isnull(x))
pd.to_pickle(self.df, _cutout_info_cache_path)
if not self.big:
if remove_overlap_in_train:
import functools
#got rid of overlap in training set
pos_idx = self.df[self.df['y']].set_index(['map_ra_idx', 'map_dec_idx']).index
#pos_pair = self.df.loc[pos_idx].reset_index().reindex(columns=['map_ra_idx', 'map_dec_idx'])
bad_idx = [pos_idx.map(lambda x: (x[0]+o0,x[1]+o1)) for o0 in [-1,1] for o1 in [-1,1]]
bad_idx = functools.reduce(lambda x, y: x.union(y), bad_idx)
not_overlapped_idx = self.df2.index.difference(bad_idx)
not_overlapped_idx = self.df2.loc[not_overlapped_idx]['cutout_id'].reset_index().set_index('cutout_id').index
self.df['overlapped'] = True
self.df.loc[not_overlapped_idx, 'overlapped'] = False
self.df = self.df[~((self.df['which'] == 'train') & (self.df['overlapped'] & ~self.df['y']))]
#reduce the # of cutouts
old_idx = self.df[self.df['which']!='train'].index
pos_idx = self.df[self.df['y']].index
if MULTIPLE is not None:
np.random.seed(MULTIPLE)
neg_idx = self.df.index.difference(pos_idx)
neg_idx = neg_idx[np.random.permutation(len(neg_idx))]
tdf = pd.concat([self.df.loc[pos_idx], self.df.loc[neg_idx[:MULTIPLE * len(pos_idx)]]])
self.df = pd.concat([tdf, self.df.reindex(old_idx.difference(tdf.index))]).sort_index()
#self.df.index = pd.Index([i for i in range(len(self.df))], name=self.df.index.name)
self.df2 = self.df.reset_index().set_index(['map_ra_idx', 'map_dec_idx'])
def faster_assign_cutout(self, halos):
_map_degidx2 = lambda x: np.round((x - self.halocounter.CUTOUT_SIZE / 2) / self.halocounter.STEP_SIZE).astype(int)
halos['ra_idx'] = halos['CL_ra'].map(_map_degidx2).clip(0, len(self.halocounter.centers) - 1)
halos['dec_idx'] = halos['CL_dec'].map(_map_degidx2).clip(0, len(self.halocounter.centers) - 1)
halos = halos.reset_index()
halos = halos.merge(self.df2.rename(columns={"halo_id":"halo_id_old"}), how='left', left_on=['ra_idx', 'dec_idx'], right_index=True)
halos = halos.sort_values('Mvir', ascending=False)
halos = halos.drop_duplicates(subset=['ra_idx', 'dec_idx'], keep='first')
return halos
def get_df_with_halo_info(self):
return self.df.merge(self.halocounter.get_halos(), left_on='halo_id', right_index=True, how='left')
@staticmethod
def halo_angle_old(halo_info,scaledown=0.6):
#halo_info should be a dict with at least the following keys: ra, dec, rvir, redshift
#
d_in_Mpc = 4220 * halo_info['redshift']
radius_theta = halo_info['rvir'] / d_in_Mpc / np.pi * 180
return radius_theta * scaledown
@staticmethod
def halo_angle(halo_info,scaledown=0.6):
H0 = 73.8 #km/sec/Mpc
c = 299792.458 #km/sec
q0 = -(0.5 * 0.264 - 0.736) #0.5 * Omega_m - Omega_lambda
z = halo_info['redshift']
da_in_Mpc = c / (H0 * q0**2) * (z * q0 + (q0-1)* (np.sqrt(2 * q0 * z + 1.) - 1.)) / (1 + z)**2
radius_theta = halo_info['rvir'] / da_in_Mpc / np.pi * 180
return radius_theta * scaledown
@staticmethod
def _map_component(c):
component_list = ['samples', 'ksz', 'ir_pts', 'rad_pts', 'dust']
COMPONENT_MAP = {'samples': ["lensedcmb", "tsz"]}
for i, cc in enumerate(component_list):
if i == 0: continue
COMPONENT_MAP[cc] = COMPONENT_MAP[component_list[i-1]] + [cc]
#COMPONENT_MAP = {"ksz": "ksz", "ir_pts": "ir_pts", "samples": ["lensedcmb", "tsz"],
# "rad_pts": "rad_pts", "dust": "dust", "full": "full_sz", "skymap": "skymap"}
#ipdb.set_trace()
return COMPONENT_MAP[c] if isinstance(COMPONENT_MAP[c], list) else [COMPONENT_MAP[c]]
def find_cutout(self, halo):
ra_idx = np.argmin(np.abs(halo['CL_ra'] - self.halocounter.centers))
dec_idx = np.argmin(np.abs(halo['CL_dec'] - self.halocounter.centers))
return self.df2.loc[(ra_idx, dec_idx)]
def annotate(self, halo, r=None):
if r is None: r = self.find_cutout(halo)
#x and y and everything are in terms of total width / height
#ra_to_x = lambda ra: (r['map_ra'] - ra) / self.halocounter.CUTOUT_SIZE + 0.5
#dec_to_y = lambda dec: (dec - r['map_dec']) / self.halocounter.CUTOUT_SIZE + 0.5
#x_to_ra = lambda x: (0.5 - x) * self.halocounter.CUTOUT_SIZE + r['map_ra']
#y_to_dec = lambda y: (y-0.5) * self.halocounter.CUTOUT_SIZE + r['map_dec']
npix = np.ceil(self.halocounter.CUTOUT_SIZE * 60 / CutoutGen.RESOLUTION)
theta = CutoutGen.halo_angle(halo) # in degree
theta = min(theta, 6. / 60.)
#theta = 3 / npix * self.halocounter.CUTOUT_SIZE #TODO: after experiemnt, remove this hardcode
x = self.annotator.ra_to_x(halo['CL_ra'], r['map_ra'])
y = self.annotator.dec_to_y(halo['CL_dec'], r['map_dec'])
w = h = (2 * theta) / self.halocounter.CUTOUT_SIZE * npix
#draw a circle
r = theta * npix
segmentations = []
for angle in np.arange(0., 2 * np.pi, 2 * np.pi / 10.):
#since it's symmetric, we don't need to worry about axes
segmentations.extend([x + np.cos(angle) * r, y + np.sin(angle) * r])
x, y = x - w / 2., y - h / 2.
return (x, y, w, h), segmentations
def gen_cutouts(self, freq=90, component="skymap"):
result_dir = os.path.join(self.map_dir, 'components', "%s_freq%03i"%(component, freq))
if not os.path.isdir(result_dir): os.makedirs(result_dir)
raw_path = os.path.join(self.data_path, 'raw', '%03i_%s_healpix.fits'%(freq, component))
assert os.path.isfile(raw_path)
# get the map; convert from Jy/steradians -> dT/T_cmb (divided by 1.072480e9)
print('loading %i ghz map' % freq)
allc, header = kutils.hp.read_map(raw_path, h=True)
allc = allc / CutoutGen.CFAC[freq]
        # these options are locked in; they have to be the same as the options
# that provide the input list of indices
cutoutsize = self.halocounter.CUTOUT_SIZE * 60 # arcmin
pixsize = CutoutGen.RESOLUTION
npix = np.ceil(cutoutsize / pixsize)
for ii in ProgressBar(range(len(self.df.index))):
idx =self.df.index[ii]
curr_path = os.path.join(result_dir, "%d.npy"%idx)
if os.path.isfile(curr_path): continue
fall = kutils.get_map_from_bigsky(allc, self.df.loc[idx, "map_ra"], self.df.loc[idx, "map_dec"], pixsize, npix, npix)
np.save(curr_path, fall)
del allc
def _gen_noise(self, idx, noise_lev=1.):
np.random.seed(idx)
cutoutsize = self.halocounter.CUTOUT_SIZE * 60 # arcmin
pixsize = CutoutGen.RESOLUTION
npix = int(np.ceil(cutoutsize / pixsize))
noise_scale = noise_lev / (180. * 60. / np.pi * np.sqrt((CutoutGen.RESOLUTION / 60. * np.pi / 180.) ** 2.))
noise = np.random.standard_normal((npix, npix, len(CutoutGen.FREQS))) * noise_scale
noise[:, :, 0] *= 2.8
noise[:, :, 1] *= 2.6
noise[:, :, 2] *= 6.6
return noise
def gen_plain_multifreq(self, component="skymap", wnoise=False):
#This function is not used currently. It was used to only generate skymap
scale = 1.0e6 * 2.726 #k2uk * Tcmb
freqs = CutoutGen.FREQS
result_dir = os.path.join(self.map_dir, 'components', "%s" % (component))
result_dir_wnoise = os.path.join(self.map_dir, 'components', "%s(with noise)" % (component))
if wnoise:
for ii in ProgressBar(range(len(self.df.index))):
idx = self.df.index[ii]
curr_path = os.path.join(result_dir_wnoise, '%d.npy'%idx)
if os.path.isfile(curr_path):
ipdb.set_trace()
continue
curr_map = np.load(os.path.join(result_dir, '%d.npy'%idx)) + self._gen_noise(idx)
np.save(curr_path, curr_map)
return
if not os.path.isdir(result_dir): os.makedirs(result_dir)
if component == 'skymap':
result_dirs = {f: os.path.join('/media/zhen/Data/deepsz/maps/reso0.25_small', "%s_freq%03i" % (component, f)) for f in freqs}
else:
result_dirs = {f: os.path.join(self.map_dir, 'components', "%s_freq%03i" % (component, f)) for f in freqs}
for ii in ProgressBar(range(len(self.df.index))):
idx = self.df.index[ii]
curr_path = os.path.join(result_dir, '%d.npy'%idx)
if os.path.isfile(curr_path): continue
#ipdb.set_trace()
curr_map = np.stack([np.load(os.path.join(result_dirs[f], "%d.npy"%idx)) for f in freqs], axis=2) * scale
np.save(curr_path, curr_map)
if component == 'skymap':
return
import shutil
for dd in result_dirs.values(): shutil.rmtree(dd)
def gen_multifreq_maps(self, component="skymap", with_noise=True):
sub_components = self._map_component(component)
if isinstance(sub_components, str): sub_components = [sub_components]
result_dir = os.path.join(self.map_dir, "%s%s" % (component, "(with noise)" if with_noise else ""))
component_dirs = {c: os.path.join(self.map_dir, 'components', c) for c in sub_components}
if not os.path.isdir(result_dir): os.makedirs(result_dir)
for ii in ProgressBar(range(len(self.df.index))):
idx = self.df.index[ii]
curr_path = os.path.join(result_dir, '%d.npy'%idx)
if os.path.isfile(curr_path): continue
curr_maps = [np.load(os.path.join(component_dirs[c], "%d.npy"%idx)) for c in sub_components]
curr_map = sum(curr_maps) + (self._gen_noise(idx) if with_noise else 0.)
np.save(curr_path, curr_map)
def gen_labels(self):
pd.to_pickle(self.df.reindex(columns=['which', 'y']), self.label_path)
#np.save(self.label_path, self.df['y'].values)
def plain_routine(self, component='skymap'):
for freq in CutoutGen.FREQS:
self.gen_cutouts(freq=freq, component=component)
self.gen_plain_multifreq(component)
def routine(self, component='samples'):
#components = ['skymap', 'tsz', 'lensedcmb']
subcomponents = self._map_component(component)
for _comp in subcomponents:
for freq in CutoutGen.FREQS:
self.gen_cutouts(freq=freq, component=_comp)
        self.gen_multifreq_maps(component)
@staticmethod
def _reorder_img_axes(arr):
return np.swapaxes(np.swapaxes(arr, 0, 1), 1, 2)
def _get_cuotout_set(self, r):
if self.split_exp_id==2:
if r['map_ra'] < 0.13 * 90: return 'test'
if r['map_ra'] >= 0.2 * 90: return 'train'
return 'valid'
else:
assert self.split_exp_id==1
if r['map_ra'] > 0.75 * 90: return 'test'
if r['map_ra'] <= 0.65 * 90: return 'train'
return 'valid'
def _get_info(self,which):
return {"description": "%s set for the deepsz project"%which}
def _get_license(self):
return {"id":0, "name":"Nothing"}
def _get_categories(self):
return [{"supercategory":"object", "id":1, "name":"halo"}]
def _get_annotations(self, which):
assert which in ['train' ,'test', 'valid']
annotations = []
df = self.halocounter.get_halos(z_min=self.min_redshift, mvir_min=self.min_mvir)
print(len(df))
for i, idx in enumerate(df.index):
cutout = self.find_cutout(df.loc[idx])
if self._get_cuotout_set(cutout) != which: continue
bbox, segmentations = self.annotate(df.loc[idx], cutout)
curr_ = {"bbox": list(bbox),
"id": i,
"area": bbox[2] * bbox[3],
"segmentation": [segmentations],
"category_id": 1,
"image_id": int(cutout['cutout_id']),
'iscrowd': 0}
annotations.append(curr_)
return annotations
def _get_images(self, which):
images = []
for i, idx in enumerate(self.df.index):
if self._get_cuotout_set(self.df.loc[idx]) != which: continue
images.append({"file_name":"%d.npy"%idx,
"id":idx,
"height": self.npix,
"width": self.npix,
"license": 0})
return images
def get_labels(self, which ='train'):
import json
fname = "labels_{}.json".format(which)
cache_path = os.path.join(self.annotation_dir, fname)
if not os.path.isfile(cache_path):
res = {"info": self._get_info(which),
"licenses": self._get_license(),
"images": self._get_images(which),
"annotations": self._get_annotations(which),
"categories": self._get_categories()}
json.dump(res, open(cache_path, 'w'))
return json.load(open(cache_path, 'r'))
def get_cutout(self, cutout_id, component='skymap',read=False, withnoise=True):
if withnoise: component = component + "(with noise)"
path = os.path.join(self.map_dir, component, '%d.npy'%cutout_id)
if read: return np.load(path)
return path
def show_annotation(self, i=0, component='skymap'):
res = self.get_labels()
annotation = res['annotations'][i]
img = self.get_cutout(annotation['image_id'], read=True)
#img = CutoutGen._reorder_img_axes(img)
x,y,w,h = annotation['bbox']
#print(x,y,w,h)
x1, x2 = int(round(x)), int(round(x + w))
y1, y2 = int(round(y)), int(round(y + h))
#print(x1, x2, y1, y2)
x1, y1 = max(x1,0),max(y1,0)
x2, y2 = min(x2, self.npix-1), min(y2, self.npix-1)
#print(x1,x2,y1,y2)
highlight_value = img.max()
img[y1:y2+1, x1, :] = highlight_value
img[y1:y2+1, x2, :] = highlight_value
img[y1, x1:x2+1, :] = highlight_value
img[y2, x1:x2+1, :] = highlight_value
for i in range(3):
plt.subplot(1,3,i+1)
plt.imshow(img[:,:,i])
return img, annotation
def _read_detections(self, path=None, exp=None):
import json
if path is None:
path = os.path.join(self.data_path, 'detectron_tmp_output')
if exp is not None:
assert isinstance(exp, str)
path = os.path.join(path, exp, "test","deepsz1_test","generalized_rcnn")
detections = json.load(open(os.path.join(path, "bbox_deepsz1_test_results.json"), 'r'))
df = []
for d in detections:
df.append({"image_id":d['image_id'], 'score':d['score'],
'bbox_x':d['bbox'][0], 'bbox_y':d['bbox'][1],
'bbox_w':d['bbox'][2], 'bbox_h':d['bbox'][3],})
df = pd.DataFrame(df).reindex(columns=['image_id', 'score', 'bbox_x', 'bbox_y', 'bbox_w', 'bbox_h'])
truth = self.get_labels('test')
dftruth = []
for d in truth['annotations']:
dftruth.append({"image_id":d['image_id'],
'bbox_x':d['bbox'][0], 'bbox_y':d['bbox'][1],
'bbox_w':d['bbox'][2], 'bbox_h':d['bbox'][3],})
dftruth = pd.DataFrame(dftruth).reindex(columns=['image_id', 'score', 'bbox_x', 'bbox_y', 'bbox_w', 'bbox_h'])
return df, dftruth, truth, path
def get_detections(self, exp=None, NMS=None):
dfp, dft, _, path= self._read_detections(exp=exp)
if NMS is not None:
NMS_cache_path = os.path.join(path, 'NMS_{}.pkl'.format(NMS))
if not os.path.isfile(NMS_cache_path):
all_dfs = []
for img_id in dfp['image_id'].unique():
_dfp = dfp[dfp['image_id'] == img_id]
boxes = np.stack([_dfp['bbox_x'], _dfp['bbox_y'], _dfp['bbox_x']+_dfp['bbox_w'], _dfp['bbox_y']+_dfp['bbox_h']], axis=1)
if NMS == 'score':
_dfp = _dfp.iloc[NMS_by_score(boxes, _dfp['score'].values)]
else:
_dfp = _dfp.iloc[NMS_fast(boxes)]
all_dfs.append(_dfp)
dfp = pd.concat(all_dfs)
pd.to_pickle(dfp, NMS_cache_path)
dfp = pd.read_pickle(NMS_cache_path)
df = pd.concat([dfp,dft])
df['cutout_ra'] = df['image_id'].map(lambda x: self.df.loc[x]['map_ra'])
df['cutout_dec'] = df['image_id'].map(lambda x: self.df.loc[x]['map_dec'])
df['bbox_ra'] = df.apply(lambda r: self.annotator.x_to_ra(r['bbox_x'] + 0.5 * r['bbox_w'], r['cutout_ra']),
axis=1)
df['bbox_dec'] = df.apply(lambda r: self.annotator.y_to_dec(r['bbox_y'] + 0.5 * r['bbox_h'], r['cutout_dec']),
axis=1)
df['ispred'] = df['score'].map(lambda x: not pd.isnull(x))
return df
def _get_detections_check(self):
annotations = []
df = self.halocounter.get_halos(z_min=self.min_redshift, mvir_min=self.min_mvir)
ndf = []
for i, idx in enumerate(df.index):
cutout = self.find_cutout(df.loc[idx])
if self._get_cuotout_set(cutout) != 'test': continue
ndf.append({"cutout_ra": cutout['map_ra'], 'cutout_dec': cutout['map_dec'],
'image_id': cutout['cutout_id'],
'bbox_ra':df.loc[idx,'CL_ra'], 'bbox_dec':df.loc[idx,'CL_dec']})
return pd.DataFrame(ndf)
def show_test(self, i=0, thres=0.9, nms=None, exp='exp1', print_info=False):
#detections, truth = self._read_detections()
if print_info:
__tdf = self.get_detections(exp=exp)
pdf, tdf = __tdf[__tdf['ispred']], __tdf[~__tdf['ispred']]
else:
            pdf, tdf, _, _ = self._read_detections(exp=exp)
#cutout_id = truth['images'][i]['id']
pdf = pdf[pdf['score'] > thres]
if nms is not None:
boxes = np.stack([pdf['bbox_x'], pdf['bbox_y'], pdf['bbox_x'] + pdf['bbox_w'], pdf['bbox_y'] + pdf['bbox_h']], axis=1)
if nms == 'score':
pdf = pdf.iloc[NMS_by_score(boxes, pdf['score'].values)]
else:
pdf = pdf.iloc[NMS_fast(boxes)]
cutout_id = pdf['image_id'].unique()[i]
#cutout_id = 31738
tdf = tdf[tdf['image_id'] == cutout_id]
pdf = pdf[pdf['image_id'] == cutout_id]
if print_info:
            print(pdf.reindex(columns=['bbox_ra', 'bbox_dec', 'cutout_ra', 'cutout_dec', 'image_id', 'score']))
img = self.get_cutout(cutout_id, read=True)
img2 = img.copy()
vtrue, vdetect = np.max(img), np.min(img)
print(vtrue, vdetect)
print("%d true boxes" % len(tdf))
for _, x in tdf.iterrows():
img = _draw_bbox(img, [x['bbox_x'],x['bbox_y'],x['bbox_w'],x['bbox_h']], vtrue)
for _, x in pdf.iterrows():
img2 = _draw_bbox(img2, [x['bbox_x'],x['bbox_y'],x['bbox_w'],x['bbox_h']], vdetect)
for j in range(3):
plt.subplot(2,3,j+1)
plt.imshow(img[:,:,j])
plt.subplot(2,3,j+1+ 3)
plt.imshow(img2[:,:,j])
return None
def get_full_df(self, which='test'):
cache_path = {K: os.path.join(self.cache_dir, "_full_df_%s.pkl"%K) for K in ['test', 'valid', 'train']}
if not os.path.isfile(cache_path[which]):
df = self.halocounter.get_halos(z_min=self.min_redshift, mvir_min=self.min_mvir)
df['which'] = df.apply(lambda r: self._get_cuotout_set(self.find_cutout(r)), axis=1)
df['halo_angle'] = df.apply(lambda r: CutoutGen.halo_angle(r, scaledown=1.0), axis=1)
for k in cache_path.keys(): df[df['which'] == k].to_pickle(cache_path[k])
return pd.read_pickle(cache_path[which])
class ShiftBadCNNGenCutout:
"""
Class used to generate shifted CNN input cutouts
"""
DIST_TO_RIM = 3. / 60
RESOLUTION = 0.25
def __init__(self, split=1, data_path=DATA_PATH, MULTIPLE=10,
cnn_failure_path=CACHE_CNNFAILURE,
map_output_dir=VARYING_DIST_DATA_PATH):
self.split=split
#self.failed_df = pd.read_pickle("../data/split_%d_CNNfailures.pkl"%split)
self.failed_df = pd.read_pickle(cnn_failure_path or "../data/split_%d_CNNfailures.pkl"%split)
self.halos = self.failed_df.drop_duplicates(['halo_id']).set_index('halo_id')
self.MULTIPLE = MULTIPLE
#self.map_dir = "../data/maps/split%d_%dx"%(split, MULTIPLE)
self.map_dir = map_output_dir or "../data/maps/split%d_%dx"%(split, MULTIPLE)
if not os.path.isdir(self.map_dir): os.makedirs(self.map_dir)
self.halocounter = HalosCounter(8, 6, data_path=data_path)
self.data_path = data_path
self.radius_nsteps = 10
self.df = self.gen_df().set_index('cutout_id_new')
def gen_df(self):
cutouts = []
for halo_id, r in self.halos.iterrows():
halo_id = int(halo_id)
for step in range(self.radius_nsteps+1):
ratio = float(step) / self.radius_nsteps
for ii in range(self.MULTIPLE):
if ii != 0 and step == 0: continue
ra_offset, dec_offset = self.random_dist_offsets(ratio=ratio, seed=(halo_id * step) * self.MULTIPLE + ii)
#if step > 1: ipdb.set_trace()
cutouts.append({"halo_id":halo_id,
"cutout_ra": r['CL_ra'] + ra_offset,
"cutout_dec": r['CL_dec'] + dec_offset,
"ratio": ratio})
cutouts = pd.DataFrame(cutouts).reset_index().rename(columns={"index":"cutout_id_new"})
cutouts = cutouts.merge(self.halos.reindex(columns=['redshift', 'Mvir', 'rvir', 'tSZ', 'y', 'CL_ra', 'CL_dec']),
left_on='halo_id', right_index=True)
return cutouts
@staticmethod
def random_dist_offsets(ratio=0.5, seed=1):
radius = ratio * ShiftBadCNNGenCutout.DIST_TO_RIM
np.random.seed(seed)
theta = np.random.rand() * 2 * np.pi
ra, dec = np.cos(theta) * radius, np.sin(theta) * radius
return ra, dec
def _gen_noise(self, idx, noise_lev=1.):
np.random.seed(idx)
cutoutsize = self.halocounter.CUTOUT_SIZE * 60 # arcmin
pixsize = CutoutGen.RESOLUTION
npix = int(np.ceil(cutoutsize / pixsize))
noise_scale = noise_lev / (180. * 60. / np.pi * np.sqrt((CutoutGen.RESOLUTION / 60. * np.pi / 180.) ** 2.))
noise = np.random.standard_normal((npix, npix, len(CutoutGen.FREQS))) * noise_scale
noise[:, :, 0] *= 2.8
noise[:, :, 1] *= 2.6
noise[:, :, 2] *= 6.6
return noise
def gen_cutout(self, freq=90):
component='skymap'
result_dir = os.path.join(self.map_dir, 'components', "%s_freq%03i"%(component, freq))
if not os.path.isdir(result_dir): os.makedirs(result_dir)
raw_path = os.path.join(self.data_path, 'raw', '%03i_%s_healpix.fits'%(freq, component))
assert os.path.isfile(raw_path)
# get the map; convert from Jy/steradians -> dT/T_cmb (divided by 1.072480e9)
print('loading %i ghz map' % freq)
allc, header = kutils.hp.read_map(raw_path, h=True)
allc = allc / CutoutGen.CFAC[freq]
        # these options are locked in; they have to be the same as the options
# that provide the input list of indices
cutoutsize = self.halocounter.CUTOUT_SIZE * 60 # arcmin
pixsize = CutoutGen.RESOLUTION
npix = np.ceil(cutoutsize / pixsize)
for ii in ProgressBar(range(len(self.df.index))):
idx =self.df.index[ii]
curr_path = os.path.join(result_dir, "%d.npy"%idx)
if os.path.isfile(curr_path): continue
fall = kutils.get_map_from_bigsky(allc,
self.df.loc[idx, "cutout_ra"],
self.df.loc[idx, "cutout_dec"], pixsize, npix, npix)
np.save(curr_path, fall)
del allc
def gen_plain_multifreq(self, component="skymap", wnoise=False):
scale = 1.0e6 * 2.726 #k2uk * Tcmb
freqs = CutoutGen.FREQS
if wnoise:
result_dir = os.path.join(self.map_dir, 'components', "%s(with noise)" % (component))
else:
result_dir = os.path.join(self.map_dir, 'components', "%s" % (component))
if not os.path.isdir(result_dir): os.makedirs(result_dir)
result_dirs = {f: os.path.join(self.map_dir, 'components', "%s_freq%03i" % (component, f)) for f in freqs}
for ii in ProgressBar(range(len(self.df.index))):
idx = self.df.index[ii]
curr_path = os.path.join(result_dir, '%d.npy'%idx)
if os.path.isfile(curr_path): continue
curr_map = np.stack([np.load(os.path.join(result_dirs[f], "%d.npy"%idx)) for f in freqs], axis=2) * scale
np.save(curr_path, curr_map + (self._gen_noise(idx) if wnoise else 0.))
def gen_labels(self):
self.df.drop(['CL_ra','CL_dec'],axis=1).to_pickle(os.path.join(self.map_dir, "labels.pkl"))
def gen_routine(self):
for freq in CutoutGen.FREQS: self.gen_cutout(freq)
self.gen_plain_multifreq(wnoise=True)
self.gen_labels()
def _draw_bbox(img, bbox, v):
fw, fh = img.shape[:2]
x, y, w, h = bbox
# print(x,y,w,h)
x1, x2 = max(int(round(x)), 0), min(int(round(x + w)), fw-1)
y1, y2 = max(int(round(y)), 0), min(int(round(y + h)), fh-1)
img[y1:y2 + 1, x1, :] = v
img[y1:y2 + 1, x2, :] = v
img[y1, x1:x2 + 1, :] = v
img[y2, x1:x2 + 1, :] = v
return img
# def gen_noise(nmaps=N_STEPS**2, suffix=""):
def gen_noise(overlap=False, suffix=""):
# generate stampes of noise with differen noise levels
# to be added to the 90, 150, 220 freq bands
# for SPT-3G 1500 sq deg patch, it's [2.8,2.6,6.6]uK-arcmin
# (from Aug 29th Slack channel proposal_forecast)
#
# also generate a future survey for [1,1,2] uK-arcmin
nmaps = HalosCounter(overlap).N_STEPS ** 2
# base dir
bdir = DATA_PATH
npix = 10
pixsize = 0.5 # arcmin
dx = pixsize / 60.0 * np.pi / 180.0
# for nlev in [1.0, 2.6, 2.8, 2.0, 6.6]: # can just scale the 1uK-arcmin up
nlev = 1.0
noisemap = np.empty((nmaps, npix, npix), dtype=np.float64)
    np.random.seed(29)
for i in range(nmaps):
noisemap[i] = np.random.standard_normal((npix, npix)) * nlev / (180. * 60. / np.pi * np.sqrt(dx * dx))
if i % 1000 == 0: print(i)
np.save(bdir + 'deepsz_sandbox/withcmb/noise_%iuK-arcmin_90%s.npy' % (nlev, suffix), noisemap)
    np.random.seed(39)
for i in range(nmaps):
noisemap[i] = np.random.standard_normal((npix, npix)) * nlev / (180. * 60. / np.pi * np.sqrt(dx * dx))
if i % 1000 == 0: print(i)
np.save(bdir + 'deepsz_sandbox/withcmb/noise_%iuK-arcmin_150%s.npy' % (nlev, suffix), noisemap)
    np.random.seed(40)
for i in range(nmaps):
noisemap[i] = np.random.standard_normal((npix, npix)) * nlev / (180. * 60. / np.pi * np.sqrt(dx * dx))
if i % 1000 == 0: print(i)
    np.save(bdir + 'deepsz_sandbox/withcmb/noise_%iuK-arcmin_220%s.npy' % (nlev, suffix), noisemap)
# Copyright 2019 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Unit tests for operations that prepare squeezed states
Convention: The squeezing unitary is fixed to be
U(z) = \exp(0.5 (z^* \hat{a}^2 - z (\hat{a^\dagger}^2)))
where \hat{a} is the photon annihilation operator."""
import pytest
import numpy as np
from scipy.special import factorial
SQZ_R = np.linspace(0.0, 0.1, 5)
SQZ_THETA = np.linspace(0, 2 * np.pi, 3, endpoint=False)
def sech(x):
"""Hyberbolic secant"""
return 1 / np.cosh(x)
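def _squeezed_vacuum_amplitudes(r, theta, cutoff):
    """Added reference sketch (not part of the original tests): with the
    squeezing convention stated above, the squeezed vacuum in the Fock basis is
    sqrt(sech r) * sum_n sqrt((2n)!)/(2**n n!) * (-exp(i theta) tanh r)**n |2n>,
    so only even Fock states are populated (cf. test_no_odd_fock below)."""
    ket = np.zeros(cutoff, dtype=np.complex128)
    ns = np.arange(len(ket[::2]))
    ket[::2] = (
        np.sqrt(sech(r))
        * np.sqrt(factorial(2 * ns)) / (2 ** ns * factorial(ns))
        * (-np.exp(1j * theta) * np.tanh(r)) ** ns
    )
    return ket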
class TestRepresentationIndependent:
"""Basic implementation-independent tests."""
@pytest.mark.parametrize("theta", SQZ_THETA)
def test_no_squeezing(self, setup_backend, theta, tol):
"""Tests squeezing operation in some limiting cases where the result should be a vacuum state."""
backend = setup_backend(1)
backend.prepare_squeezed_state(0, theta, 0)
assert np.all(backend.is_vacuum(tol))
@pytest.mark.backends("fock", "tf")
class TestFockRepresentation:
"""Tests that make use of the Fock basis representation."""
@pytest.mark.parametrize("r", SQZ_R)
@pytest.mark.parametrize("theta", SQZ_THETA)
def test_normalized_squeezed_state(self, setup_backend, r, theta, tol):
"""Tests if a range of squeezed vacuum states are normalized."""
backend = setup_backend(1)
backend.prepare_squeezed_state(r, theta, 0)
state = backend.state()
tr = state.trace()
assert np.allclose(tr, 1.0, atol=tol, rtol=0.0)
@pytest.mark.parametrize("r", SQZ_R)
@pytest.mark.parametrize("theta", SQZ_THETA)
def test_no_odd_fock(self, setup_backend, r, theta, batch_size):
"""Tests if a range of squeezed vacuum states have
only nonzero entries for even Fock states."""
backend = setup_backend(1)
backend.prepare_squeezed_state(r, theta, 0)
s = backend.state()
if s.is_pure:
num_state = s.ket()
else:
num_state = s.dm()
if batch_size is not None:
odd_entries = num_state[:, 1::2]
else:
odd_entries = num_state[1::2]
assert np.all(odd_entries == 0)
import numpy as np
import rospy
import tf2_ros
import tf_conversions
import torch
from geometry_msgs.msg import Point, TransformStamped
from sensor_msgs.msg import CameraInfo, Image, PointCloud2, PointField
from visualization_msgs.msg import Marker
from typing import Optional, Tuple, Union
def to_pointcloud2(points: torch.Tensor, timestamp: rospy.Time,
colors: Optional[Union[torch.Tensor, str]] = None, ref_frame_id: str = "world") -> PointCloud2:
"""Create a point cloud message from a 3D points tensor.
>>> pc = torch.rand(20, 3)
>>> t = rospy.Time(10)
>>> pc_msg = to_pointcloud2(pc, timestamp=t, ref_frame_id="world")
>>> assert len(pc_msg.fields) == 3
>>> pc_msg = to_pointcloud2(pc, timestamp=t, ref_frame_id="world", colors="#FF0000")
>>> assert len(pc_msg.fields) == 6
>>> pc_msg = to_pointcloud2(pc, colors=torch.zeros(20, 3), timestamp=t)
>>> assert len(pc_msg.fields) == 6
Args:
points: point cloud as torch float tensor [N, 3].
timestamp: ros message timestamp.
colors: color of each point in the point cloud [N, 3].
ref_frame_id: id of point cloud reference frame (in TF tree).
Returns:
sensor_msgs::Pointcloud2 message instance.
"""
assert len(points.shape) == 2
assert points.shape[-1] == 3
points_np = points.cpu().detach().numpy().astype(np.float32)
num_points = len(points_np)
msg = PointCloud2()
msg.header.stamp = timestamp
msg.header.frame_id = ref_frame_id
msg.height = 1 # unordered
msg.width = num_points
msg.is_bigendian = False
msg.is_dense = True
msg.fields = [
PointField('x', 0, PointField.FLOAT32, 1),
PointField('y', 4, PointField.FLOAT32, 1),
PointField('z', 8, PointField.FLOAT32, 1),
]
msg.point_step = 3 * points_np.dtype.itemsize
if colors is not None:
if isinstance(colors, torch.Tensor):
assert colors.shape == (num_points, 3)
colors_np = colors.cpu().detach().numpy().astype(np.float32)
elif isinstance(colors, str): # hex string
if not colors.startswith("#"):
raise ValueError("String color must be hex color string such as #FF0000")
rgb = tuple(int(colors[1+i:1+i+2], 16) for i in (0, 2, 4)) # offset for skipping "#"
colors_np = np.ones((num_points, 3), dtype=np.float32)
colors_np[:, 0] = rgb[0] / 255.0
colors_np[:, 1] = rgb[1] / 255.0
colors_np[:, 2] = rgb[2] / 255.0
else:
raise NotImplementedError(f"Colors have invalid type {type(colors)}")
msg.fields += [
PointField('r', 12, PointField.FLOAT32, 1),
PointField('g', 16, PointField.FLOAT32, 1),
PointField('b', 20, PointField.FLOAT32, 1)
]
msg.point_step += 3 * colors_np.dtype.itemsize
points_np = np.concatenate([points_np, colors_np], axis=-1)
msg.row_step = msg.point_step * num_points
msg.data = points_np.tobytes()
return msg
def to_cube_markers(positions: torch.Tensor, timestamp: rospy.Time, scale: Union[Tuple[float, float, float], float],
ref_frame_id: str, color: Tuple[float, float, float] = (1.0, 0.0, 0.0), alpha: float = 1.0,
namespace: str = "", marker_id: int = 0) -> Marker:
"""Convert position vector to 3D occupancy grid message by using the cube array of the Marker message type.
Args:
positions: cube lower left corner position (N, 3).
timestamp: ros message timestamp.
scale: size of voxel in x, y, z direction (all voxels have the same size for efficient rendering).
ref_frame_id: id of point cloud reference frame (in TF tree).
color: voxel color as RGB color coordinate [0, 1].
alpha: voxel opacity value [0, 1].
namespace: marker namespace for identification.
marker_id: marker id for identification.
Returns:
visualization_msgs::Marker message.
"""
assert len(positions.shape) == 2
assert positions.shape[-1] == 3
assert 0 <= alpha <= 1
assert all(0 <= c <= 1 for c in color)
if type(scale) == float:
scale = (scale, scale, scale)
msg = Marker()
msg.header.frame_id = ref_frame_id
msg.header.stamp = timestamp
msg.type = Marker.CUBE_LIST
msg.ns = namespace
msg.id = marker_id
point_msgs = []
positions_np = positions.cpu().detach().numpy()
for point in positions_np:
point_msg = Point()
point_msg.x = point[0]
point_msg.y = point[1]
point_msg.z = point[2]
point_msgs.append(point_msg)
msg.points = point_msgs
msg.pose.position.x = 0 # center point
msg.pose.position.y = 0
msg.pose.position.z = 0
msg.pose.orientation.x = 0 # center orientation
msg.pose.orientation.y = 0
msg.pose.orientation.z = 0
msg.pose.orientation.w = 1
msg.scale.x = scale[0]
msg.scale.y = scale[1]
msg.scale.z = scale[2]
msg.color.a = alpha
msg.color.r = color[0]
msg.color.g = color[1]
msg.color.b = color[2]
return msg
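# Minimal usage sketch in the same doctest style used above (the values are
# illustrative assumptions, not taken from the original documentation):
# >>> voxels = torch.rand(50, 3)
# >>> marker = to_cube_markers(voxels, rospy.Time(10), scale=0.1, ref_frame_id="world")
# >>> assert len(marker.points) == 50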
def from_image(msg: Image, device: Optional[torch.device] = None) -> Tuple[torch.Tensor, str, rospy.Time]:
"""Create an image float tensor from an image message.
>>> img_rgb = torch.randint(0, 255, (3, 4, 2), dtype=torch.uint8)
>>> img_msg = to_image(img_rgb, timestamp=rospy.Time(10))
>>> img_rgb2, _, _ = from_image(img_msg)
>>> assert img_rgb2.shape == img_rgb.shape
>>> assert img_rgb2.dtype == img_rgb.dtype
>>> assert torch.allclose(img_rgb, img_rgb2)
Args:
msg: image message to convert.
device: device to ship image to.
Returns:
image: RGB tensor with shape (C, H, W).
frame_id: frame id of image in TF tree.
timestamp: ros message timestamp.
"""
if msg.encoding == "rgb8":
img = np.frombuffer(msg.data, dtype=np.uint8).reshape(msg.height, msg.width, 3)
img = np.transpose(img, (2, 0, 1))
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
"s1": pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(self, setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
with pytest.raises(AttributeError):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, "_{x}".format(x=x))
def test_put(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
with pytest.raises(ValueError):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
index = Index(
["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ ["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="blosc")
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
def test_complibs_default_settings(self, setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(setup_path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
store.close()
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
def test_complibs(self, setup_path):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
# Remove lzo if its not available on this platform
if not tables.which_lib_version("lzo"):
all_complibs.remove("lzo")
# Remove bzip2 if its not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(setup_path) as tmpfile:
gname = "foo"
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode="r")
for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self, setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal, setup_path)
@td.xfail_non_writeable
def test_put_mixed_type(self, setup_path):
df = tm.makeTimeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put("df", df)
expected = store.get("df")
tm.assert_frame_equal(expected, df)
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
def test_append(self, setup_path):
with ensure_clean_store(setup_path) as store:
# this is allowed but you almost always don't want to do it
# tables.NaturalNameWarning):
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
_maybe_remove(store, "df2")
store.put("df2", df[:10], format="table")
store.append("df2", df[10:])
tm.assert_frame_equal(store["df2"], df)
_maybe_remove(store, "df3")
store.append("/df3", df[:10])
store.append("/df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
# this is allowed but you almost always don't want to do it
# tables.NaturalNameWarning
_maybe_remove(store, "/df3 foo")
store.append("/df3 foo", df[:10])
store.append("/df3 foo", df[10:])
tm.assert_frame_equal(store["df3 foo"], df)
# dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df["mixed_column"] = "testing"
df.loc[2, "mixed_column"] = np.nan
_maybe_remove(store, "df")
store.append("df", df)
tm.assert_frame_equal(store["df"], df)
# uints - test storage of uints
uint_data = DataFrame(
{
"u08": Series(
np.random.randint(0, high=255, size=5), dtype=np.uint8
),
"u16": Series(
np.random.randint(0, high=65535, size=5), dtype=np.uint16
),
"u32": Series(
np.random.randint(0, high=2 ** 30, size=5), dtype=np.uint32
),
"u64": Series(
[2 ** 58, 2 ** 59, 2 ** 60, 2 ** 61, 2 ** 62],
dtype=np.uint64,
),
},
index=np.arange(5),
)
import warnings
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms
import matplotlib.dates as mdates
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.patheffects as pe
from obspy.geodetics import gps2dist_azimuth
from .stack import get_peak_coordinates
import utm
from datetime import datetime
from . import RTMWarning
def plot_time_slice(S, processed_st, time_slice=None, label_stations=True,
hires=False, dem=None, plot_peak=True, xy_grid=None,
cont_int=5, annot_int=50):
"""
Plot a time slice through :math:`S` to produce a map-view plot. If time is
not specified, then the slice corresponds to the maximum of :math:`S` in
the time direction. Can also plot the peak of the stack function over
time.
Args:
S (:class:`~xarray.DataArray`): The stack function :math:`S`
processed_st (:class:`~obspy.core.stream.Stream`): Pre-processed
Stream; output of :func:`~rtm.waveform.process_waveforms` (This is
needed because Trace metadata from this Stream are used to plot
stations on the map)
time_slice (:class:`~obspy.core.utcdatetime.UTCDateTime`): Time of
desired time slice. The nearest time in :math:`S` to this specified
time will be plotted. If `None`, the time corresponding to
:math:`\max(S)` is used (default: `None`)
label_stations (bool): Toggle labeling stations with network and
station codes (default: `True`)
hires (bool): If `True`, use higher-resolution coastlines, which looks better
but can be slow (default: `False`)
dem (:class:`~xarray.DataArray`): Overlay time slice on a user-supplied
DEM from :class:`~rtm.grid.produce_dem` (default: `None`)
plot_peak (bool): Plot the peak stack function over time as a subplot
(default: `True`)
xy_grid (int, float, or None): If not `None`, transforms UTM
coordinates such that the grid center is at (0, 0) — the plot
extent is then given by (-xy_grid, xy_grid) [meters] for easting
and northing. Only valid for projected grids
cont_int (int): Contour interval [m] for plots with DEM data
annot_int (int): Annotated contour interval [m] for plots with DEM data
(these contours are thicker and labeled)
Returns:
:class:`~matplotlib.figure.Figure`: Output figure
"""
# Don't plot peak of stack function when length of stack is one
if plot_peak and len(S.time) == 1:
plot_peak = False
warnings.warn('Stack time length = 1, not plotting peak', RTMWarning)
st = processed_st.copy()
# Get coordinates of stack maximum in (latitude, longitude)
time_max, y_max, x_max, peaks, props = get_peak_coordinates(S, unproject=S.UTM)
# Gather coordinates of grid center
lon_0, lat_0 = S.grid_center
if S.UTM:
# Don't use cartopy for UTM
proj = None
transform = None
plot_transform = None
lon_0, lat_0, _, _ = utm.from_latlon(S.grid_center[1], S.grid_center[0])
x_max, y_max, _, _ = utm.from_latlon(y_max, x_max)
for tr in st:
tr.stats.longitude, tr.stats.latitude, _, _ = utm.from_latlon(
tr.stats.latitude, tr.stats.longitude)
else:
# This is a good projection to use since it preserves area
proj = ccrs.AlbersEqualArea(central_longitude=lon_0,
central_latitude=lat_0,
standard_parallels=(S.y.values.min(),
S.y.values.max()))
transform = ccrs.PlateCarree()
plot_transform = ccrs.PlateCarree()
if plot_peak:
fig, (ax, ax1) = plt.subplots(figsize=(8, 12), nrows=2,
gridspec_kw={'height_ratios': [3, 1]},
subplot_kw=dict(projection=proj))
#axes kluge so the second one can have a different projection
ax1.remove()
ax1 = fig.add_subplot(414)
else:
fig, ax = plt.subplots(figsize=(8, 8),
subplot_kw=dict(projection=proj))
# In either case, we convert from UTCDateTime to np.datetime64
if time_slice:
time_to_plot = np.datetime64(time_slice)
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Util functions to generate an experiment report after training.
Please refer to the README.md for an overview of the reporting tool.
"""
import enum
import functools
import json
import os
import time
from typing import Any, Callable, Dict, List, Optional, Tuple
import dataclasses
import numpy as onp
from aqt.utils import tfevent_utils
EventSeries = tfevent_utils.EventSeries
# Type Aliases
# Nested dict mapping from component (first key), attribute (second key) to
# events stored in EventSeries. E.g. component = 'train', attribute = 'loss'.
_AllEvents = Dict[str, Dict[str, EventSeries]]
# Nested dict mapping from component (first key), attribute (second key) to
# aggregated metric (float). E.g. component = 'train', attribute = 'loss'.
_AllAggMetrics = Dict[str, Dict[str, float]]
@enum.unique
class MinOrMax(enum.Enum):
"""Aggregation function to use for finding early stopping step."""
MIN = enum.auto() # use min value for early stopping step.
MAX = enum.auto() # use max value for early stopping step.
def get_func(self):
"""Returns function associated with enum option. See parent class."""
if self == MinOrMax.MIN:
return onp.nanargmin
elif self == MinOrMax.MAX:
return onp.nanargmax
else:
raise ValueError('MinOrMax enum option not recognized.')
@enum.unique
class SmoothingKernel(enum.Enum):
"""Kernel function to use for smoothing."""
# RECTANGULAR: Every value in symmetric window weighted equally. Values
# outside the window are not included in average.
# TRIANGULAR: Every value in symmetric window weighted as a linear function of
# absolute distance to kernel center. Values outside the window are not
# included in average.
RECTANGULAR = enum.auto()
TRIANGULAR = enum.auto()
def rectangular_kernel(self, x, window_size_in_steps):
"""Rectangular kernel for moving window average.
All values in window are equally weighted.
Args:
x: Distance to kernel center in steps.
window_size_in_steps: Size of the window to average over.
Returns:
Unnormalized weight to use for averaging, e.g. in `np.average()`.
Raises:
ValueError: If window_size_in_steps arg is less than 1.
"""
if window_size_in_steps < 1:
raise ValueError('window_size_in_steps has to be >= 1.')
if abs(x) <= window_size_in_steps / 2:
return 1.0
else:
return 0.0
def triangular_kernel(self, x, window_size_in_steps):
"""Triangular kernel for moving window average.
The weight is a linear function of the absolute distance to the kernel
center.
Args:
x: Distance to kernel center in steps.
window_size_in_steps: Size of the window to average over.
Returns:
Unnormalized weight to use for averaging, e.g. in `np.average()`.
Raises:
ValueError: If window_size_in_steps arg is less than 1.
"""
if window_size_in_steps < 1:
raise ValueError('window_size_in_steps has to be >= 1.')
return max(0.0, window_size_in_steps / 2 - abs(x))
def get_func(self,
window_size_in_steps = None):
"""Returns function associated with enum option. See parent class."""
if self == SmoothingKernel.RECTANGULAR:
if window_size_in_steps is None:
raise ValueError('For rectangular smoothing_kernel '
'window_size_in_steps must be provided.')
return functools.partial(
self.rectangular_kernel, window_size_in_steps=window_size_in_steps)
elif self == SmoothingKernel.TRIANGULAR:
if window_size_in_steps is None:
raise ValueError('For triangular smoothing_kernel '
'window_size_in_steps must be provided.')
return functools.partial(
self.triangular_kernel, window_size_in_steps=window_size_in_steps)
else:
raise ValueError('SmoothingKernel enum option not recognized.')
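# Minimal smoothing sketch (an illustrative assumption, not part of the original
# module): the kernels above are intended to act as weights in a moving average
# centered on a given step, e.g. via onp.average. The helper name below is made
# up for this example.
def _smooth_at_step(steps, values, center_step, kernel_fn):
  weights = onp.array([kernel_fn(s - center_step) for s in steps])
  return onp.average(values, weights=weights)
# e.g. kernel_fn = SmoothingKernel.TRIANGULAR.get_func(window_size_in_steps=100)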
@dataclasses.dataclass
class ExperimentReport:
"""Report for a single experiment run based on its TFEvents files."""
# Model directory corresponding to single run, with TFEvents files to
# generate report from.
model_dir: str
# Metrics at early stop step, with smoothing applied.
# If NaN values present, then this field will
# be left None, but unsmoothed_metrics will still be reported.
# maps component name (e.g. eval) to metrics dict, which in turn maps
# attribute name to scalar value.
metrics: Optional[_AllAggMetrics]
# Metrics without smoothing at early stop step.
# maps component name (e.g. eval) to metrics dict, which in turn maps
# attribute name to scalar value.
unsmoothed_metrics: Optional[_AllAggMetrics]
# Step at which early_stop_attr in early_stop_ds_dir is minimized. Scalars are
# reported at this step.
early_stop_step: int
# Number of training steps. In combination with early_stop_step, can help
# determine whether training converged and started to overfit.
num_train_steps: int
# Arguments passed into create_end_of_training_report(), the function that
# created this report.
# Included here because the arguments can impact the reported metrics, e.g.
# which attribute was used to find the early stopping step.
report_query_args: Dict[str, Any]
# Human-readable experiment name.
experiment_name: Optional[str] = None
# Name of user who launched the experiment.
user_name: Optional[str] = None
# When experiment was launched, formatted as '%Y%m%dT%H%M%S'.
launch_time: Optional[str] = None
# Evaluation frequency. How often summaries were saved to file.
eval_freq: Optional[int] = None
# If any metrics contain NaN values, first step at which a NaN value occurs.
first_nan_step: Optional[int] = None
# Tensorboard ID or URL.
tensorboard_id: Optional[str] = None
def check_for_nans(event_series, start_step):
"""Finds step >= start_step at which first NaN value occurs if there are any.
Args:
event_series: list of tuples (step, value).
start_step: After which step to check for NaNs.
Returns:
Step at which first NaN value occurs, or None otherwise.
"""
keep_indices = (event_series.steps >= start_step)
event_series.steps = event_series.steps[keep_indices]
event_series.values = event_series.values[keep_indices]
nan_indices = onp.argwhere(onp.isnan(event_series.values))
#!/user/bin/env python
'''ultimate_tictactoe.py: Implement the game of Ultimate Tic-Tac-Toe.'''
################################################################################
import numpy as np
class Ultimate_TicTacToe():
CIRCLE = 0
CROSS = 1
lines = [[(0, 0), (0, 1), (0, 2)], [(0, 0), (1, 0), (2, 0)], [(1, 0), (1, 1), (1, 2)], [(0, 1), (1, 1), (2, 1)], [(2, 0), (2, 1), (2, 2)], [(0, 2), (1, 2), (2, 2)], [(0, 0), (1, 1), (2, 2)], [(0, 2), (1, 1), (2, 0)]]
nbr_players = 2
state_dim = 81*3
def __init__(self):
self.board = np.zeros(shape=(3,3,3,3,3))
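# Note (an assumption for clarity, not stated in the original source): the 5-D
# shape appears to encode (outer row, outer col, inner row, inner col, one-hot
# cell state), which is consistent with state_dim = 81*3 above.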
# need to have a more uniform method to exchange (pack/unpack) 1D and 2D PROCESSED data with hdf5
# type of data: Data1d, MatrixWithCoordinates (not just simple numpy arrays)
import pylab as plt
import h5py
import numpy as np
import time,datetime
import os,copy,subprocess,re
import json,pickle,fabio
import multiprocessing as mp
from py4xs.slnxs import Data1d,average,filter_by_similarity,trans_mode,estimate_scaling_factor
from py4xs.utils import common_name,max_len,Schilling_p_value
from py4xs.detector_config import create_det_from_attrs
from py4xs.local import det_names,det_model,beamline_name # e.g. "_SAXS": "pil1M_image"
from py4xs.data2d import Data2d,Axes2dPlot,MatrixWithCoords,DataType
from py4xs.utils import run
from itertools import combinations
from scipy.interpolate import interp1d
from scipy.ndimage.filters import gaussian_filter
from scipy.interpolate import UnivariateSpline as uspline
from scipy.integrate import simpson
def lsh5(hd, prefix='', top_only=False, silent=False, print_attrs=True):
""" list the content of a HDF5 file
hd: a handle returned by h5py.File()
prefix: use to format the output when lsh5() is called recursively
top_only: returns the names of the top-level groups
silent: suppress printouts if True
"""
if top_only:
tp_grps = list(hd.keys())
if not silent:
print(tp_grps)
return tp_grps
for k in list(hd.keys()):
print(prefix, k)
if isinstance(hd[k], h5py.Group):
if print_attrs:
print(list(hd[k].attrs.items()))
lsh5(hd[k], prefix+"=", silent=silent, print_attrs=print_attrs)
def create_linked_files(fn, fnlist):
""" create a new file to links to data in existing files in the fn_list
for now assume that all files have the same detector/qgrid configuration without checking
"""
ff = h5py.File(fn, 'w')
for s in fnlist:
fs = h5py.File(s, "r")
if len(ff.attrs)==0:
for an in fs.attrs:
ff.attrs[an] = fs.attrs[an]
ff.flush()
for ds in lsh5(fs, top_only=True, silent=True):
ff[ds] = h5py.ExternalLink(s, ds)
fs.close()
ff.close()
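# Minimal usage sketch (the file names are illustrative assumptions):
# create_linked_files("combined.h5", ["run1.h5", "run2.h5"]) writes a small
# file whose top-level groups are external links into the original files.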
def integrate_mon(em, ts, ts0, exp):
""" integrate monitor counts
monitor counts are given by em with timestamps ts
ts0 is the timestamps on the exposures, with duration of exp
"""
ffe = interp1d(ts, em)
em0 = []
for t in ts0:
tt = np.concatenate(([t], ts[(ts>t) & (ts<t+exp)], [t+exp]))
ee = ffe(tt)
em0.append(simpson(ee, tt))
return np.asarray(em0)/exp
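# Minimal usage sketch (synthetic numbers, not from a real data set): average a
# constant 10 Hz monitor reading over three 1 s exposures starting at 0, 2 and 4 s.
# ts_mon = np.arange(0, 10, 0.1); em = np.ones_like(ts_mon)
# integrate_mon(em, ts_mon, np.array([0., 2., 4.]), exp=1.0)  # -> approx. [1., 1., 1.]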
def pack_d1(data, ret_trans=True):
""" utility function to creat a list of [intensity, error] from a Data1d object
or from a list of Data1s objects
"""
if isinstance(data, Data1d):
if ret_trans:
return np.asarray([data.data,data.err]), data.trans
else:
return np.asarray([data.data,data.err])
elif isinstance(data, list):
tvs = [d.trans for d in data]
return np.asarray([pack_d1(d, False) for d in data]),tvs
def unpack_d1(data, qgrid, label, trans_value):
""" utility function to creat a Data1d object from hdf dataset
sepatately given data[intensity and error], qgrid, label, and trans
works for a dataset that include a list of 1d data as well
transMode is set to trans_mode.external
"""
if len(data.shape)>2:
if np.isscalar(trans_value): # this should only happen when intentionally setting trans to 0
trans_value = np.zeros(len(data))
return [unpack_d1(d, qgrid, label+("f%05d" % i), t) for i,(d,t) in enumerate(zip(data,trans_value))]
else:
ret = Data1d()
ret.qgrid = qgrid
ret.data = data[0]
ret.err = data[1]
ret.label = label
ret.set_trans(trans_mode.external, trans_value) # TODO: save transMode of d1s when packing
return ret
def merge_d1s(d1s, detectors, save_merged=False, debug=False):
""" utility function to merge 1D data sets, using functions under slnxs
d1s should contain data corresponding to detectors
"""
s0 = Data1d()
s0.qgrid = d1s[0].qgrid
d_tot = np.zeros(s0.qgrid.shape)
d_max = np.zeros(s0.qgrid.shape)
d_min = np.zeros(s0.qgrid.shape)+1.e32
e_tot = np.zeros(s0.qgrid.shape)
c_tot = np.zeros(s0.qgrid.shape)
w_tot = np.zeros(s0.qgrid.shape)
label = None
comments = ""
for d1 in d1s:
# empty part of the data is nan
idx = ~np.isnan(d1.data)
# simple averaging
#d_tot[idx] += d1.data[idx]
#e_tot[idx] += d1.err[idx]
c_tot[idx] += 1
# average using 1/sigma as weight
wt = 1/d1.err[idx]**2
d_tot[idx] += wt*d1.data[idx]
e_tot[idx] += d1.err[idx]**2*wt**2
w_tot[idx] += wt
idx1 = (np.ma.fix_invalid(d1.data, fill_value=-1)>d_max).data
d_max[idx1] = d1.data[idx1]
idx2 = (np.ma.fix_invalid(d1.data, fill_value=1e32)<d_min).data
d_min[idx2] = d1.data[idx2]
comments += d1.comments
if label is None:
label = d1.label
else:
label = common_name(label, d1.label)
# simple averaging
#s0.data[idx] /= c_tot[idx]
#s0.err[idx] /= np.sqrt(c_tot[idx])
# averaging by weight
s0.data = d_tot/w_tot
s0.err = np.sqrt(e_tot)/w_tot
idx = (c_tot>1)
s0.overlaps.append({'q_overlap': s0.qgrid[idx],
'raw_data1': d_max[idx],
'raw_data2': d_min[idx]})
s0.label = label
s0.comments = comments # .replace("# ", "## ")
if save_merged:
s0.save(s0.label+".dd", debug=debug)
return s0
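# Note on the weighting above (added for clarity): with w_i = 1/sigma_i^2 the
# merged intensity is sum(w_i * I_i) / sum(w_i) and the propagated error reduces
# to 1/sqrt(sum(w_i)), i.e. the standard inverse-variance weighted mean and error.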
# copied from pipeline-test: merge, fix_angular_range, interp_d2
def merge(ds):
""" merge a list of MatrixWithCoord together
the datatype should be DataType.qphi
"""
if len(ds)==1:
return ds[0].copy()
wt = np.zeros(ds[0].shape)
avg = np.zeros(ds[0].shape)
idx = None
for d in ds:
if d.shape!=avg.shape:
raise Exception("merge: the two data sets must have the same shape: ", d.shape, avg.shape)
idx = ~np.isnan(d)
avg[idx] += d[idx]
wt[idx] += 1
idx = (wt>0)
avg[idx] /= wt[idx]
avg[~idx] = np.nan
return avg
def fix_angular_range(da):
""" da should be a numpy array
return modified angular range between -180 and 180
assume that the angular value is not too far off to begin with
"""
da1 = np.copy(da)
da1[da1>180] -= 360 # worse case some angles may go up to 360+delta
da1[da1<-180] += 360 # this shouldn't happen
return da1
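# e.g. (illustrative): fix_angular_range(np.array([185., -190., 10.]))
# returns array([-175., 170., 10.])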
def interp_d2(d2, method="spline", param=0.05):
""" d2 is a 2d array
interpolate within each row
methods should be "linear" or "spline"
a better version of this should use 2d interpolation
but only fill in the space that is narrow enough in one direction (e.g. <5 missing data points)
"""
h,w = d2.shape
xx1 = np.arange(w)
for k in range(h):
yy1 = d2[k,:]
idx = ~np.isnan(yy1)
if len(idx)<=10: # too few valid data points
continue
idx1 = np.where(idx)[0]
# only need to refill the values that are currently nan
idx2 = np.copy(idx)
idx2[:idx1[0]] = True
idx2[idx1[-1]:] = True
if method=="linear":
d2[k,~idx2] = np.interp(xx1[~idx2], xx1[idx], yy1[idx])
elif method=="spline":
fs = uspline(xx1[idx], yy1[idx])
fs.set_smoothing_factor(param)
d2[k,~idx2] = fs(xx1[~idx2])
else:
raise Exception(f"unknown method for intepolation: {method}")
def proc_2d(queue, images, sn, nframes, detectors, qphi_range, debug, starting_frame_no=0):
""" convert 2D data to q-phi map
may want to do this separately for SAXS and WAXS; how to specify?
"""
pass
def proc_make_thumnails(queue, images, sn, nframes, detectors, qphi_range, debug, starting_frame_no=0):
""" make thumbnails, specify the detector, output dataset name, color scale, etc.
"""
pass
def proc_line_profile(queue, images, sn, nframes, detectors, qphi_range, debug, starting_frame_no=0):
""" put the results in a dataset, with attributes describing where the results come from?
"""
pass
def proc_d1merge(args):
""" utility function to perfrom azimuthal average and merge detectors
"""
images,sn,nframes,starting_frame_no,debug,detectors,qgrid,reft,save_1d,save_merged,dtype = args
ret = {'merged': []}
sc = {}
for det in detectors:
ret[det.extension] = []
if det.fix_scale is not None:
sc[det.extension] = 1./det.fix_scale
if debug is True:
print("processing started: sample = %s, starting frame = #%d" % (sn, starting_frame_no))
for i in range(nframes):
for det in detectors:
dt = Data1d()
label = "%s_f%05d%s" % (sn, i+starting_frame_no, det.extension)
dt.load_from_2D(images[det.extension][i], det.exp_para, qgrid,
pre_process=det.pre_process, flat_cor=det.flat, mask=det.exp_para.mask,
save_ave=False, debug=debug, label=label, dtype=dtype)
if det.dark is not None:
dt.data -= det.dark
dt.scale(sc[det.extension])
ret[det.extension].append(dt)
dm = merge_d1s([ret[det.extension][i] for det in detectors], detectors, save_merged, debug)
ret['merged'].append(dm)
if debug is True:
print("processing completed: ", sn, starting_frame_no)
return [sn, starting_frame_no, ret]
def proc_sample(queue, images, sn, nframes, detectors, qgrid, reft, save_1d, save_merged, debug,
starting_frame_no=0, transMode=None, monitor_counts=None):
""" utility function to perfrom azimuthal average and merge detectors
"""
ret = {'merged': []}
sc = {}
for det in detectors:
ret[det.extension] = []
if det.fix_scale is not None:
sc[det.extension] = 1./det.fix_scale
if debug is True:
print("processing started: sample = %s, starting frame = #%d" % (sn, starting_frame_no))
for i in range(nframes):
for det in detectors:
dt = Data1d()
label = "%s_f%05d%s" % (sn, i+starting_frame_no, det.extension)
dt.load_from_2D(images[det.extension][i+starting_frame_no],
det.exp_para, qgrid, det.pre_process, det.exp_para.mask,
save_ave=False, debug=debug, label=label)
dt.scale(sc[det.extension])
ret[det.extension].append(dt)
dm = merge_d1s([ret[det.extension][i] for det in detectors], detectors, save_merged, debug)
ret['merged'].append(dm)
if debug is True:
print("processing completed: ", sn, starting_frame_no)
if queue is None: # single-thread
return ([sn,starting_frame_no,ret])
else: # multi-processing
queue.put([sn,starting_frame_no,ret])
class h5exp():
""" empty h5 file for exchanging exp_setup/qgrid
"""
def __init__(self, fn, exp_setup=None):
self.fn = fn
if exp_setup==None: # assume the h5 file will provide the detector config
self.qgrid = self.read_detectors()
else:
self.detectors, self.qgrid = exp_setup
self.save_detectors()
def save_detectors(self):
self.fh5 = h5py.File(self.fn, "w") # new file
dets_attr = [det.pack_dict() for det in self.detectors]
self.fh5.attrs['detectors'] = json.dumps(dets_attr)
self.fh5.attrs['qgrid'] = list(self.qgrid)
self.fh5.flush()
self.fh5.close()
def read_detectors(self):
self.fh5 = h5py.File(self.fn, "r") # file must exist
dets_attr = self.fh5.attrs['detectors']
qgrid = self.fh5.attrs['qgrid']
self.detectors = [create_det_from_attrs(attrs) for attrs in json.loads(dets_attr)]
self.fh5.close()
return np.asarray(qgrid)
def recalibrate(self, fn_std, energy=-1, e_range=[5, 20], use_recalib=False,
det_type={"_SAXS": "Pilatus1M", "_WAXS2": "Pilatus1M"},
bkg={}, temp_file_location="/tmp"):
""" fn_std should be a h5 file that contains AgBH pattern
use the specified energy (keV) if the value is valid
detector type
"""
pxsize = 0.172e-3
dstd = h5xs(fn_std, [self.detectors, self.qgrid])
uname = os.getenv("USER")
sn = dstd.samples[0]
if energy>=e_range[0] and energy<=e_range[1]:
wl = 2.*np.pi*1.973/energy
for det in self.detectors:
det.exp_para.wavelength = wl
elif energy>0:
raise Exception(f"energy should be between {e_range[0]} and {e_range[1]} (keV): {energy}")
for det in self.detectors:
print(f"processing detector {det.extension} ...")
ep = det.exp_para
data_file = f"{temp_file_location}/{uname}{det.extension}.cbf"
img = dstd.fh5["%s/primary/data/%s" % (sn, dstd.det_name[det.extension])][0]
# this would work better if the detector geometry specification
# can be more flexible for pyFAI-recalib
if ep.flip: ## can only handle flip=1 right now
if ep.flip!=1:
raise Exception(f"don't know how to handle flip={ep.flip}.")
poni1 = pxsize*ep.bm_ctr_x
poni2 = pxsize*(ep.ImageHeight-ep.bm_ctr_y)
dmask = np.fliplr(det.exp_para.mask.map.T)
else:
poni1 = pxsize*ep.bm_ctr_y
poni2 = pxsize*ep.bm_ctr_x
dmask = det.exp_para.mask.map
if det.extension in bkg.keys():
img -= bkg[det.extension]
fabio.cbfimage.CbfImage(data=img*(~dmask)).write(data_file)
if use_recalib:
# WARNING: pyFAI-recalib is obsolete
poni_file = f"/tmp/{uname}{det.extension}.poni"
poni_file_text = ["poni_version: 2",
f"Detector: {det_type[det.extension]}",
"Detector_config: {}",
f"Distance: {pxsize*ep.Dd}",
f"Poni1: {poni1}", # y-axis
f"Poni2: {poni2}", # x-axis
"Rot1: 0.0", "Rot2: 0.0", "Rot3: 0.0",
f"Wavelength: {ep.wavelength*1e-10:.4g}"]
fh = open(poni_file, "w")
fh.write("\n".join(poni_file_text))
fh.close()
#cmd = ["pyFAI-recalib", "-i", poni_file,
# "-c", "AgBh", "-r", "11", "--no-tilt", "--no-gui", "--no-interactive", data_file]
cmd = ["pyFAI-calib", "-i", poni_file,
"-c", "AgBh", "--no-tilt", "--no-gui", "--no-interactive", data_file]
print(" ".join(cmd))
ret = run(cmd)
txt = ret.strip().split('\n')[-1]
#print(txt)
print(f" Original ::: bm_ctr_x = {ep.bm_ctr_x:.2f}, bm_ctr_y = {ep.bm_ctr_y:.2f}, ratioDw = {ep.ratioDw:.3f}")
d,xc,yc = np.asarray(re.findall('\d+\.\d*', txt), dtype=np.float)[:3]
dr = d/(ep.Dd*pxsize)/1000 # d is in mm
ep.ratioDw *= dr
if ep.flip: ## can only handle flip=1 right now
ep.bm_ctr_x = yc
ep.bm_ctr_y = ep.ImageHeight-xc
else:
ep.bm_ctr_y = yc
ep.bm_ctr_x = xc
print(f" Revised ::: bm_ctr_x = {ep.bm_ctr_x:.2f}, bm_ctr_y = {ep.bm_ctr_y:.2f}, ratioDw = {ep.ratioDw:.3f}")
else:
cmd = ["pyFAI-calib2",
"-D", det_type[det.extension],
"-w", f"{ep.wavelength:.4f}", "--dist", f"{pxsize*ep.Dd:.5f}",
"--poni1", f"{poni1:.6f}", "--poni2", f"{poni2:.6f}", "--no-tilt",
"-c", "AgBh", data_file]
print("pyFAI-recalib is now obselete ...")
print("Run this interactively:")
print(" ".join(cmd))
fp = input("Then enter the path/name of the PONI file:")
with open(fp, "r") as fh:
lines = {}
for _ in fh.read().split("\n"):
tl = _.split(":")
if len(tl)==2:
lines[tl[0]] = tl[1]
print(f" Original ::: bm_ctr_x = {ep.bm_ctr_x:.2f}, bm_ctr_y = {ep.bm_ctr_y:.2f}, ratioDw = {ep.ratioDw:.3f}")
ep.ratioDw *= float(lines['Distance'])/(ep.Dd*pxsize)
xc = float(lines['Poni2'])/pxsize
yc = float(lines['Poni1'])/pxsize
if ep.flip: ## can only handle flip=1 right now
ep.bm_ctr_x = yc
ep.bm_ctr_y = ep.ImageHeight-xc
else:
ep.bm_ctr_y = yc
ep.bm_ctr_x = xc
print(f" Revised ::: bm_ctr_x = {ep.bm_ctr_x:.2f}, bm_ctr_y = {ep.bm_ctr_y:.2f}, ratioDw = {ep.ratioDw:.3f}")
ep.init_coordinates()
self.save_detectors()
def find_field(fh5, fieldName):
tstream = {}
samples = lsh5(fh5, top_only=True, silent=True)
for sn in samples:
for stream in list(fh5[f"{sn}"]):
if not 'data' in list(fh5[f"{sn}/{stream}"]):
continue
if fieldName in list(fh5[f"{sn}/{stream}/data"]):
tstream[sn] = stream
break
return tstream
class h5xs():
""" Scattering data in transmission geometry
Transmitted beam intensity can be set either from the water peak (sol), or from intensity monitor.
Data processing can be done either in series, or in parallel. Serial processing can be forced.
"""
def __init__(self, fn, exp_setup=None, transField='', save_d1=True):
""" exp_setup: [detectors, qgrid]
transField: the intensity monitor field packed by suitcase from databroker
save_d1: save newly processed 1d data back to the h5 file
"""
self.d0s = {}
self.d1s = {}
self.d2s = {}
self.detectors = None
self.samples = []
self.attrs = {}
# name of the dataset that contains transmitted beam intensity, e.g. em2_current1_mean_value
self.transField = ''
self.transStream = {}
self.fn = fn
self.save_d1 = save_d1
self.fh5 = h5py.File(self.fn, "r+") # file must exist
if exp_setup==None: # assume the h5 file will provide the detector config
self.qgrid = self.read_detectors()
else:
self.detectors, self.qgrid = exp_setup
self.save_detectors()
self.list_samples(quiet=True)
# find out what are the fields corresponding to the 2D detectors
# at LiX there are two possibilities; assume all samples have data stored in the same fields
sn = self.samples[0]
streams = list(self.fh5[f"{sn}"])
data_fields = {}
for stnm in streams:
if 'data' in self.fh5[f"{sn}/{stnm}"].keys():
for tf in list(self.fh5[f"{sn}/{stnm}/data"]):
data_fields[tf] = stnm
self.det_name = None
# these are the detectors that are present in the data
d_dn = [d.extension for d in self.detectors]
for det_name in det_names:
for k in set(det_name.keys()).difference(d_dn):
del det_name[k]
if set(det_name.values()).issubset(data_fields.keys()):
self.det_name = det_name
break
if self.det_name is None:
print('fields in the h5 file: ', data_fields)
raise Exception("Could not find the data corresponding to the detectors.")
# transStream is more complicated
# different samples may store the data in different streams
if transField=='':
if 'trans' in self.fh5.attrs:
tf = self.fh5.attrs['trans'].split(',')
# transMove, transField, transStream
# but transStream is not always recorded
v, self.transField = tf[:2]
self.transMode = trans_mode(int(v))
self.transStream = find_field(self.fh5, self.transField)
return
else:
self.transMode = trans_mode.from_waxs
self.transField = ''
self.transStream = {}
else:
try:
self.transStream = find_field(self.fh5, transField)
except:
print("invalid field for transmitted intensity: ", transField)
raise Exception()
self.transField = transField
self.transMode = trans_mode.external
self.fh5.attrs['trans'] = ','.join([str(self.transMode.value), self.transField]) #elf.transStream])
self.fh5.flush()
def save_detectors(self):
dets_attr = [det.pack_dict() for det in self.detectors]
self.fh5.attrs['detectors'] = json.dumps(dets_attr)
self.fh5.attrs['qgrid'] = list(self.qgrid)
self.fh5.flush()
def read_detectors(self):
dets_attr = self.fh5.attrs['detectors']
qgrid = self.fh5.attrs['qgrid']
self.detectors = [create_det_from_attrs(attrs) for attrs in json.loads(dets_attr)]
return np.asarray(qgrid)
def md_dict(self, sn, md_keys=[]):
""" create the meta data to be recorded in ascii data files
from the detector_config (attribute of the h5xs object)
and scan header (in the dataset attribute, value set when writing h5 using suitcase)
based on SASDBD
example for static samples:
Instrument: BM29
Detector: Pilatus 2M
Date: 2017-10-17
Wavelength (nm): 0.1240
(Or, alternatively: X-ray energy (keV): 10.00)
Sample-to-detector distance (m): 2.01
Exposure time/frame (s): 0.100
Sample temperature (C): 20.0
example for inline SEC:
instrument: BM29
Detector: Pilatus 2M
Column type: S75 Increase
Date: 2017-10-17
Wavelength (nm): 0.1240
(Or, alternatively: X-ray energy (keV): 10.00)
Sample-to-detector distance (m): 2.01
Exposure time/frame (s): 0.995
Sample temperature (C): 20.0
Flow rate (ml/min): 0.500
Sample injection concentration (mg/ml): 25.0
Sample injection volume (ml): 0.0750
"""
md = {}
bshdr = json.loads(self.fh5[sn].attrs['start'])
md["Instrument"] = bshdr['beamline_id']
ts = time.localtime(bshdr['time'])
md["Date"] = time.strftime("%Y-%m-%d", ts)
md["Time"] = time.strftime("%H:%M:%S", ts)
ene = bshdr["energy"]["energy"]
md["Wavelength (A)"] = f"{2.*3.1416*1973/ene:.4f}"
try:
bscfg = json.loads(self.fh5[sn].attrs["descriptors"])[0]['configuration']
for det in self.detectors:
dn = self.det_name[det.extension].strip("_image")
exp = bscfg[dn]['data'][f"{dn}_cam_acquire_time"]
if not "Detector" in md.keys():
md["Detector"] = det_model[det.extension]
md["Exposure time/frame (s)"] = f"{exp:.3f}"
md["Sample-to-detector distance (m): "] = f"{det.s2d_distance/1000: .3f}"
else:
md["Detector"] += f" , {det_model[det.extension]}"
md["Exposure time/frame (s)"] += f" , {exp:.3f}"
md["Sample-to-detector distance (m): "] += f" , {det.s2d_distance/1000: .3f}"
except: # the header information may be incomplete
pass
for k in md_keys:
if k in bshdr.keys():
md[k] = bshdr[k]
return md
def md_string(self, sn, md_keys=[]):
md = self.md_dict(sn, md_keys=md_keys)
md_str = ""
for k in md.keys():
md_str += f"# {k} : {md[k]}\n"
return md_str
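    # Example of the resulting header block written by md_string() (values illustrative):
    #
    #     # Instrument : LiX
    #     # Date : 2017-10-17
    #     # Time : 10:00:00
    #     # Wavelength (A) : 1.2398
    #     # Detector : Pilatus 2M
    #     # Exposure time/frame (s) : 0.100
    #     # Sample-to-detector distance (m) : 2.010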
def header(self, sn):
if not sn in self.samples:
raise Exception(f"{sn} is not a valie sample.")
if "start" in self.fh5[sn].attrs:
return json.loads(self.fh5[sn].attrs['start'])
return None
def list_samples(self, quiet=False):
self.samples = lsh5(self.fh5, top_only=True, silent=True)
if not quiet:
print(self.samples)
def verify_frn(self, sn, frn, flatten=False):
""" simply translate between a scaler index and a multi-dimensional index based on scan shape
this became more complicated when areadetector saves data as hdf
sshape and dshape are no longer the same
not taking care of snaking here
"""
header = self.header(sn)
if 'shape' in header.keys():
sshape = header['shape']
#snaking = header['snaking']
elif 'num_points' in header.keys():
sshape = [header["num_points"]]
#snaking = False
else:
raise Exception("don't kno how to handler the header", header)
dshape = self.fh5[f"{sn}/primary/data/{list(self.det_name.values())[0]}"].shape[:-2]
if frn is None:
frn = 0
if hasattr(frn, '__iter__'): # tuple or list or np array
if len(frn)==1:
frn = frn[0]
elif len(frn)!=len(sshape):
raise Exception(f"invalid frame number {frn}, must contain {len(sshape)} element(s).")
if isinstance(frn, int): # translate to the right shape
if flatten:
return frn
idx = []
for i in reversed(range(len(dshape))):
idx = [frn%dshape[i]]+idx
frn = int(frn/dshape[i])
frn = idx
if flatten:
frn1 = 0
gs = 1 # fastest axis, increasing the index by 1 = next frame
for i in reversed(range(len(frn))):
frn1+=gs*frn[i]
gs*=sshape[i]
return frn1
return frn
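    # Worked example (sketch): for a detector dataset of shape dshape = (10, 5) and a
    # matching scan shape sshape = (10, 5), a scalar frn = 17 with flatten=False is
    # unpacked as 17 % 5 = 2 (fast axis) and 17 // 5 = 3 (slow axis), giving [3, 2];
    # a multi-index [3, 2] with flatten=True maps back to 3*5 + 2 = 17.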
def get_d1(self, sn=None, group="merged", frn=None):
if sn is None:
sn = self.samples[0]
if not group in self.d1s[sn].keys():
raise Exception(f"1d data do not exist under {group}.")
frn = self.verify_frn(sn, frn, flatten=True)
return self.d1s[sn][group][frn]
def get_d2(self, sn=None, det_ext=None, frn=None, dtype=None):
if sn is None:
sn = self.samples[0]
d2s = {}
for det in self.detectors:
dset = self.fh5["%s/primary/data/%s" % (sn, self.det_name[det.extension])]
frn = self.verify_frn(sn, frn)
d2 = Data2d(dset[tuple(frn)], exp=det.exp_para, dtype=dtype)
d2.md["frame #"] = frn
d2s[det.extension] = d2
if not det_ext:
return d2s
else:
return d2s[det_ext]
def check_bm_center(self, sn=None, det_ext='_SAXS', frn=0,
qs=0.005, qe=0.05, qn=100, Ns=9):
""" this function compares the beam intensity on both sides of the beam center,
and advises whether the beam center defined in the detector configuration is incorrect
the data are divided into 4*Ns azimuthal slices and shown as horizontal and vertical cuts
"""
i = 0
d2 = self.get_d2(sn=sn, det_ext=det_ext, frn=frn)
qg = np.linspace(qs, qe, qn)
d2.conv_Iqphi(Nq=qg, Nphi=Ns*4, mask=d2.exp.mask)
dch1 = d2.qphi_data.d[i]
dch2 = d2.qphi_data.d[i+2*Ns]
dcv1 = d2.qphi_data.d[i+Ns]
dcv2 = d2.qphi_data.d[i+3*Ns]
sh0,dh0 = max_len(dch1, dch2, return_all=True)
ph0 = Schilling_p_value(qn, np.max(dh0))
"""
Inference video: Extract matting on video.
Example:
python inference_video.py \
--model-type mattingrefine \
--model-backbone resnet50 \
--model-backbone-scale 0.25 \
--model-refine-mode sampling \
--model-refine-sample-pixels 80000 \
--model-checkpoint "PATH_TO_CHECKPOINT" \
--video-src "PATH_TO_VIDEO_SRC" \
--video-bgr "PATH_TO_VIDEO_BGR" \
--video-resize 1920 1080 \
--output-dir "PATH_TO_OUTPUT_DIR" \
--output-type com fgr pha err ref \
--video-target-bgr "PATH_TO_VIDEO_TARGET_BGR"
"""
import argparse
import cv2
import torch
import os
import shutil
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torchvision import transforms as T
from torchvision.transforms.functional import to_pil_image
from threading import Thread
from tqdm import tqdm
from PIL import Image
from dataset import VideoDataset, ZipDataset
from dataset import augmentation as A
from model import MattingBase, MattingRefine
from inference_utils import HomographicAlignment
from torchvision import io
import kornia
'''
Mask-RCNN imports below
'''
# modify code - Mask-RCNN
import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()
# import some common libraries
import numpy as np
import os, json, cv2, random
# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
# Mask-RCNN
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 # set threshold for this model
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
predictor = DefaultPredictor(cfg)
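# Quick sketch of how the predictor output is consumed further below
# (class indices follow the COCO metadata of this model zoo config; the input
# image name is hypothetical):
#
#     # frame_bgr = cv2.imread("example_frame.png")
#     # outputs = predictor(frame_bgr)
#     # classes = outputs["instances"].get('pred_classes').to('cpu').numpy()
#     # masks = outputs["instances"].get('pred_masks').to('cpu').numpy()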
# --------------- Arguments ---------------
parser = argparse.ArgumentParser(description='Inference video')
parser.add_argument('--model-type', type=str, required=True, choices=['mattingbase', 'mattingrefine'])
parser.add_argument('--model-backbone', type=str, required=True, choices=['resnet101', 'resnet50', 'mobilenetv2'])
parser.add_argument('--model-backbone-scale', type=float, default=0.25)
parser.add_argument('--model-checkpoint', type=str, required=True)
parser.add_argument('--model-refine-mode', type=str, default='sampling', choices=['full', 'sampling', 'thresholding'])
parser.add_argument('--model-refine-sample-pixels', type=int, default=80_000)
parser.add_argument('--model-refine-threshold', type=float, default=0.7)
parser.add_argument('--model-refine-kernel-size', type=int, default=3)
parser.add_argument('--video-src', type=str, required=True)
parser.add_argument('--video-bgr', type=str, required=True)
parser.add_argument('--video-target-bgr', type=str, default=None, help="Path to video onto which to composite the output (default to flat green)")
parser.add_argument('--video-target-bgr-img', type=str, default=None, help="Use this if the target background is a still image")
parser.add_argument('--video-resize', type=int, default=None, nargs=2)
parser.add_argument('--device', type=str, choices=['cpu', 'cuda'], default='cuda')
parser.add_argument('--preprocess-alignment', action='store_true')
parser.add_argument('--output-dir', type=str, required=True)
parser.add_argument('--output-types', type=str, required=True, nargs='+', choices=['com', 'pha', 'fgr', 'err', 'ref'])
parser.add_argument('--output-format', type=str, default='video', choices=['video', 'image_sequences'])
args = parser.parse_args()
assert 'err' not in args.output_types or args.model_type in ['mattingbase', 'mattingrefine'], \
'Only mattingbase and mattingrefine support err output'
assert 'ref' not in args.output_types or args.model_type in ['mattingrefine'], \
'Only mattingrefine support ref output'
# --------------- Utils ---------------
class VideoWriter:
def __init__(self, path, frame_rate, width, height):
self.out = cv2.VideoWriter(path, cv2.VideoWriter_fourcc(*'mp4v'), frame_rate, (width, height))
def add_batch(self, frames):
frames = frames.mul(255).byte()
frames = frames.cpu().permute(0, 2, 3, 1).numpy()
for i in range(frames.shape[0]):
frame = frames[i]
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
self.out.write(frame)
class ImageSequenceWriter:
def __init__(self, path, extension):
self.path = path
self.extension = extension
self.index = 0
os.makedirs(path)
def add_batch(self, frames):
Thread(target=self._add_batch, args=(frames, self.index)).start()
self.index += frames.shape[0]
def _add_batch(self, frames, index):
frames = frames.cpu()
for i in range(frames.shape[0]):
frame = frames[i]
frame = to_pil_image(frame)
frame.save(os.path.join(self.path, str(index + i).zfill(5) + '.' + self.extension))
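# Usage sketch for the two writers (paths are illustrative; both expect float frames
# in [0, 1] shaped BxCxHxW, matching the tensors produced later in this script):
#
#     # vid = VideoWriter('out/com.mp4', frame_rate=30, width=1920, height=1080)
#     # vid.add_batch(com_batch)
#     # seq = ImageSequenceWriter('out/pha', 'png')
#     # seq.add_batch(pha_batch)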
class Mask_Pred:
def __init__(self, src):
self.img = src.cpu().permute(0,2,3,1).numpy().astype(np.float32)
self.batch = self.img.shape[0]
self.h = self.img.shape[1]
self.w = self.img.shape[2]
self.c = self.img.shape[3]
self.output_list = []
for i in range(self.batch):
outputs = predictor(cv2.cvtColor(self.img[i]*255, cv2.COLOR_RGB2BGR))
self.output_list.append(outputs)
def pred_class(self):
pred_class = []
for i in range(self.batch):
pred_class.append(self.output_list[i]["instances"].get('pred_classes').to('cpu').numpy())
return pred_class
def get_mask(self, obj_type:int):
mask = np.zeros((self.batch, 1, self.h, self.w), np.float32)
for i in range(self.batch):
pred_class = self.output_list[i]["instances"].get('pred_classes').to('cpu').numpy()
if (obj_type in pred_class):
pred_mask = self.output_list[i]["instances"].get('pred_masks').to('cpu').numpy()
index = np.where(pred_class == obj_type)
for dex in index[0]:
mask[i][0] += pred_mask[dex]
return torch.from_numpy(mask).type(torch.float32).cuda(non_blocking=True)
def rm_mask(self, obj_type:int):
mask = np.zeros((self.batch, 1, self.h, self.w), np.float32)
sub_mask = np.zeros((self.batch, 1, self.h, self.w), np.float32)
for i in range(self.batch):
pred_class = self.output_list[i]["instances"].get('pred_classes').to('cpu').numpy()
if (obj_type in pred_class):
pred_mask = self.output_list[i]["instances"].get('pred_masks').to('cpu').numpy()
index = np.where(pred_class == obj_type)
index = index[0][1]
for dex in range(len(pred_class)):
if (dex == index):
mask[i][0] += pred_mask[dex]
else:
sub_mask[i][0] += pred_mask[dex]
mask = mask - sub_mask
return torch.from_numpy(mask).type(torch.float32).cuda(non_blocking=True).clamp_(0.,1.)
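# Usage sketch for Mask_Pred (src is a BxCxHxW float tensor in [0, 1]; the class index
# passed to get_mask is whichever COCO class you want to keep, e.g. 0 for person under
# the default metadata; treat that index as an assumption, not something fixed here):
#
#     # mp = Mask_Pred(src)
#     # person_mask = mp.get_mask(0)   # Bx1xHxW float tensor on the GPU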
# Mask-RCNN: get the mask
def get_mask(src):
img = src.cpu().permute(0,2,3,1).numpy().astype(np.float32)
batch, h, w, c= img.shape
mask = np.zeros((batch,1,h,w), np.float32)
for i in range(len(img)):
outputs = predictor(cv2.cvtColor(img[i]*255, cv2.COLOR_RGB2BGR))
pred_class = outputs["instances"].get('pred_classes').to('cpu').numpy()
if(3 in pred_class):
#print("find people!!")
pred_mask = outputs["instances"].get('pred_masks').to('cpu').numpy()
index = np.where(pred_class == 3)
# MIT License
#
# Copyright (c) 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Contains the RecoverReflectanceShadingLayer.
Contains the RecoverReflectanceShadingLayer that recovers RGB reflectance
and shading from a (possibly scalar) estimation of one of the two and an input
image. See the description of the class.
"""
from __future__ import absolute_import, division, print_function
import sys
import os
import numpy as np
# import timeit
from skimage.color import rgb2lab, lab2rgb
import cv2
try:
import caffe
except ImportError:
sys.path.insert(0, os.path.join(os.path.expanduser('~'),
'Repositories',
'malabar',
'python'))
import caffe
# np.finfo(np.float32).eps = 1.1920929e-07
# np.finfo(np.float).eps = 2.2204460492503131e-16
EPS = np.finfo(np.float32).eps
import itertools
import numpy as np
from ... import geometry
from ... import mesh
from .internal import GLPrimitive, GLShapeDecorator
from ... import draw
from ..internal import ShapeAttribute
from vispy import gloo
@GLShapeDecorator
class Spheropolygons(draw.Spheropolygons, GLPrimitive):
__doc__ = draw.Spheropolygons.__doc__
shaders = {}
shaders['vertex'] = """
uniform mat4 camera;
uniform vec4 rotation;
uniform vec3 translation;
uniform float radius;
attribute vec4 color;
attribute vec2 position;
attribute vec2 image;
attribute vec2 inner_image;
attribute vec4 orientation;
varying vec4 v_color;
varying vec2 v_imageDelta;
vec2 rotate(vec2 point, vec4 quat)
{
vec3 point3d = vec3(point.xy, 0.0);
vec3 result = (quat.x*quat.x - dot(quat.yzw, quat.yzw))*point3d;
result += 2.0*quat.x*cross(quat.yzw, point3d);
result += 2.0*dot(quat.yzw, point3d)*quat.yzw;
return result.xy;
}
void main()
{
vec2 real_image = (image - inner_image)*radius + inner_image;
vec2 vertexPos = position + rotate(real_image, orientation);
vertexPos = rotate(vertexPos, rotation) + translation.xy;
vec4 screenPosition = camera * vec4(vertexPos, translation.z, 1.0);
// transform to screen coordinates
gl_Position = screenPosition;
v_color = color;
v_imageDelta = real_image - inner_image;
}
"""
shaders['fragment'] = """
uniform float outline;
uniform float radius;
varying vec4 v_color;
varying vec2 v_imageDelta;
void main()
{
float lambda1 = 1.0;
float rsq = dot(v_imageDelta, v_imageDelta);
float r = sqrt(rsq);
if(outline > 1e-6)
{
lambda1 = (radius - r)/outline;
lambda1 *= lambda1;
lambda1 *= lambda1;
lambda1 *= lambda1;
lambda1 *= lambda1;
lambda1 = min(lambda1, 1.0);
}
float lambda2 = 1.0;
if(r > radius) discard;
else if(outline <= 1e-6)
{
lambda2 = r/radius;
lambda2 *= lambda2;
lambda2 *= lambda2;
lambda2 *= lambda2;
lambda2 *= lambda2;
lambda2 *= lambda2;
lambda2 *= lambda2;
lambda2 *= lambda2;
lambda2 = 1.0 - min(lambda2, 1.0);
}
else if(r > radius - outline)
{
lambda2 = (r - radius + outline)/outline;
lambda2 *= lambda2;
lambda2 *= lambda2;
lambda2 *= lambda2;
lambda2 *= lambda2;
lambda2 = 1.0 - min(lambda2, 1.0);
}
gl_FragColor = vec4(lambda1*v_color.xyz, lambda2*v_color.w);
}
"""
_vertex_attribute_names = ['position', 'orientation', 'color', 'image', 'inner_image']
_GL_UNIFORMS = list(itertools.starmap(ShapeAttribute, [
('camera', np.float32, np.eye(4), 2, False,
'Internal: 4x4 Camera matrix for world projection'),
('rotation', np.float32, (1, 0, 0, 0), 1, False,
'Internal: Rotation to be applied to each scene as a quaternion'),
('translation', np.float32, (0, 0, 0), 1, False,
'Internal: Translation to be applied to the scene'),
('outline', np.float32, 0, 0, False,
'Outline width for shapes'),
('radius', np.float32, 0, 0, False,
'Rounding radius for shapes')
]))
def __init__(self, *args, **kwargs):
GLPrimitive.__init__(self)
draw.Spheropolygons.__init__(self, *args, **kwargs)
def update_arrays(self):
if 'vertices' in self._dirty_attributes:
vertices = self.vertices
if len(vertices) < 3:
thetas = np.linspace(0, 2*np.pi, 3, endpoint=False)
vertices = np.array([np.cos(thetas), np.sin(thetas)]).T  # degenerate input: fall back to a unit triangle
# import tvm
import os
import numpy as np
import matplotlib.pyplot as plt
# from flextensor.examples import FUNC_TABLE
class Point(object):
def __init__(self, factor_lst, performance):
self.xs = tuple(factor_lst)
self.y = performance
self.dim = len(factor_lst)
def __str__(self):
ret = "[xs=" + str(self.xs) + "; y=" + str(self.y) + "]"
return ret
@classmethod
def from_str(cls, s):
lst = s[1:-1]
lst = lst.split("; ")
xs = lst[0].split("=")[1]
xs = xs[1:-1]
xs = xs.split(", ")
tmp = []
for x in xs:
tmp.append(int(x))
p = lst[1].split("=")[1]
p = float(p)
return cls(tmp, p)
class Curve(object):
def __init__(self, name, shape, point_lst):
self.name = name
self.shape = tuple(shape)
self.point_lst = point_lst
def __str__(self):
ret = "[name=" + self.name + "; shape=" + str(self.shape)
for point in self.point_lst:
ret += "; " + str(point)
ret += "]"
return ret
@classmethod
def from_str(cls, s):
lst = s[1:-1]
lst = lst.split("; ")
name = lst[0].split("=")[1]
shape = lst[1].split("=")[1]
shape = shape[1:-1]
shape = shape.split(", ")
tmp = []
for e in shape:
tmp.append(int(e))
points = []
length = (len(lst) - 2) // 2
for i in range(length):
p = lst[2 * i + 2] + "; " + lst[2 * i + 3]
point = Point.from_str(p)
points.append(point)
return cls(name, tmp, points)
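# Round-trip sketch of the serialization format handled above (values illustrative):
#
#     # p = Point([2, 4, 8], 1.5)
#     # str(p)                      -> "[xs=(2, 4, 8); y=1.5]"
#     # Point.from_str(str(p)).xs   -> (2, 4, 8)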
def test_matmul(N_end, M_end, L_end):
N_end, M_end, L_end = max(N_end, 16), max(M_end, 16), max(L_end, 16)
N_beg, M_beg, L_beg = 512, 512, 512
fn_beg, fm_beg, fl_beg = 1, 1, 1
ascend_factor = 2
prime_add_lst = [0, 1, 5, 9]
matmul = FUNC_TABLE["matmul"].func
curve_lst = []
N_power_lst = list(range(int(np.log(N_end / N_beg) / np.log(ascend_factor)) + 1))
M_power_lst = list(range(int(np.log(M_end / M_beg) / np.log(ascend_factor)) + 1))
L_power_lst = list(range(int(np.log(L_end / L_beg) / np.log(ascend_factor)) + 1))
for N_power in N_power_lst:
M_power = L_power = N_power
N_ = N_beg * np.power(ascend_factor, N_power)
M_ = M_beg * np.power(ascend_factor, M_power)
L_ = L_beg * np.power(ascend_factor, L_power)
for prime_add in prime_add_lst:
N = N_ + prime_add
M = M_ + prime_add
L = L_ + prime_add
point_lst = []
fn_end, fm_end, fl_end = 128, 128, 128
fn_inc, fm_inc, fl_inc = 2, 2, 2
length = int(np.log(fn_end / fn_beg) / np.log(fn_inc)) + 1
import cv2
import numpy as np
def teste():
print("teste")
def imDims(img):
return np.flip(np.array(img.shape[0:2]))
def point2DasString(point,invertY=False):
if invertY:
return str(point[0])+'\t'+str(-point[1])+'\n'
else:
return str(point[0])+'\t'+str(point[1])+'\n'
def checkPercent(percent):
if percent <= 100 and percent >= 0:
return True
else:
return False
def percentRound(dimension,percent):
return int(round(dimension*(percent/100.0)))
def cutImage(image,percent):
if not checkPercent(percent):
return image
else:
imSize = np.array(image.shape[0:2])
newHeight = percentRound(imSize[0],percent)
return image[imSize[0]-newHeight:,:]
def pointAtPercent(imSize,pCol,pRow,invertY=False):
if (not checkPercent(pCol)) or (not checkPercent(pRow)):
return np.array([-1,-1])
else:
x = percentRound(imSize[0],pCol)
if invertY:
y = -percentRound(imSize[1],pRow)
else:
y = percentRound(imSize[1],pRow)
return np.array([x,y])
def normalize(v):
norm = np.linalg.norm(v)
if norm == 0:
    return v
return v / norm
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'cellPoseUI.ui'
import numpy as np
import sys, os, pathlib, warnings, datetime, tempfile, glob, time, threading
from natsort import natsorted
from PyQt5 import QtCore, QtGui, QtWidgets, Qt
import pyqtgraph as pg
import cv2
from scellseg.guis import guiparts, iopart, menus, plot
from scellseg import models, utils, transforms, dynamics, dataset, io
from scellseg.dataset import DatasetShot, DatasetQuery
from scellseg.contrast_learning.dataset import DatasetPairEval
from skimage.measure import regionprops
from tqdm import trange
from math import floor, ceil
from torch.utils.data import DataLoader
try:
import matplotlib.pyplot as plt
MATPLOTLIB = True
except:
MATPLOTLIB = False
class Ui_MainWindow(QtGui.QMainWindow):
"""UI Widget Initialize and UI Layout Initialize,
With any bug or problem, please do connact us from Github Issue"""
def __init__(self, image=None):
super(Ui_MainWindow, self).__init__()
if image is not None:
self.filename = image
iopart._load_image(self, self.filename)
self.now_pyfile_path = os.path.dirname(os.path.abspath(__file__)).replace('\\', '/')
def setupUi(self, MainWindow, image=None):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1420, 800)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(self.now_pyfile_path + "/assets/logo.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
QtWidgets.QApplication.setOverrideCursor(QtCore.Qt.CrossCursor)
menus.mainmenu(self)
menus.editmenu(self)
menus.helpmenu(self)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout_2.setContentsMargins(6, 6, 6, 6)
self.verticalLayout_2.setSpacing(6)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.splitter = QtWidgets.QSplitter(self.centralwidget)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName("splitter")
self.splitter2 = QtWidgets.QSplitter()
self.splitter2.setOrientation(QtCore.Qt.Horizontal)
self.splitter2.setObjectName("splitter2")
self.scrollArea = QtWidgets.QScrollArea(self.splitter)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName("scrollArea")
self.scrollAreaWidgetContents = QtWidgets.QWidget()
# self.scrollAreaWidgetContents.setFixedWidth(500)
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 1500, 848))
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
# self.TableModel = QtGui.QStandardItemModel(self.tableRow, self.tableCol)
# self.TableModel.setHorizontalHeaderLabels(["INDEX", "NAME"])
# self.TableView = QtGui.QTableView()
# self.TableView.setModel(self.TableModel)
self.mainLayout = QtWidgets.QGridLayout(self.scrollAreaWidgetContents)
self.mainLayout.setContentsMargins(0, 0, 0, 0)
self.mainLayout.setSpacing(0)
self.mainLayout.setObjectName("mainLayout")
self.previous_button = QtWidgets.QPushButton("previous image [Ctrl + ←]")
self.load_folder = QtWidgets.QPushButton("load image folder ")
self.next_button = QtWidgets.QPushButton("next image [Ctrl + →]")
self.previous_button.setShortcut(Qt.QKeySequence.MoveToPreviousWord)
self.next_button.setShortcut(Qt.QKeySequence.MoveToNextWord)
self.mainLayout.addWidget(self.previous_button, 1, 1, 1, 1)
self.mainLayout.addWidget(self.load_folder, 1, 2, 1, 1)
self.mainLayout.addWidget(self.next_button, 1, 3, 1, 1)
self.previous_button.clicked.connect(self.PreImBntClicked)
self.next_button.clicked.connect(self.NextImBntClicked)
self.load_folder.clicked.connect(self.OpenDirBntClicked)
# leftside cell list widget
self.listView = QtWidgets.QTableView()
self.myCellList = []
self.listmodel = Qt.QStandardItemModel(0,1)
self.listmodel.setHorizontalHeaderLabels(["Annotation"])
# self.listmodel.setHorizontalHeaderItem(0, QtWidgets.QTableWidgetItem())
self.listView.horizontalHeader().setDefaultAlignment(QtCore.Qt.AlignLeft)
# self.listView.horizontalHeader().setStyle("background-color: #F0F0F0")
# self.listView.horizontalHeader().setVisible(False)
self.listView.verticalHeader().setVisible(False)
for i in range(len(self.myCellList)):
self.listmodel.setItem(i,Qt.QStandardItem(self.myCellList[i]))
self.listView.horizontalHeader().setDefaultSectionSize(140)
self.listView.setMaximumWidth(120)
self.listView.setModel(self.listmodel)
self.listView.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.listView.AdjustToContents
self.listView.customContextMenuRequested.connect(self.show_menu)
# self.listView.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
self.listView.clicked.connect(self.showChoosen)
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.toolBox = QtWidgets.QToolBox(self.splitter)
self.toolBox.setObjectName("toolBox")
self.toolBox.setMaximumWidth(340)
self.page = QtWidgets.QWidget()
self.page.setFixedWidth(340)
self.page.setObjectName("page")
self.gridLayout = QtWidgets.QGridLayout(self.page)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setSpacing(6)
self.gridLayout.setObjectName("gridLayout")
# cross-hair/Draw area
self.vLine = pg.InfiniteLine(angle=90, movable=False)
self.hLine = pg.InfiniteLine(angle=0, movable=False)
self.layer_off = False
self.masksOn = True
self.win = pg.GraphicsLayoutWidget()
self.state_label = pg.LabelItem("Scellseg has been initialized!")
self.win.addItem(self.state_label, 3, 0)
self.win.scene().sigMouseClicked.connect(self.plot_clicked)
self.win.scene().sigMouseMoved.connect(self.mouse_moved)
self.make_viewbox()
bwrmap = make_bwr()
self.bwr = bwrmap.getLookupTable(start=0.0, stop=255.0, alpha=False)
self.cmap = []
# spectral colormap
self.cmap.append(make_spectral().getLookupTable(start=0.0, stop=255.0, alpha=False))
# single channel colormaps
for i in range(3):
self.cmap.append(make_cmap(i).getLookupTable(start=0.0, stop=255.0, alpha=False))
if MATPLOTLIB:
self.colormap = (plt.get_cmap('gist_ncar')(np.linspace(0.0, .9, 1000)) * 255).astype(np.uint8)
else:
self.colormap = ((np.random.rand(1000, 3) * 0.8 + 0.1) * 255).astype(np.uint8)
self.is_stack = True # always loading images of same FOV
# if called with image, load it
# if image is not None:
# self.filename = image
# iopart._load_image(self, self.filename)
self.setAcceptDrops(True)
self.win.show()
self.show()
self.splitter2.addWidget(self.listView)
self.splitter2.addWidget(self.win)
self.mainLayout.addWidget(self.splitter2,0,1,1,3)
self.label_2 = QtWidgets.QLabel(self.page)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 7, 0, 1, 1)
self.brush_size = 3
self.BrushChoose = QtWidgets.QComboBox()
self.BrushChoose.addItems(["1", "3", "5", "7", "9", "11", "13", "15", "17", "19"])
self.BrushChoose.currentIndexChanged.connect(self.brush_choose)
self.gridLayout.addWidget(self.BrushChoose, 7, 1, 1, 1)
# turn on single stroke mode
self.sstroke_On = True
self.SSCheckBox = QtWidgets.QCheckBox(self.page)
self.SSCheckBox.setObjectName("SSCheckBox")
self.SSCheckBox.setChecked(True)
self.SSCheckBox.toggled.connect(self.toggle_sstroke)
self.gridLayout.addWidget(self.SSCheckBox, 8, 0, 1, 1)
self.eraser_button = QtWidgets.QCheckBox(self.page)
self.eraser_button.setObjectName("Edit mask")
self.eraser_button.setChecked(False)
self.eraser_button.toggled.connect(self.eraser_model_change)
self.eraser_button.setToolTip("Right-click to add pixels\nShift+Right-click to delete pixels")
self.gridLayout.addWidget(self.eraser_button, 9, 0, 1, 1)
self.CHCheckBox = QtWidgets.QCheckBox(self.page)
self.CHCheckBox.setObjectName("CHCheckBox")
self.CHCheckBox.toggled.connect(self.cross_hairs)
self.gridLayout.addWidget(self.CHCheckBox, 10, 0, 1, 1)
self.MCheckBox = QtWidgets.QCheckBox(self.page)
self.MCheckBox.setChecked(True)
self.MCheckBox.setObjectName("MCheckBox")
self.MCheckBox.setChecked(True)
self.MCheckBox.toggled.connect(self.toggle_masks)
self.gridLayout.addWidget(self.MCheckBox, 11, 0, 1, 1)
self.OCheckBox = QtWidgets.QCheckBox(self.page)
self.outlinesOn = True
self.OCheckBox.setChecked(True)
self.OCheckBox.setObjectName("OCheckBox")
self.OCheckBox.toggled.connect(self.toggle_masks)
self.gridLayout.addWidget(self.OCheckBox, 12, 0, 1, 1)
self.scale_on = True
self.SCheckBox = QtWidgets.QCheckBox(self.page)
self.SCheckBox.setObjectName("SCheckBox")
self.SCheckBox.setChecked(True)
self.SCheckBox.toggled.connect(self.toggle_scale)
self.gridLayout.addWidget(self.SCheckBox, 13, 0, 1, 1)
self.autosaveOn = True
self.ASCheckBox = QtWidgets.QCheckBox(self.page)
self.ASCheckBox.setObjectName("ASCheckBox")
self.ASCheckBox.setChecked(True)
self.ASCheckBox.toggled.connect(self.toggle_autosave)
self.ASCheckBox.setToolTip("If ON, masks/npy/list will be autosaved")
self.gridLayout.addWidget(self.ASCheckBox, 14, 0, 1, 1)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout.addItem(spacerItem, 15, 0, 1, 2)
# self.eraser_combobox = QtWidgets.QComboBox()
# self.eraser_combobox.addItems(["Pixal delete", "Pixal add"])
# self.gridLayout.addWidget(self.eraser_combobox, 8, 1, 1, 1)
self.RGBChoose = guiparts.RGBRadioButtons(self, 3, 1)
self.RGBDropDown = QtGui.QComboBox()
self.RGBDropDown.addItems(["rgb", "gray", "spectral", "red", "green", "blue"])
self.RGBDropDown.currentIndexChanged.connect(self.color_choose)
self.gridLayout.addWidget(self.RGBDropDown, 3, 0, 1, 1)
self.saturation_label = QtWidgets.QLabel("Saturation")
self.gridLayout.addWidget(self.saturation_label, 0, 0, 1, 1)
self.autobtn = QtGui.QCheckBox('Auto-adjust')
self.autobtn.setChecked(True)
self.autobtn.toggled.connect(self.toggle_autosaturation)
self.gridLayout.addWidget(self.autobtn, 0, 1, 1, 1)
self.currentZ = 0
self.zpos = QtGui.QLineEdit()
self.zpos.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.zpos.setText(str(self.currentZ))
self.zpos.returnPressed.connect(self.compute_scale)
self.zpos.setFixedWidth(20)
# self.gridLayout.addWidget(self.zpos, 0, 2, 1, 1)
self.slider = guiparts.RangeSlider(self)
self.slider.setMaximum(255)
self.slider.setMinimum(0)
self.slider.setHigh(255)
self.slider.setLow(0)
self.gridLayout.addWidget(self.slider, 2, 0, 1, 4)
self.slider.setObjectName("rangeslider")
self.page_2 = QtWidgets.QWidget()
self.page_2.setFixedWidth(340)
self.page_2.setObjectName("page_2")
self.gridLayout_2 = QtWidgets.QGridLayout(self.page_2)
self.gridLayout_2.setContentsMargins(0, 0, 0, 0)
self.gridLayout_2.setObjectName("gridLayout_2")
page2_l = 0
self.useGPU = QtWidgets.QCheckBox(self.page_2)
self.useGPU.setObjectName("useGPU")
self.gridLayout_2.addWidget(self.useGPU, page2_l, 0, 1, 1)
self.check_gpu()
page2_l += 1
self.label_4 = QtWidgets.QLabel(self.page_2)
self.label_4.setObjectName("label_4")
self.gridLayout_2.addWidget(self.label_4, page2_l, 0, 1, 1)
self.ModelChoose = QtWidgets.QComboBox(self.page_2)
self.ModelChoose.setObjectName("ModelChoose")
self.project_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + os.path.sep + ".")
self.model_dir = os.path.join(self.project_path, 'assets', 'pretrained_models')
print('self.model_dir', self.model_dir)
self.ModelChoose.addItem("")
self.ModelChoose.addItem("")
self.ModelChoose.addItem("")
self.gridLayout_2.addWidget(self.ModelChoose, page2_l, 1, 1, 1)
page2_l += 1
self.label_5 = QtWidgets.QLabel(self.page_2)
self.label_5.setObjectName("label_5")
self.gridLayout_2.addWidget(self.label_5, page2_l, 0, 1, 1)
self.jCBChanToSegment = QtWidgets.QComboBox(self.page_2)
self.jCBChanToSegment.setObjectName("jCBChanToSegment")
self.jCBChanToSegment.addItems(["gray", "red", "green", "blue"])
self.jCBChanToSegment.setCurrentIndex(0)
self.gridLayout_2.addWidget(self.jCBChanToSegment, page2_l, 1, 1, 1)
page2_l += 1
self.label_6 = QtWidgets.QLabel(self.page_2)
self.label_6.setObjectName("label_6")
self.gridLayout_2.addWidget(self.label_6, page2_l, 0, 1, 1)
self.jCBChan2 = QtWidgets.QComboBox(self.page_2)
self.jCBChan2.setObjectName("jCBChan2")
self.jCBChan2.addItems(["none", "red", "green", "blue"])
self.jCBChan2.setCurrentIndex(0)
self.gridLayout_2.addWidget(self.jCBChan2, page2_l, 1, 1, 1)
page2_l += 1
self.model_choose_btn = QtWidgets.QPushButton("Model file")
self.model_choose_btn.clicked.connect(self.model_file_dir_choose)
self.gridLayout_2.addWidget(self.model_choose_btn, page2_l, 0, 1, 1)
self.model_choose_btn = QtWidgets.QPushButton("Reset pre-trained")
self.model_choose_btn.clicked.connect(self.reset_pretrain_model)
self.gridLayout_2.addWidget(self.model_choose_btn, page2_l, 1, 1, 1)
page2_l += 1
self.label_null = QtWidgets.QLabel("")
self.gridLayout_2.addWidget(self.label_null, page2_l, 0, 1, 1)
slider_image_path = self.now_pyfile_path + '/assets/slider_handle.png'
self.sliderSheet = [
'QSlider::groove:vertical {',
'background-color: #D3D3D3;',
'position: absolute;',
'left: 4px; right: 4px;',
'}',
'',
'QSlider::groove:horizontal{',
'background-color:#D3D3D3;',
'position: absolute;',
'top: 4px; bottom: 4px;',
'}',
'',
'QSlider::handle:vertical {',
'height: 10px;',
'background-color: {0:s};'.format('#A9A9A9'),
'margin: 0 -4px;',
'}',
'',
'QSlider::handle:horizontal{',
'width: 10px;',
'border-image: url({0:s});'.format(slider_image_path),
'margin: -4px 0px -4px 0px;',
'}',
'QSlider::sub-page:horizontal',
'{',
'background-color: {0:s};'.format('#A9A9A9'),
'}',
'',
'QSlider::add-page {',
'background-color: {0:s};'.format('#D3D3D3'),
'}',
'',
'QSlider::sub-page {',
'background-color: {0:s};'.format('#D3D3D3'),
'}',
]
page2_l += 1
self.label_seg = QtWidgets.QLabel("Run seg for image in window")
self.gridLayout_2.addWidget(self.label_seg, page2_l, 0, 1, 4)
self.label_seg.setObjectName('label_seg')
page2_l += 1
self.label_3 = QtWidgets.QLabel(self.page_2)
self.label_3.setObjectName("label_3")
self.gridLayout_2.addWidget(self.label_3, page2_l, 0, 1, 4)
page2_l += 1
self.prev_selected = 0
self.diameter = 30
# self.Diameter = QtWidgets.QSpinBox(self.page_2)
self.Diameter = QtWidgets.QLineEdit(self.page_2)
self.Diameter.setObjectName("Diameter")
self.Diameter.setText(str(self.diameter))
self.Diameter.setFixedWidth(100)
self.Diameter.editingFinished.connect(self.compute_scale)
self.gridLayout_2.addWidget(self.Diameter, page2_l, 0, 1, 2)
self.SizeButton = QtWidgets.QPushButton(self.page_2)
self.SizeButton.setObjectName("SizeButton")
self.gridLayout_2.addWidget(self.SizeButton, page2_l, 1, 1, 1)
self.SizeButton.clicked.connect(self.calibrate_size)
self.SizeButton.setEnabled(False)
page2_l += 1
self.label_mode = QtWidgets.QLabel("Inference mode")
self.gridLayout_2.addWidget(self.label_mode, page2_l, 0, 1, 1)
self.NetAvg = QtWidgets.QComboBox(self.page_2)
self.NetAvg.setObjectName("NetAvg")
self.NetAvg.addItems(["run 1 net (fast)", "+ resample (slow)"])
self.gridLayout_2.addWidget(self.NetAvg, page2_l, 1, 1, 1)
page2_l += 1
self.invert = QtWidgets.QCheckBox(self.page_2)
self.invert.setObjectName("invert")
self.gridLayout_2.addWidget(self.invert, page2_l, 0, 1, 1)
page2_l += 1
self.ModelButton = QtWidgets.QPushButton(' Run segmentation ')
self.ModelButton.setObjectName("runsegbtn")
self.ModelButton.clicked.connect(self.compute_model)
self.gridLayout_2.addWidget(self.ModelButton, page2_l, 0, 1, 2)
self.ModelButton.setEnabled(False)
page2_l += 1
self.label_7 = QtWidgets.QLabel(self.page_2)
self.label_7.setObjectName("label_7")
self.gridLayout_2.addWidget(self.label_7, page2_l, 0, 1, 1)
self.threshold = 0.4
self.threshslider = QtWidgets.QSlider(self.page_2)
self.threshslider.setOrientation(QtCore.Qt.Horizontal)
self.threshslider.setObjectName("threshslider")
self.threshslider.setMinimum(1.0)
self.threshslider.setMaximum(30.0)
self.threshslider.setValue(31 - 4)
self.threshslider.valueChanged.connect(self.compute_cprob)
self.threshslider.setEnabled(False)
self.threshslider.setStyleSheet('\n'.join(self.sliderSheet))
self.gridLayout_2.addWidget(self.threshslider, page2_l, 1, 1, 1)
self.threshslider.setToolTip("Value: " + str(self.threshold))
page2_l += 1
self.label_8 = QtWidgets.QLabel(self.page_2)
self.label_8.setObjectName("label_8")
self.gridLayout_2.addWidget(self.label_8, page2_l, 0, 1, 1)
self.probslider = QtWidgets.QSlider(self.page_2)
self.probslider.setOrientation(QtCore.Qt.Horizontal)
self.probslider.setObjectName("probslider")
self.probslider.setStyleSheet('\n'.join(self.sliderSheet))
self.gridLayout_2.addWidget(self.probslider, page2_l, 1, 1, 1)
self.probslider.setMinimum(-6.0)
self.probslider.setMaximum(6.0)
self.probslider.setValue(0.0)
self.cellprob = 0.5
self.probslider.valueChanged.connect(self.compute_cprob)
self.probslider.setEnabled(False)
self.probslider.setToolTip("Value: " + str(self.cellprob))
page2_l += 1
self.label_batchseg = QtWidgets.QLabel("Batch segmentation")
self.label_batchseg.setObjectName('label_batchseg')
self.gridLayout_2.addWidget(self.label_batchseg, page2_l, 0, 1, 4)
page2_l += 1
self.label_bz = QtWidgets.QLabel("Batch size")
self.gridLayout_2.addWidget(self.label_bz, page2_l, 0, 1, 1)
self.bz_line = QtWidgets.QLineEdit()
self.bz_line.setPlaceholderText('Default: 8')
self.bz_line.setFixedWidth(120)
self.gridLayout_2.addWidget(self.bz_line, page2_l, 1, 1, 1)
page2_l += 1
self.dataset_inference_bnt = QtWidgets.QPushButton("Data path")
self.gridLayout_2.addWidget(self.dataset_inference_bnt, page2_l, 0, 1, 1)
self.dataset_inference_bnt.clicked.connect(self.batch_inference_dir_choose)
self.batch_inference_bnt = QtWidgets.QPushButton("Run batch")
self.batch_inference_bnt.setObjectName("binferbnt")
self.batch_inference_bnt.clicked.connect(self.batch_inference)
self.gridLayout_2.addWidget(self.batch_inference_bnt, page2_l, 1, 1, 1)
self.batch_inference_bnt.setEnabled(False)
page2_l += 1
self.label_getsingle = QtWidgets.QLabel("Get single instance")
self.label_getsingle.setObjectName('label_getsingle')
self.gridLayout_2.addWidget(self.label_getsingle, page2_l,0,1,2)
page2_l += 1
self.single_dir_bnt = QtWidgets.QPushButton("Data path")
self.single_dir_bnt.clicked.connect(self.single_dir_choose)
self.gridLayout_2.addWidget(self.single_dir_bnt, page2_l,0,1,1)
self.single_cell_btn = QtWidgets.QPushButton("Run batch")
self.single_cell_btn.setObjectName('single_cell_btn')
self.single_cell_btn.clicked.connect(self.get_single_cell)
self.gridLayout_2.addWidget(self.single_cell_btn, page2_l,1,1,1)
self.single_cell_btn.setEnabled(False)
page2_l += 1
spacerItem2 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_2.addItem(spacerItem2, page2_l, 0, 1, 2)
self.page_3 = QtWidgets.QWidget()
self.page_3.setFixedWidth(340)
self.page_3.setObjectName("page_3")
self.progress = QtWidgets.QProgressBar()
self.progress.setProperty("value", 0)
self.progress.setAlignment(QtCore.Qt.AlignCenter)
self.progress.setObjectName("progress")
self.gridLayout_3 = QtWidgets.QGridLayout(self.page_3)
self.gridLayout_3.setContentsMargins(0, 0, 0, 0)
self.gridLayout_3.setObjectName("gridLayout_3")
self.ftuseGPU = QtWidgets.QCheckBox("Use GPU")
self.ftuseGPU.setObjectName("ftuseGPU")
self.gridLayout_3.addWidget(self.ftuseGPU, 0, 0, 1, 2)
self.check_ftgpu()
self.ftdirbtn = QtWidgets.QPushButton("Dataset path")
self.ftdirbtn.clicked.connect(self.fine_tune_dir_choose)
self.gridLayout_3.addWidget(self.ftdirbtn, 0, 2, 1, 2)
self.label_10 = QtWidgets.QLabel("Model architecture")
self.gridLayout_3.addWidget(self.label_10, 1, 0, 1, 2)
self.ftmodelchooseBnt = QtWidgets.QComboBox()
self.ftmodelchooseBnt.addItems(["scellseg", "cellpose", "hover"])
self.gridLayout_3.addWidget(self.ftmodelchooseBnt, 1, 2, 1, 2)
self.label_11 = QtWidgets.QLabel("Chan to segment")
self.gridLayout_3.addWidget(self.label_11, 2, 0, 1, 2)
self.chan1chooseBnt = QtWidgets.QComboBox()
self.chan1chooseBnt.addItems(["gray", "red", "green", "blue"])
self.chan1chooseBnt.setCurrentIndex(0)
self.gridLayout_3.addWidget(self.chan1chooseBnt, 2, 2, 1, 2)
self.label_12 = QtWidgets.QLabel("Chan2 (optional)")
self.gridLayout_3.addWidget(self.label_12, 3, 0, 1, 2)
self.chan2chooseBnt = QtWidgets.QComboBox()
self.chan2chooseBnt.addItems(["none", "red", "green", "blue"])
self.chan2chooseBnt.setCurrentIndex(0)
self.gridLayout_3.addWidget(self.chan2chooseBnt, 3, 2, 1, 2)
self.label_13 = QtWidgets.QLabel("Fine-tune strategy")
self.gridLayout_3.addWidget(self.label_13, 4, 0, 1, 2)
self.stmodelchooseBnt = QtWidgets.QComboBox()
self.stmodelchooseBnt.addItems(["contrastive", "classic"])
self.gridLayout_3.addWidget(self.stmodelchooseBnt, 4, 2, 1, 2)
self.label_14 = QtWidgets.QLabel("Epoch")
self.gridLayout_3.addWidget(self.label_14, 5, 0, 1, 2)
self.epoch_line = QtWidgets.QLineEdit()
self.epoch_line.setPlaceholderText('Default: 100')
self.gridLayout_3.addWidget(self.epoch_line, 5, 2, 1, 2)
self.label_ftbz = QtWidgets.QLabel("Batch size")
self.gridLayout_3.addWidget(self.label_ftbz, 6, 0, 1, 2)
self.ftbz_line = QtWidgets.QLineEdit()
self.ftbz_line.setPlaceholderText('Default: 8')
self.gridLayout_3.addWidget(self.ftbz_line, 6, 2, 1, 2)
self.ftbnt = QtWidgets.QPushButton("Start fine-tuning")
self.ftbnt.setObjectName('ftbnt')
self.ftbnt.clicked.connect(self.fine_tune)
self.gridLayout_3.addWidget(self.ftbnt, 7, 0, 1, 4)
self.ftbnt.setEnabled(False)
spacerItem3 = QtWidgets.QSpacerItem(20, 320, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_3.addItem(spacerItem3, 8, 0, 1, 1)
#initialize scroll size
self.scroll = QtGui.QScrollBar(QtCore.Qt.Horizontal)
# self.scroll.setMaximum(10)
# self.scroll.valueChanged.connect(self.move_in_Z)
# self.gridLayout_3.addWidget(self.scroll)
spacerItem2 = QtWidgets.QSpacerItem(20, 320, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_3.addItem(spacerItem2)
self.toolBox.addItem(self.page, "")
self.toolBox.addItem(self.page_3, "")
self.toolBox.addItem(self.page_2, "")
self.verticalLayout_2.addWidget(self.splitter)
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
self.toolBox.setCurrentIndex(2)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
self.centralwidget.setFocusPolicy(QtCore.Qt.StrongFocus)
self.reset()
def show_menu(self, point):
# print(point.x())
# item = self.listView.itemAt(point)
# print(item)
temp_cell_idx = self.listView.rowAt(point.y())
self.list_select_cell(temp_cell_idx+1)
# print(self.myCellList[temp_cell_idx])
if self.listView.rowAt(point.y()) >= 0:
self.contextMenu = QtWidgets.QMenu()
self.actionA = QtGui.QAction("Delete this cell", self)
self.actionB = QtGui.QAction("Edit this cell", self)
self.contextMenu.addAction(self.actionA)
self.contextMenu.addAction(self.actionB)
self.contextMenu.popup(QtGui.QCursor.pos())
self.actionA.triggered.connect(lambda: self.remove_cell(temp_cell_idx + 1))
self.actionB.triggered.connect(lambda: self.edit_cell(temp_cell_idx + 1))
self.contextMenu.show()
def edit_cell(self, index):
self.select_cell(index)
self.eraser_button.setChecked(True)
self.toolBox.setCurrentIndex(0)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Scellseg"))
self.CHCheckBox.setText(_translate("MainWindow", "Crosshair on [C]"))
self.MCheckBox.setText(_translate("MainWindow", "Masks on [X]"))
self.label_2.setText(_translate("MainWindow", "Brush size"))
self.OCheckBox.setText(_translate("MainWindow", "Outlines on [Z]"))
# self.ServerButton.setText(_translate("MainWindow", "send manual seg. to server"))
self.toolBox.setItemText(self.toolBox.indexOf(self.page), _translate("MainWindow", "View and Draw"))
self.SizeButton.setText(_translate("MainWindow", "Calibrate diam"))
self.label_3.setText(_translate("MainWindow", "Cell diameter (pixels):"))
self.useGPU.setText(_translate("MainWindow", "Use GPU"))
self.SCheckBox.setText(_translate("MainWindow", "Scale disk on [S]"))
self.ASCheckBox.setText(_translate("MainWindow", "Autosave [P]"))
self.SSCheckBox.setText(_translate("MainWindow", "Single stroke"))
self.eraser_button.setText(_translate("MainWindow", "Edit mask [E]"))
self.ModelChoose.setItemText(0, _translate("MainWindow", "scellseg"))
self.ModelChoose.setItemText(1, _translate("MainWindow", "cellpose"))
self.ModelChoose.setItemText(2, _translate("MainWindow", "hover"))
self.invert.setText(_translate("MainWindow", "Invert grayscale"))
self.label_4.setText(_translate("MainWindow", "Model architecture"))
self.label_5.setText(_translate("MainWindow", "Chan to segment"))
self.label_6.setText(_translate("MainWindow", "Chan2 (optional)"))
self.toolBox.setItemText(self.toolBox.indexOf(self.page_2), _translate("MainWindow", "Inference"))
self.label_7.setText(_translate("MainWindow", "Model match TH"))
self.label_8.setText(_translate("MainWindow", "Cell prob TH"))
self.toolBox.setItemText(self.toolBox.indexOf(self.page_3), _translate("MainWindow", "Fine-tune"))
# self.menuFile.setTitle(_translate("MainWindow", "File"))
# self.menuEdit.setTitle(_translate("MainWindow", "Edit"))
# self.menuHelp.setTitle(_translate("MainWindow", "Help"))
self.ImFolder = ''
self.ImNameSet = []
self.CurImId = 0
self.CurFolder = os.getcwd()
self.DefaultImFolder = self.CurFolder
def setWinTop(self):
print('get')
def OpenDirDropped(self, curFile=None):
# dir dropped callback func
if self.ImFolder != '':
self.ImNameSet = []
self.ImNameRowSet = os.listdir(self.ImFolder)
# print(self.ImNameRowSet)
for tmp in self.ImNameRowSet:
ext = os.path.splitext(tmp)[-1]
if ext in ['.png', '.jpg', '.jpeg', '.tif', '.tiff', '.jfif'] and '_mask' not in tmp:
self.ImNameSet.append(tmp)
self.ImNameSet.sort()
self.ImPath = self.ImFolder + r'/' + self.ImNameSet[0]
ImNameSetNosuffix = [os.path.splitext(imNameSeti)[0] for imNameSeti in self.ImNameSet]
# pix = QtGui.QPixmap(self.ImPath)
# self.ImShowLabel.setPixmap(pix)
if curFile is not None:
curFile = os.path.splitext(curFile)[0]
try:
self.CurImId = ImNameSetNosuffix.index(curFile)
print(self.CurImId)
except:
curFile = curFile.replace('_cp_masks', '')
curFile = curFile.replace('_masks', '')
self.CurImId = ImNameSetNosuffix.index(curFile)
print(self.CurImId)
return
# self.state_label.setText("", color='#FF6A56')
else:
self.CurImId = 0
iopart._load_image(self, filename=self.ImPath)
self.initialize_listView()
else:
print('Please Find Another File Folder')
def OpenDirBntClicked(self):
# dir choosing callback function
self.ImFolder = QtWidgets.QFileDialog.getExistingDirectory(None, "select folder", self.DefaultImFolder)
if self.ImFolder != '':
self.ImNameSet = []
self.ImNameRowSet = os.listdir(self.ImFolder)
# print(self.ImNameRowSet)
for tmp in self.ImNameRowSet:
ext = os.path.splitext(tmp)[-1]
if ext in ['.png', '.jpg', '.jpeg', '.tif', '.tiff', '.jfif'] and '_mask' not in tmp:
self.ImNameSet.append(tmp)
self.ImNameSet.sort()
print(self.ImNameSet)
self.ImPath = self.ImFolder + r'/' + self.ImNameSet[0]
# pix = QtGui.QPixmap(self.ImPath)
# self.ImShowLabel.setPixmap(pix)
self.CurImId = 0
iopart._load_image(self, filename=self.ImPath)
self.initialize_listView()
else:
print('Please Find Another File Folder')
def PreImBntClicked(self):
self.auto_save()
# show previous image
self.ImFolder = self.ImFolder
self.ImNameSet = self.ImNameSet
self.CurImId = self.CurImId
self.ImNum = len(self.ImNameSet)
print(self.ImFolder, self.ImNameSet)
self.CurImId = self.CurImId - 1
if self.CurImId >= 0:  # the first image has no previous one
self.ImPath = self.ImFolder + r'/' + self.ImNameSet[self.CurImId]
iopart._load_image(self, filename=self.ImPath)
self.initialize_listView()
if self.CurImId < 0:
self.CurImId = 0
self.state_label.setText("This is the first image", color='#FF6A56')
def NextImBntClicked(self):
self.auto_save()
# show next image
self.ImFolder = self.ImFolder
self.ImNameSet = self.ImNameSet
self.CurImId = self.CurImId
self.ImNum = len(self.ImNameSet)
if self.CurImId < self.ImNum - 1:
self.ImPath = self.ImFolder + r'/' + self.ImNameSet[self.CurImId + 1]
iopart._load_image(self, filename=self.ImPath)
self.initialize_listView()
self.CurImId = self.CurImId + 1
else:
self.state_label.setText("This is the last image", color='#FF6A56')
def eraser_model_change(self):
if self.eraser_button.isChecked() == True:
self.outlinesOn = False
self.OCheckBox.setChecked(False)
# self.OCheckBox.setEnabled(False)
QtWidgets.QApplication.setOverrideCursor(QtCore.Qt.CrossCursor)
# self.cur_size = self.brush_size * 6
# cursor = Qt.QPixmap("./assets/eraser.png")
# cursor_scaled = cursor.scaled(self.cur_size, self.cur_size)
# cursor_set = Qt.QCursor(cursor_scaled, self.cur_size/2, self.cur_size/2)
# QtWidgets.QApplication.setOverrideCursor(cursor_set)
self.update_plot()
else:
QtWidgets.QApplication.setOverrideCursor(QtCore.Qt.CrossCursor)
def showChoosen(self, item):
temp_cell_idx = int(item.row())
self.list_select_cell(int(temp_cell_idx) + 1)
def save_cell_list(self):
self.listView.selectAll()
self.myCellList = []
for item in self.listView.selectedIndexes():
data = item.data()
self.myCellList.append(data)
self.cell_list_name = os.path.splitext(self.filename)[0] + "_instance_list.txt"
np.savetxt(self.cell_list_name, np.array(self.myCellList), fmt="%s")
self.listView.clearSelection()
def save_cell_list_menu(self):
self.listView.selectAll()
self.myCellList = []
for item in self.listView.selectedIndexes():
data = item.data()
self.myCellList.append(data)
self.cell_list_name = os.path.splitext(self.filename)[0] + "_instance_list.txt"
np.savetxt(self.cell_list_name, np.array(self.myCellList), fmt="%s")
self.state_label.setText("Saved outlines", color='#39B54A')
self.listView.clearSelection()
def help_window(self):
HW = guiparts.HelpWindow(self)
HW.show()
def gui_window(self):
EG = guiparts.ExampleGUI(self)
EG.show()
def toggle_autosave(self):
if self.ASCheckBox.isChecked():
self.autosaveOn = True
else:
self.autosaveOn = False
print('self.autosaveOn', self.autosaveOn)
def toggle_sstroke(self):
if self.SSCheckBox.isChecked():
self.sstroke_On = True
else:
self.sstroke_On = False
print('self.sstroke_On', self.sstroke_On)
def toggle_autosaturation(self):
if self.autobtn.isChecked():
self.compute_saturation()
self.update_plot()
def cross_hairs(self):
if self.CHCheckBox.isChecked():
self.p0.addItem(self.vLine, ignoreBounds=True)
self.p0.addItem(self.hLine, ignoreBounds=True)
else:
self.p0.removeItem(self.vLine)
self.p0.removeItem(self.hLine)
def plot_clicked(self, event):
if event.double():
if event.button() == QtCore.Qt.LeftButton:
print("will initialize the range")
if (event.modifiers() != QtCore.Qt.ShiftModifier and
event.modifiers() != QtCore.Qt.AltModifier):
try:
self.p0.setYRange(0,self.Ly+self.pr)
except:
self.p0.setYRange(0,self.Ly)
self.p0.setXRange(0,self.Lx)
def mouse_moved(self, pos):
# print('moved')
items = self.win.scene().items(pos)
for x in items:
if x == self.p0:
mousePoint = self.p0.mapSceneToView(pos)
if self.CHCheckBox.isChecked():
self.vLine.setPos(mousePoint.x())
self.hLine.setPos(mousePoint.y())
# else:
# QtWidgets.QApplication.restoreOverrideCursor()
# QtWidgets.QApplication.setOverrideCursor(QtCore.Qt.DefaultCursor)
def color_choose(self):
self.color = self.RGBDropDown.currentIndex()
self.view = 0
self.RGBChoose.button(self.view).setChecked(True)
self.update_plot()
def update_ztext(self):
zpos = self.currentZ
try:
zpos = int(self.zpos.text())
except:
print('ERROR: zposition is not a number')
self.currentZ = max(0, min(self.NZ - 1, zpos))
self.zpos.setText(str(self.currentZ))
self.scroll.setValue(self.currentZ)
def calibrate_size(self):
model_type = self.ModelChoose.currentText()
pretrained_model = os.path.join(self.model_dir, model_type)
self.initialize_model(pretrained_model=pretrained_model, gpu=self.useGPU.isChecked(),
model_type=model_type)
diams, _ = self.model.sz.eval(self.stack[self.currentZ].copy(), invert=self.invert.isChecked(),
channels=self.get_channels(), progress=self.progress)
diams = np.maximum(5.0, diams)
print('estimated diameter of cells using %s model = %0.1f pixels' %
(self.current_model, diams))
self.state_label.setText('Estimated diameter of cells using %s model = %0.1f pixels' %
(self.current_model, diams), color='#969696')
self.Diameter.setText('%0.1f'%diams)
self.diameter = diams
self.compute_scale()
self.progress.setValue(100)
def enable_buttons(self):
# self.X2Up.setEnabled(True)
# self.X2Down.setEnabled(True)
self.ModelButton.setEnabled(True)
self.SizeButton.setEnabled(True)
self.saveSet.setEnabled(True)
self.savePNG.setEnabled(True)
self.saveOutlines.setEnabled(True)
self.saveCellList.setEnabled(True)
self.saveAll.setEnabled(True)
self.loadMasks.setEnabled(True)
self.loadManual.setEnabled(True)
self.loadCellList.setEnabled(True)
self.toggle_mask_ops()
self.update_plot()
self.setWindowTitle('Scellseg @ ' + self.filename)
def add_set(self):
if len(self.current_point_set) > 0:
# print(self.current_point_set)
# print(np.array(self.current_point_set).shape)
self.current_point_set = np.array(self.current_point_set)
while len(self.strokes) > 0:
self.remove_stroke(delete_points=False)
if len(self.current_point_set) > 8:
col_rand = np.random.randint(1000)
color = self.colormap[col_rand, :3]
median = self.add_mask(points=self.current_point_set, color=color)
if median is not None:
self.removed_cell = []
self.toggle_mask_ops()
self.cellcolors.append(color)
self.ncells += 1
self.add_list_item()
self.ismanual = np.append(self.ismanual, True)
# if self.NZ == 1:
# # only save after each cell if single image
# iopart._save_sets(self)
self.current_stroke = []
self.strokes = []
self.current_point_set = []
self.update_plot()
def add_mask(self, points=None, color=None):
# loop over z values
median = []
if points.shape[1] < 3:
points = np.concatenate((np.zeros((points.shape[0], 1), np.int32), points), axis=1)
zdraw = np.unique(points[:, 0])
zrange = np.arange(zdraw.min(), zdraw.max() + 1, 1, int)
zmin = zdraw.min()
pix = np.zeros((2, 0), np.uint16)
mall = np.zeros((len(zrange), self.Ly, self.Lx), bool)
k = 0
for z in zdraw:
iz = points[:, 0] == z
vr = points[iz, 1]
vc = points[iz, 2]
# get points inside drawn points
mask = np.zeros((np.ptp(vr) + 4, np.ptp(vc) + 4), np.uint8)
pts = np.stack((vc - vc.min() + 2, vr - vr.min() + 2), axis=-1)[:, np.newaxis, :]
mask = cv2.fillPoly(mask, [pts], (255, 0, 0))
ar, ac = np.nonzero(mask)
ar, ac = ar + vr.min() - 2, ac + vc.min() - 2
# get dense outline
contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
pvc, pvr = contours[-2][0].squeeze().T
vr, vc = pvr + vr.min() - 2, pvc + vc.min() - 2
# concatenate all points
ar, ac = np.hstack((np.vstack((vr, vc)), np.vstack((ar, ac))))
# if these pixels are overlapping with another cell, reassign them
ioverlap = self.cellpix[z][ar, ac] > 0
if (~ioverlap).sum() < 8:
print('ERROR: cell too small without overlaps, not drawn')
return None
elif ioverlap.sum() > 0:
ar, ac = ar[~ioverlap], ac[~ioverlap]
# compute outline of new mask
mask = np.zeros((np.ptp(ar) + 4, np.ptp(ac) + 4), np.uint8)
mask[ar - ar.min() + 2, ac - ac.min() + 2] = 1
contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
pvc, pvr = contours[-2][0].squeeze().T
vr, vc = pvr + ar.min() - 2, pvc + ac.min() - 2
self.draw_mask(z, ar, ac, vr, vc, color)
median.append(np.array([np.median(ar), np.median(ac)]))
mall[z - zmin, ar, ac] = True
pix = np.append(pix, np.vstack((ar, ac)), axis=-1)
mall = mall[:, pix[0].min():pix[0].max() + 1, pix[1].min():pix[1].max() + 1].astype(np.float32)
ymin, xmin = pix[0].min(), pix[1].min()
if len(zdraw) > 1:
mall, zfill = interpZ(mall, zdraw - zmin)
for z in zfill:
mask = mall[z].copy()
ar, ac = np.nonzero(mask)
ioverlap = self.cellpix[z + zmin][ar + ymin, ac + xmin] > 0
if (~ioverlap).sum() < 5:
print('WARNING: stroke on plane %d not included due to overlaps' % z)
elif ioverlap.sum() > 0:
mask[ar[ioverlap], ac[ioverlap]] = 0
ar, ac = ar[~ioverlap], ac[~ioverlap]
# compute outline of mask
outlines = utils.masks_to_outlines(mask)
vr, vc = np.nonzero(outlines)
vr, vc = vr + ymin, vc + xmin
ar, ac = ar + ymin, ac + xmin
self.draw_mask(z + zmin, ar, ac, vr, vc, color)
self.zdraw.append(zdraw)
return median
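    # Input sketch for add_mask(): points is expected as an Nx3 array of (z, y, x) stroke
    # vertices as collected by add_set(); a 2-column (y, x) set gets a zero z column
    # prepended at the top of the method. color is an RGB triple, as passed in by add_set().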
def move_in_Z(self):
if self.loaded:
self.currentZ = min(self.NZ, max(0, int(self.scroll.value())))
self.zpos.setText(str(self.currentZ))
self.update_plot()
def make_viewbox(self):
# intialize the main viewport widget
# print("making viewbox")
self.p0 = guiparts.ViewBoxNoRightDrag(
parent=self,
lockAspect=True,
name="plot1",
border=[100, 100, 100],
invertY=True
)
# self.p0.setBackgroundColor(color='#292929')
self.brush_size = 3
self.win.addItem(self.p0, 0, 0)
self.p0.setMenuEnabled(False)
self.p0.setMouseEnabled(x=True, y=True)
self.img = pg.ImageItem(viewbox=self.p0, parent=self, axisOrder='row-major')
self.img.autoDownsample = False
# self.null_image = np.ones((200,200))
# self.img.setImage(self.null_image)
self.layer = guiparts.ImageDraw(viewbox=self.p0, parent=self)
self.layer.setLevels([0, 255])
self.scale = pg.ImageItem(viewbox=self.p0, parent=self)
self.scale.setLevels([0, 255])
self.p0.scene().contextMenuItem = self.p0
# self.p0.setMouseEnabled(x=False,y=False)
self.Ly, self.Lx = 512, 512
self.p0.addItem(self.img)
self.p0.addItem(self.layer)
self.p0.addItem(self.scale)
# guiparts.make_quadrants(self)
def get_channels(self):
channels = [self.jCBChanToSegment.currentIndex(), self.jCBChan2.currentIndex()]
return channels
def compute_saturation(self):
# compute percentiles from stack
self.saturation = []
self.slider._low = np.percentile(self.stack[0].astype(np.float32), 1)
self.slider._high = np.percentile(self.stack[0].astype(np.float32), 99)
for n in range(len(self.stack)):
print('n,', n)
self.saturation.append([np.percentile(self.stack[n].astype(np.float32), 1),
np.percentile(self.stack[n].astype(np.float32), 99)])
def keyReleaseEvent(self, event):
# print('self.loaded', self.loaded)
if self.loaded:
# self.p0.setMouseEnabled(x=True, y=True)
if (event.modifiers() != QtCore.Qt.ControlModifier and
event.modifiers() != QtCore.Qt.ShiftModifier and
event.modifiers() != QtCore.Qt.AltModifier) and not self.in_stroke:
updated = False
if len(self.current_point_set) > 0:
if event.key() == QtCore.Qt.Key_Return:
self.add_set()
if self.NZ > 1:
if event.key() == QtCore.Qt.Key_Left:
self.currentZ = max(0, self.currentZ - 1)
self.zpos.setText(str(self.currentZ))
elif event.key() == QtCore.Qt.Key_Right:
self.currentZ = min(self.NZ - 1, self.currentZ + 1)
self.zpos.setText(str(self.currentZ))
else:
if event.key() == QtCore.Qt.Key_M:
self.MCheckBox.toggle()
if event.key() == QtCore.Qt.Key_O:
self.OCheckBox.toggle()
if event.key() == QtCore.Qt.Key_C:
self.CHCheckBox.toggle()
if event.key() == QtCore.Qt.Key_S:
self.SCheckBox.toggle()
if event.key() == QtCore.Qt.Key_E:
self.eraser_button.toggle()
self.toolBox.setCurrentIndex(0)
if event.key() == QtCore.Qt.Key_P:
self.ASCheckBox.toggle()
if event.key() == QtCore.Qt.Key_PageDown:
self.view = (self.view + 1) % (len(self.RGBChoose.bstr))
print('self.view ', self.view)
self.RGBChoose.button(self.view).setChecked(True)
elif event.key() == QtCore.Qt.Key_PageUp:
self.view = (self.view - 1) % (len(self.RGBChoose.bstr))
print('self.view ', self.view)
self.RGBChoose.button(self.view).setChecked(True)
# can change background or stroke size if cell not finished
if event.key() == QtCore.Qt.Key_Up:
self.color = (self.color - 1) % (6)
print('self.color', self.color)
self.RGBDropDown.setCurrentIndex(self.color)
elif event.key() == QtCore.Qt.Key_Down:
self.color = (self.color + 1) % (6)
print('self.color', self.color)
self.RGBDropDown.setCurrentIndex(self.color)
if (event.key() == QtCore.Qt.Key_BracketLeft or
event.key() == QtCore.Qt.Key_BracketRight):
count = self.BrushChoose.count()
gci = self.BrushChoose.currentIndex()
if event.key() == QtCore.Qt.Key_BracketLeft:
gci = max(0, gci - 1)
else:
gci = min(count - 1, gci + 1)
self.BrushChoose.setCurrentIndex(gci)
self.brush_choose()
self.state_label.setText("Brush size: %s"%(2*gci+1), color='#969696')
if not updated:
self.update_plot()
elif event.modifiers() == QtCore.Qt.ControlModifier:
if event.key() == QtCore.Qt.Key_Z:
self.undo_action()
if event.key() == QtCore.Qt.Key_0:
self.clear_all()
def keyPressEvent(self, event):
if event.modifiers() == QtCore.Qt.ControlModifier:
if event.key() == QtCore.Qt.Key_1:
self.toolBox.setCurrentIndex(0)
if event.key() == QtCore.Qt.Key_2:
self.toolBox.setCurrentIndex(1)
if event.key() == QtCore.Qt.Key_3:
self.toolBox.setCurrentIndex(2)
if event.key() == QtCore.Qt.Key_Minus or event.key() == QtCore.Qt.Key_Equal:
self.p0.keyPressEvent(event)
def chanchoose(self, image):
if image.ndim > 2:
if self.jCBChanToSegment.currentIndex() == 0:
image = image.astype(np.float32).mean(axis=-1)[..., np.newaxis]
else:
chanid = [self.jCBChanToSegment.currentIndex() - 1]
if self.jCBChan2.currentIndex() > 0:
chanid.append(self.jCBChan2.currentIndex() - 1)
image = image[:, :, chanid].astype(np.float32)
return image
def initialize_model(self, gpu=False, pretrained_model=False, model_type='scellseg',
diam_mean=30., net_avg=False, device=None, nclasses=3,
residual_on=True, style_on=True, concatenation=False, update_step=1,
last_conv_on=True, attn_on=False, dense_on=False, style_scale_on=True,
task_mode='cellpose', model=None):
self.current_model = model_type
self.model = models.sCellSeg(gpu=gpu, pretrained_model=pretrained_model, model_type=model_type,
diam_mean=diam_mean, net_avg=net_avg, device=device, nclasses=nclasses,
residual_on=residual_on, style_on=style_on, concatenation=concatenation, update_step=update_step,
last_conv_on=last_conv_on, attn_on=attn_on, dense_on=dense_on, style_scale_on=style_scale_on,
task_mode=task_mode, model=model)
def set_compute_thread(self):
self.seg_thread = threading.Thread(target = self.compute_model)
        self.seg_thread.daemon = True
self.seg_thread.start()
def compute_model(self):
self.progress.setValue(0)
self.update_plot()
self.state_label.setText("Running...", color='#969696')
QtWidgets.qApp.processEvents() # force update gui
if True:
tic = time.time()
self.clear_all()
self.flows = [[], [], []]
pretrained_model = os.path.join(self.model_dir, self.ModelChoose.currentText())
self.initialize_model(pretrained_model=pretrained_model, gpu=self.useGPU.isChecked(),
model_type=self.ModelChoose.currentText())
print('using model %s' % self.current_model)
self.progress.setValue(10)
do_3D = False
if self.NZ > 1:
do_3D = True
data = self.stack.copy()
else:
data = self.stack[0].copy()
channels = self.get_channels()
# print(channels)
self.diameter = float(self.Diameter.text())
self.update_plot()
try:
# net_avg = self.NetAvg.currentIndex() == 0
resample = self.NetAvg.currentIndex() == 1 # we need modify from here
min_size = ((30. // 2) ** 2) * np.pi * 0.05
try:
finetune_model = self.model_file_path[0]
print('ft_model', finetune_model)
except:
finetune_model = None
# inference
masks, flows, _ = self.model.inference(finetune_model=finetune_model, net_avg=False,
query_images=data, channel=channels,
diameter=self.diameter,
resample=resample, flow_threshold=self.threshold,
cellprob_threshold=self.cellprob,
min_size=min_size, eval_batch_size=8,
postproc_mode=self.model.postproc_mode,
progress=self.progress)
self.state_label.setText(
'%d cells found with scellseg net in %0.3fs' % (
len(np.unique(masks)[1:]), time.time() - tic),
color='#39B54A')
# self.state_label.setStyleSheet("color:green;")
self.update_plot()
self.progress.setValue(75)
self.flows[0] = flows[0].copy()
self.flows[1] = (np.clip(utils.normalize99(flows[2].copy()), 0, 1) * 255).astype(np.uint8)
if not do_3D:
masks = masks[np.newaxis, ...]
self.flows[0] = transforms.resize_image(self.flows[0], masks.shape[-2], masks.shape[-1],
interpolation=cv2.INTER_NEAREST)
self.flows[1] = transforms.resize_image(self.flows[1], masks.shape[-2], masks.shape[-1])
if not do_3D:
self.flows[2] = np.zeros(masks.shape[1:], dtype=np.uint8)
self.flows = [self.flows[n][np.newaxis, ...] for n in range(len(self.flows))]
else:
self.flows[2] = (flows[1][0] / 10 * 127 + 127).astype(np.uint8)
if len(flows) > 2:
self.flows.append(flows[3])
self.flows.append(np.concatenate((flows[1], flows[2][np.newaxis, ...]), axis=0))
print()
self.progress.setValue(80)
z = 0
self.masksOn = True
self.outlinesOn = True
self.MCheckBox.setChecked(True)
self.OCheckBox.setChecked(True)
iopart._masks_to_gui(self, masks, outlines=None)
self.progress.setValue(100)
self.first_load_listView()
# self.toggle_server(off=True)
if not do_3D:
self.threshslider.setEnabled(True)
self.probslider.setEnabled(True)
self.masks_for_save = masks
except Exception as e:
print('NET ERROR: %s' % e)
self.progress.setValue(0)
return
else: # except Exception as e:
print('ERROR: %s' % e)
print('Finished inference')
def batch_inference(self):
self.progress.setValue(0)
# print('threshold', self.threshold, self.cellprob)
# self.update_plot()
if True:
tic = time.time()
self.clear_all()
model_type =self.ModelChoose.currentText()
pretrained_model = os.path.join(self.model_dir, model_type)
self.initialize_model(pretrained_model=pretrained_model, gpu=self.useGPU.isChecked(),
model_type=model_type)
print('using model %s' % self.current_model)
self.progress.setValue(10)
channels = self.get_channels()
self.diameter = float(self.Diameter.text())
try:
# net_avg = self.NetAvg.currentIndex() < 2
# resample = self.NetAvg.currentIndex() == 1
min_size = ((30. // 2) ** 2) * np.pi * 0.05
try:
finetune_model = self.model_file_path[0]
print('ft_model', finetune_model)
except:
finetune_model = None
try:
dataset_path = self.batch_inference_dir
except:
dataset_path = None
# batch inference
bz = 8 if self.bz_line.text() == '' else int(self.bz_line.text())
save_name = self.current_model + '_' + dataset_path.split('\\')[-1]
utils.set_manual_seed(5)
try:
shotset = dataset.DatasetShot(eval_dir=dataset_path, class_name=None, image_filter='_img',
mask_filter='_masks',
channels=channels, task_mode=self.model.task_mode, active_ind=None,
rescale=True)
iopart._initialize_image_portable(self, iopart.imread(self.now_pyfile_path + '/assets/Loading1.png'),
resize=self.resize, X2=0)
self.state_label.setText("Running...", color='#969696')
QtWidgets.qApp.processEvents() # force update gui
except:
iopart._initialize_image_portable(self, iopart.imread(self.now_pyfile_path + '/assets/Loading4.png'),
resize=self.resize, X2=0)
self.state_label.setText("Please choose right data path",
color='#FF6A56')
print("Please choose right data path")
self.batch_inference_bnt.setEnabled(False)
return
queryset = dataset.DatasetQuery(dataset_path, class_name=None, image_filter='_img',
mask_filter='_masks')
query_image_names = queryset.query_image_names
diameter = shotset.md
print('>>>> mean diameter of this style,', round(diameter, 3))
self.model.net.save_name = save_name
self.img.setImage(iopart.imread(self.now_pyfile_path + '/assets/Loading2.png'), autoLevels=False, lut=None)
self.state_label.setText("Running...", color='#969696')
QtWidgets.qApp.processEvents() # force update gui
# flow_threshold was set to 0.4, and cellprob_threshold was set to 0.5
try:
masks, flows, _ = self.model.inference(finetune_model=finetune_model, net_avg=False,
query_image_names=query_image_names, channel=channels,
diameter=diameter,
resample=False, flow_threshold=0.4,
cellprob_threshold=0.5,
min_size=min_size, eval_batch_size=bz,
postproc_mode=self.model.postproc_mode,
progress=self.progress)
except RuntimeError:
iopart._initialize_image_portable(self,
iopart.imread(self.now_pyfile_path + '/assets/Loading4.png'),
resize=self.resize, X2=0)
self.state_label.setText("Batch size is too big, please set smaller",
color='#FF6A56')
print("Batch size is too big, please set smaller")
return
# save output images
diams = np.ones(len(query_image_names)) * diameter
imgs = [io.imread(query_image_name) for query_image_name in query_image_names]
io.masks_flows_to_seg(imgs, masks, flows, diams, query_image_names,
[channels for i in range(len(query_image_names))])
io.save_to_png(imgs, masks, flows, query_image_names, labels=None, aps=None,
task_mode=self.model.task_mode)
self.masks_for_save = masks
except:
iopart._initialize_image_portable(self, iopart.imread(self.now_pyfile_path + '/assets/Loading4.png'), resize=self.resize,
X2=0)
self.state_label.setText("Please choose right data path",
color='#FF6A56')
return
else: # except Exception as e:
print('ERROR: %s' % e)
self.img.setImage(iopart.imread(self.now_pyfile_path + '/assets/Loading3.png'), autoLevels=False, lut=None)
self.state_label.setText('Finished inference in %0.3fs!'%(time.time() - tic), color='#39B54A')
self.batch_inference_bnt.setEnabled(False)
def compute_cprob(self):
rerun = False
if self.cellprob != self.probslider.value():
rerun = True
self.cellprob = self.probslider.value()
if self.threshold != (31 - self.threshslider.value()) / 10.:
rerun = True
self.threshold = (31 - self.threshslider.value()) / 10.
if not rerun:
return
if self.threshold == 3.0 or self.NZ > 1:
thresh = None
print('computing masks with cell prob=%0.3f, no flow error threshold' %
(self.cellprob))
else:
thresh = self.threshold
print('computing masks with cell prob=%0.3f, flow error threshold=%0.3f' %
(self.cellprob, thresh))
maski = dynamics.get_masks(self.flows[3].copy(), iscell=(self.flows[4][-1] > self.cellprob),
flows=self.flows[4][:-1], threshold=thresh)
if self.NZ == 1:
maski = utils.fill_holes_and_remove_small_masks(maski)
maski = transforms.resize_image(maski, self.cellpix.shape[-2], self.cellpix.shape[-1],
interpolation=cv2.INTER_NEAREST)
self.masksOn = True
self.outlinesOn = True
self.MCheckBox.setChecked(True)
self.OCheckBox.setChecked(True)
if maski.ndim < 3:
maski = maski[np.newaxis, ...]
print('%d cells found' % (len(np.unique(maski)[1:])))
iopart._masks_to_gui(self, maski, outlines=None)
self.threshslider.setToolTip("Value: " + str(self.threshold))
self.probslider.setToolTip("Value: " + str(self.cellprob))
self.first_load_listView()
self.show()
def reset(self):
# ---- start sets of points ---- #
self.selected = 0
self.X2 = 0
self.resize = -1
self.onechan = False
self.loaded = False
self.channel = [0, 1]
self.current_point_set = []
self.in_stroke = False
self.strokes = []
self.stroke_appended = True
self.ncells = 0
self.zdraw = []
self.removed_cell = []
self.cellcolors = [np.array([255, 255, 255])]
# -- set menus to default -- #
self.color = 0
self.RGBDropDown.setCurrentIndex(self.color)
self.view = 0
self.RGBChoose.button(self.view).setChecked(True)
self.BrushChoose.setCurrentIndex(1)
self.CHCheckBox.setChecked(False)
self.OCheckBox.setEnabled(True)
self.SSCheckBox.setChecked(True)
# -- zero out image stack -- #
self.opacity = 128 # how opaque masks should be
self.outcolor = [200, 200, 255, 200]
self.NZ, self.Ly, self.Lx = 1, 512, 512
if self.autobtn.isChecked():
self.saturation = [[0, 255] for n in range(self.NZ)]
self.currentZ = 0
self.flows = [[], [], [], [], [[]]]
self.stack = np.zeros((1, self.Ly, self.Lx, 3))
# masks matrix
self.layers = 0 * np.ones((1, self.Ly, self.Lx, 4), np.uint8)
# image matrix with a scale disk
self.radii = 0 * np.ones((self.Ly, self.Lx, 4), np.uint8)
self.cellpix = np.zeros((1, self.Ly, self.Lx), np.uint16)
self.outpix = np.zeros((1, self.Ly, self.Lx), np.uint16)
        self.ismanual = np.zeros(0, bool)
self.update_plot()
self.filename = []
self.loaded = False
def first_load_listView(self):
self.listmodel = Qt.QStandardItemModel(self.ncells,1)
# self.listmodel = Qt.QStringListModel()
self.listmodel.setHorizontalHeaderLabels(["Annotation"])
self.myCellList = ['instance_' + str(i) for i in range(1, self.ncells + 1)]
for i in range(len(self.myCellList)):
self.listmodel.setItem(i,Qt.QStandardItem(self.myCellList[i]))
self.listView.setModel(self.listmodel)
def initialize_listView(self):
if self.filename != []:
if os.path.isfile(os.path.splitext(self.filename)[0] + '_instance_list.txt'):
self.list_file_name = str(os.path.splitext(self.filename)[0] + '_instance_list.txt')
self.myCellList_array = np.loadtxt(self.list_file_name, dtype=str)
self.myCellList = self.myCellList_array.tolist()
if len(self.myCellList) == self.ncells:
self.listmodel = Qt.QStandardItemModel(self.ncells, 1)
self.listmodel.setHorizontalHeaderLabels(["Annotation"])
for i in range(len(self.myCellList)):
self.listmodel.setItem(i,Qt.QStandardItem(self.myCellList[i]))
self.listView.setModel(self.listmodel)
else:
self.listmodel = Qt.QStandardItemModel(self.ncells, 1)
# self.listmodel = Qt.QStringListModel()
self.listmodel.setHorizontalHeaderLabels(["Annotation"])
self.myCellList = ['instance_' + str(i) for i in range(1, self.ncells + 1)]
for i in range(len(self.myCellList)):
self.listmodel.setItem(i,Qt.QStandardItem(self.myCellList[i]))
self.listView.setModel(self.listmodel)
else:
self.myCellList = ['instance_' + str(i) for i in range(1, self.ncells + 1)]
self.listmodel = Qt.QStandardItemModel(self.ncells, 1)
# self.listmodel = Qt.QStringListModel()
self.listmodel.setHorizontalHeaderLabels(["Annotation"])
for i in range(len(self.myCellList)):
self.listmodel.setItem(i,Qt.QStandardItem(self.myCellList[i]))
self.listView.setModel(self.listmodel)
def initinal_p0(self):
# self.p0.removeItem(self.img)
self.p0.removeItem(self.layer)
self.p0.removeItem(self.scale)
# self.img.deleteLater()
self.layer.deleteLater()
self.scale.deleteLater()
# self.img = pg.ImageItem(viewbox=self.p0, parent=self, axisOrder='row-major')
# self.img.autoDownsample = False
self.layer = guiparts.ImageDraw(viewbox=self.p0, parent=self)
self.layer.setLevels([0, 255])
self.scale = pg.ImageItem(viewbox=self.p0, parent=self)
self.scale.setLevels([0, 255])
self.p0.scene().contextMenuItem = self.p0
# self.p0.addItem(self.img)
self.p0.addItem(self.layer)
self.p0.addItem(self.scale)
def add_list_item(self):
# print(self.ncells)
# self.myCellList = self.listmodel.data()
self.listView.selectAll()
self.myCellList = []
for item in self.listView.selectedIndexes():
data = item.data()
self.myCellList.append(data)
temp_nums = []
for celli in self.myCellList:
if 'instance_' in celli:
temp_nums.append(int(celli.split('instance_')[-1]))
if len(temp_nums) == 0:
now_cellIdx = 0
else:
now_cellIdx = np.max(np.array(temp_nums))
self.myCellList.append('instance_' + str(now_cellIdx+1))
# self.myCellList.append('instance_' + str(self.ncells))
self.listmodel = Qt.QStandardItemModel(self.ncells, 1)
# self.listmodel = Qt.QStringListModel()
self.listmodel.setHorizontalHeaderLabels(["Annotation"])
for i in range(len(self.myCellList)):
self.listmodel.setItem(i, Qt.QStandardItem(self.myCellList[i]))
self.listView.setModel(self.listmodel)
def delete_list_item(self, index):
# self.myCellList = self.listmodel.data()
self.listView.selectAll()
self.myCellList = []
for item in self.listView.selectedIndexes():
data = item.data()
self.myCellList.append(data)
self.last_remove_index = index
self.last_remove_item = self.myCellList.pop(index - 1)
self.listmodel = Qt.QStandardItemModel(self.ncells, 1)
# self.listmodel = Qt.QStringListModel()
self.listmodel.setHorizontalHeaderLabels(["Annotation"])
for i in range(len(self.myCellList)):
self.listmodel.setItem(i, Qt.QStandardItem(self.myCellList[i]))
self.listView.setModel(self.listmodel)
def check_gpu(self, torch=True):
# also decide whether or not to use torch
self.useGPU.setChecked(False)
self.useGPU.setEnabled(False)
if models.use_gpu():
self.useGPU.setEnabled(True)
self.useGPU.setChecked(True)
def check_ftgpu(self, torch=True):
# also decide whether or not to use torch
self.ftuseGPU.setChecked(False)
self.ftuseGPU.setEnabled(False)
if models.use_gpu():
self.ftuseGPU.setEnabled(True)
self.ftuseGPU.setChecked(True)
def clear_all(self):
self.prev_selected = 0
self.selected = 0
# self.layers_undo, self.cellpix_undo, self.outpix_undo = [],[],[]
self.layers = 0 * np.ones((self.NZ, self.Ly, self.Lx, 4), np.uint8)
self.cellpix = np.zeros((self.NZ, self.Ly, self.Lx), np.uint16)
self.outpix = np.zeros((self.NZ, self.Ly, self.Lx), np.uint16)
        self.cellcolors = [np.array([255, 255, 255])]
import numpy as np
import scipy.constants as sc
from scipy.special import erf
import matplotlib.pyplot as plt
from astropy.convolution import convolve, Gaussian2DKernel
class simple_disk:
"""
Args:
# Geometric Parameters
inc (float): Inclination of the source in [degrees].
PA (float): Position angle of the source in [degrees].
x0 (Optional[float]): Source center offset along x-axis in [arcsec].
y0 (Optional[float]): Source center offset along y-axis in [arcsec].
dist (Optional[float]): Distance to the source in [pc].
mstar (Optional[float]): Mass of the central star in [Msun].
r_min (Optional[float]): Inner radius in [au].
r_max (Optional[float]): Outer radius in [au].
r0 (Optional[float]): Normalization radius in [au]. (r0 must be < r_l)
r_l (Optional[float]): Turn-over radius in [au].
z0 (Optional[float]): Emission height in [au] at r0.
zpsi (Optional[float]): Index of z_l profile for r < r_l.
zphi (Optional[float]): Exponential taper index of z_l profile at
r > r_l.
# Brightness Temperatures
Tb0 (Optional[float]): Brightness temperature in [K] at r0.
Tbq (Optional[float]): Index of Tb profile for r < r_l.
Tbeps (Optional[float]): Exponential taper index of Tb profile for
r > r_l.
Tbmax (Optional[float]): Maximum Tb in [K].
Tbmax_b (Optional[float]): Maximum Tb for back side of disk in [K].
# Optical depth of front-side
tau0 (Optional[float]): Optical depth at r0.
tauq (Optional[float]): Index of optical depth profile for r < r_l
taueta (Optional[float]): Exponential taper index for optical depth
profile at r > r_l.
taumax (Optional[float]): Maximum optical depth.
# Line-widths
dV0 (Optional[float]): Doppler line width in [m/s] at r0.
dVq (Optional[float]): Index of line-width profile.
dVmax (Optional[float]): Maximum line-width.
xi_nt (Optional[float]): Non-thermal line-width fraction (of sound
speed for the gas); can use if dV0, dVq are None.
# Observational Parameters
FOV (Optional[float]): Field of view of the model in [arcsec].
Npix (Optional[int]): Number of pixels along each axis.
mu_l (Optional[float]): Mean atomic weight for line of interest.
"""
# Establish constants
mu = 2.37
msun = 1.98847e30
mH = sc.m_p + sc.m_e
# Establish useful conversion factors
fwhm = 2.*np.sqrt(2.*np.log(2.))
nwrap = 3
def __init__(self, inc, PA, x0=0., y0=0., dist=100., mstar=1.,
r_min=0., r_max=500., r0=10., r_l=100.,
z0=0., zpsi=1., zphi=np.inf,
Tb0=50., Tbq=0.5, Tbeps=np.inf, Tbmax=500., Tbmax_b=20.,
tau0=100., tauq=0., taueta=np.inf, taumax=None,
dV0=None, dVq=None, dVmax=1000., xi_nt=0.,
FOV=None, Npix=128, mu_l=28):
# Set the disk geometrical properties.
self.x0, self.y0, self.inc, self.PA, self.dist = x0, y0, inc, PA, dist
self.z0, self.zpsi, self.zphi = z0, zpsi, zphi
self.r_l, self.r0, self.r_min, self.r_max = r_l, r0, r_min, r_max
# Define the velocity, brightness and linewidth radial profiles.
self.mstar = mstar
self.Tb0, self.Tbq, self.Tbeps = Tb0, Tbq, Tbeps
self.Tbmax, self.Tbmax_b = Tbmax, Tbmax_b
self.dV0, self.dVq, self.dVmax, self.xi_nt = dV0, dVq, dVmax, xi_nt
self.tau0, self.tauq, self.taueta = tau0, tauq, taueta
self.taumax = taumax
# Set the observational parameters.
self.FOV = 2.2 * self.r_max / self.dist if FOV is None else FOV
self.Npix = Npix
self.mu_l = mu_l
# Check if dV should be set by thermal broadening.
#self._check_thermal_broadening()
self._check_optical_depth()
# Build the disk model.
self._populate_coordinates()
self._set_brightness()
self._set_linewidth()
self._set_rotation()
self._set_tau()
# -- Model Building Functions -- #
def _populate_coordinates(self):
"""
Populate the coordinates needed for the model.
"""
# Set sky cartesian coordinates, representing the pixels in the image.
self.x_sky = np.linspace(-self.FOV / 2.0, self.FOV / 2.0, self.Npix)
self.cell_sky = np.diff(self.x_sky).mean()
self.x_sky, self.y_sky = np.meshgrid(self.x_sky, self.x_sky)
# Use these pixels to define face-down disk-centric coordinates.
self.x_disk = self.x_sky * self.dist
self.y_disk = self.y_sky * self.dist
self.cell_disk = np.diff(self.x_disk).mean()
        # Define three sets of cylindrical coordinates, the two emission
# surfaces and the midplane. If `z0 = 0.0` then the two emission
# surfaces are equal.
self.r_disk = np.hypot(self.y_disk, self.x_disk)
self.t_disk = np.arctan2(self.y_disk, self.x_disk)
f = self.disk_coords(x0=self.x0, y0=self.y0, inc=self.inc, PA=self.PA,
z0=self.z0, zpsi=self.zpsi, zphi=self.zphi)
self.r_sky_f = f[0] * self.dist
self.t_sky_f = f[1]
self.z_sky_f = f[2] * self.dist
if self.z0 != 0.0:
self._flat_disk = False
b = self.disk_coords(x0=self.x0, y0=self.y0, inc=-self.inc,
PA=self.PA, z0=self.z0, zpsi=self.zpsi,
zphi=self.zphi)
else:
self._flat_disk = True
b = f
self.r_sky_b = b[0] * self.dist
self.t_sky_b = b[1]
self.z_sky_b = b[2] * self.dist
# Define masks noting where the disk extends to.
self._in_disk_f = np.logical_and(self.r_sky_f >= self.r_min,
self.r_sky_f <= self.r_max)
self._in_disk_b = np.logical_and(self.r_sky_b >= self.r_min,
self.r_sky_b <= self.r_max)
self._in_disk = np.logical_and(self.r_disk >= self.r_min,
self.r_disk <= self.r_max)
@property
def r_sky(self):
return self.r_sky_f
@property
def t_sky(self):
return self.t_sky_f
@property
def v0_sky(self):
return self.v0_f
def _check_optical_depth(self):
"""
Set the optical depth parameters if they were not set when the class
was instantiated.
"""
if self.tau0 is None:
self.tau0 = 0.0
if self.tauq is None:
self.tauq = self.Tbq
if self.taueta is None:
self.taueta = 50.
if self.taumax is None:
self.taumax = 100.0
if self.r_l is None:
self.r_l = 200.0
def _set_linewidth(self):
"""
Sets the Doppler linewidth profile in [m/s].
"""
if self.dV0 is None:
csound_f = np.sqrt(sc.k * self.Tb_f / self.mu / self.mH)
self.dV_f = csound_f * \
np.sqrt(2 * self.mu / self.mu_l + self.xi_nt**2)
self.dV_f = np.clip(self.dV_f, 0.0, self.dVmax)
if self._flat_disk:
self.dV_b = None
else:
csound_b = np.sqrt(sc.k * self.Tb_b / self.mu / self.mH)
self.dV_b = csound_b * \
np.sqrt(2 * self.mu / self.mu_l + self.xi_nt**2)
self.dV_b = np.clip(self.dV_b, 0.0, self.dVmax)
else:
if self.dVq is None:
self.dVq = -0.5 * self.Tbq
self.dV_f = self.dV0 * (self.r_sky_f / self.r0)**self.dVq
self.dV_f = np.clip(self.dV_f, 0.0, self.dVmax)
if self._flat_disk:
self.dV_b = None
else:
self.dV_b = self.dV0 * (self.r_sky_b / self.r0)**self.dVq
self.dV_b = np.clip(self.dV_b, 0.0, self.dVmax)
def _set_brightness(self):
"""
Sets the brightness profile in [K].
"""
self.Tb_f = self.Tb0 * (self.r_sky_f / self.r0)**(-self.Tbq) * \
np.exp(-(self.r_sky_f / self.r_l)**self.Tbeps)
self.Tb_f = np.clip(self.Tb_f, 0.0, self.Tbmax)
self.Tb_f = np.where(self._in_disk_f, self.Tb_f, 0.0)
if self._flat_disk:
self.Tb_b = None
else:
            self.Tb_b = self.Tb0 * (self.r_sky_b / self.r0)**(-self.Tbq) * \
                np.exp(-(self.r_sky_b / self.r_l)**self.Tbeps)
self.Tb_b = np.clip(self.Tb_b, 0.0, self.Tbmax_b)
self.Tb_b = np.where(self._in_disk_b, self.Tb_b, 0.0)
def _set_rotation(self):
"""
Sets the projected rotation profile in [m/s].
"""
self.v0_f = self._calculate_projected_vkep(self.r_sky_f,
self.z_sky_f,
self.t_sky_f,
self.inc)
if self._flat_disk:
self.v0_b = None
else:
self.v0_b = self._calculate_projected_vkep(self.r_sky_b,
self.z_sky_b,
self.t_sky_b,
self.inc)
return
def _set_tau(self):
"""
Sets the tau radial profile.
"""
self.tau = self.tau0 * (self.r_sky_f / self.r0)**self.tauq * \
np.exp(-(self.r_sky_f / self.r_l)**self.taueta)
self.tau = np.where(self._in_disk_f, self.tau, 0.0)
def interpolate_model(self, x, y, param, x_unit='au', param_max=None,
interp1d_kw=None):
"""
Interpolate a user-provided model for the brightness temperature
profile or the line width.
Args:
x (array): Array of radii at which the model is sampled at in units
given by ``x_units``, either ``'au'`` or ``'arcsec'``.
y (array): Array of model values evaluated at ``x``. If brightness
temperature, in units of [K], or for linewidth, units of [m/s].
param (str): Parameter of the model, either ``'Tb'`` for brightness
temperature, or ``'dV'`` for linewidth.
x_unit (Optional[str]): Unit of the ``x`` array, either
``'au'`` or ``'arcsec'``.
param_max (Optional[float]): If provided, use as the maximum value
for the provided parameter (overwriting previous values).
interp1d_kw (Optional[dict]): Dictionary of kwargs to pass to
                ``scipy.interpolate.interp1d`` used for the linear
interpolation.
"""
from scipy.interpolate import interp1d
        # Validate the input models.
if x.size != y.size:
raise ValueError("`x.size` does not equal `y.size`.")
if x_unit.lower() == 'arcsec':
x *= self.dist
elif x_unit.lower() != 'au':
            raise ValueError("Unknown `x_unit` {}.".format(x_unit))
if y[0] != 0.0 or y[-1] != 0.0:
print("First or last value of `y` is non-zero and may cause " +
"issues with extrapolated values.")
# Validate the kwargs passed to interp1d.
ik = {} if interp1d_kw is None else interp1d_kw
ik['bounds_error'] = ik.pop('bounds_error', False)
ik['fill_value'] = ik.pop('fill_value', 'extrapolate')
ik['assume_sorted'] = ik.pop('assume_sorted', False)
# Interpolate the functions onto the coordinate grids.
if param.lower() == 'tb':
self.Tb_f = interp1d(x, y, **ik)(self.r_sky_f)
self.Tb_f = np.clip(self.Tb_f, 0.0, param_max)
if self.r_sky_b is not None:
self.Tb_b = interp1d(x, y, **ik)(self.r_sky_b)
self.Tb_b = np.clip(self.Tb_b, 0.0, param_max)
self.Tb0, self.Tbq, self.Tbmax = np.nan, np.nan, param_max
elif param.lower() == 'dv':
self.dV_f = interp1d(x, y, **ik)(self.r_sky_f)
self.dV_f = np.clip(self.dV_f, 0.0, param_max)
if self.r_sky_b is not None:
self.dV_b = interp1d(x, y, **ik)(self.r_sky_b)
self.dV_b = np.clip(self.dV_b, 0.0, param_max)
self.dV0, self.dVq, self.dVmax = np.nan, np.nan, param_max
elif param.lower() == 'tau':
self.tau = interp1d(x, y, **ik)(self.r_sky_f)
self.tau = np.clip(self.tau, 0.0, param_max)
else:
raise ValueError("Unknown 'param' value {}.".format(param))
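    # Minimal usage sketch for `interpolate_model`, assuming `disk` is an
    # existing `simple_disk` instance; the radii and temperatures below are
    # illustrative numbers only (zero-padded ends avoid the extrapolation
    # issue flagged above):
    #
    #   r_au = np.linspace(0., 500., 64)
    #   Tb_K = np.zeros_like(r_au)
    #   Tb_K[1:-1] = 40. * (r_au[1:-1] / 10.)**-0.5
    #   disk.interpolate_model(r_au, Tb_K, param='Tb', x_unit='au',
    #                          param_max=500.)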
@property
def v0_disk(self):
"""
Disk-frame rotation profile.
"""
v0 = self._calculate_projected_vkep(self.r_disk, 0.0)
return np.where(self._in_disk, v0, np.nan)
@property
def Tb_disk(self):
"""
Disk-frame brightness profile.
"""
        Tb = self.Tb0 * (self.r_disk / self.r0)**(-self.Tbq) * \
            np.exp(-(self.r_disk / self.r_l)**self.Tbeps)
return np.where(self._in_disk, Tb, np.nan)
@property
def dV_disk(self):
"""
Disk-frame line-width profile.
"""
if self.dV0 is None:
            csound = np.sqrt(sc.k * self.Tb_disk / self.mu / self.mH)
dV = csound * np.sqrt(2 * self.mu / self.mu_l + self.xi_nt**2)
else:
if self.dVq is None:
self.dVq = -0.5 * self.Tbq
dV = self.dV0 * (self.r_disk / self.r0)**self.dVq
return np.where(self._in_disk, dV, np.nan)
def _calculate_projected_vkep(self, r, z, t=0.0, inc=90.0):
"""
Calculates the projected Keplerian rotation profile based on the
attached stellar mass and source distance and inclination.
Args:
r (float/array): Cylindrical radius in [au].
z (float/array): Cylindrical height in [au].
t (Optional[float/array]): Polar angle in [rad].
            inc (Optional[float]): Disk inclination in [deg].
Returns:
vkep (float/array): Projected Keplerian velocity in [m/s].
"""
vkep2 = sc.G * self.mstar * self.msun * r**2.0
vkep2 /= np.hypot(r, z)**3.0
vkep = np.sqrt(vkep2 / sc.au)
return vkep * np.cos(t) * abs(np.sin(np.radians(inc)))
# -- Deprojection Functions -- #
def disk_coords(self, x0=0.0, y0=0.0, inc=0.0, PA=0.0, z0=0.0, zpsi=0.0,
zphi=0.0, frame='cylindrical'):
r"""
Get the disk coordinates given certain geometrical parameters and an
        emission surface. The emission surface is parameterized as an
        exponentially tapered power law:
        .. math::
            z(r) = z_0 \times \left(\frac{r}{r_0}\right)^{\psi_z} \times
            \exp\left(-\left[\frac{r}{r_l}\right]^{\varphi_z}\right)
        where ``z0`` is the emission height in [au] at the normalization
        radius ``r0``, and the exponential taper sets in around the turn-over
        radius ``r_l``. For a razor thin disk, ``z0=0.0``, while for a
        conical disk, ``zpsi=1.0``. Internally the surface is evaluated
        through ``self._z_func``, so the height arguments mirror the values
        stored on the instance.
Args:
x0 (Optional[float]): Source right ascension offset [arcsec].
y0 (Optional[float]): Source declination offset [arcsec].
inc (Optional[float]): Source inclination [deg].
PA (Optional[float]): Source position angle [deg]. Measured
between north and the red-shifted semi-major axis in an
easterly direction.
            z0 (Optional[float]): Emission height in [au] at the
                normalization radius ``r0``. To get the far side of the
                disk, make this number negative.
            zpsi (Optional[float]): Flaring index of the emission surface
                for r < r_l.
            zphi (Optional[float]): Exponential taper index of the emission
                surface at r > r_l.
            frame (Optional[str]): Frame of reference for the returned
                coordinates. Either ``'cylindrical'`` or ``'cartesian'``.
Returns:
            Three coordinate arrays, either the cylindrical coordinates,
            ``(r, theta, z)`` or cartesian coordinates, ``(x, y, z)``,
depending on ``frame``.
"""
# Check the input variables.
frame = frame.lower()
if frame not in ['cylindrical', 'cartesian']:
raise ValueError("frame must be 'cylindrical' or 'cartesian'.")
# Calculate the pixel values.
r, t, z = self._get_flared_coords(x0, y0, inc, PA, self._z_func)
if frame == 'cylindrical':
return r, t, z
return r * np.cos(t), r * np.sin(t), z
def _z_func(self, r):
"""
Returns the emission height in [arcsec].
"""
z = self.z0 * (r * self.dist / self.r0)**self.zpsi * \
np.exp(-(r * self.dist / self.r_l)**self.zphi) / self.dist
return np.clip(z, 0., None)
@staticmethod
def _rotate_coords(x, y, PA):
"""
Rotate (x, y) by PA [deg].
"""
x_rot = y * np.cos(np.radians(PA)) + x * np.sin(np.radians(PA))
y_rot = x * np.cos(np.radians(PA)) - y * np.sin(np.radians(PA))
return x_rot, y_rot
@staticmethod
def _deproject_coords(x, y, inc):
"""
Deproject (x, y) by inc [deg].
"""
return x, y / np.cos(np.radians(inc))
def _get_cart_sky_coords(self, x0, y0):
"""
        Return cartesian sky coordinates in [arcsec, arcsec].
"""
return self.x_sky - x0, self.y_sky - y0
def _get_polar_sky_coords(self, x0, y0):
"""
Return polar sky coordinates in [arcsec, radians].
"""
x_sky, y_sky = self._get_cart_sky_coords(x0, y0)
return np.hypot(y_sky, x_sky), np.arctan2(x_sky, y_sky)
def _get_midplane_cart_coords(self, x0, y0, inc, PA):
"""
        Return cartesian coordinates of midplane in [arcsec, arcsec].
"""
x_sky, y_sky = self._get_cart_sky_coords(x0, y0)
x_rot, y_rot = simple_disk._rotate_coords(x_sky, y_sky, PA)
return simple_disk._deproject_coords(x_rot, y_rot, inc)
def _get_midplane_polar_coords(self, x0, y0, inc, PA):
"""
Return the polar coordinates of midplane in [arcsec, radians].
"""
x_mid, y_mid = self._get_midplane_cart_coords(x0, y0, inc, PA)
return np.hypot(y_mid, x_mid), np.arctan2(y_mid, x_mid)
def _get_flared_coords(self, x0, y0, inc, PA, z_func):
"""
Return cylindrical coordinates of surface in [arcsec, radians].
"""
x_mid, y_mid = self._get_midplane_cart_coords(x0, y0, inc, PA)
r_tmp, t_tmp = np.hypot(x_mid, y_mid), np.arctan2(y_mid, x_mid)
for _ in range(5):
y_tmp = y_mid + z_func(r_tmp) * np.tan(np.radians(inc))
r_tmp = np.hypot(y_tmp, x_mid)
t_tmp = np.arctan2(y_tmp, x_mid)
return r_tmp, t_tmp, z_func(r_tmp)
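    # Note on the loop above: for an elevated emission surface the deprojected
    # midplane point has to be shifted by z(r) * tan(inc) along the minor
    # axis, but z itself depends on the (still unknown) radius. The fixed
    # number of iterations is a simple fixed-point solve of
    # r = hypot(x_mid, y_mid + z(r) * tan(inc)), which converges quickly for
    # moderately flared surfaces.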
@property
def xaxis_disk(self):
"""
X-axis for the disk coordinates in [au].
"""
return self.x_disk[0]
@property
def yaxis_disk(self):
"""
y-axis for the disk coordinates in [au].
"""
return self.y_disk[:, 0]
@property
def xaxis_sky(self):
"""
X-axis for the sky coordinates in [arcsec].
"""
return self.x_sky[0]
@property
def yaxis_sky(self):
"""
Y-axis for the sky coordinates in [arcsec].
"""
return self.y_sky[:, 0]
# -- Helper Functions -- #
def set_coordinates(self, x0=None, y0=None, inc=None, PA=None, dist=None,
z0=None, zpsi=None, r_min=None, r_max=None, FOV=None,
Npix=None):
"""
Helper function to redefine the coordinate system.
"""
self.x0 = self.x0 if x0 is None else x0
self.y0 = self.y0 if y0 is None else y0
self.inc = self.inc if inc is None else inc
self.PA = self.PA if PA is None else PA
self.dist = self.dist if dist is None else dist
self.z0 = self.z0 if z0 is None else z0
self.zpsi = self.zpsi if zpsi is None else zpsi
self.r_min = self.r_min if r_min is None else r_min
self.r_max = self.r_max if r_max is None else r_max
self.FOV = self.FOV if FOV is None else FOV
self.Npix = self.Npix if Npix is None else Npix
self._populate_coordinates()
self._set_brightness()
self._set_linewidth()
self._set_rotation()
self._set_tau()
def set_brightness(self, Tb0=None, Tbq=None, Tbmax=None, Tbmax_b=None):
"""
        Helper function to redefine the brightness profile.
"""
self.Tb0 = self.Tb0 if Tb0 is None else Tb0
self.Tbq = self.Tbq if Tbq is None else Tbq
self.Tbmax = self.Tbmax if Tbmax is None else Tbmax
self.Tbmax_b = self.Tbmax_b if Tbmax_b is None else Tbmax_b
self._set_brightness()
def set_linewidth(self, dV0=None, dVq=None, dVmax=None):
"""
Helper function to redefine the Doppler linewidth profile.
"""
self.dV0 = self.dV0 if dV0 is None else dV0
self.dVq = self.dVq if dVq is None else dVq
self.dVmax = self.dVmax if dVmax is None else dVmax
self._set_linewidth()
def set_tau(self, tau0=None, tauq=None, taueta=None, r_l=None, taumax=None):
"""
Helper function to redefine the optical depth profile.
"""
self.tau0 = self.tau0 if tau0 is None else tau0
self.tauq = self.tauq if tauq is None else tauq
self.taueta = self.taueta if taueta is None else taueta
self.taumax = self.taumax if taumax is None else taumax
self.r_l = self.r_l if r_l is None else r_l
self._set_tau()
# -- Pseudo Image Functions -- #
def get_cube(self, velax, dv0=None, bmaj=None, bmin=None, bpa=0.0, rms=0.0,
spectral_response=None):
"""
Return the pseudo-cube with the given velocity axis.
Args:
velax (array): 1D array of channel centres in [m/s].
dv0 (optional[ndarray]): An array of projected velocity
perturbations in [m/s].
bmaj (optional[float]): Synthesised beam major axis in [arcsec].
bmin (optional[float]): Synthesised beam minor axis in [arcsec]. If
only `bmaj` is specified, will assume a circular beam.
bpa (optional[float]): Beam position angle in [deg].
rms (optional[float]): RMS of the noise to add to the image.
spectral_response (optional[list]): The kernel to convolve the cube
                with along the spectral dimension to simulate the spectral
response of the telescope.
Returns:
cube (array): A 3D image cube.
"""
# Make the image cube.
cube = np.array([self.get_channel(velax[i], dv0=dv0)
for i in range(velax.size)])
assert cube.shape[0] == velax.size, "not all channels created"
# Include convolution.
beam = self._get_beam(bmaj, bmin, bpa) if bmaj is not None else None
if beam is not None:
cube = simple_disk._convolve_cube(cube, beam)
if spectral_response is not None:
            # np.convolve is strictly 1D, so convolve each spectrum separately.
            cube = np.apply_along_axis(np.convolve, 0, cube,
                                       spectral_response, mode='same')
# Add noise and return.
if rms > 0.0:
noise = np.random.randn(cube.size).reshape(cube.shape)
if beam is not None:
noise = simple_disk._convolve_cube(noise, beam)
if spectral_response is not None:
                noise = np.apply_along_axis(np.convolve, 0, noise,
                                            spectral_response, mode='same')
noise *= rms / np.std(noise)
else:
noise = np.zeros(cube.shape)
return cube + noise
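    # Minimal sketch of building a pseudo-cube; every number below is an
    # illustrative placeholder rather than a value taken from a real source:
    #
    #   disk = simple_disk(inc=30., PA=150., dist=120., mstar=1.0,
    #                      r_max=300., z0=20., Tb0=60., FOV=6., Npix=256)
    #   velax = np.arange(-3000., 3000., 200.)            # channel centres [m/s]
    #   cube = disk.get_cube(velax, bmaj=0.2, rms=2.0)    # (nchan, Npix, Npix) in [K]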
def get_channel(self, velax, dv0=None, bmaj=None, bmin=None,
bpa=0.0, rms=0.0):
"""
Calculate the channel emission in [K]. Can include velocity
perturbations with the `dv0` parameter. To simulate observations this
can include convolution with a 2D Gaussian beam or the addition of
(correlated) noise.
Args:
            velax (float): Velocity of the channel centre in [m/s].
dv0 (optional[ndarray]): An array of projected velocity
perturbations in [m/s].
bmaj (optional[float]): Synthesised beam major axis in [arcsec].
bmin (optional[float]): Synthesised beam minor axis in [arcsec]. If
only `bmaj` is specified, will assume a circular beam.
bpa (optional[float]): Beam position angle in [deg].
rms (optional[float]): RMS of the noise to add to the image.
Returns:
            channel (ndarray): A synthesised channel map in [K].
"""
# Check to see if there are one or two perturbations provided.
try:
dv0_f, dv0_b = dv0
        except ValueError:
            dv0_f = dv0
            dv0_b = dv0
except TypeError:
dv0_f = np.zeros(self.r_sky_f.shape)
dv0_b = dv0_f.copy()
# Calculate the flux from the front side of the disk.
flux_f = self._calc_flux(velax, dv0_f, 'f')
# If `z0 != 0.0`, can combine the front and far sides based on a
# two-slab approximation.
if not self._flat_disk:
flux_b = self._calc_flux(velax, dv0_b, 'b')
frac_f, frac_b = self._calc_frac(velax, dv0_b)
flux = frac_f * flux_f + frac_b * flux_b
else:
flux = flux_f
# Include a beam convolution if necessary.
beam = None if bmaj is None else self._get_beam(bmaj, bmin, bpa)
if beam is not None:
flux = convolve(flux, beam)
# Add noise and return.
noise = np.random.randn(flux.size).reshape(flux.shape)
if beam is not None:
noise = convolve(noise, beam)
noise *= rms / np.std(noise)
return flux + noise
def get_channel_tau(self, velax, dv0=0.0, bmaj=None, bmin=None, bpa=0.0):
"""
As ``get_channel``, but returns the optical depth of the front side of
the disk.
Args:
            velax (float): Velocity of the channel centre in [m/s].
dv0 (optional[ndarray]): An array of projected velocity
perturbations in [m/s].
bmaj (optional[float]): Synthesised beam major axis in [arcsec].
bmin (optional[float]): Synthesised beam minor axis in [arcsec]. If
only `bmaj` is specified, will assume a circular beam.
bpa (optional[float]): Beam position angle in [deg].
Returns:
            channel (ndarray): A synthesised channel map representing the
optical depth.
"""
# Calculate the optical depth.
tau = self._calc_tau(velax, dv0=dv0)
# Include a beam convolution if necessary.
beam = None if bmaj is None else self._get_beam(bmaj, bmin, bpa)
if beam is not None:
tau = convolve(tau, beam)
return tau
def _calc_tau(self, velax, dv0=0.0):
"""
Calculate the average tau profile assuming a single Gaussian component.
"""
tau, dV, v0 = self.tau, self.dV_f, self.v0_f + dv0
optdepth = np.empty_like(tau)
ok = (tau > 0.)
optdepth[~ok] = 0.
optdepth[ok] = tau[ok] * np.exp(-((velax - v0[ok]) / dV[ok])**2)
return optdepth
def _calc_flux(self, velax, dv0=0.0, side='f'):
"""
Calculate the emergent flux assuming single Gaussian component.
"""
if side.lower() == 'f':
Tb, dV, v0 = self.Tb_f, self.dV_f, self.v0_f + dv0
elif side.lower() == 'b':
Tb, dV, v0 = self.Tb_b, self.dV_b, self.v0_b + dv0
else:
            quote = "Unknown 'side' value {}. Must be 'f' or 'b'."
raise ValueError(quote.format(side))
spec = np.empty_like(Tb)
ok = (Tb > 0.)
spec[~ok] = 0.
spec[ok] = Tb[ok] * np.exp(-((velax - v0[ok]) / dV[ok])**2)
return spec
def _calc_frac(self, velax, dv0=0.0):
"""
        Calculates the fraction of the front side of the disk relative to the
back side based on the optical depth.
"""
tau = self._calc_tau(velax, dv0=dv0)
return 1.0 - np.exp(-tau), np.exp(-tau)
@staticmethod
def _convolve_cube(cube, beam):
"""
Convolve the cube.
"""
return np.array([convolve(c, beam) for c in cube])
def _get_beam(self, bmaj, bmin=None, bpa=0.0):
"""
Make a 2D Gaussian kernel for convolution.
"""
bmin = bmaj if bmin is None else bmin
bmaj /= self.cell_sky * self.fwhm
bmin /= self.cell_sky * self.fwhm
return Gaussian2DKernel(bmin, bmaj, np.radians(bpa))
# -- Velocity Perturbations -- #
def _perturbation(self, r0, t0, dr, dt=0.0, beta=0.0, projection='sky',
trim_values=False):
"""
Define a velocity perturbation in cylindrical coordinates in either
        sky-plane coordinates, ``projection='sky'``, or disk plane coordinates,
``projection='disk'``. If ``dt`` is set to zero, it assumes an
azimuthally symmetric perturbation.
Args:
r0 (float): Radius of perturbation center. If ``projection='sky'``
this is in [arcsec], while for ``projection='disk'`` this is in
[au]. For elevated emission surfaces this can additionally be
``'f'`` for the front side, or ``'b'`` for the back side.
t0 (float): Polar angle in [degrees] of perturbation center.
dr (float): Radial width of perturbation. If ``projection='sky'``
this is in [arcsec], while for ``projection='disk'`` this is in
[au].
dt (Optional[float]): Azimuthal extent of perturbations in [deg].
            beta (Optional[float]): Fixed pitch angle in [deg].
projection (Optional[str]): If ``'sky'``, return the function in
sky coordinates, otherwise in disk coordinates.
            trim_values(Optional[float]): If a number is specified, fill all
absolute values below this as ``np.nan``, primarily used for
plotting.
Returns:
f (array): 2D array of the Gaussian perturbation.
"""
# Parse input variables.
if projection.lower() == 'sky' or projection.lower() == 'f':
rvals, tvals = self.r_sky / self.dist, self.t_sky
elif projection.lower() == 'b':
rvals, tvals = self.r_sky_b / self.dist, self.t_sky_b
elif projection.lower() == 'disk':
rvals, tvals = self.r_disk, self.t_disk
else:
raise ValueError("`projection` must be 'sky', 'f', 'b' or 'disk'.")
if dt == 0.0 and beta != 0.0:
raise ValueError("Cannot specify pitch angle and `dt=0.0`.")
# Azimuthally symmetric perturbation.
if dt == 0.0:
return np.exp(-0.5*((rvals - r0) / dr)**2.0)
        # Calculate the azimuthal dependence.
f = []
nwrap = self.nwrap if self.nwrap % 2 else self.nwrap + 1
for wrap in np.arange(nwrap) - (nwrap - 1) / 2:
t_tmp = tvals.copy() + wrap * 2.0 * np.pi
r0_tmp = r0 / (1.0 + t_tmp * np.tan(np.radians(beta)))
t_tmp -= np.radians(t0)
_f = np.exp(-0.5*((rvals - r0_tmp) / dr)**2.0)
f += [_f * np.exp(-0.5*(t_tmp / np.radians(dt))**2.0)]
        f = np.sum(f, axis=0)
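    # Hedged sketch of feeding such a perturbation into a channel map. It
    # assumes `disk` is a `simple_disk` instance, that `_perturbation`
    # finishes by returning the summed map `f` built above, and uses an
    # arbitrary 200 m/s amplitude purely for illustration:
    #
    #   dv0 = 200. * disk._perturbation(r0=1.0, t0=45., dr=0.2, dt=30.,
    #                                   projection='sky')
    #   channel = disk.get_channel(velax=0.0, dv0=dv0, bmaj=0.1)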
import pickle
import itertools
import os
import math
from sklearn.preprocessing import normalize
import re
from operator import add
import matplotlib.pyplot as plt
import numpy as np
import argparse
import pylab as pl
def grep(pat, txt, ind):
r = re.search(pat, txt)
    return int(r.group(ind))
def compute_embds_matrix(path, M, N):
pkls = []
for root, dirs, files in os.walk(path):
if len(files) != 0:
pkls.extend([os.path.join(root, file) for file in files if file.endswith('.pkl')])
#pkls = os.listdir(path)
pkls.sort(key=lambda txt: grep(r"(\d+)_(\d+)\.pkl", txt, 1))
pkls = pkls[:N]
print(pkls)
A_lst = []
for pkl in pkls:
print(pkl)
with open(pkl, 'rb') as handle:
samples = pickle.load(handle)
# keys = list(samples.keys())
# keys.sort(key=lambda txt: grep(r"(\d+)\.png", txt, 1))
# samples = [samples[key] for key in keys]
chunks = [normalize(np.asarray(samples[i:i + M]), axis=1, norm='l2') for i in range(0, len(samples), M)]
print(chunks[0].shape)
print(len(chunks))
A_lst.extend(chunks)
return A_lst
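# Hedged usage sketch: each pickle under the given directory is assumed to
# hold a list/array of embedding vectors and to be named like
# '<start>_<end>.pkl'; the call then yields row-normalized (M, dim) blocks.
# The path below is a placeholder.
#
#   A_lst = compute_embds_matrix('results/embds', M=10000, N=10)
#   print(len(A_lst), A_lst[0].shape)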
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Visualize nearest neighbors')
parser.add_argument('--start', required=True, help='Start of the distance threshold for neighbors', type=float)
parser.add_argument('--end', required=True, help='End of the distance threshold for neighbors', type=float)
parser.add_argument('--step_size', required=True, help='Step size of the epsilon', type=float)
#parser.add_argument('--resolution', help='resolution of the trained model', type=int)
parser.add_argument('--path', required=True, help='The path for reading embeddings', type=str)
args, other_args = parser.parse_known_args()
M = 10000
N = 10
#path = os.path.join(args.path, str(args.resolution))
path = args.path
A_lst = compute_embds_matrix(os.path.join(path, 'embds'), M, N)
    # np.arange stands in for pl.frange (removed from pylab); keep args.end inclusive
    for epsilon in np.arange(args.start, args.end + args.step_size / 2, args.step_size):
with open(os.path.join(path, 'neighbors', 'final_neighbors_count_lstoflst_{}.pkl'.format(epsilon)), 'rb') as fp:
final_neighbors_count_lstoflst = pickle.load(fp)
# final_neighbors_count_lst = final_neighbors_count_lstoflst[0]
final_neighbors_count_lst = final_neighbors_count_lstoflst[N-1]
print(max(final_neighbors_count_lst))
final_neighbors_count_lst = np.asarray(final_neighbors_count_lst)
indices = np.argpartition(final_neighbors_count_lst, -1)[-1:]
print(indices)
        indices = np.asarray(indices)
from keras.preprocessing.image import ImageDataGenerator
from configuration import conf
import numpy as np
def rotate_270(data):
if conf.dataset_name == 'mnist':
img_shape = (data.shape[0], 28, 28, 1) if conf.is_conv else (data.shape[0], -1)
data = data.reshape(-1, 28, 28)
elif conf.dataset_name == 'timh':
img_shape = (data.shape[0], 28, 28, 1) if conf.is_conv else (data.shape[0], -1)
data = data.reshape(-1, 28, 28)
else:
img_shape = (data.shape[0], 32, 32, 3) if conf.is_conv else (data.shape[0], -1)
data = data.reshape(-1, 32, 32, 3)
return np.rot90(data, k=3, axes=(1, 2)).reshape(img_shape)
class Multi_view:
def __init__(self):
self.datagen = [
ImageDataGenerator(samplewise_center=True),
ImageDataGenerator(samplewise_std_normalization=True),
ImageDataGenerator(featurewise_center=True),
ImageDataGenerator(featurewise_std_normalization=True),
ImageDataGenerator(zca_whitening=True, zca_epsilon=0.1),
ImageDataGenerator(zca_whitening=True),
ImageDataGenerator(rotation_range=180),
ImageDataGenerator(width_shift_range=0.4),
ImageDataGenerator(height_shift_range=0.4),
ImageDataGenerator(horizontal_flip=True),
ImageDataGenerator(vertical_flip=True),
ImageDataGenerator(zoom_range=0.3),
ImageDataGenerator(shear_range=30), ]
def fit(self, x):
for gen in self.datagen:
gen.fit(x)
def flow(self, x, y):
augment_data = []
augment_label = []
for gen in self.datagen:
data, label = gen.flow(x, y, batch_size=conf.batch_size).next()
augment_data.append(data)
augment_label.append(label)
def augment(self, x, y=None, concat=False, num_runs=1):
augment_data = [x, rotate_270(x)]
augment_label = [y, y]
if y is None:
for _ in np.arange(num_runs):
for gen in self.datagen:
data = gen.flow(x, batch_size=x.shape[0]).next()
augment_data.append(data)
if concat:
return np.concatenate(augment_data)
return augment_data
for _ in np.arange(num_runs):
for gen in self.datagen:
data, label = gen.flow(x, y, batch_size=x.shape[0]).next()
augment_data.append(data)
augment_label.append(label)
if concat:
            return np.concatenate(augment_data), np.concatenate(augment_label)
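    # Hedged usage sketch, assuming MNIST-shaped inputs and that `conf`
    # provides `batch_size`, `is_conv` and `dataset_name`; the random batch
    # below is a stand-in for real data:
    #
    #   mv = Multi_view()
    #   x = np.random.rand(32, 28, 28, 1).astype('float32')
    #   y = np.eye(10)[np.random.randint(0, 10, 32)]
    #   mv.fit(x)                      # required by the featurewise/ZCA generators
    #   aug_x, aug_y = mv.augment(x, y, concat=True)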
from math import sqrt, floor
from numpy import var
from scipy.stats import ttest_ind
from statsmodels.stats.power import tt_ind_solve_power
from oeda.log import warn, error
from oeda.analysis import Analysis
from numpy import mean
class TwoSampleTest(Analysis):
def run(self, data, knobs):
if len(data) < 2:
error("Cannot run " + self.name + " on less than two samples.")
return False
if len(data) > 2:
warn("Cannot run " + self.name + " on more than two samples.")
warn("Comparing only the first two samples.")
self.y1 = [d for d in data[0]]
self.y2 = [d for d in data[1]]
return True
class Ttest(TwoSampleTest):
name = "t-test"
def __init__(self, stage_ids, y_key, alpha=0.05):
super(self.__class__, self).__init__(stage_ids, y_key)
self.alpha = alpha
def run(self, data, knobs):
if not super(self.__class__, self).run(data, knobs):
error("Aborting analysis.")
return
statistic, pvalue = ttest_ind(self.y1, self.y2, equal_var=False)
different_averages = bool(pvalue <= self.alpha)
result = dict()
result["statistic"] = statistic
result["pvalue"] = pvalue
result["alpha"] = self.alpha
result["different_averages"] = different_averages
        result["mean_diff"] = mean(self.y1) - mean(self.y2)
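        # Hedged sketch of how the remaining imports (var, sqrt,
        # tt_ind_solve_power) are often combined: a pooled-standard-deviation
        # effect size and the per-group sample size needed to detect it. The
        # power value is illustrative, not taken from this module:
        #
        #   pooled_sd = sqrt((var(self.y1) + var(self.y2)) / 2.)
        #   effect_size = result["mean_diff"] / pooled_sd
        #   result["needed_samples"] = tt_ind_solve_power(
        #       effect_size=effect_size, alpha=self.alpha, power=0.8,
        #       ratio=1.0, alternative='two-sided')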
"""utility functions related to binning"""
import math, numpy as np
from .ps import PS, Covmat
class Binning():
def __init__(self, lmin, lmax, nbins, scheme='linear', w_func=None):
"""create a binning object that has everything needed
to bin an unbinned power-spectra
Parameters:
-----------
lmin / lmax (int): bounds of ells to bin (inclusive)
nbins: number of bins to create
scheme: binning scheme, i.e. 'linear', 'log', etc.
w_func: w_func(ell) -> weights for ell, default to 1
"""
self.lmin, self.lmax = lmin, lmax
self.nbins, self.scheme = nbins, scheme
self.w_func = w_func if w_func else lambda ells: np.ones_like(ells)
# get ells and weights
self.ells = np.arange(lmin, lmax+1)
try: self.weights = self.w_func(self.ells) # see if vectorization work
except: self.weights = np.array([self.w_func(ell) for ell in self.ells])
# find bin egdes and centers
if scheme == 'linear':
edges = np.linspace(lmin, lmax, nbins+1)
elif scheme == 'log':
edges = np.exp(np.linspace(np.log(lmin), np.log(lmax), nbins+1))
elif scheme == 'log10':
edges = 10**(np.linspace(np.log10(lmin), np.log10(lmax), nbins+1))
elif scheme == 'p2': # quadratic
edges = np.linspace(np.sqrt(lmin), np.sqrt(lmax), nbins+1)**2
elif scheme == 'p3': # cubic
edges = np.linspace(lmin**(1/3), lmax**(1/3), nbins+1)**3
else:
raise NotImplementedError(f'Binning scheme: {scheme} not supported!')
self.bin_l, self.bin_r = edges[:-1], edges[1:]
self.bin_c = (self.bin_r + self.bin_l) / 2
self.slices = [None]*nbins # store slices to extract ells
for i, (l, r) in enumerate(zip(self.bin_l, self.bin_r)):
            idx_start = np.where(self.ells >= l)[0][0]  # first ell falling in bin i
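        # Hedged sketch of binning a spectrum with this object. It assumes the
        # loop above finishes by storing one `slice` per bin in `self.slices`;
        # the toy spectrum is purely illustrative:
        #
        #   binning = Binning(lmin=2, lmax=2000, nbins=20, scheme='log')
        #   cl = 1.0 / binning.ells**2
        #   cl_b = np.array([np.average(cl[s], weights=binning.weights[s])
        #                    for s in binning.slices])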
__author__ = 'Ryba'
import glob
import itertools
import os
import sys
from collections import namedtuple
import matplotlib.pyplot as plt
import numpy as np
import scipy.ndimage.filters as scindifil
import scipy.ndimage.interpolation as scindiint
import scipy.ndimage.measurements as scindimea
import scipy.ndimage.morphology as scindimor
import scipy.signal as scisig
import scipy.stats as scista
import skimage.color as skicol
import skimage.exposure as skiexp
import skimage.feature as skifea
import skimage.filters as skifil
import skimage.io as skiio
import skimage.measure as skimea
import skimage.morphology as skimor
import skimage.restoration as skires
import skimage.segmentation as skiseg
from matplotlib.patches import Ellipse
from mpl_toolkits.axes_grid1 import make_axes_locatable
from skimage.segmentation import mark_boundaries
from sklearn.cluster import MeanShift, estimate_bandwidth
try:
import cPickle as pickle
except ImportError:
import pickle
import gzip
import warnings
try:
import data_viewers
except ImportError:
pass
# if os.path.exists('../data_viewers/')
# sys.path.append('../data_viewers/')
# from dataviewers.seg_viewer import SegViewer
# import Viewer_3D
# sys.path.append('../seg_viewer/')
# from seg_viewer import SegViewer
#----------------------------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------------------
def get_seeds(im, minT=0.95, maxT=1.05, minInt=0, maxInt=255, debug=False):
vals = im[np.where(np.logical_and(im>=minInt, im<=maxInt))]
hist, bins = skiexp.histogram(vals)
max_peakIdx = hist.argmax()
minT *= bins[max_peakIdx]
maxT *= bins[max_peakIdx]
histTIdxs = (bins >= minT) * (bins <= maxT)
histTIdxs = np.nonzero(histTIdxs)[0]
class1TMin = minT
class1TMax = maxT
seed_mask = np.where( (im >= class1TMin) * (im <= class1TMax), 1, 0)
if debug:
plt.figure()
plt.plot(bins, hist)
plt.hold(True)
plt.plot(bins[max_peakIdx], hist[max_peakIdx], 'ro')
plt.plot(bins[histTIdxs], hist[histTIdxs], 'r')
plt.plot(bins[histTIdxs[0]], hist[histTIdxs[0]], 'rx')
plt.plot(bins[histTIdxs[-1]], hist[histTIdxs[-1]], 'rx')
plt.title('Image histogram and its class1 = maximal peak (red dot) +/- minT/maxT % of its density (red lines).')
plt.show()
#minT *= hist[max_peakIdx]
#maxT *= hist[max_peakIdx]
#histTIdxs = (hist >= minT) * (hist <= maxT)
#histTIdxs = np.nonzero(histTIdxs)[0]
    #histTIdxs = histTIdxs.astype(np.int)
#class1TMin = bins[histTIdxs[0]]
#class1TMax = bins[histTIdxs[-1]
#if debug:
# plt.figure()
# plt.plot(bins, hist)
# plt.hold(True)
#
# plt.plot(bins[max_peakIdx], hist[max_peakIdx], 'ro')
# plt.plot(bins[histTIdxs], hist[histTIdxs], 'r')
# plt.plot(bins[histTIdxs[0]], hist[histTIdxs[0]], 'rx')
# plt.plot(bins[histTIdxs[-1]], hist[histTIdxs[-1]], 'rx')
# plt.title('Image histogram and its class1 = maximal peak (red dot) +/- minT/maxT % of its density (red lines).')
# plt.show()
return seed_mask, class1TMin, class1TMax
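# Minimal usage sketch for get_seeds (the file name is a placeholder):
#
#   im = skiio.imread('ct_slice.png', as_grey=True)
#   seed_mask, t_min, t_max = get_seeds(im, minT=0.95, maxT=1.05,
#                                       minInt=10, maxInt=245, debug=False)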
#----------------------------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------------------
def seeds2superpixels(seed_mask, superpixels, debug=False, im=None):
seeds = np.argwhere(seed_mask)
superseeds = np.zeros_like(seed_mask)
for s in seeds:
label = superpixels[s[0], s[1]]
superseeds = np.where(superpixels==label, 1, superseeds)
if debug:
plt.figure(), plt.gray()
plt.subplot(121), plt.imshow(im), plt.hold(True), plt.plot(seeds[:,1], seeds[:,0], 'ro'), plt.axis('image')
plt.subplot(122), plt.imshow(im), plt.hold(True), plt.plot(seeds[:,1], seeds[:,0], 'ro'),
plt.imshow(mark_boundaries(im, superseeds, color=(1,0,0))), plt.axis('image')
plt.show()
return superseeds
#----------------------------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------------------
def intensity_range2superpixels(im, superpixels, intMinT=0.95, intMaxT=1.05, debug=False, intMin=0, intMax=255):#, fromInt=0, toInt=255):
superseeds = np.zeros_like(superpixels)
#if not intMin and not intMax:
# hist, bins = skexp.histogram(im)
#
# #zeroing values that are lower/higher than fromInt/toInt
# toLow = np.where(bins < fromInt)
# hist[toLow] = 0
# toHigh = np.where(bins > toInt)
# hist[toHigh] = 0
#
# max_peakIdx = hist.argmax()
# intMin = intMinT * bins[max_peakIdx]
# intMax = intMaxT * bins[max_peakIdx]
sp_means = np.zeros(superpixels.max()+1)
for sp in range(superpixels.max()+1):
values = im[np.where(superpixels==sp)]
mean = np.mean(values)
sp_means[sp] = mean
idxs = np.argwhere(np.logical_and(sp_means>=intMin, sp_means<=intMax))
for i in idxs:
superseeds = np.where(superpixels==i[0], 1, superseeds)
if debug:
plt.figure(), plt.gray()
plt.imshow(im), plt.hold(True), plt.imshow(mark_boundaries(im, superseeds, color=(1,0,0)))
plt.axis('image')
plt.show()
return superseeds
def show_slice(data, segmentation=None, lesions=None, win_l=50, win_w=350, windowing=False, show='True'):
if windowing:
vmin = win_l - win_w / 2
vmax = win_l + win_w / 2
else:
vmin = data.min()
vmax = data.max()
plt.figure()
plt.gray()
plt.imshow(data, interpolation='nearest', vmin=vmin, vmax=vmax)
if segmentation is not None:
plt.hold(True)
contours = skimea.find_contours(segmentation, 1)
for contour in contours:
plt.plot(contour[:, 1], contour[:, 0], 'b', linewidth=2)
if lesions is not None:
plt.hold(True)
contours = skimea.find_contours(lesions, 1)
for contour in contours:
plt.plot(contour[:, 1], contour[:, 0], 'r', linewidth=2)
plt.axis('image')
if show:
plt.show()
def change_slice_index(data):
n_slices = data.shape[2]
data_reshaped = np.zeros(np.hstack((data.shape[2], data.shape[0], data.shape[1])))
for i in range(n_slices):
data_reshaped[i, :, :] = data[:, :, i]
return data_reshaped
def read_data(dcmdir, indices=None, wildcard='*.dcm', type=np.int16):
import dicom
dcmlist = []
for infile in glob.glob(os.path.join(dcmdir, wildcard)):
dcmlist.append(infile)
    if indices is None:
indices = range(len(dcmlist))
data3d = []
for i in range(len(indices)):
ind = indices[i]
onefile = dcmlist[ind]
if wildcard == '*.dcm':
data = dicom.read_file(onefile)
data2d = data.pixel_array
try:
                data2d = (float(data.RescaleSlope) * data2d) + float(data.RescaleIntercept)
            except Exception:
print('problem with RescaleSlope and RescaleIntercept')
else:
# data2d = cv2.imread(onefile, 0)
data2d = skiio.imread(onefile, as_grey=True)
if len(data3d) == 0:
shp2 = data2d.shape
data3d = np.zeros([shp2[0], shp2[1], len(indices)], dtype=type)
data3d[:,:,i] = data2d
#need to reshape data to have slice index (ndim==3)
if data3d.ndim == 2:
data3d.resize(np.hstack((data3d.shape,1)))
return data3d
def windowing(data, level=50, width=350, sub1024=False, sliceId=2, out_range=(0, 255)):
#srovnani na standardni skalu = odecteni 1024HU
if sub1024:
data -= 1024
#zjisteni minimalni a maximalni density
minHU = level - width / 2
maxHU = level + width / 2
if data.ndim == 3:
if sliceId == 2:
for idx in range(data.shape[2]):
#rescalovani intenzity tak, aby skala <minHU, maxHU> odpovidala intervalu <0,255>
data[:, :, idx] = skiexp.rescale_intensity(data[:, :, idx], in_range=(minHU, maxHU), out_range=(0, 255))
elif sliceId == 0:
for idx in range(data.shape[0]):
#rescalovani intenzity tak, aby skala <minHU, maxHU> odpovidala intervalu <0,255>
data[idx, :, :] = skiexp.rescale_intensity(data[idx, :, :], in_range=(minHU, maxHU), out_range=(0, 255))
else:
data = skiexp.rescale_intensity(data, in_range=(minHU, maxHU), out_range=out_range)
return data.astype(np.uint8)
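# Quick sketch of the CT windowing above (illustrative values only): HU intensities are
# clipped to <level - width/2, level + width/2> and rescaled to the 0-255 range.
def _demo_windowing():
    import numpy as np
    hu = np.array([[-1000, -125, 50, 225, 1000]], dtype=np.int16)  # air ... bone
    return windowing(hu.copy(), level=50, width=350)  # ~[[0, 0, 127, 255, 255]]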
def smoothing(data, d=10, sigmaColor=10, sigmaSpace=10, sliceId=2):
import cv2
if data.ndim == 3:
if sliceId == 2:
for idx in range(data.shape[2]):
data[:, :, idx] = cv2.bilateralFilter(data[:, :, idx], d=d, sigmaColor=sigmaColor, sigmaSpace=sigmaSpace)
elif sliceId == 0:
for idx in range(data.shape[0]):
data[idx, :, :] = cv2.bilateralFilter(data[idx, :, :], d=d, sigmaColor=sigmaColor, sigmaSpace=sigmaSpace)
else:
if data.dtype.type == np.float64:
# data = skiexp.rescale_intensity(data, in_range=(0, 1), out_range=(0, 255)).astype(np.uint8)
data = data.astype(np.float32)
data = cv2.bilateralFilter(data, d=d, sigmaColor=sigmaColor, sigmaSpace=sigmaSpace)
return data
def smoothing_bilateral(data, sigma_space=15, sigma_color=0.05, pseudo_3D=True, sliceId=2):
if data.ndim == 3 and pseudo_3D:
if sliceId == 2:
for idx in range(data.shape[2]):
temp = skifil.denoise_bilateral(data[:, :, idx], sigma_range=sigma_color, sigma_spatial=sigma_space)
# temp = skires.denoise_bilateral(data[:, :, idx], sigma_range=sigma_color, sigma_spatial=sigma_space)
data[:, :, idx] = (255 * temp).astype(np.uint8)
elif sliceId == 0:
for idx in range(data.shape[0]):
temp = skifil.denoise_bilateral(data[idx, :, :], sigma_range=sigma_color, sigma_spatial=sigma_space)
# temp = skires.denoise_bilateral(data[idx, :, :], sigma_range=sigma_color, sigma_spatial=sigma_space)
data[idx, :, :] = (255 * temp).astype(np.uint8)
else:
data = skifil.denoise_bilateral(data, sigma_range=sigma_color, sigma_spatial=sigma_space)
# data = skires.denoise_bilateral(data, sigma_range=sigma_color, sigma_spatial=sigma_space)
data = (255 * data).astype(np.uint8)
return data
def smoothing_tv(data, weight=0.1, pseudo_3D=True, multichannel=False, sliceId=2):
if data.ndim == 3 and pseudo_3D:
if sliceId == 2:
for idx in range(data.shape[2]):
# temp = skifil.denoise_tv_chambolle(data[:, :, idx], weight=weight, multichannel=multichannel)
temp = skires.denoise_tv_chambolle(data[:, :, idx], weight=weight, multichannel=multichannel)
data[:, :, idx] = (255 * temp).astype(np.uint8)
elif sliceId == 0:
for idx in range(data.shape[0]):
# temp = skifil.denoise_tv_chambolle(data[idx, :, :], weight=weight, multichannel=multichannel)
temp = skires.denoise_tv_chambolle(data[idx, :, :], weight=weight, multichannel=multichannel)
data[idx, :, :] = (255 * temp).astype(np.uint8)
else:
# data = skifil.denoise_tv_chambolle(data, weight=weight, multichannel=False)
data = skires.denoise_tv_chambolle(data, weight=weight, multichannel=False)
data = (255 * data).astype(np.uint8)
return data
def smoothing_gauss(data, sigma=1, pseudo_3D=True, sliceId=2):
if data.ndim == 3 and pseudo_3D:
if sliceId == 2:
for idx in range(data.shape[2]):
temp = skifil.gaussian_filter(data[:, :, idx], sigma=sigma)
data[:, :, idx] = (255 * temp).astype(np.uint8)
elif sliceId == 0:
for idx in range(data.shape[0]):
temp = skifil.gaussian_filter(data[idx, :, :], sigma=sigma)
data[idx, :, :] = (255 * temp).astype(np.uint8)
else:
data = skifil.gaussian_filter(data, sigma=sigma)
data = (255 * data).astype(np.uint8)
return data
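# Sketch: the smoothing_* helpers above assume 8-bit input and return uint8 again (the
# skimage filters rescale internally to [0, 1], hence the 255 factor). This relies on the
# older scikit-image API (skifil.gaussian_filter) targeted by this module.
def _demo_smoothing_gauss():
    import numpy as np
    np.random.seed(0)
    vol = (255 * np.random.rand(32, 32, 5)).astype(np.uint8)  # pseudo-3D stack, slice axis = 2
    return smoothing_gauss(vol.copy(), sigma=2, sliceId=2)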
def analyse_histogram(data, roi=None, dens_min=20, dens_max=255, minT=0.8, maxT=1.2, show=False, show_now=True):
    if roi is None:
#roi = np.ones(data.shape, dtype=np.bool)
roi = np.logical_and(data >= dens_min, data <= dens_max)
voxels = data[np.nonzero(roi)]
hist, bins = skiexp.histogram(voxels)
max_peakIdx = hist.argmax()
minT = minT * hist[max_peakIdx]
maxT = maxT * hist[max_peakIdx]
histTIdxs = (hist >= minT) * (hist <= maxT)
histTIdxs = np.nonzero(histTIdxs)[0]
    histTIdxs = histTIdxs.astype(int)
class1TMin = bins[histTIdxs[0]]
class1TMax = bins[histTIdxs[-1]]
main = data * (roi > 0)
class1 = np.where((main >= class1TMin) * (main <= class1TMax), 1, 0)
if show:
plt.figure()
plt.plot(bins, hist)
plt.plot(bins[max_peakIdx], hist[max_peakIdx], 'ro')
plt.plot(bins[histTIdxs], hist[histTIdxs], 'r')
plt.plot(bins[histTIdxs[0]], hist[histTIdxs[0]], 'rx')
plt.plot(bins[histTIdxs[-1]], hist[histTIdxs[-1]], 'rx')
plt.title('Histogram and class1 = max peak (red dot) +-5% of its density (red lines).')
if show_now:
plt.show()
return class1
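# Sketch: analyse_histogram keeps the voxels whose histogram count lies within
# [minT, maxT] times the count of the dominant peak; on a synthetic bimodal image the
# larger mode is selected (values below are illustrative only).
def _demo_analyse_histogram():
    import numpy as np
    np.random.seed(1)
    data = np.concatenate([np.random.normal(100, 5, 8000),
                           np.random.normal(180, 5, 2000)]).astype(np.uint8).reshape(100, 100)
    return analyse_histogram(data, dens_min=20, dens_max=255)  # binary mask of the dominant class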
def dominant_class(data, roi=None, dens_min=0, dens_max=255, peakT=0.8, show=False, show_now=True):
if roi is None:
#roi = np.ones(data.shape, dtype=np.bool)
        if np.issubdtype(data.dtype, np.floating):
dens_min /= 255.
dens_max /= 255.
roi = np.logical_and(data >= dens_min, data <= dens_max)
voxels = data[np.nonzero(roi)]
hist, bins = skiexp.histogram(voxels)
hist2 = hist_smoothing(bins, hist, sigma=10)
# plt.figure()
# plt.fill(bins, hist, 'b', bins, hist2, 'r', alpha=0.7)
# plt.show()
hist = hist2.copy()
max_peak = hist.max()
max_peak_idx = hist.argmax()
l_idx = max_peak_idx
while (hist[l_idx] > (max_peak * peakT)) and (l_idx > 0):
l_idx -= 1
r_idx = max_peak_idx
while (hist[r_idx] > (max_peak * peakT)) and (r_idx < len(hist) - 1):
r_idx += 1
dom_l = bins[l_idx]
dom_r = bins[r_idx]
main = data * (roi > 0)
class1 = np.where((main >= dom_l) * (main <= dom_r), 1, 0)
# std = data[np.nonzero(class1)].std()
std = 1
rv = scista.norm(loc=bins[max_peak_idx], scale=std)
if show:
plt.figure()
plt.plot(bins, hist)
plt.fill_between(bins, hist, color='b')
# pdf = rv.pdf(bins)
# plt.figure()
# plt.plot(bins, pdf * max_peak / pdf.max(), 'm')
# plt.show()
plt.plot(bins[max_peak_idx], hist[max_peak_idx], 'ro', markersize=10)
plt.plot([bins[l_idx], bins[l_idx]], [0, hist[max_peak_idx]], 'r-', linewidth=4)
plt.plot([bins[r_idx], bins[r_idx]], [0, hist[max_peak_idx]], 'r-', linewidth=4)
plt.plot(bins[l_idx], hist[l_idx], 'rx', markersize=10, markeredgewidth=2)
plt.plot(bins[r_idx], hist[r_idx], 'rx', markersize=10, markeredgewidth=2)
plt.title('Histogram and dominant_class.')
if show_now:
plt.show()
return class1, rv
def intensity_probability(data, std=20, mask=None, dens_min=10, dens_max=255):
if mask is None:
        # mask = np.logical_and(data >= dens_min, data <= dens_max)
        mask = np.ones(data.shape, dtype=bool)
voxels = data[np.nonzero(mask)]
hist, bins = skiexp.histogram(voxels)
#zeroing histogram outside interval <dens_min, dens_max>
hist[:dens_min] = 0
hist[dens_max:] = 0
max_id = hist.argmax()
mu = round(bins[max_id])
prb = scista.norm(loc=mu, scale=std)
print('main pdf: mu = %i, std = %i' % (mu, std))
# plt.figure()
# plt.plot(bins, hist)
# plt.hold(True)
# plt.plot(mu, hist[max_id], 'ro')
# plt.show()
probs_L = prb.pdf(voxels)
probs = np.zeros(data.shape)
    coords = np.argwhere(mask)
n_elems = coords.shape[0]
for i in range(n_elems):
if data.ndim == 3:
probs[coords[i,0], coords[i,1], coords[i,2]] = probs_L[i]
else:
probs[coords[i,0], coords[i,1]] = probs_L[i]
return probs
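# Sketch (relies on the mask handling above): build a per-voxel likelihood map from a
# Gaussian centred on the dominant histogram intensity. Illustrative data only.
def _demo_intensity_probability():
    import numpy as np
    np.random.seed(2)
    data = np.random.normal(120, 10, (64, 64)).astype(np.uint8)
    return intensity_probability(data, std=20)  # same shape as data, values = N(mu, std).pdf(intensity)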
def get_zunics_compactness(obj):
if obj.ndim == 2:
obj = np.expand_dims(obj, 0)
m000 = obj.sum()
m200 = get_central_moment(obj, 2, 0, 0)
m020 = get_central_moment(obj, 0, 2, 0)
m002 = get_central_moment(obj, 0, 0, 2)
term1 = (3**(5./3)) / (5 * (4*np.pi)**(2./3))
term2 = m000**(5./3) / (m200 + m020 + m002)
K = term1 * term2
return K
def get_central_moment(obj, p, q, r):
elems = np.argwhere(obj)
m000 = obj.sum()
m100 = (elems[:,0]).sum()
m010 = (elems[:,1]).sum()
m001 = (elems[:,2]).sum()
xc = m100 / m000
yc = m010 / m000
zc = m001 / m000
mom = 0
for el in elems:
        mom += (el[0] - xc)**p * (el[1] - yc)**q * (el[2] - zc)**r
return mom
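# Worked check for the Zunic compactness above: for a digitised ball the measure should
# be close to 1 (sketch on a coarse grid, so expect a small deviation from 1).
def _demo_zunics_compactness():
    import numpy as np
    z, y, x = np.ogrid[-20:21, -20:21, -20:21]
    ball = (x**2 + y**2 + z**2) <= 20**2
    return get_zunics_compactness(ball.astype(np.uint8))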
def compactness(obj):
border = (obj - skimor.binary_erosion(obj, np.ones((3, 3)))).sum()
area = obj.sum()
comp = float(border ** 2) / area
return comp
def opening3D(data, selem=skimor.disk(3), sliceId=0):
if sliceId == 0:
for i in range(data.shape[0]):
data[i,:,:] = skimor.binary_opening(data[i,:,:], selem)
elif sliceId == 2:
for i in range(data.shape[2]):
data[:,:,i] = skimor.binary_opening(data[:,:,i], selem)
return data
def closing3D(data, selem=skimor.disk(3), slicewise=False, sliceId=0):
if slicewise:
if sliceId == 0:
for i in range(data.shape[0]):
data[i, :, :] = skimor.binary_closing(data[i, :, :], selem)
elif sliceId == 2:
for i in range(data.shape[2]):
data[:, :, i] = skimor.binary_closing(data[:, :, i], selem)
else:
data = scindimor.binary_closing(data, selem)
return data
def eroding3D(data, selem=None, selem_size=3, slicewise=False, sliceId=0):
# if selem is None:
# if len(data.shape) == 3:
# selem = np.ones((selem_size, selem_size, selem_size))
# else:
# selem = skimor.disk(selem_size)
# if slicewise:
# if sliceId == 0:
# for i in range(data.shape[0]):
# data[i, :, :] = skimor.binary_erosion(data[i, :, :], selem)
# elif sliceId == 2:
# for i in range(data.shape[2]):
# data[:, :, i] = skimor.binary_erosion(data[:, :, i], selem)
# else:
# data = scindimor.binary_erosion(data, selem)
data = morph_ND(data, 'erosion', selem, selem_size, slicewise, sliceId)
return data
def morph_ND(data, method, selem=None, selem_rad=3, slicewise=True, sliceId=0):
if method == 'erosion':
morph_func = scindimor.binary_erosion
elif method == 'dilation':
morph_func = scindimor.binary_dilation
elif method == 'opening':
morph_func = scindimor.binary_opening
elif method == 'closing':
morph_func = scindimor.binary_closing
else:
raise ValueError('Wrong morphological operation name.')
if selem is None:
selem = np.ones((2 * selem_rad + 1,) * data.ndim)
if data.ndim == 2:
data = morph_func(data, selem)
else:
if slicewise:
if sliceId == 0:
for i in range(data.shape[0]):
data[i, :, :] = morph_func(data[i, :, :], selem)
elif sliceId == 2:
for i in range(data.shape[2]):
data[:, :, i] = morph_func(data[:, :, i], selem)
else:
data = morph_func(data, selem)
return data
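# Sketch: morph_ND dispatches to the scipy.ndimage binary morphology routines either
# slice-wise or fully in N-D; e.g. a 3D closing with the default cubic structuring
# element of radius selem_rad (illustrative volume below).
def _demo_morph_ND():
    import numpy as np
    vol = np.zeros((10, 20, 20), dtype=bool)
    vol[:, 5:15, 5:15] = True
    vol[:, 9:11, 9:11] = False  # small hole to be closed
    return morph_ND(vol, 'closing', selem_rad=2, slicewise=False)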
def resize3D(data, scale=None, shape=None, sliceId=2, method='cv2'):
import cv2
if data.ndim == 2:
if shape is not None:
            new_data = cv2.resize(data.astype(np.uint8), shape, interpolation=cv2.INTER_NEAREST)
elif method == 'cv2':
            if data.dtype == bool:
                data = data.astype(np.uint8)
            new_data = cv2.resize(data, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST)
else:
new_data = scindiint.zoom(data, scale)
else:
if sliceId == 2:
n_slices = data.shape[2]
# new_shape = cv2.resize(data[:,:,0], None, fx=scale, fy=scale).shape
new_shape = scindiint.zoom(data[:,:,0], scale).shape
            new_data = np.zeros(np.hstack((new_shape,n_slices)), dtype=int)
for i in range(n_slices):
# new_data[:,:,i] = cv2.resize(data[:,:,i], None, fx=scale, fy=scale)
# new_data[:,:,i] = (255 * skitra.rescale(data[:,:,0], scale)).astype(np.int)
if method == 'cv2':
                    new_data[:,:,i] = cv2.resize(data[:, :, i], (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST)
else:
new_data[:,:,i] = scindiint.zoom(data[:,:,i], scale)
elif sliceId == 0:
n_slices = data.shape[0]
# new_shape = cv2.resize(data[0,:,:], None, fx=scale, fy=scale).shape
# new_shape = skitra.rescale(data[0,:,:], scale).shape
if method == 'cv2':
                if data.dtype == bool:
                    data = data.astype(np.uint8)
                new_shape = cv2.resize(data[0, :, :], (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST).shape
else:
new_shape = scindiint.zoom(data[0,:,:], scale).shape
            new_data = np.zeros(np.hstack((n_slices, new_shape)), dtype=int)
for i in range(n_slices):
# new_data[i,:,:] = cv2.resize(data[i,:,:], None, fx=scale, fy=scale)
# new_data[i,:,:] = (255 * skitra.rescale(data[i,:,:], scale)).astype(np.int)
if method == 'cv2':
                    new_data[i,:,:] = cv2.resize(data[i, :, :], (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST)
else:
new_data[i,:,:] = scindiint.zoom(data[i,:,:], scale)
return new_data
def resize_ND(data, scale=None, shape=None, slice_id=0, method='cv2'):
    import cv2
    if shape is None:
        shape = list(data.shape)
else:
shape = list(shape)
if data.ndim == 2:
data = np.expand_dims(data, 0)
shape.insert(0, 1)
expanded = True
else:
expanded = False
if slice_id == 2:
data = np.swapaxes(np.swapaxes(data, 0, 2), 1, 2)
shape = [shape[2], shape[0], shape[1]]
swapped = True
else:
swapped = False
if scale is not None:
        new_slice_shape = cv2.resize(data[0, ...], (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST).shape
else:
new_slice_shape = shape[1:]
# new_data = np.zeros(np.hstack((data.shape[0], new_slice_shape)), dtype=np.int)
# data = data.astype(np.uint8)
new_data = np.zeros(np.hstack((data.shape[0], new_slice_shape)), dtype=data.dtype)
# data = data.astype(np.uint8)
for i, im in enumerate(data):
if scale is not None:
            new_data[i, ...] = cv2.resize(im, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST)
elif shape is not None:
            new_data[i, ...] = cv2.resize(im, (shape[2], shape[1]), interpolation=cv2.INTER_NEAREST)
if expanded:
new_data = new_data[0, ...]
if swapped:
new_data = np.swapaxes(np.swapaxes(new_data, 0, 2), 1, 2)
return new_data
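# Sketch: resize_ND rescales a (pseudo-)3D volume slice by slice with OpenCV
# nearest-neighbour interpolation (assumes opencv-python is installed).
def _demo_resize_ND():
    import numpy as np
    vol = np.arange(4 * 8 * 8, dtype=np.uint8).reshape(4, 8, 8)  # slice axis = 0
    return resize_ND(vol, scale=2.0, slice_id=0)  # -> shape (4, 16, 16)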
def get_overlay(mask, alpha=0.3, color='r'):
layer = None
if color == 'r':
layer = np.dstack((255*mask, np.zeros_like(mask), np.zeros_like(mask), alpha * mask))
elif color == 'g':
layer = alpha * np.dstack((np.zeros_like(mask), mask, np.zeros_like(mask)))
elif color == 'b':
layer = alpha * np.dstack((np.zeros_like(mask), np.zeros_like(mask), mask))
elif color == 'c':
layer = alpha * np.dstack((np.zeros_like(mask), mask, mask))
elif color == 'm':
layer = alpha * np.dstack((mask, np.zeros_like(mask), mask))
elif color == 'y':
        layer = alpha * np.dstack((mask, mask, np.zeros_like(mask)))
    return layer
"""
2D Disc models
==============
Classes: Rosenfeld2d, General2d, Velocity, Intensity, Cube, Tools
"""
#TODO in show(): Perhaps use text labels on line profiles to distinguish profiles for more than 2 cubes.
#TODO in make_model(): Find a smart way to detect and pass only the coords needed by a prop attribute.
#TODO in run_mcmc(): Enable an arg to allow the user to see the position of parameter walkers every 'arg' steps.
#TODO in General2d: Implement irregular grids (see e.g. meshio from nschloe on github) for the disc grid.
#TODO in General2d: Compute props in the interpolated grid (not in the original grid) to avoid interpolation of props and save time.
#TODO in General2d: Allow the lower surface to have independent intensity and line width parametrisations.
#TODO in General2d: Implement pressure support term
#TODO in make_model(): Allow for warped emitting surfaces, check notes for ideas as to how to solve for multiple intersections between l.o.s and emission surface.
#TODO in __main__(): show intro message when python -m disc2d
#TODO in run_mcmc(): use get() methods instead of allowing the user to use self obj attributes.
#TODO in make_model(): Allow R_disc to be a free parameter.
#TODO in make_model(): Enable 3D velocities too when subpixel algorithm is used
#TODO in v1.0: migrate to astropy units
from __future__ import print_function
from ..utils import constants as sfc
from ..utils import units as sfu
from astropy.convolution import Gaussian2DKernel, convolve
from scipy.interpolate import griddata, interp1d
from scipy.special import ellipk, ellipe
from scipy.optimize import curve_fit
from scipy.integrate import quad
import matplotlib.patches as patches
import matplotlib.pyplot as plt
from matplotlib import ticker
import numpy as np
import matplotlib
import itertools
import warnings
import numbers
import pprint
import copy
import time
import sys
import os
from multiprocessing import Pool
os.environ["OMP_NUM_THREADS"] = "1"
try:
import termtables
found_termtables = True
except ImportError:
print ("\n*** For nicer outputs we recommend installing 'termtables' by typing in terminal: pip install termtables ***")
found_termtables = False
#warnings.filterwarnings("error")
__all__ = ['Cube', 'Tools', 'Intensity', 'Velocity', 'General2d', 'Rosenfeld2d']
path_file = os.path.dirname(os.path.realpath(__file__))+'/'
"""
matplotlib.rcParams['font.family'] = 'monospace'
matplotlib.rcParams['font.weight'] = 'normal'
matplotlib.rcParams['lines.linewidth'] = 1.5
matplotlib.rcParams['axes.linewidth'] = 3.0
matplotlib.rcParams['xtick.major.width']=1.6
matplotlib.rcParams['ytick.major.width']=1.6
matplotlib.rc('font', size=MEDIUM_SIZE) # controls default text sizes
matplotlib.rc('axes', titlesize=MEDIUM_SIZE) # fontsize of axes title
matplotlib.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of x and y labels
matplotlib.rc('xtick', labelsize=MEDIUM_SIZE-2) # fontsize of y tick labels
matplotlib.rc('ytick', labelsize=MEDIUM_SIZE-2) # fontsize of x tick labels
matplotlib.rc('legend', fontsize=SMALL_SIZE-1) # legend fontsize
matplotlib.rc('figure', titlesize=BIGGER_SIZE) # fontsize of figure title
params = {'xtick.major.size': 6.5,
'ytick.major.size': 6.5
}
matplotlib.rcParams.update(params)
"""
SMALL_SIZE = 10
MEDIUM_SIZE = 15
BIGGER_SIZE = 22
hypot_func = lambda x,y: np.sqrt(x**2 + y**2) #Slightly faster than np.hypot<np.linalg.norm<scipydistance. Checked precision up to au**2 orders and seemed ok.
class InputError(Exception):
"""Exception raised for errors in the input.
Attributes:
expression -- input expression in which the error occurred
message -- explanation of the error
"""
def __init__(self, expression, message):
self.expression = expression
self.message = message
def __str__(self):
return '%s --> %s'%(self.expression, self.message)
class Tools:
@staticmethod
def _rotate_sky_plane(x, y, ang):
xy = np.array([x,y])
cos_ang = np.cos(ang)
sin_ang = np.sin(ang)
rot = np.array([[cos_ang, -sin_ang],
[sin_ang, cos_ang]])
return np.dot(rot, xy)
@staticmethod
def _rotate_sky_plane3d(x, y, z, ang, axis='z'):
xyz = np.array([x,y,z])
cos_ang = np.cos(ang)
sin_ang = np.sin(ang)
if axis == 'x':
rot = np.array([[1, 0, 0],
[0, cos_ang, -sin_ang],
[0, sin_ang, cos_ang]])
if axis == 'y':
rot = np.array([[cos_ang, 0, -sin_ang],
[0, 1, 0],
[sin_ang, 0, cos_ang]])
if axis == 'z':
rot = np.array([[cos_ang, -sin_ang , 0],
[sin_ang, cos_ang, 0],
[0, 0, 1]])
return np.dot(rot, xyz)
@staticmethod
def _project_on_skyplane(x, y, z, cos_incl, sin_incl):
x_pro = x
y_pro = y * cos_incl - z * sin_incl
z_pro = y * sin_incl + z * cos_incl
return x_pro, y_pro, z_pro
@staticmethod
def get_sky_from_disc_coords(R, az, z, incl, PA):
xp = R*np.cos(az)
yp = R*np.sin(az)
zp = z
xp, yp, zp = Tools._project_on_skyplane(xp, yp, zp, np.cos(incl), np.sin(incl))
xp, yp = Tools._rotate_sky_plane(xp, yp, PA)
return xp, yp, zp
@staticmethod #should be a bound method, self.grid is constant except for z_upper, z_lower
def _compute_prop(grid, prop_funcs, prop_kwargs):
n_funcs = len(prop_funcs)
props = [{} for i in range(n_funcs)]
for side in ['upper', 'lower']:
x, y, z, R, phi, R_1d, z_1d = grid[side]
coord = {'x': x, 'y': y, 'z': z, 'phi': phi, 'R': R, 'R_1d': R_1d, 'z_1d': z_1d}
for i in range(n_funcs): props[i][side] = prop_funcs[i](coord, **prop_kwargs[i])
return props
@staticmethod
def _progress_bar(percent=0, width=50):
left = width * percent // 100
right = width - left
"""
print('\r[', '#' * left, ' ' * right, ']',
f' {percent:.0f}%',
sep='', end='', flush=True)
"""
print('\r[', '#' * left, ' ' * right, ']', ' %.0f%%'%percent, sep='', end='') #compatible with python2 docs
sys.stdout.flush()
@staticmethod
def _break_line(init='', border='*', middle='=', end='\n', width=100):
print('\r', init, border, middle * width, border, sep='', end=end)
@staticmethod
def _print_logo(filename=path_file+'logo.txt'):
logo = open(filename, 'r')
print(logo.read())
logo.close()
@staticmethod
def _get_beam_from(beam, dpix=None, distance=None, frac_pixels=1.0):
"""
beam must be str pointing to fits file to extract beam from header or radio_beam Beam object.
If radio_beam Beam instance is provided, pixel size (in SI units) will be extracted from grid obj. Distance (in pc) must be provided.
#frac_pixels: number of averaged pixels on the data (useful to reduce computing time)
"""
from radio_beam import Beam
from astropy.io import fits
from astropy import units as u
sigma2fwhm = np.sqrt(8*np.log(2))
if isinstance(beam, str):
header = fits.getheader(beam)
beam = Beam.from_fits_header(header)
pix_scale = header['CDELT2'] * u.Unit(header['CUNIT2']) * frac_pixels
elif isinstance(beam, Beam):
if distance is None: raise InputError(distance, 'Wrong input distance. Please provide a value for the distance (in pc) to transform grid pix to arcsec')
pix_radians = np.arctan(dpix / (distance*sfu.pc)) #dist*ang=projdist
pix_scale = (pix_radians*u.radian).to(u.arcsec)
#print (pix_scale, pix_radians)
else: raise InputError(beam, 'beam object must either be str or Beam instance')
x_stddev = ((beam.major/pix_scale) / sigma2fwhm).value
y_stddev = ((beam.minor/pix_scale) / sigma2fwhm).value
#print (x_stddev, beam.major, pix_scale)
angle = (90*u.deg+beam.pa).to(u.radian).value
gauss_kern = Gaussian2DKernel(x_stddev, y_stddev, angle)
#gauss_kern = beam.as_kernel(pix_scale) #as_kernel() is slowing down the run when used in astropy.convolve
return beam, gauss_kern
@staticmethod
def average_pixels_cube(data, frac_pixels, av_method=np.median):
"""
data: datacube with shape (nchan, nx0, ny0)
frac_pixels: number of pixels to average
av_method: function to compute average
"""
nchan, nx0, ny0 = np.shape(data)
nx = int(round(nx0/frac_pixels))
ny = int(round(ny0/frac_pixels))
av_data = np.zeros((nchan, nx, ny))
progress = Tools._progress_bar
if frac_pixels>1:
di = frac_pixels
dj = frac_pixels
print ('Averaging %dx%d pixels from data cube...'%(di, dj))
for k in range(nchan):
progress(int(100*k/nchan))
for i in range(nx):
for j in range(ny):
av_data[k,i,j] = av_method(data[k,i*di:i*di+di,j*dj:j*dj+dj])
progress(100)
return av_data
else:
print('frac_pixels is <= 1, no average was performed...')
return data
@staticmethod
def weighted_std(prop, weights, weighted_mean=None):
sum_weights = np.sum(weights)
if weighted_mean is None:
weighted_mean = np.sum(weights*prop)/sum_weights
n = np.sum(weights>0)
w_std = np.sqrt(np.sum(weights*(prop-weighted_mean)**2)/((n-1)/n * sum_weights))
return w_std
#define a fit_double_bell func, with a model input as an optional arg to constrain initial guesses better
@staticmethod
def fit_one_gauss_cube(data, vchannels, lw_chan=1.0, sigma_fit=None):
"""
Fit Gaussian profile along velocity axis to input data
lw_chan: initial guess for line width is lw_chan*np.mean(dvi).
sigma_fit: cube w/ channel weights for each pixel, passed to curve_fit
"""
gauss = lambda x, *p: p[0]*np.exp(-(x-p[1])**2/(2.*p[2]**2))
nchan, nx, ny = np.shape(data)
peak, dpeak = np.zeros((nx, ny)), np.zeros((nx, ny))
centroid, dcent = np.zeros((nx, ny)), np.zeros((nx, ny))
linewidth, dlinew = np.zeros((nx, ny)), np.zeros((nx, ny))
nbad = 0
ind_max = np.nanargmax(data, axis=0)
I_max = np.nanmax(data, axis=0)
vel_peak = vchannels[ind_max]
dv = lw_chan*np.mean(vchannels[1:]-vchannels[:-1])
progress = Tools._progress_bar
if sigma_fit is None: sigma_func = lambda i,j: None
else: sigma_func = lambda i,j: sigma_fit[:,i,j]
print ('Fitting Gaussian profile to pixels (along velocity axis)...')
for i in range(nx):
for j in range(ny):
isfin = np.isfinite(data[:,i,j])
try: coeff, var_matrix = curve_fit(gauss, vchannels[isfin], data[:,i,j][isfin],
p0=[I_max[i,j], vel_peak[i,j], dv],
sigma=sigma_func(i,j))
except RuntimeError:
nbad+=1
continue
peak[i,j] = coeff[0]
centroid[i,j] = coeff[1]
linewidth[i,j] = coeff[2]
dpeak[i,j], dcent[i,j], dlinew[i,j] = np.sqrt(np.diag(var_matrix))
progress(int(100*i/nx))
progress(100)
print ('\nGaussian fit did not converge for %.2f%s of the pixels'%(100.0*nbad/(nx*ny),'%'))
return peak, centroid, linewidth, dpeak, dcent, dlinew
@staticmethod
def get_tb(I, nu, beam, full=True):
"""
nu in GHz
Intensity in mJy/beam
beam object from radio_beam
if full: use full Planck law, else use rayleigh-jeans approximation
"""
from astropy import units as u
bmaj = beam.major.to(u.arcsecond).value
bmin = beam.minor.to(u.arcsecond).value
beam_area = sfu.au**2*np.pi*(bmaj*bmin)/(4*np.log(2)) #area of gaussian beam
#beam solid angle: beam_area/(dist*pc)**2. dist**2 cancels out with beamarea's dist**2 from conversion or bmaj, bmin to mks units.
beam_solid = beam_area/sfu.pc**2
mJy_to_SI = 1e-3*1e-26
nu = nu*1e9
if full:
Tb = np.sign(I)*(np.log((2*sfc.h*nu**3)/(sfc.c**2*np.abs(I)*mJy_to_SI/beam_solid)+1))**-1*sfc.h*nu/(sfc.kb)
else:
wl = sfc.c/nu
Tb = 0.5*wl**2*I*mJy_to_SI/(beam_solid*sfc.kb)
#(1222.0*I/(nu**2*(beam.minor/1.0).to(u.arcsecond)*(beam.major/1.0).to(u.arcsecond))).value #nrao RayJeans
return Tb
@staticmethod
def _get_tb(*args, **kwargs): return Tools.get_tb(*args, **kwargs)
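#Minimal usage sketch for Tools.get_tb (not part of the original API; assumes the
#optional radio_beam package is installed and uses illustrative numbers only):
def _demo_get_tb():
    from radio_beam import Beam
    from astropy import units as u
    beam = Beam(major=0.10*u.arcsec, minor=0.08*u.arcsec, pa=30*u.deg)
    return Tools.get_tb(10.0, 230.5, beam, full=True) #10 mJy/beam at 230.5 GHz --> Tb in K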
class Residuals:
pass
class PlotTools:
@staticmethod
def mod_nticks_cbars(cbars, nbins=5):
for cb in cbars:
cb.locator = ticker.MaxNLocator(nbins=nbins)
cb.update_ticks()
@staticmethod
def mod_major_ticks(ax, axis='both', nbins=6):
ax.locator_params(axis=axis, nbins=nbins)
@staticmethod
def mod_minor_ticks(ax):
ax.minorticks_on()
ax.xaxis.set_minor_locator(ticker.AutoMinorLocator(2)) #1 minor tick per major interval
ax.yaxis.set_minor_locator(ticker.AutoMinorLocator(2))
@classmethod
def make_up_ax(cls, ax, xlims=(None, None), ylims=(None, None),
mod_minor=True, mod_major=True, **kwargs_tick_params):
kwargs_t = dict(labeltop=True, labelbottom=False, top=True, right=True, which='both', direction='in')
kwargs_t.update(kwargs_tick_params)
if mod_major: cls.mod_major_ticks(ax)
if mod_minor: cls.mod_minor_ticks(ax)
ax.set_xlim(*xlims)
ax.set_ylim(*ylims)
ax.tick_params(**kwargs_t)
@staticmethod
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=256):
new_cmap = matplotlib.colors.LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),
cmap(np.linspace(minval, maxval, n)))
return new_cmap
@staticmethod
def get_cmap_from_color(color, lev=3):
cmap = matplotlib.colors.to_rgba(color)
newcolors = np.tile(cmap, lev).reshape(lev,4) #Repeats the colour lev times
newcolors[:,-1] = np.linspace(0.25, 0.95, lev) #Modifies alpha only
        new_cmap = matplotlib.colors.ListedColormap(newcolors)
return new_cmap
@staticmethod
def mask_cmap_interval(cmap, cmap_lims, mask_lims, mask_color=np.ones(4), append=False):
if isinstance(cmap, str): cmap = plt.get_cmap(cmap)
cmap0, cmap1 = cmap_lims
mask0, mask1 = mask_lims
c0 = (mask0-cmap0)/(cmap1-cmap0)
c1 = (mask1-cmap0)/(cmap1-cmap0)
id0 = int(round(c0*(cmap.N)))
id1 = int(round(c1*(cmap.N)))
new_cmap = copy.copy(cmap)
new_cmap._init()
"""#The following does not work, plt does not know where to locate the newly added colorss
if append:
mask_color_arr = np.broadcast_to(mask_color, (id1-id0, 4))
new_cmap._lut = np.insert(new_cmap._lut, id0, mask_color_arr, axis=0)
new_cmap.N = cmap.N + id1-id0
#Next line redoes the continuous linearsegmented colormap, thus the masked color block is reduced to a single color
#new_cmap = new_cmap._resample(new_cmap.N)
"""
new_cmap._lut[id0:id1,:] = mask_color
return new_cmap
@staticmethod
def get_continuous_cmap(hex_list, float_list=None):
"""
Taken from https://github.com/KerryHalupka/custom_colormap
creates and returns a color map that can be used in heat map figures.
If float_list is not provided, colour map graduates linearly between each color in hex_list.
If float_list is provided, each color in hex_list is mapped to the respective location in float_list.
Parameters
----------
hex_list: list of hex code strings
float_list: list of floats between 0 and 1, same length as hex_list. Must start with 0 and end with 1.
Returns
----------
matplotlib cmap
Examples
----------
fig, ax = plt.subplots(1,1)
hex_list = ['#0091ad', '#fffffc', '#ffd166']
x, y = np.mgrid[-5:5:0.05, -5:5:0.05]
z = (np.sqrt(x**2 + y**2) + np.sin(x**2 + y**2))
im = ax.imshow(z, cmap=get_continuous_cmap(hex_list))
fig.colorbar(im)
ax.yaxis.set_major_locator(plt.NullLocator()) # remove y axis ticks
ax.xaxis.set_major_locator(plt.NullLocator()) # remove x axis ticks
plt.show()
"""
rgb_list = [matplotlib.colors.to_rgb(i) for i in hex_list]
if float_list is None: float_list = np.linspace(0,1,len(rgb_list))
cdict = dict()
for num, col in enumerate(['red', 'green', 'blue']):
col_list = [[float_list[i], rgb_list[i][num], rgb_list[i][num]] for i in range(len(float_list))]
cdict[col] = col_list
cmap_new = matplotlib.colors.LinearSegmentedColormap('my_cmp', segmentdata=cdict, N=256)
return cmap_new
@staticmethod
def append_stddev_panel(ax, prop, weights=None, hist=False, fit_gauss_hist=False): #attach significance panel to ax, based on dist. of points prop
gauss = lambda x, A, mu, sigma: A*np.exp(-(x-mu)**2/(2.*sigma**2))
ax1_ylims = ax[-2].get_ylim()
for axi in ax[:-1]: axi.tick_params(which='both', right=False, labelright=False)
ax[-1].tick_params(which='both', top=False, bottom=False, labelbottom=False,
left=False, labelleft=False, right=True, labelright=True)
ax[-1].yaxis.set_label_position('right')
ax[-1].spines['left'].set_color('0.6')
ax[-1].spines['left'].set_linewidth(3.5)
if weights is not None:
prop_mean = np.sum(weights*prop)/np.sum(weights)
prop_std = Tools.weighted_std(prop, weights, weighted_mean=prop_mean)
else:
prop_mean = np.mean(prop)
prop_std = np.std(prop)
max_y = 1.0
if hist:
n, bins, patches = ax[-1].hist(prop, bins=2*int(round(len(prop)**(1/3.)))-1, orientation='horizontal',
density=True, linewidth=1.5, facecolor='0.95', edgecolor='k', alpha=1.0)
max_y = np.max(n)
if fit_gauss_hist: #Fit Gaussian to histogram to compare against data distribution
coeff, var_matrix = curve_fit(gauss, 0.5*(bins[1:]+bins[:-1]), n, p0=[max_y, prop_mean, prop_std])
prop_x = np.linspace(prop_mean-4*prop_std, prop_mean+4*prop_std, 100)
prop_y = gauss(prop_x, *coeff)
ax[-1].plot(prop_y, prop_x, color='tomato', ls='--', lw=2.0)
prop_x = np.linspace(prop_mean-4*prop_std, prop_mean+4*prop_std, 100)
prop_pars = [max_y, prop_mean, prop_std]
prop_y = gauss(prop_x, *prop_pars)
ax[-1].plot(prop_y, prop_x, color='limegreen', lw=3.5)
ax[-1].set_xlim(-0.2*max_y, 1.2*max_y)
#ax[-1].plot([-0.2, 1.0], [prop_mean]*2, color='0.6', lw=2.5)
#for axi in ax[:-1]: axi.axhline(prop_mean, color='0.6', lw=2.5)
for i in range(0,4):
prop_stdi = prop_mean+i*prop_std
gauss_prop_stdi = gauss(prop_stdi, *prop_pars)
ax[-1].plot([-0.2*max_y, gauss_prop_stdi], [prop_stdi]*2, color='0.6', ls=':', lw=2.)
for axi in ax[:-1]: axi.axhline(prop_stdi, color='0.6', ls=':', lw=2.)
if prop_stdi < ax1_ylims[-1] and i>0:
ax[-1].text(gauss_prop_stdi+0.2*max_y, prop_stdi, r'%d$\sigma$'%i,
fontsize=14, ha='center', va='center', rotation=-90)
for axi in ax: axi.set_ylim(*ax1_ylims)
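#Sketch: build a truncated colormap and a colormap whitened over a chosen interval with
#the PlotTools helpers above (values below are illustrative only):
def _demo_plottools_cmaps():
    cmap_trunc = PlotTools.truncate_colormap(plt.get_cmap('viridis'), 0.2, 0.8)
    cmap_mask = PlotTools.mask_cmap_interval('RdBu_r', cmap_lims=(-5, 5), mask_lims=(-1, 1))
    return cmap_trunc, cmap_mask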
class Canvas3d:
pass
class Contours(PlotTools):
@staticmethod
def emission_surface(ax, R, phi, R_lev=None, phi_lev=None, extent=None, proj_offset=None, X=None, Y=None, kwargs_R={}, kwargs_phi={}):
kwargs_phif = dict(linestyles=':', linewidths=1.0, colors='k')
kwargs_Rf = dict(linewidths=1.4, colors='k')
kwargs_phif.update(kwargs_phi)
kwargs_Rf.update(kwargs_R)
near_nonan = ~np.isnan(R['upper'])
Rmax = np.max(R['upper'][near_nonan])
if extent is None:
extent = np.array([-Rmax, Rmax, -Rmax, Rmax])/sfu.au
kwargs_phif.update({'extent': extent})
kwargs_Rf.update({'extent': extent})
if R_lev is None: R_lev = np.linspace(0.06, 0.97, 4)*Rmax
else: R_lev = np.sort(R_lev)
if phi_lev is None: phi_lev = np.linspace(-np.pi*0.95, np.pi, 11, endpoint=False)
#Splitting phi into pos and neg to try and avoid ugly contours close to -pi and pi
phi_lev_neg = phi_lev[phi_lev<0]
phi_lev_pos = phi_lev[phi_lev>0]
phi_neg_near = np.where((phi['upper']<0) & (R['upper']>R_lev[0]) & (R['upper']<R_lev[-1]), phi['upper'], np.nan)
phi_pos_near = np.where((phi['upper']>0) & (R['upper']>R_lev[0]) & (R['upper']<R_lev[-1]), phi['upper'], np.nan)
phi_neg_far = np.where((phi['lower']<0) & (R['lower']>R_lev[0]) & (R['lower']<R_lev[-1]), phi['lower'], np.nan)
phi_pos_far = np.where((phi['lower']>0) & (R['lower']>R_lev[0]) & (R['lower']<R_lev[-1]), phi['lower'], np.nan)
if proj_offset is not None: #For 3d projections
ax.contour(X, Y, R['upper'], offset=proj_offset, levels=R_lev, **kwargs_Rf)
ax.contour(X, Y, np.where(near_nonan, np.nan, R['lower']), offset=proj_offset, levels=R_lev, **kwargs_Rf)
ax.contour(X, Y, phi_pos_near, offset=proj_offset, levels=phi_lev_pos, **kwargs_phif)
ax.contour(X, Y, phi_neg_near, offset=proj_offset, levels=phi_lev_neg, **kwargs_phif)
ax.contour(X, Y, np.where(near_nonan, np.nan, phi_pos_far), offset=proj_offset, levels=phi_lev_pos, **kwargs_phif)
ax.contour(X, Y, np.where(near_nonan, np.nan, phi_neg_far), offset=proj_offset, levels=phi_lev_neg, **kwargs_phif)
else:
ax.contour(R['upper'], levels=R_lev, **kwargs_Rf)
ax.contour(np.where(near_nonan, np.nan, R['lower']), levels=R_lev, **kwargs_Rf)
ax.contour(phi_pos_near, levels=phi_lev_pos, **kwargs_phif)
ax.contour(phi_neg_near, levels=phi_lev_neg, **kwargs_phif)
ax.contour(np.where(near_nonan, np.nan, phi_pos_far), levels=phi_lev_pos, **kwargs_phif)
ax.contour(np.where(near_nonan, np.nan, phi_neg_far), levels=phi_lev_neg, **kwargs_phif)
#The following method can be optimised if the contour finding process is separated from the plotting
# by returning coords_list and inds_cont first, which will allow the user use the same set of contours to plot different props.
@staticmethod
def prop_along_coords(ax, prop, coords, coord_ref, coord_levels,
ax2=None, X=None, Y=None,
PA=0,
acc_threshold=0.05,
max_prop_threshold=np.inf,
color_bounds=[np.pi/5, np.pi/2],
colors=['k', 'dodgerblue', (0,1,0), (1,0,0)],
lws=[2, 0.5, 0.2, 0.2], lw_ax2_factor=1,
subtract_quadrants=False,
subtract_func=np.subtract):
"""
Compute radial/azimuthal contours according to the model disc geometry
to get and plot information from the input 2D property ``prop``.
Parameters
----------
ax : `matplotlib.axes` instance, optional
ax instance to make the plot.
prop : array_like, shape (nx, ny)
Input 2D field to extract information along the computed contours.
coords : list, shape (2,)
coords[0] [array_like, shape (nx, ny)], is the coordinate 2D map onto which contours will be computed using the input ``coord_levels``;
coords[1] [array_like, shape (nx, ny)], is the coordinate 2D map against which the ``prop`` values are plotted. The output plot is prop vs coords[1]
coord_ref : scalar
Reference coordinate (referred to ``coords[0]``) to highlight among the other contours.
coord_levels : array_like, shape (nlevels,)
Contour levels to be extracted from ``coords[0]``.
ax2 : `matplotlib.axes` instance (or list of instances), optional
Additional ax(s) instance(s) to plot the location of contours in the disc.
If provided, ``X`` and ``Y`` must also be passed.
X : array_like, shape (nx, ny), optional
Meshgrid of the model x coordinate (see `numpy.meshgrid`). Required if ax2 instance(s) is provided.
Y : array_like, shape (nx, ny), optional
Meshgrid of the model y coordinate (see `numpy.meshgrid`). Required if ax2 instance(s) is provided.
PA : scalar, optional
Reference position angle.
acc_threshold : float, optional
Threshold to accept points on contours at constant coords[0]. If obtained level at a point is such that np.abs(level-level_reference)<acc_threshold the point is accepted
max_prop_threshold : float, optional
Threshold to accept points of contours. Rejects residuals of the contour if they are < max_prop_threshold. Useful to reject hot pixels.
color_bounds : array_like, shape (nbounds,), optional
Colour bounds with respect to the reference contour coord_ref.
colors : array_like, shape (nbounds+2,), optional
Contour colors. (i=0) is reserved for the reference contour coord_ref,
(i>0) for contour colors according to the bounds in color_bounds.
lws : array_like, shape (nbounds+2), optional
Contour linewidths. Similarly, (i=0) is reserved for coord_ref and
(i>0) for subsequent bounds.
subtract_quadrants : bool, optional
If True, subtract residuals by folding along the projected minor axis of the disc. Currently working for azimuthal contours only.
subtract_func : function, optional
If subtract_quadrants, this function is used to operate between folded quadrants. Defaults to np.subtract.
"""
from skimage import measure
coord_list, lev_list, resid_list, color_list = [], [], [], []
if np.sum(coord_levels==coord_ref)==0: coord_levels = np.append(coord_levels, coord_ref)
for lev in coord_levels:
contour = measure.find_contours(coords[0], lev) #, fully_connected='high', positive_orientation='high')
if len(contour)==0:
print ('no contours found for phi =', lev)
continue
            ind_good = np.argmin([np.abs(lev-coords[0][tuple(np.round(contour[i][0]).astype(int))]) for i in range(len(contour))]) #getting ind of closest contour to lev
            inds_cont = np.round(contour[ind_good]).astype(int)
inds_cont = [tuple(f) for f in inds_cont]
first_cont = np.array([coords[0][i] for i in inds_cont])
second_cont = np.array([coords[1][i] for i in inds_cont])
prop_cont = np.array([prop[i] for i in inds_cont])
corr_inds = np.abs(first_cont-lev) < acc_threshold
if lev == coord_ref: zorder=10
else: zorder=np.random.randint(0,10)
lw = lws[-1]
color = colors[-1]
for i,bound in enumerate(color_bounds):
if lev == coord_ref:
lw = lws[0]
color = colors[0]
zorder = 10
break
if np.abs(coord_ref - lev) < bound:
lw = lws[i+1]
color = colors[i+1]
break
if subtract_quadrants:
#if lev < color_bounds[0]: continue
ref_pos = PA+90 #Reference axis for positive angles
ref_neg = PA-90
angles = second_cont[corr_inds]
prop_ = prop_cont[corr_inds]
angles_pos = angles[angles>=0]
angles_neg = angles[angles<0]
relative_diff_pos = ref_pos - angles_pos
relative_diff_neg = ref_neg - angles_neg
angle_diff_pos, prop_diff_pos = [], []
angle_diff_neg, prop_diff_neg = [], []
for i,diff in enumerate(relative_diff_pos):
#Finding where the difference matches that of the current analysis angle
#The -1 flips the sign so that the number on the other side of the symmetry axis is found
ind = np.argmin(np.abs(-1*relative_diff_pos - diff))
mirror_ind = angles==angles_pos[ind]
current_ind = angles==angles_pos[i]
prop_diff = subtract_func(prop_[current_ind][0], prop_[mirror_ind][0])
angle_diff_pos.append(angles_pos[i])
prop_diff_pos.append(prop_diff)
angle_diff_pos = np.asarray(angle_diff_pos)
prop_diff_pos = np.asarray(prop_diff_pos)
if len(angle_diff_pos)>1:
ind_sort_pos = np.argsort(angle_diff_pos)
plot_ang_diff_pos = angle_diff_pos[ind_sort_pos]
plot_prop_diff_pos = prop_diff_pos[ind_sort_pos]
ind_prop_pos = np.abs(plot_prop_diff_pos)<max_prop_threshold
ax.plot(plot_ang_diff_pos[ind_prop_pos], plot_prop_diff_pos[ind_prop_pos], color=color, lw=lw, zorder=zorder)
coord_list.append(plot_ang_diff_pos[ind_prop_pos])
resid_list.append(plot_prop_diff_pos[ind_prop_pos])
color_list.append(color)
lev_list.append(lev)
else:
plot_ang_diff_pos = []
plot_prop_diff_pos = []
for i,diff in enumerate(relative_diff_neg):
ind = np.argmin(np.abs(-1*relative_diff_neg - diff))
mirror_ind = angles==angles_neg[ind]
current_ind = angles==angles_neg[i]
prop_diff = subtract_func(prop_[current_ind][0], prop_[mirror_ind][0])
angle_diff_neg.append(angles_neg[i])
prop_diff_neg.append(prop_diff)
angle_diff_neg = np.asarray(angle_diff_neg)
prop_diff_neg = np.asarray(prop_diff_neg)
if len(angle_diff_neg)>1:
ind_sort_neg = np.argsort(angle_diff_neg)
plot_ang_diff_neg = angle_diff_neg[ind_sort_neg]
plot_prop_diff_neg = prop_diff_neg[ind_sort_neg]
ind_prop_neg = np.abs(plot_prop_diff_neg)<max_prop_threshold
ax.plot(plot_ang_diff_neg[ind_prop_neg], plot_prop_diff_neg[ind_prop_neg], color=color, lw=lw, zorder=zorder)
coord_list.append(plot_ang_diff_neg[ind_prop_neg])
resid_list.append(plot_prop_diff_neg[ind_prop_neg])
color_list.append(color)
lev_list.append(lev)
else:
plot_ang_diff_neg = []
plot_prop_diff_neg = []
"""
if len(angle_diff_pos)>1 or len(angle_diff_neg)>1:
coord_list.append(np.append(plot_ang_diff_pos, plot_ang_diff_neg))
resid_list.append(np.append(plot_prop_diff_pos, plot_prop_diff_neg))
color_list.append(color)
lev_list.append(lev)
"""
else:
coord_list.append(second_cont[corr_inds])
resid_list.append(prop_cont[corr_inds])
color_list.append(color)
lev_list.append(lev)
ind_sort = np.argsort(second_cont[corr_inds]) #sorting by azimuth to avoid 'joint' boundaries in plot
ax.plot(second_cont[corr_inds][ind_sort],
prop_cont[corr_inds][ind_sort],
color=color, lw=lw, zorder=zorder)
if ax2 is not None:
x_cont = np.array([X[i] for i in inds_cont])
y_cont = np.array([Y[i] for i in inds_cont])
if isinstance(ax2, matplotlib.axes._subplots.Axes):
ax2.plot(x_cont[corr_inds], y_cont[corr_inds], color=color, lw=lw*lw_ax2_factor)
elif isinstance(ax2, list):
for axi in ax2:
if isinstance(axi, matplotlib.axes._subplots.Axes): axi.plot(x_cont[corr_inds], y_cont[corr_inds], color=color, lw=lw*lw_ax2_factor)
return [np.asarray(tmp) for tmp in [coord_list, resid_list, color_list, lev_list]]
@staticmethod
def make_substructures(ax, twodim=False, gaps=[], rings=[], kinks=[], make_labels=False,
kwargs_gaps={}, kwargs_rings={}, kwargs_kinks={}):
'''Overlay ring-like (if twodim) or vertical lines (if not twodim) to illustrate the radial location of substructures in the disc'''
kwargs_g = dict(color='0.2', ls='--', lw=1.7, alpha=0.9)
kwargs_r = dict(color='0.2', ls='-', lw=1.7, alpha=0.9)
kwargs_k = dict(color='purple', ls=':', lw=2.6, alpha=0.9)
kwargs_g.update(kwargs_gaps)
kwargs_r.update(kwargs_rings)
kwargs_k.update(kwargs_kinks)
if twodim:
phi = np.linspace(0, 2*np.pi, 50)
cos_phi = np.cos(phi)
sin_phi = np.sin(phi)
for R in gaps: ax.plot(R*cos_phi, R*sin_phi, **kwargs_g)
for R in rings: ax.plot(R*cos_phi, R*sin_phi, **kwargs_r)
for R in kinks: ax.plot(R*cos_phi, R*sin_phi, **kwargs_k)
else:
for R in gaps: ax.axvline(R, **kwargs_g)
for R in rings: ax.axvline(R, **kwargs_r)
for R in kinks: ax.axvline(R, **kwargs_k)
if make_labels and len(gaps)>0: ax.plot([None], [None], label='Gaps', **kwargs_g)
if make_labels and len(rings)>0: ax.plot([None], [None], label='Rings', **kwargs_r)
if make_labels and len(kinks)>0: ax.plot([None], [None], label='Kinks', **kwargs_k)
return ax
@staticmethod
def make_contour_lev(prop, lev, X, Y, acc_threshold=20):
from skimage import measure
contour = measure.find_contours(prop, lev)
        inds_cont = np.round(contour[-1]).astype(int)
inds_cont = [tuple(f) for f in inds_cont]
first_cont = np.array([prop[i] for i in inds_cont])
corr_inds = np.abs(first_cont-lev) < acc_threshold
x_cont = np.array([X[i] for i in inds_cont])
y_cont = np.array([Y[i] for i in inds_cont])
return x_cont[corr_inds], y_cont[corr_inds], inds_cont, corr_inds
@staticmethod
def beams_along_ring(lev, Rgrid, beam_size, X, Y):
xc, yc, _, _ = Contours.make_contour_lev(Rgrid, lev, X, Y)
rc = hypot_func(xc, yc)
a = np.max(rc)
b = np.min(rc)
ellipse_perim = np.pi*(3*(a+b)-np.sqrt((3*a+b)*(a+3*b)))
return ellipse_perim/beam_size
@staticmethod
def get_average_east_west(resid_list, coord_list, lev_list,
Rgrid, beam_size, X, Y,
av_func=np.nanmean, mask_ang=0, resid_thres='3sigma',
error_func=True, error_unit=1.0, error_thres=np.inf):
#resid_thres: None, '3sigma', or list of thresholds with size len(lev_list)
nconts = len(lev_list)
if resid_thres is None: resid_thres = [np.inf]*nconts
elif resid_thres == '3sigma': resid_thres = [3*np.nanstd(resid_list[i]) for i in range(nconts)] #anything higher than 3sigma is rejected from annulus
# -np.pi<coord_list<np.pi
ind_west = [((coord_list[i]<90-mask_ang) & (coord_list[i]>-90+mask_ang)) & (np.abs(resid_list[i])<resid_thres[i]) for i in range(nconts)]
ind_east = [((coord_list[i]>90+mask_ang) | (coord_list[i]<-90-mask_ang)) & (np.abs(resid_list[i])<resid_thres[i]) for i in range(nconts)]
av_west = np.array([av_func(resid_list[i][ind_west[i]]) for i in range(nconts)])
av_east = np.array([av_func(resid_list[i][ind_east[i]]) for i in range(nconts)])
if error_func is None: av_west_error, av_east_error = None, None
else:
beams_ring_sqrt = np.sqrt([0.5*Contours.beams_along_ring(lev, Rgrid, beam_size, X, Y) for lev in lev_list]) #0.5 because we split the disc in halves
if callable(error_func): #if error map provided, compute average error per radius, divided by sqrt of number of beams (see <NAME> notes on errors)
av_west_error, av_east_error = np.zeros(nconts), np.zeros(nconts)
for i in range(nconts):
x_west, y_west, __ = get_sky_from_disc_coords(lev_list[i], coord_list[i][ind_west[i]])
x_east, y_east, __ = get_sky_from_disc_coords(lev_list[i], coord_list[i][ind_east[i]])
error_west = np.array(list(map(error_func, x_west, y_west))).T[0]
error_east = np.array(list(map(error_func, x_east, y_east))).T[0]
sigma2_west = np.where((np.isfinite(error_west)) & (error_unit*error_west<error_thres) & (error_west>0), (error_unit*error_west)**2, 0)
sigma2_east = np.where((np.isfinite(error_east)) & (error_unit*error_east<error_thres) & (error_east>0), (error_unit*error_east)**2, 0)
Np_west = len(coord_list[i][ind_west[i]])
Np_east = len(coord_list[i][ind_east[i]])
av_west_error[i] = np.sqrt(np.nansum(sigma2_west)/Np_west)/beams_ring_sqrt[i]
av_east_error[i] = np.sqrt(np.nansum(sigma2_east)/Np_east)/beams_ring_sqrt[i]
else: #compute standard error of mean value
av_west_error = np.array([np.std(resid_list[i][ind_west[i]], ddof=1) for i in range(nconts)])/beams_ring_sqrt
av_east_error = np.array([np.std(resid_list[i][ind_east[i]], ddof=1) for i in range(nconts)])/beams_ring_sqrt
return av_west, av_east, av_west_error, av_east_error
@staticmethod
def get_average(resid_list, coord_list, lev_list,
Rgrid, beam_size, X, Y,
av_func=np.nanmean, mask_ang=0, resid_thres='3sigma',
error_func=True, error_unit=1.0, error_thres=np.inf):
#mask_ang: +- angles to reject around minor axis (i.e. phi=+-90)
#resid_thres: None, '3sigma', or list of thresholds with size len(lev_list)
frac_annulus = 1.0 #if halves, 0.5; if quadrants, 0.25
nconts = len(lev_list)
if resid_thres is None: resid_thres = [np.inf]*nconts #consider all values for the average
elif resid_thres == '3sigma': resid_thres = [3*np.nanstd(resid_list[i]) for i in range(nconts)] #anything higher than 3sigma is rejected from annulus
# -np.pi<coord_list<np.pi
ind_accep = [(((coord_list[i]<90-mask_ang) & (coord_list[i]>-90+mask_ang)) |
((coord_list[i]>90+mask_ang) | (coord_list[i]<-90-mask_ang))) &
(np.abs(resid_list[i])<resid_thres[i])
for i in range(nconts)]
av_annulus = np.array([av_func(resid_list[i][ind_accep[i]]) for i in range(nconts)])
if error_func is None: av_error = None
else:
beams_ring_sqrt = np.sqrt([frac_annulus*Contours.beams_along_ring(lev, Rgrid, beam_size, X, Y) for lev in lev_list])
if callable(error_func): #if error map provided, compute average error per radius, divided by sqrt of number of beams (see <NAME> notes on errors)
av_error = np.zeros(nconts)
for i in range(nconts):
x_accep, y_accep, __ = get_sky_from_disc_coords(lev_list[i], coord_list[i][ind_accep[i]])
error_accep = np.array(list(map(error_func, x_accep, y_accep))).T[0]
sigma2_accep = np.where((np.isfinite(error_accep)) & (error_unit*error_accep<error_thres) & (error_accep>0), (error_unit*error_accep)**2, 0)
Np_accep = len(coord_list[i][ind_accep[i]])
av_error[i] = np.sqrt(np.nansum(sigma2_accep)/Np_accep)/beams_ring_sqrt[i]
else: #compute standard error of mean value
av_error = np.array([np.std(resid_list[i][ind_accep[i]], ddof=1) for i in range(nconts)])/beams_ring_sqrt
return av_annulus, av_error
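#Sketch: number of beams that fit along an R=100 au ring for a face-on grid and a 10 au
#beam, via the Ramanujan ellipse perimeter used in Contours.beams_along_ring
#(expects ~2*pi*100/10 ~ 63; the grid below is illustrative and kept in au for simplicity):
def _demo_beams_along_ring():
    x = np.linspace(-200, 200, 401) #au
    X, Y = np.meshgrid(x, x)
    Rgrid = hypot_func(X, Y)
    return Contours.beams_along_ring(100.0, Rgrid, 10.0, X, Y)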
class Cube(object):
def __init__(self, nchan, channels, data, beam=False, beam_kernel=False, tb={'nu': False, 'beam': False, 'full': True}):
self.nchan = nchan
self.channels = channels
self.data = data
self.point = self.cursor
self._interactive = self.cursor
self._interactive_path = self.curve
if beam: self.beam = beam
if beam_kernel: self.beam_kernel = beam_kernel
if isinstance(tb, dict):
if tb['nu'] and tb['beam']: self.data = Tools.get_tb(self.data, tb['nu'], tb['beam'], full=tb['full'])
@property
def interactive(self):
return self._interactive
@interactive.setter
def interactive(self, func):
print('Setting interactive function to', func)
self._interactive = func
@interactive.deleter
def interactive(self):
print('Deleting interactive function')
del self._interactive
@property
def interactive_path(self):
return self._interactive_path
@interactive_path.setter
def interactive_path(self, func):
print('Setting interactive_path function to', func)
self._interactive_path = func
@interactive_path.deleter
def interactive_path(self):
print('Deleting interactive_path function')
del self._interactive_path
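    #Minimal usage sketch (hedged): build a Cube from a FITS line cube and launch the
    #interactive channel-map viewer. 'datacube.fits' and the plot extent are placeholders,
    #and a standard 3-axis velocity cube header (NAXIS3, CRVAL3, CDELT3) is assumed:
    # from astropy.io import fits
    # data, header = fits.getdata('datacube.fits', header=True)
    # channels = header['CRVAL3'] + header['CDELT3']*np.arange(header['NAXIS3'])
    # cube = Cube(len(channels), channels, data)
    # cube.show(extent=[-500, 500, -500, 500]) #au, illustrative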
def ellipse(self):
pass
def _plot_spectrum_region(self, x0, x1, y0, y1, ax, extent=None, compare_cubes=[], stat_func=np.mean, **kwargs):
kwargs_spec = dict(where='mid', linewidth=2.5, label=r'x0:%d,x1:%d'%(x0,x1))
kwargs_spec.update(kwargs)
v0, v1 = self.channels[0], self.channels[-1]
def get_ji(x,y):
pass
if extent is None:
j0, i0 = int(x0), int(y0)
j1, i1 = int(x1), int(y1)
else:
nz, ny, nx = np.shape(self.data)
dx = extent[1] - extent[0]
dy = extent[3] - extent[2]
j0 = int(nx*(x0-extent[0])/dx)
i0 = int(ny*(y0-extent[2])/dy)
j1 = int(nx*(x1-extent[0])/dx)
i1 = int(ny*(y1-extent[2])/dy)
slice_cube = self.data[:,i0:i1,j0:j1]
spectrum = np.array([stat_func(chan) for chan in slice_cube])
ncubes = len(compare_cubes)
if ncubes > 0:
slice_comp = [compare_cubes[i].data[:,i0:i1,j0:j1] for i in range(ncubes)]
cubes_spec = [np.array([stat_func(chan) for chan in slice_comp[i]]) for i in range(ncubes)]
if np.logical_or(np.isinf(spectrum), np.isnan(spectrum)).all(): return False
else:
plot_spec = ax.step(self.channels, spectrum, **kwargs_spec)
if ncubes > 0:
alpha = 0.2
dalpha = -alpha/ncubes
for i in range(ncubes):
ax.fill_between(self.channels, cubes_spec[i], color=plot_spec[0].get_color(), step='mid', alpha=alpha)
alpha+=dalpha
else: ax.fill_between(self.channels, spectrum, color=plot_spec[0].get_color(), step='mid', alpha=0.2)
return plot_spec
def box(self, fig, ax, extent=None, compare_cubes=[], stat_func=np.mean, **kwargs):
from matplotlib.widgets import RectangleSelector
def onselect(eclick, erelease):
if eclick.inaxes is ax[0]:
plot_spec = self._plot_spectrum_region(eclick.xdata, erelease.xdata, eclick.ydata, erelease.ydata,
ax[1], extent=extent, compare_cubes=compare_cubes,
stat_func=stat_func, **kwargs)
if plot_spec:
print('startposition: (%f, %f)' % (eclick.xdata, eclick.ydata))
print('endposition : (%f, %f)' % (erelease.xdata, erelease.ydata))
print('used button : ', eclick.button)
xc, yc = eclick.xdata, eclick.ydata #Left, bottom corner
dx, dy = erelease.xdata-eclick.xdata, erelease.ydata-eclick.ydata
rect = patches.Rectangle((xc,yc), dx, dy, lw=2, edgecolor=plot_spec[0].get_color(), facecolor='none')
ax[0].add_patch(rect)
ax[1].legend()
fig.canvas.draw()
fig.canvas.flush_events()
def toggle_selector(event):
print('Key pressed.')
if event.key in ['Q', 'q'] and toggle_selector.RS.active:
print('RectangleSelector deactivated.')
toggle_selector.RS.set_active(False)
if event.key in ['A', 'a'] and not toggle_selector.RS.active:
print('RectangleSelector activated.')
toggle_selector.RS.set_active(True)
rectprops = dict(facecolor='none', edgecolor = 'white',
alpha=0.8, fill=False)
lineprops = dict(color='white', linestyle='-',
linewidth=3, alpha=0.8)
toggle_selector.RS = RectangleSelector(ax[0], onselect, drawtype='box', rectprops=rectprops, lineprops=lineprops)
cid = fig.canvas.mpl_connect('key_press_event', toggle_selector)
return toggle_selector.RS
def _plot_spectrum_cursor(self, x, y, ax, extent=None, compare_cubes=[], **kwargs):
kwargs_spec = dict(where='mid', linewidth=2.5, label=r'%d,%d'%(x,y))
kwargs_spec.update(kwargs)
def get_ji(x,y):
pass
if extent is None:
j, i = int(x), int(y)
else:
nz, ny, nx = np.shape(self.data)
dx = extent[1] - extent[0]
dy = extent[3] - extent[2]
j = int(nx*(x-extent[0])/dx)
i = int(ny*(y-extent[2])/dy)
spectrum = self.data[:,i,j]
v0, v1 = self.channels[0], self.channels[-1]
if np.logical_or(np.isinf(spectrum), np.isnan(spectrum)).all(): return False
else:
#plot_fill = ax.fill_between(self.channels, spectrum, alpha=0.1)
plot_spec = ax.step(self.channels, spectrum, **kwargs_spec)
ncubes = len(compare_cubes)
if ncubes > 0:
alpha = 0.2
dalpha = -alpha/ncubes
for cube in compare_cubes:
ax.fill_between(self.channels, cube.data[:,i,j], color=plot_spec[0].get_color(), step='mid', alpha=alpha)
alpha+=dalpha
else: ax.fill_between(self.channels, spectrum, color=plot_spec[0].get_color(), step='mid', alpha=0.2)
return plot_spec
#def point(self, *args, **kwargs):
# return self.cursor(*args, **kwargs)
def cursor(self, fig, ax, extent=None, compare_cubes=[], **kwargs):
def onclick(event):
if event.button==3:
print ('Right click. Disconnecting click event...')
fig.canvas.mpl_disconnect(cid)
elif event.inaxes is ax[0]:
plot_spec = self._plot_spectrum_cursor(event.xdata, event.ydata, ax[1], extent=extent,
compare_cubes=compare_cubes, **kwargs)
if plot_spec:
print('%s click: button=%d, xdata=%f, ydata=%f' %
('double' if event.dblclick else 'single', event.button,
event.xdata, event.ydata))
ax[0].scatter(event.xdata, event.ydata, marker='D', s=50, facecolor=plot_spec[0].get_color(), edgecolor='k')
ax[1].legend(frameon=False, handlelength=0.7, fontsize=MEDIUM_SIZE-1)
fig.canvas.draw()
fig.canvas.flush_events()
cid = fig.canvas.mpl_connect('button_press_event', onclick)
return cid
def _plot_beam(self, ax):
x_fwhm = self.beam_kernel.model.x_fwhm
y_fwhm = self.beam_kernel.model.y_fwhm
ny_pix, nx_pix = np.shape(self.data[0])
ellipse = patches.Ellipse(xy = (0.05,0.05), angle = 90+self.beam.pa.value,
width=x_fwhm/nx_pix, height=y_fwhm/ny_pix, lw=1, fill=True,
fc='gray', ec='k', transform=ax.transAxes)
ax.add_artist(ellipse)
def surface(self, ax, *args, **kwargs): return Contours.emission_surface(ax, *args, **kwargs)
def show(self, extent=None, chan_init=0, compare_cubes=[], cursor_grid=True, cmap='gnuplot2_r',
int_unit=r'Intensity [mJy beam$^{-1}$]', pos_unit='Offset [au]', vel_unit=r'km s$^{-1}$',
show_beam=False, surface={'args': (), 'kwargs': {}}, **kwargs):
from matplotlib.widgets import Slider, Cursor, Button
v0, v1 = self.channels[0], self.channels[-1]
dv = v1-v0
fig, ax = plt.subplots(ncols=2, figsize=(12,5))
plt.subplots_adjust(wspace=0.25)
y0, y1 = ax[1].get_position().y0, ax[1].get_position().y1
axcbar = plt.axes([0.47, y0, 0.03, y1-y0])
max_data = np.nanmax([self.data]+[comp.data for comp in compare_cubes])
ax[0].set_xlabel(pos_unit)
ax[0].set_ylabel(pos_unit)
ax[1].set_xlabel('l.o.s velocity [%s]'%vel_unit)
PlotTools.mod_major_ticks(ax[0], axis='both', nbins=5)
ax[0].tick_params(direction='out')
ax[1].tick_params(direction='in', right=True, labelright=False, labelleft=False)
axcbar.tick_params(direction='out')
ax[1].set_ylabel(int_unit, labelpad=15)
ax[1].yaxis.set_label_position('right')
ax[1].set_xlim(v0-0.1, v1+0.1)
vmin, vmax = -1*max_data/100, 0.7*max_data#0.8*max_data#
ax[1].set_ylim(vmin, vmax)
#ax[1].grid(lw=1.5, ls=':')
cmap = plt.get_cmap(cmap)
cmap.set_bad(color=(0.9,0.9,0.9))
if show_beam and self.beam_kernel: self._plot_beam(ax[0])
img = ax[0].imshow(self.data[chan_init], cmap=cmap, extent=extent, origin='lower', vmin=vmin, vmax=vmax)
cbar = plt.colorbar(img, cax=axcbar)
img.cmap.set_under('w')
current_chan = ax[1].axvline(self.channels[chan_init], color='black', lw=2, ls='--')
text_chan = ax[1].text((self.channels[chan_init]-v0)/dv, 1.02, #Converting xdata coords to Axes coords
'%4.1f %s'%(self.channels[chan_init], vel_unit), ha='center',
color='black', transform=ax[1].transAxes)
if cursor_grid: cg = Cursor(ax[0], useblit=True, color='lime', linewidth=1.5)
def get_interactive(func):
return func(fig, ax, extent=extent, compare_cubes=compare_cubes, **kwargs)
interactive_obj = [get_interactive(self.interactive)]
#***************
#SLIDERS
#***************
def update_chan(val):
chan = int(val)
vchan = self.channels[chan]
img.set_data(self.data[chan])
current_chan.set_xdata(vchan)
text_chan.set_x((vchan-v0)/dv)
text_chan.set_text('%4.1f %s'%(vchan, vel_unit))
fig.canvas.draw_idle()
def update_cubes(val):
i = int(slider_cubes.val)
chan = int(slider_chan.val)
vchan = self.channels[chan]
if i==0: img.set_data(self.data[chan])
else: img.set_data(compare_cubes[i-1].data[chan])
current_chan.set_xdata(vchan)
text_chan.set_x((vchan-v0)/dv)
            text_chan.set_text('%4.1f %s'%(vchan, vel_unit))
fig.canvas.draw_idle()
ncubes = len(compare_cubes)
if ncubes>0:
axcubes = plt.axes([0.2, 0.90, 0.24, 0.025], facecolor='0.7')
axchan = plt.axes([0.2, 0.95, 0.24, 0.025], facecolor='0.7')
slider_cubes = Slider(axcubes, 'Cube id', 0, ncubes,
valstep=1, valinit=0, valfmt='%1d', color='dodgerblue')
slider_chan = Slider(axchan, 'Channel', 0, self.nchan-1,
valstep=1, valinit=chan_init, valfmt='%2d', color='dodgerblue')
slider_cubes.on_changed(update_cubes)
slider_chan.on_changed(update_cubes)
else:
axchan = plt.axes([0.2, 0.9, 0.24, 0.05], facecolor='0.7')
slider_chan = Slider(axchan, 'Channel', 0, self.nchan-1,
valstep=1, valinit=chan_init, valfmt='%2d', color='dodgerblue')
slider_chan.on_changed(update_chan)
#*************
#BUTTONS
#*************
def go2cursor(event):
if self.interactive == self.cursor or self.interactive == self.point: return 0
interactive_obj[0].set_active(False)
self.interactive = self.cursor
interactive_obj[0] = get_interactive(self.interactive)
def go2box(event):
if self.interactive == self.box: return 0
fig.canvas.mpl_disconnect(interactive_obj[0])
self.interactive = self.box
interactive_obj[0] = get_interactive(self.interactive)
def go2trash(event):
print ('Cleaning interactive figure...')
plt.close()
chan = int(slider_chan.val)
self.show(extent=extent, chan_init=chan, compare_cubes=compare_cubes,
cursor_grid=cursor_grid, int_unit=int_unit, pos_unit=pos_unit,
vel_unit=vel_unit, surface=surface, **kwargs)
def go2surface(event):
self.surface(ax[0], *surface['args'], **surface['kwargs'])
fig.canvas.draw()
fig.canvas.flush_events()
box_img = plt.imread(path_file+'button_box.png')
cursor_img = plt.imread(path_file+'button_cursor.jpeg')
trash_img = plt.imread(path_file+'button_trash.jpg')
surface_img = plt.imread(path_file+'button_surface.png')
axbcursor = plt.axes([0.05, 0.779, 0.05, 0.05])
axbbox = plt.axes([0.05, 0.72, 0.05, 0.05])
axbtrash = plt.axes([0.05, 0.661, 0.05, 0.05], frameon=True, aspect='equal')
bcursor = Button(axbcursor, '', image=cursor_img)
bcursor.on_clicked(go2cursor)
bbox = Button(axbbox, '', image=box_img)
bbox.on_clicked(go2box)
btrash = Button(axbtrash, '', image=trash_img, color='white', hovercolor='lime')
btrash.on_clicked(go2trash)
if len(surface['args'])>0:
axbsurf = plt.axes([0.005, 0.759, 0.07, 0.07], frameon=True, aspect='equal')
bsurf = Button(axbsurf, '', image=surface_img)
bsurf.on_clicked(go2surface)
plt.show()
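    # Example call (illustrative values; `model_cube` stands for any comparison cube):
    #   cube.show(extent=[-200, 200, -200, 200], chan_init=10, compare_cubes=[model_cube])
    # The channel slider updates the displayed map and the dashed velocity marker, while the side
    # buttons switch between cursor and box interaction, overlay the emission surface, or rebuild the figure.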
def show_side_by_side(self, cube1, extent=None, chan_init=0, cursor_grid=True, cmap='gnuplot2_r',
int_unit=r'Intensity [mJy beam$^{-1}$]', pos_unit='Offset [au]', vel_unit=r'km s$^{-1}$',
show_beam=False, surface={'args': (), 'kwargs': {}}, **kwargs):
from matplotlib.widgets import Slider, Cursor, Button
compare_cubes = [cube1]
v0, v1 = self.channels[0], self.channels[-1]
dv = v1-v0
fig, ax = plt.subplots(ncols=3, figsize=(17,5))
plt.subplots_adjust(wspace=0.25)
y0, y1 = ax[2].get_position().y0, ax[2].get_position().y1
axcbar = plt.axes([0.63, y0, 0.015, y1-y0])
max_data = np.nanmax([self.data]+[comp.data for comp in compare_cubes])
ax[0].set_xlabel(pos_unit)
ax[0].set_ylabel(pos_unit)
ax[2].set_xlabel('l.o.s velocity [%s]'%vel_unit)
PlotTools.mod_major_ticks(ax[0], axis='both', nbins=5)
ax[0].tick_params(direction='out')
ax[2].tick_params(direction='in', right=True, labelright=False, labelleft=False)
axcbar.tick_params(direction='out')
ax[2].set_ylabel(int_unit, labelpad=15)
ax[2].yaxis.set_label_position('right')
ax[2].set_xlim(v0-0.1, v1+0.1)
vmin, vmax = -1*max_data/100, 0.7*max_data#0.8*max_data#
ax[2].set_ylim(vmin, vmax)
cmap = plt.get_cmap(cmap)
cmap.set_bad(color=(0.9,0.9,0.9))
if show_beam and self.beam_kernel: self._plot_beam(ax[0])
img = ax[0].imshow(self.data[chan_init], cmap=cmap, extent=extent, origin='lower', vmin=vmin, vmax=vmax)
img1 = ax[1].imshow(cube1.data[chan_init], cmap=cmap, extent=extent, origin='lower', vmin=vmin, vmax=vmax)
cbar = plt.colorbar(img, cax=axcbar)
img.cmap.set_under('w')
img1.cmap.set_under('w')
current_chan = ax[2].axvline(self.channels[chan_init], color='black', lw=2, ls='--')
text_chan = ax[2].text((self.channels[chan_init]-v0)/dv, 1.02, #Converting xdata coords to Axes coords
'%4.1f %s'%(self.channels[chan_init], vel_unit), ha='center',
color='black', transform=ax[2].transAxes)
if cursor_grid: cg = Cursor(ax[0], useblit=True, color='lime', linewidth=1.5)
def get_interactive(func):
return func(fig, [ax[0], ax[2]], extent=extent, compare_cubes=compare_cubes, **kwargs)
interactive_obj = [get_interactive(self.interactive)]
#***************
#SLIDERS
#***************
def update_chan(val):
chan = int(val)
vchan = self.channels[chan]
img.set_data(self.data[chan])
img1.set_data(cube1.data[chan])
current_chan.set_xdata(vchan)
text_chan.set_x((vchan-v0)/dv)
text_chan.set_text('%4.1f %s'%(vchan, vel_unit))
fig.canvas.draw_idle()
ncubes = len(compare_cubes)
axchan = plt.axes([0.2, 0.9, 0.24, 0.05], facecolor='0.7')
slider_chan = Slider(axchan, 'Channel', 0, self.nchan-1,
valstep=1, valinit=chan_init, valfmt='%2d', color='dodgerblue')
slider_chan.on_changed(update_chan)
#*************
#BUTTONS
#*************
def go2cursor(event):
if self.interactive == self.cursor or self.interactive == self.point: return 0
interactive_obj[0].set_active(False)
self.interactive = self.cursor
interactive_obj[0] = get_interactive(self.interactive)
def go2box(event):
if self.interactive == self.box: return 0
fig.canvas.mpl_disconnect(interactive_obj[0])
self.interactive = self.box
interactive_obj[0] = get_interactive(self.interactive)
def go2trash(event):
print ('Cleaning interactive figure...')
plt.close()
chan = int(slider_chan.val)
self.show_side_by_side(cube1, extent=extent, chan_init=chan,
cursor_grid=cursor_grid, int_unit=int_unit, pos_unit=pos_unit,
vel_unit=vel_unit, surface=surface, **kwargs)
def go2surface(event):
self.surface(ax[0], *surface['args'], **surface['kwargs'])
self.surface(ax[1], *surface['args'], **surface['kwargs'])
fig.canvas.draw()
fig.canvas.flush_events()
box_img = plt.imread(path_file+'button_box.png')
cursor_img = plt.imread(path_file+'button_cursor.jpeg')
trash_img = plt.imread(path_file+'button_trash.jpg')
surface_img = plt.imread(path_file+'button_surface.png')
axbcursor = plt.axes([0.05, 0.779, 0.05, 0.05])
axbbox = plt.axes([0.05, 0.72, 0.05, 0.05])
axbtrash = plt.axes([0.05, 0.661, 0.05, 0.05], frameon=True, aspect='equal')
bcursor = Button(axbcursor, '', image=cursor_img)
bcursor.on_clicked(go2cursor)
bbox = Button(axbbox, '', image=box_img)
bbox.on_clicked(go2box)
btrash = Button(axbtrash, '', image=trash_img, color='white', hovercolor='lime')
btrash.on_clicked(go2trash)
if len(surface['args'])>0:
axbsurf = plt.axes([0.005, 0.759, 0.07, 0.07], frameon=True, aspect='equal')
bsurf = Button(axbsurf, '', image=surface_img)
bsurf.on_clicked(go2surface)
plt.show()
"""
#Lasso functions under development
def _plot_lasso(self, ax, x, y, chan, color=False, show_path=True, extent=None, compare_cubes=[], **kwargs):
if len(self._lasso_path) == 0: return
#for i in range(len(self.lasso_path))
if extent is None:
j = x.astype(np.int)
i = y.astype(np.int)
else:
nz, ny, nx = np.shape(self.data)
dx = extent[1] - extent[0]
dy = extent[3] - extent[2]
j = (nx*(x-extent[0])/dx).astype(np.int)
i = (ny*(y-extent[2])/dy).astype(np.int)
if color: self._plot_path = ax[1].step(np.arange(len(i)), self.data[chan,i,j], color=color)
else: self._plot_path = ax[1].step(np.arange(len(i)), self.data[chan,i,j])
self._plot_color = self._plot_path[0].get_color()
if show_path: self._path_on_cube = ax[0].plot(x,y, color=self._plot_color)
else: self._path_on_cube = None
def lasso(self, fig, ax, chan, color=False, show_path=True, extent=None, compare_cubes=[], **kwargs):
from matplotlib.widgets import LassoSelector
canvas = ax[0].figure.canvas
def onselect(verts):
#path = Path(verts)
canvas.draw_idle()
self._lasso_path.append(np.array(verts).T)
self._plot_lasso(ax, *np.array(verts).T, chan, color, show_path, extent, compare_cubes, **kwargs)
print (verts)
def disconnect():
self._lasso_obj.disconnect_events()
canvas.draw_idle()
self._lasso_obj = LassoSelector(ax[0], onselect, lineprops={'color': 'lime'})
def onclick(event):
if event.button == 3:
print ('Right click. Disconnecting click event...')
disconnect()
fig.canvas.draw()
cid = fig.canvas.mpl_connect('button_press_event', onclick)
"""
def curve(self, ax, x, y, chan, color=False, show_path=True, extent=None, compare_cubes=[], **kwargs):
kwargs_curve = dict(linewidth=2.5)#, label=r'x0:%d,x1:%d'%(x0,x1))
kwargs_curve.update(kwargs)
if extent is None:
            j = x.astype(int)
            i = y.astype(int)
else:
nz, ny, nx = np.shape(self.data)
dx = extent[1] - extent[0]
dy = extent[3] - extent[2]
            j = (nx*(x-extent[0])/dx).astype(int)
            i = (ny*(y-extent[2])/dy).astype(int)
pix_ids = np.arange(len(i))
path_val = self.data[chan,i,j]
if color: plot_path = ax[1].step(pix_ids, path_val, where='mid', color=color, **kwargs_curve)
else: plot_path = ax[1].step(pix_ids, path_val, where='mid', **kwargs_curve)
plot_color = plot_path[0].get_color()
if show_path: path_on_cube = ax[0].plot(x, y, color=plot_color, **kwargs_curve)
else: path_on_cube = None
cube_fill = []
plot_fill = None
ncubes = len(compare_cubes)
if ncubes > 0:
alpha = 0.2
dalpha = -alpha/ncubes
for cube in compare_cubes:
cube_fill.append(ax[1].fill_between(pix_ids, cube.data[chan,i,j], color=plot_color, step='mid', alpha=alpha))
alpha+=dalpha
else: plot_fill = ax[1].fill_between(pix_ids, path_val, color=plot_color, step='mid', alpha=0.2)
return path_on_cube, plot_path, plot_color, plot_fill, cube_fill
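    # Illustrative usage (assumes x, y are equal-length arrays tracing a path on the image):
    #   artists = cube.curve([ax_image, ax_profile], x, y, chan=20)
    # The path is drawn on the image axis and the intensity along it is stepped on the profile axis.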
def show_path(self, x, y, extent=None, chan_init=20, compare_cubes=[], cursor_grid=True,
int_unit=r'Intensity [mJy beam$^{-1}$]', pos_unit='au', vel_unit=r'km s$^{-1}$',
show_beam=False, **kwargs):
from matplotlib.widgets import Slider, Cursor, Button
v0, v1 = self.channels[0], self.channels[-1]
dv = v1-v0
fig, ax = plt.subplots(ncols=2, figsize=(12,5))
plt.subplots_adjust(wspace=0.25)
y0, y1 = ax[1].get_position().y0, ax[1].get_position().y1
axcbar = plt.axes([0.47, y0, 0.03, y1-y0])
max_data = np.max(self.data)
ax[0].set_xlabel(pos_unit)
ax[0].set_ylabel(pos_unit)
ax[1].set_xlabel('Pixel id along path')
ax[1].tick_params(direction='in', right=True, labelright=False, labelleft=False)
axcbar.tick_params(direction='out')
ax[1].set_ylabel(int_unit, labelpad=15)
ax[1].yaxis.set_label_position('right')
#ax[1].set_xlim(v0-0.1, v1+0.1)
#ax[1].set_ylim(-1, max_data)
vmin, vmax = -max_data/30, max_data
ax[1].set_ylim(vmin, vmax)
ax[1].grid(lw=1.5, ls=':')
cmap = plt.get_cmap('brg')
cmap.set_bad(color=(0.9,0.9,0.9))
if show_beam and self.beam_kernel: self._plot_beam(ax[0])
img = ax[0].imshow(self.data[chan_init], cmap=cmap, extent=extent, origin='lower', vmin=vmin, vmax=vmax)
cbar = plt.colorbar(img, cax=axcbar)
text_chan = ax[1].text(0.15, 1.04, #Converting xdata coords to Axes coords
                               r'v$_{\rm chan}$=%4.1f %s'%(self.channels[chan_init], vel_unit), ha='center',
color='black', transform=ax[1].transAxes)
if cursor_grid: cg = Cursor(ax[0], useblit=True, color='lime', linewidth=1.5)
box_img = plt.imread(path_file+'button_box.png')
cursor_img = plt.imread(path_file+'button_cursor.jpeg')
def get_interactive(func, chan=chan_init, color=False, show_path=True):
return func(ax, x, y, chan, color=color, show_path=show_path, extent=extent, compare_cubes=compare_cubes, **kwargs)
interactive_obj = [get_interactive(self.interactive_path)]
#***************
#SLIDERS
#***************
def update_chan(val):
chan = int(val)
vchan = self.channels[chan]
img.set_data(self.data[chan])
            text_chan.set_text(r'v$_{\rm chan}$=%4.1f %s'%(vchan, vel_unit))
path_on_cube, plot_path, plot_color, plot_fill, cube_fill = interactive_obj[0]
plot_path[0].remove()
if plot_fill is not None: plot_fill.remove()
for cbfill in cube_fill: cbfill.remove()
interactive_obj[0] = get_interactive(self.interactive_path, chan, color=plot_color, show_path=False)
fig.canvas.draw_idle()
def update_cubes(val):
i = int(slider_cubes.val)
chan = int(slider_chan.val)
vchan = self.channels[chan]
if i==0: img.set_data(self.data[chan])
else: img.set_data(compare_cubes[i-1].data[chan])
            text_chan.set_text(r'v$_{\rm chan}$=%4.1f %s'%(vchan, vel_unit))
path_on_cube, plot_path, plot_color, plot_fill, cube_fill = interactive_obj[0]
plot_path[0].remove()
if plot_fill is not None: plot_fill.remove()
for cbfill in cube_fill: cbfill.remove()
interactive_obj[0] = get_interactive(self.interactive_path, chan, color=plot_color, show_path=False)
fig.canvas.draw_idle()
ncubes = len(compare_cubes)
if ncubes>0:
axcubes = plt.axes([0.2, 0.90, 0.24, 0.025], facecolor='0.7')
axchan = plt.axes([0.2, 0.95, 0.24, 0.025], facecolor='0.7')
slider_cubes = Slider(axcubes, 'Cube id', 0, ncubes,
valstep=1, valinit=0, valfmt='%1d', color='dodgerblue')
slider_chan = Slider(axchan, 'Channel', 0, self.nchan-1,
valstep=1, valinit=chan_init, valfmt='%2d', color='dodgerblue')
slider_cubes.on_changed(update_cubes)
slider_chan.on_changed(update_cubes)
else:
axchan = plt.axes([0.2, 0.9, 0.24, 0.05], facecolor='0.7')
slider_chan = Slider(axchan, 'Channel', 0, self.nchan-1,
valstep=1, valinit=chan_init, valfmt='%2d', color='dodgerblue')
slider_chan.on_changed(update_chan)
plt.show()
"""
self._path_on_cube, self._plot_path, self._plot_color = None, None, None
self._lasso_path = []
self.interactive_path(fig, ax, chan_init, color=False, show_path=True, extent=extent, compare_cubes=compare_cubes, **kwargs)
def get_interactive(func, chan=chan_init, color=False, show_path=True):
#func(fig, ax, chan, color=color, show_path=show_path, extent=extent, compare_cubes=compare_cubes, **kwargs)
if func == self.lasso:
return self._plot_lasso(ax, True, True, chan, color=color, show_path=show_path, extent=extent, compare_cubes=compare_cubes, **kwargs)
#interactive_obj = [get_interactive(self.interactive_path)]
#print (interactive_obj)
#***************
#SLIDERS
#***************
def update_chan(val):
chan = int(val)
vchan = self.channels[chan]
img.set_data(self.data[chan])
current_chan.set_xdata(vchan)
text_chan.set_x((vchan-v0)/dv)
text_chan.set_text('%4.1f km/s'%vchan)
#path_on_cube, plot_path, plot_color = interactive_obj[0]
if self._path_on_cube is not None:
self._plot_path[0].remove()
get_interactive(self.interactive_path, chan, color=self._plot_color, show_path=False)
fig.canvas.draw_idle()
"""
def make_fits(self, output, **kw_header):
from astropy.io import fits
hdr = fits.Header()
hdr.update(**kw_header)
        data = np.where(np.isfinite(self.data), self.data, 0.0)
##~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~##
## ##
## This file forms part of the Badlands surface processes modelling application. ##
## ##
## For full license and copyright information, please refer to the LICENSE.md file ##
## located at the project root, or contact the authors. ##
## ##
##~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~##
"""
This file defines functions to compute stream network over the TIN.
.. note::
To solve channel incision and landscape evolution, the algorithm follows the O(n)-efficient ordering
method from Braun and Willett (2013) and is based on a *single-flow-direction* (**SFD**) approximation
assuming that water goes down the path of the steepest slope.
"""
import math
import time
import numpy
import warnings
from matplotlib import path
import h5py
import pandas as pd
import xml.etree.ElementTree as ETO
import os
if 'READTHEDOCS' not in os.environ:
from badlands import sfd
from badlands import pdalgo
from badlands import flowalgo
from scipy.spatial import cKDTree
from scipy.interpolate import RegularGridInterpolator
from scipy.ndimage.filters import gaussian_filter
class flowNetwork:
"""
Class used to define **flow network computation**.
.. image:: img/stack.jpg
:scale: 100 %
:alt: stack from Braun & Willett (2013)
:align: center
The left graph shows the stack order considering the single-flow-direction algorithm and the
right graph shows the inverted stack order from top to bottom as described in Braun and Willett
(2013).
.. seealso::
Braun and Willett, 2013: A very efficient O(n), implicit and parallel method to solve
the stream power equation governing fluvial incision and landscape evolution - *Geomorphology*,
170-179, `doi:10.1016/j.geomorph.2012.10.008`_.
.. _doi:10.1016/j.geomorph.2012.10.008: https://doi.org/10.1016/j.geomorph.2012.10.008
"""
def __init__(self, input):
"""
Initialisation.
"""
self.xycoords = None
self.base = None
self.base1 = None
self.localbase = None
self.localbase1 = None
self.receivers = None
self.receivers1 = None
self.arrdonor = None
self.delta = None
self.donors = None
self.donors1 = None
self.localstack = None
self.localstack1 = None
self.stack = None
self.stack1 = None
self.partFlow = None
self.maxdonors = 0
self.CFL = None
self.erodibility = None
self.mindt = None
self.spl = False
self.depo = 0
self.discharge = None
self.localsedflux = None
self.maxh = None
self.maxdep = None
self.diff_cfl = None
self.chi = None
self.basinID = None
self.pitID = None
self.pitVolume = None
self.pitDrain = None
self.allDrain = None
self.xgrid = None
self.ygrid = None
self.xi = None
self.yi = None
self.xyi = None
self.Afactor = None
self.parentIDs = None
self.dx = None
self.distances = None
self.indices = None
self.onIDs = None
self.mp = input.mp
self.mt = input.mt
self.nt = input.nt
self.kt = input.kt
self.kw = input.kw
self.b = input.b
self.deepb = input.deepbasin
self.critdens = input.denscrit
self.flowdensity = None
self.sedload = None
self.straTIN = 0
self.activelay = None
self.borders = None
self.domain = None
self.insideIDs = None
self.outsideIDs = None
self.borders2 = None
self.insideIDs2 = None
self.outsideIDs2 = None
flowalgo.eroparams(input.incisiontype,input.SPLm,input.SPLn,input.mt,
input.nt,input.kt,input.kw,input.b,input.bedslptype)
return
def compute_hillslope_diffusion(self, elev, neighbours, edges, distances, globalIDs, type, Sc):
"""
Perform hillslope evolution based on diffusion processes.
Args:
elev: numpy arrays containing the elevation of the TIN nodes.
neighbours: numpy integer-type array with the neighbourhood IDs.
edges: numpy real-type array with the voronoi edges length for each neighbours of the TIN nodes.
distances: numpy real-type array with the distances between each connection in the TIN.
globalIDs: numpy integer-type array containing for local nodes their global IDs.
type: flag to compute the diffusion when multiple rocks are used.
Sc: critical slope parameter for non-linear diffusion.
Returns:
- diff_flux - numpy array containing erosion/deposition thicknesses induced by hillslope processes.
"""
if type == 0:
if Sc > 0.:
tSc = numpy.zeros(1)
tSc[0] = Sc
diff_flux = sfd.diffusionnl(tSc, elev, self.borders2, neighbours, edges, distances, globalIDs)
else:
diff_flux = sfd.diffusion(elev, self.borders2, neighbours, edges, distances, globalIDs)
else:
diff_flux = sfd.diffusionero(elev, self.borders2, neighbours, edges, distances, globalIDs)
return diff_flux
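    # Note: with type == 0 this routine selects the linear kernel when Sc <= 0 and the
    # slope-limited non-linear kernel when Sc > 0; any other value of `type` routes to the
    # multi-rock erosion/deposition variant (sfd.diffusionero).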
def compute_marine_diffusion(self, elev, depoH, neighbours, edges, distances, coeff,
globalIDs, seal, maxth, tstep):
"""
Perform river transported marine sediments diffusion.
Args:
elev: numpy arrays containing the elevation of the TIN nodes.
dep: numpy arrays flagging the deposited nodes.
neighbours: numpy integer-type array with the neighbourhood IDs.
edges: numpy real-type array with the voronoi edges length for each neighbours of the TIN nodes.
distances: numpy real-type array with the distances between each connection in the TIN.
globalIDs: numpy integer-type array containing for local nodes their global IDs.
Returns
-------
diff_flux
numpy array containing marine erosion/deposition thicknesses induced by hillslope processes.
mindt
maximum time step (in years) to ensure stable results
"""
diff_flux, ndt = flowalgo.diffmarine(elev, self.borders, depoH, neighbours, edges,
distances, coeff, globalIDs, seal, maxth, tstep)
# Send local diffusion flux globally
mindt = numpy.array(ndt)
return diff_flux, mindt
def compute_failure_diffusion(self, elev, depoH, neighbours, edges, distances, coeff,
globalIDs, maxth, tstep):
"""
Perform slope failure transported sediments diffusion.
Args:
elev: numpy arrays containing the elevation of the TIN nodes.
dep: numpy arrays flagging the deposited nodes.
neighbours: numpy integer-type array with the neighbourhood IDs.
edges: numpy real-type array with the voronoi edges length for each neighbours of the TIN nodes.
distances: numpy real-type array with the distances between each connection in the TIN.
globalIDs: numpy integer-type array containing for local nodes their global IDs.
Returns
-------
diff_flux
numpy array containing erosion/deposition thicknesses induced by slope failure processes.
mindt
maximum time step (in years) to ensure stable results
"""
diff_flux, ndt = flowalgo.difffailure(elev, self.borders, depoH, neighbours, edges,
distances, coeff, globalIDs, maxth, tstep)
# Send local diffusion flux globally
mindt = numpy.array(ndt)
return diff_flux, mindt
def compute_failure(self, elev, sfail):
"""
Perform erosion induced by slope failure.
Args:
elev: numpy arrays containing the elevation of the TIN nodes.
sfail: critical slope to initiate slope failure.
Returns:
- erosion - numpy integer-type array containing for local nodes their global IDs.
"""
erosion = flowalgo.slumpero(self.localstack,self.receivers,self.xycoords, \
elev,sfail,self.borders)
return erosion
def compute_sediment_marine(self, elev, dep, sdep, coeff, neighbours, seal, maxth,
edges, distances, globalIDs):
"""
Perform marine sediment diffusion for multiple rock types.
Args:
elev: numpy arrays containing the elevation of the TIN nodes.
dep: numpy arrays containing the rock deposition.
coeff: numpy arrays containing the coefficient value for the diffusion algorithm.
neighbours: numpy integer-type array with the neighbourhood IDs.
edges: numpy real-type array with the voronoi edges length for each neighbours of the TIN nodes.
distances: numpy real-type array with the distances between each connection in the TIN.
globalIDs: numpy integer-type array containing for local nodes their global IDs.
Returns
-------
diff_prop
2D numpy array containing proportion of each sediment diffused by marine processes.
diff_flux
numpy array containing erosion/deposition thicknesses induced by marine processes.
"""
diff_prop, diff_flux = flowalgo.diffsedmarine(elev, self.borders, dep, sdep,
seal, maxth, coeff, neighbours, edges, distances, globalIDs)
return diff_prop, diff_flux
def compute_sediment_hillslope(self, elev, difflay, coeff, neighbours,
edges, layh, distances, globalIDs):
"""
Perform sediment diffusion for multiple rock types.
Args:
elev: numpy arrays containing the elevation of the TIN nodes.
difflay: numpy arrays containing the rock type fractions in the active layer.
coeff: numpy arrays containing the coefficient value for the diffusion algorithm.
neighbours: numpy integer-type array with the neighbourhood IDs.
edges: numpy real-type array with the voronoi edges length for each neighbours of the TIN nodes.
layh: numpy arrays containing the thickness of the active layer.
distances: numpy real-type array with the distances between each connection in the TIN.
globalIDs: numpy integer-type array containing for local nodes their global IDs.
Returns
-------
ero
2D numpy array containing erosion thicknesses for each sediment diffused by hillslope processes.
depo
2D numpy array containing deposition thicknesses for each sediment diffused by hillslope processes.
sumdiff
numpy array containing cumulative erosion/deposition thicknesses induced by hillslope processes.
"""
sumdiff, ero, depo = flowalgo.diffsedhillslope(elev, self.borders, difflay,
layh, coeff, neighbours, edges, distances, globalIDs)
return sumdiff, ero, depo
def SFD_receivers(self, fillH, elev, neighbours, edges, distances, globalIDs):
"""
**Single Flow Direction** function computes downslope flow directions by inspecting the neighborhood
elevations around each node. The SFD method assigns a unique flow direction towards the steepest
downslope neighbor.
Args:
fillH: numpy array containing the filled elevations from Planchon & Darboux depression-less algorithm.
elev: numpy arrays containing the elevation of the TIN nodes.
neighbours: numpy integer-type array with the neighbourhood IDs.
edges: numpy real-type array with the voronoi edges length for each neighbours of the TIN nodes.
distances: numpy real-type array with the distances between each connection in the TIN.
globalIDs: numpy integer-type array containing for local nodes their global IDs.
To solve channel incision and landscape evolution, the algorithm follows the O(n)-efficient ordering
method from Braun and Willett (2013) and is based on a *single-flow-direction* (**SFD**) approximation
assuming that water goes down the path of the steepest slope.
.. seealso::
<NAME>, <NAME>. A very efficient O(n), implicit and parallel method to solve the stream power equation governing fluvial incision and landscape evolution. Geomorphology. 2013;180–181(Supplement C):170–179.
"""
# Call the SFD function from libUtils
# Get the directions from true surface
base1, receivers1 = sfd.directions_base(elev, neighbours, edges, distances, globalIDs)
# Send local base level globally
bpos = numpy.where(base1 >= 0)[0]
self.base1 = base1[bpos]
# Send local receivers globally
self.receivers1 = receivers1
# Get the directions from filled surface
base, receivers, maxh, maxdep = sfd.directions(fillH, elev, neighbours, edges, distances, globalIDs)
# Send local base level globally
bpos = numpy.where(base >= 0)[0]
self.base = base[bpos]
numpy.random.shuffle(self.base)
# Send local receivers globally
self.receivers = receivers
# Send local maximum height globally
self.maxh = maxh
# Send local maximum deposition globally
self.maxdep = maxdep
return
def _donors_number_array(self):
"""
Creates an array containing the number of donors for each node.
"""
self.arrdonor = None
numPts = len(self.receivers)
self.arrdonor = numpy.zeros(numPts, dtype=int)
        maxID = numpy.max(self.receivers)
#!/usr/bin/env python3
###############################################################################
#
# Project: Embedded Learning Library (ELL)
# File: common_importer_test.py (importers)
# Authors: <NAME>
#
# Requires: Python 3.x, cntk-2.4
#
###############################################################################
import os
import sys
import onnx
import numpy as np
import unittest
script_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(script_path, '..', '..', '..', 'utilities', 'pythonlibs'))
sys.path.append(os.path.join(script_path, '..', '..'))
sys.path.append(os.path.join(script_path, '..', '..', 'common', 'test'))
sys.path.append(os.path.join(script_path, '..'))
sys.path.append(os.path.join(script_path, '..', 'lib'))
import find_ell # noqa 401
import ell
import logger
_logger = logger.init(verbosity="WARN")
import common_importer_test
import common.importer
import common.converters
import onnx_converters as convert
class OnnxNodeTest(common_importer_test.EllImporterTestBase):
def _get_value_info(self, arr, name):
return onnx.helper.make_tensor_value_info(
name=name,
elem_type=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[arr.dtype],
shape=arr.shape)
def _build_graph(self, node, inputs, outputs):
present_inputs = [x for x in node.input if (x != '')]
present_outputs = [x for x in node.output if (x != '')]
inputs_vi = [self._get_value_info(arr, arr_name)
for arr, arr_name in zip(inputs, present_inputs)]
outputs_vi = [self._get_value_info(arr, arr_name)
for arr, arr_name in zip(outputs, present_outputs)]
graph = onnx.helper.make_graph(
nodes=[node],
name="test",
inputs=inputs_vi,
outputs=outputs_vi)
return graph
def convert(self, graph):
converter = convert.OnnxConverter()
importer_model = converter.set_graph(graph)
try:
importer_engine = common.importer.ImporterEngine(step_interval_msec=None, lag_threshold_msec=None)
ell_map = importer_engine.convert_nodes(importer_model)
except Exception as e:
_logger.error("Error occurred while attempting to convert the model: " + str(e))
raise
return ell_map
def compute(self, graph, input):
map = self.convert(graph)
# put tensor into ELL format (rows, cols, channels)
input = np.squeeze(input, 0) # remove initial batch dimension
input = np.rollaxis(input, 0, 3) # move channel to the end
input_vec = ell.math.FloatVector(input.ravel())
output = map.ComputeFloat(input_vec)
shape = map.GetOutputShape()
shape = (shape.rows, shape.columns, shape.channels)
output = np.array(output).reshape(shape)
# put tensor back in ONNX format (channels, rows, cols)
return np.rollaxis(output, 2)
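    # Shape bookkeeping: the ONNX input (1, C, H, W) is squeezed to (C, H, W) and rolled to ELL's
    # (H, W, C) layout before Compute; the ELL output (H, W, C) is rolled back to (C, H, W) so it
    # can be compared against the ONNX reference tensors.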
def test_global_average_pool(self):
_logger.warning("----------- test_global_average_pool")
node = onnx.helper.make_node(
'GlobalAveragePool',
inputs=['x'],
outputs=['y'],
)
# ONNX is (batch, channels, rows, cols)
x = np.random.randn(1, 3, 5, 5).astype(np.float32)
# GlobalAveragePool consumes an input tensor X and applies average pooling
# across the values in the same channel. Therefore it will average the
# 5,5 spatial dimensions across the 3 channels, returning a result of shape
# 1,3,1,1.
spatial_shape = np.ndim(x) - 2
y = np.average(x, axis=tuple(range(spatial_shape, spatial_shape + 2)))
for _ in range(spatial_shape):
y = np.expand_dims(y, -1)
graph = self._build_graph(node, x, y)
output = self.compute(graph, x)
        output = np.expand_dims(output, 0)
from deepautoencoder import StackedAutoEncoder
import numpy as np
import pandas as pd
from statsmodels.tsa.stattools import pacf
from sklearn import preprocessing
from hpelm import ELM
from deepautoencoder.para_optimization import ParaOptimization
from sklearn.model_selection import KFold
def add_lag(cons, thres=0.1):
cons = np.array(cons)
pa = pacf(cons, nlags=150)
above_thres_indices = np.argwhere(pa > thres)
above_thres_indices = np.delete(above_thres_indices, 0)
max_lag = max(above_thres_indices)
data = []
labels = []
for i in range(max_lag, len(cons) - 1):
new_indices = i - above_thres_indices
new_series = cons[new_indices]
data.append(new_series)
labels.append(cons[i])
    return np.array(data), np.array(labels)
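# Minimal usage sketch (illustrative; assumes `consumption` is a 1-D array of load readings):
#   X, y = add_lag(consumption, thres=0.1)
# Each row of X holds the past values at the PACF-selected lags and y holds the corresponding current reading.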
from cvs import *
import numpy as np
import os,sys
def checkAppleImage():
strTmp = "./res/hfs1.jpg"
im1 = cvs.imread(strTmp)
cvs.setLbs("长沙理工大学测控专业"+"苹果检测B116队"+"显示苹果原始图像")
cvs.imshow(im1)
sleep(1000)
gray1 = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
hsv1 = cv2.cvtColor(im1, cv2.COLOR_BGR2HSV)
# define range of red color in HSV
lower_red = np.array([0,50,50])
upper_red = np.array([20,255,255])
# Threshold the HSV image to get only blue colors
mask = cv2.inRange(hsv1,lower_red, upper_red)
    im2, cnt, hierarchy = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)  # find contours
    n = len(cnt)  # number of contours
contoursImg=[]
for i in range(n):
        length = cv2.arcLength(cnt[i], True)  # contour perimeter
        area = cv2.contourArea(cnt[i])  # contour area
if length <500 and area<500*500*0.1:
continue
        # build a grayscale-style mask from this contour
        tmp3 = np.zeros(gray1.shape, np.uint8)  # black background
        mask3 = cv2.drawContours(tmp3, cnt, i, (255, 255, 255), -1)  # draw the contour as a filled mask (-1 fills the interior)
        # graph cut (GrabCut) segmentation
bgdModel = np.zeros((1,65),np.float64)
fgdModel = np.zeros((1,65),np.float64)
mask3[mask3 == 255] = 1
mask4, bgdModel, fgdModel = cv2.grabCut(im1,mask3,None,bgdModel,fgdModel,5,cv2.GC_INIT_WITH_MASK)
mask4[mask3 == 1] = 255
cvs.setLbs("长沙理工大学测控专业苹果检测B116队,"+"显示提取的苹果图像,"+"1.苹果面积是"+str(area)+" 2.苹果周长是"+str(length))
cvs.imshow(mask4)
def analyzeAppleImage(im1):
gray1 = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
hsv1 = cv2.cvtColor(im1, cv2.COLOR_BGR2HSV)
# define range of red color in HSV
lower_red = np.array([0,50,50])
    upper_red = np.array([20,255,255])
#!/usr/bin/env python3
"""
Binary Classification
"""
import numpy as np
class NeuralNetwork:
"""
define the NeuralNetwork class
"""
def __init__(self, nx, nodes):
"""initialize variables and methods"""
if not isinstance(nx, int):
raise TypeError('nx must be an integer')
if nx < 1:
raise ValueError('nx must be a positive integer')
if not isinstance(nodes, int):
raise TypeError('nodes must be an integer')
if nodes < 1:
raise ValueError('nodes must be a positive integer')
self.nx = nx
self.nodes = nodes
self.__W1 = np.random.normal(loc=0.0, scale=1.0, size=(nodes, nx))
self.__b1 = np.zeros(nodes).reshape(nodes, 1)
self.__A1 = 0
self.__W2 = np.random.normal(
loc=0.0, scale=1.0, size=nodes).reshape(1, nodes)
self.__b2 = 0
self.__A2 = 0
@property
def W1(self):
"""getter for W1"""
return self.__W1
@property
def b1(self):
"""getter for b1"""
return self.__b1
@property
def A1(self):
"""getter for A1"""
return self.__A1
@property
def W2(self):
"""getter for W2"""
return self.__W2
@property
def b2(self):
"""getter for b2"""
return self.__b2
@property
def A2(self):
"""getter for A2"""
return self.__A2
def forward_prop(self, X):
"""forward propagation function"""
Z1 = np.matmul(self.W1, X) + self.b1
self.__A1 = self.sigmoid(Z1)
Z2 = np.matmul(self.W2, self.A1) + self.b2
self.__A2 = self.sigmoid(Z2)
return self.A1, self.A2
def sigmoid(self, Y):
"""define the sigmoid activation function"""
return 1 / (1 + np.exp(-1 * Y))
def cost(self, Y, A):
"""define the cost function"""
m = Y.shape[1]
return (-1 / m) * np.sum(
            Y * np.log(A) + (1 - Y) * (np.log(1.0000001 - A)))
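# Illustrative shape conventions for this class (values are assumptions):
#   nn = NeuralNetwork(nx=784, nodes=16)
#   A1, A2 = nn.forward_prop(X)   # X has shape (nx, m); A2 has shape (1, m)
#   c = nn.cost(Y, A2)            # Y has shape (1, m) with 0/1 labels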
import numpy as np
class DecisionTree:
'''Decision Tree Classifier.
Note that this class only supports binary classification.
'''
def __init__(self,
criterion,
max_depth,
min_samples_leaf,
sample_feature=False,
continuous=False):
'''Initialize the classifier.
Args:
criterion (str): the criterion used to select features and split nodes.
max_depth (int): the max depth for the decision tree. This parameter is
a trade-off between underfitting and overfitting.
min_samples_leaf (int): the minimal samples in a leaf. This parameter is a trade-off
between underfitting and overfitting.
sample_feature (bool): whether to sample features for each splitting. Note that for random forest,
we would randomly select a subset of features for learning. Here we select sqrt(p) features.
For single decision tree, we do not sample features.
'''
if criterion == 'infogain_ratio':
self.criterion = self._information_gain_ratio
elif criterion == 'entropy':
self.criterion = self._information_gain
elif criterion == 'gini':
self.criterion = self._gini_purification
else:
raise Exception('Criterion should be infogain_ratio or entropy or gini')
self._tree = None
self.max_depth = max_depth
self.min_samples_leaf = min_samples_leaf
self.sample_feature = sample_feature
self.continuous = continuous
self.crit_THRESH = 0.05
def fit(self, X, y, sample_weights=None):
"""Build the decision tree according to the training data.
Args:
X: (pd.Dataframe) training features, of shape (N, D). Each X[i] is a training sample.
y: (pd.Series) vector of training labels, of shape (N,). y[i] is the label for X[i], and each y[i] is
an integer in the range 0 <= y[i] <= C. Here C = 1.
sample_weights: weights for each samples, of shape (N,).
"""
if sample_weights is None:
# if the sample weights is not provided, then by default all
# the samples have unit weights.
sample_weights = np.ones(X.shape[0]) / X.shape[0]
else:
sample_weights = np.array(sample_weights) / np.sum(sample_weights)
feature_names = X.columns.tolist()
X = np.array(X)
y = np.array(y)
self._tree = self._build_tree(X, y, feature_names, depth=1, sample_weights=sample_weights)
return self
@staticmethod
def entropy(y, sample_weights):
"""Calculate the entropy for label.
Args:
y: vector of training labels, of shape (N,).
sample_weights: weights for each samples, of shape (N,).
Returns:
(float): the entropy for y.
"""
entropy = 0.0
# begin answer
if sample_weights is None:
sample_weights = np.ones(y.shape[0]) / y.shape[0]
y_set = np.unique(y)
prob = np.zeros(y_set.shape)
S = np.sum(sample_weights)
for i in range(len(y_set)):
            prob[i] = np.sum(np.multiply(y == y_set[i], sample_weights)) / S
        entropy = -np.sum(prob[prob > 0] * np.log2(prob[prob > 0]))
        # end answer
return entropy
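    # Worked example: for y = [0, 0, 1, 1] with uniform weights the class probabilities are
    # 0.5 and 0.5, so the weighted entropy is -2 * 0.5 * log2(0.5) = 1.0 bit.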
def _information_gain(self, X, y, index, sample_weights):
"""Calculate the information gain given a vector of features.
Args:
X: training features, of shape (N, D).
y: vector of training labels, of shape (N,).
index: the index of the feature for calculating. 0 <= index < D
sample_weights: weights for each samples, of shape (N,).
Returns:
(float): the information gain calculated.
"""
info_gain = 0
# YOUR CODE HERE
# begin answer
if sample_weights is None:
sample_weights = np.ones(y.shape[0]) / y.shape[0]
en = DecisionTree.entropy(y, sample_weights)
        sub_en = 0
        best_x = None
if self.continuous:
x_set = np.unique(X[:, index])
best_sub_en = np.Inf
best_x = None
            for x in x_set:
                sub_en = 0
                for c_idx in range(2):
sub_X, sub_y, sub_sample_weights = self._split_dataset(X, y, index, x, sample_weights, c_idx)
sub_en += np.sum(sub_sample_weights) / np.sum(sample_weights) * DecisionTree.entropy(sub_y,
sub_sample_weights)
if sub_en < best_sub_en:
best_sub_en = sub_en
best_x = x
else:
thresh = np.unique(X[:, index])
for i in range(len(thresh)):
sub_X, sub_y, sub_sample_weights = self._split_dataset(X, y, index, thresh[i], sample_weights)
sub_en += np.sum(sub_sample_weights)/np.sum(sample_weights) * DecisionTree.entropy(sub_y, sub_sample_weights)
        if self.continuous:
            info_gain = en - best_sub_en
        else:
            info_gain = en - sub_en
# end answer
return info_gain, best_x
def _information_gain_ratio(self, X, y, index, sample_weights):
"""Calculate the information gain ratio given a vector of features.
Args:
X: training features, of shape (N, D).
y: vector of training labels, of shape (N,).
index: the index of the feature for calculating. 0 <= index < D
sample_weights: weights for each samples, of shape (N,).
Returns:
(float): the information gain ratio calculated.
"""
info_gain_ratio = 0
split_information = 0.0
# YOUR CODE HERE
# begin answer
if sample_weights is None:
sample_weights = np.ones(y.shape[0]) / y.shape[0]
en = DecisionTree.entropy(y, sample_weights)
        sub_en = 0
        sub_info = 0
        best_x = None
if self.continuous:
x_set = np.unique(X[:, index])
best_sub_info = np.Inf
best_x = None
for x in x_set:
for c_idx in range(2):
sub_X, sub_y, sub_sample_weights = self._split_dataset(X, y, index, x, sample_weights, c_idx)
coef = np.sum(sub_sample_weights) / np.sum(sample_weights)
sub_en += coef * DecisionTree.entropy(sub_y, sub_sample_weights)
sub_info = coef * np.log2(coef)
if sub_info < best_sub_info:
best_sub_info = sub_info
best_x = x
else:
thresh = np.unique(X[:, index])
for i in range(len(thresh)):
sub_X, sub_y, sub_sample_weights = self._split_dataset(X, y, index, thresh[i], sample_weights)
coef = np.sum(sub_sample_weights) / np.sum(sample_weights)
sub_en += coef * DecisionTree.entropy(sub_y, sub_sample_weights)
sub_info = coef * np.log2(coef)
info_gain = en - sub_en
if sub_info > 1e-10:
info_gain_ratio = info_gain / sub_info
else:
info_gain_ratio = np.Inf
# end answerself
return info_gain_ratio, best_x
@staticmethod
def gini_impurity(y, sample_weights):
"""Calculate the gini impurity for labels.
Args:
y: vector of training labels, of shape (N,).
sample_weights: weights for each samples, of shape (N,).
Returns:
(float): the gini impurity for y.
"""
gini = 1
# YOUR CODE HERE
# begin answer
if sample_weights is None:
sample_weights = np.ones(y.shape[0]) / y.shape[0]
y_set = np.unique(y)
        prob = np.zeros(y_set.shape)
        S = np.sum(sample_weights)
        for i in range(len(y_set)):
            prob[i] = np.sum(np.multiply(y == y_set[i], sample_weights)) / S
        gini = 1 - np.sum(prob ** 2)
# end answer
return gini
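    # Worked example: for y = [0, 0, 1, 1] with uniform weights the class probabilities are
    # 0.5 and 0.5, so the Gini impurity is 1 - (0.25 + 0.25) = 0.5.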
def _gini_purification(self, X, y, index, sample_weights):
"""Calculate the resulted gini impurity given a vector of features.
Args:
X: training features, of shape (N, D).
y: vector of training labels, of shape (N,).
index: the index of the feature for calculating. 0 <= index < D
sample_weights: weights for each samples, of shape (N,).
Returns:
(float): the resulted gini impurity after splitting by this feature.
"""
new_impurity = 1
# YOUR CODE HERE
# begin answer
if sample_weights is None:
sample_weights = np.ones(y.shape[0]) / y.shape[0]
gini = DecisionTree.gini_impurity(y, sample_weights)
        sub_gini = 0
        best_x = None
if self.continuous:
x_set = np.unique(X[:, index])
best_sub_gini = np.Inf
best_x =None
for x in x_set:
for c_idx in range(2):
sub_X, sub_y, sub_sample_weights = self._split_dataset(X, y, index, x, sample_weights, c_idx)
coef = np.sum(sub_sample_weights) / np.sum(sample_weights)
sub_gini += coef * DecisionTree.gini_impurity(sub_y, sub_sample_weights)
if sub_gini < best_sub_gini:
best_sub_gini = sub_gini
best_x = x
else:
thresh = np.unique(X[:, index])
for i in range(len(thresh)):
sub_X, sub_y, sub_sample_weights = self._split_dataset(X, y, index, thresh[i], sample_weights)
coef = np.sum(sub_sample_weights) / np.sum(sample_weights)
sub_gini += coef * DecisionTree.gini_impurity(sub_y, sub_sample_weights)
new_impurity = gini - sub_gini
# end answer
return new_impurity, best_x
    def _split_dataset(self, X, y, index, value, sample_weights, c_idx=0):
"""Return the split of data whose index-th feature equals value.
Args:
X: training features, of shape (N, D).
y: vector of training labels, of shape (N,).
index: the index of the feature for splitting.
value: the value of the index-th feature for splitting.
sample_weights: weights for each samples, of shape (N,).
Returns:
(np.array): the subset of X whose index-th feature equals value.
(np.array): the subset of y whose index-th feature equals value.
(np.array): the subset of sample weights whose index-th feature equals value.
"""
sub_X, sub_y, sub_sample_weights = X, y, sample_weights
# YOUR CODE HERE
# Hint: Do not forget to remove the index-th feature from X.
# begin answer
if self.continuous:
if c_idx == 1:
idx = np.where(X[:, index] > value)[0]
else:
idx = np.where(X[:, index] <= value)[0]
sub_X = X[idx, :]
else:
idx = np.where(X[:, index] == value)[0]
sub_X = np.delete(X[idx, :], index, axis=1)
sub_y = y[idx]
sub_sample_weights = sample_weights[idx]
# end answer
return sub_X, sub_y, sub_sample_weights
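    # Note: for continuous features, c_idx == 0 keeps samples with value <= `value` and c_idx == 1
    # keeps samples with value > `value`; for discrete features the matching rows are kept and the
    # split column is removed from the returned features.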
def _choose_best_feature(self, X, y, sample_weights):
"""Choose the best feature to split according to criterion.
Args:
X: training features, of shape (N, D).
y: vector of training labels, of shape (N,).
sample_weights: weights for each samples, of shape (N,).
Returns:
(int): the index for the best feature
"""
best_feature_idx = 0
# YOUR CODE HERE
# Note that you need to implement the sampling feature part here for random forest!
# Hint: You may find `np.random.choice` is useful for sampling.
# begin answer
        best_score = 0
        best_x = None
sub_fea = np.arange(X.shape[1])
if self.sample_feature:
            sub_fea = np.random.choice(np.arange(X.shape[1]), int(np.sqrt(X.shape[1])), replace=False)
for i in sub_fea:
            cur_score, cur_thresh = self.criterion(X, y, i, sample_weights)
            if cur_score > best_score:
                best_score = cur_score
                best_feature_idx = i
                best_x = cur_thresh
# end answer
return best_feature_idx, best_x
@staticmethod
def majority_vote(y, sample_weights=None):
"""Return the label which appears the most in y.
Args:
y: vector of training labels, of shape (N,).
sample_weights: weights for each samples, of shape (N,).
Returns:
(int): the majority label
"""
if sample_weights is None:
sample_weights = np.ones(y.shape[0]) / y.shape[0]
majority_label = y[0]
# YOUR CODE HERE
# begin answer
y_set = np.unique(y)
num = np.zeros(y_set.shape)
for i in range(len(y_set)):
num[i] = np.sum((y == y_set[i]) * sample_weights)
majority_label = y_set[np.argmax(num)]
# end answer
return majority_label
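    # Worked example: majority_vote(np.array([1, 0, 1, 1])) returns 1 under uniform sample
    # weights, since label 1 carries the larger total weight.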
def _build_tree(self, X, y, feature_names, depth, sample_weights):
"""Build the decision tree according to the data.
Args:
X: (np.array) training features, of shape (N, D).
y: (np.array) vector of training labels, of shape (N,).
feature_names (list): record the name of features in X in the original dataset.
depth (int): current depth for this node.
sample_weights: weights for each samples, of shape (N,).
Returns:
(dict): a dict denoting the decision tree.
Example:
The first best feature name is 'title', and it has 5 different values: 0,1,2,3,4. For 'title' == 4, the next best feature name is 'pclass', we continue split the remain data. If it comes to the leaf, we use the majority_label by calling majority_vote.
mytree = {
'titile': {
0: subtree0,
1: subtree1,
2: subtree2,
3: subtree3,
4: {
'pclass': {
1: majority_vote([1, 1, 1, 1]) # which is 1, majority_label
2: majority_vote([1, 0, 1, 1]) # which is 1
3: majority_vote([0, 0, 0]) # which is 0
}
}
}
}
"""
mytree = dict()
# YOUR CODE HERE
# TODO: Use `_choose_best_feature` to find the best feature to split the X. Then use `_split_dataset` to
# get subtrees.
# Hint: You may find `np.unique` is useful.
# begin answer
# Todo prune, early stop
if depth <= self.max_depth and X.shape[0] >= self.min_samples_leaf:
fea_idx, best_thresh = self._choose_best_feature(X, y, sample_weights)
fea_name = feature_names[fea_idx]
sub_fea_names =feature_names[:fea_idx] + feature_names[fea_idx+1:]
if self.continuous:
mytree[(fea_name, best_thresh)] = {}
for c_idx in range(2):
sub_X, sub_y, sub_sample_weights = self._split_dataset(X, y, fea_idx, best_thresh, sample_weights, c_idx)
if len(sub_y) > 0:
mytree[(fea_name, best_thresh)][c_idx] = self._build_tree(sub_X, sub_y, sub_fea_names, depth+1, sub_sample_weights)
else:
mytree[fea_name] = {}
                fea_set = np.unique(X[:, fea_idx])
"""
Implementation of the method proposed in the paper:
'Adversarial Attacks on Neural Networks for Graph Data'
by <NAME>, <NAME> and <NAME>,
published at SIGKDD'18, August 2018, London, UK
Copyright (C) 2018
<NAME>
Technical University of Munich
"""
import numpy as np
import scipy.sparse as sp
from nettack import utils
from numba import jit
class Nettack:
"""
Nettack class used for poisoning attacks on node classification models.
Copyright (C) 2018
<NAME>
Technical University of Munich
"""
def __init__(self, adj, X_obs, z_obs, W1, W2, u, verbose=False):
# Adjacency matrix
self.adj = adj.copy().tolil()
self.adj_no_selfloops = self.adj.copy()
self.adj_no_selfloops.setdiag(0)
self.adj_orig = self.adj.copy().tolil()
self.u = u # the node being attacked
self.adj_preprocessed = utils.preprocess_graph(self.adj).tolil()
# Number of nodes
self.N = adj.shape[0]
# Node attributes
self.X_obs = X_obs.copy().tolil()
self.X_obs_orig = self.X_obs.copy().tolil()
# Node labels
self.z_obs = z_obs.copy()
self.label_u = self.z_obs[self.u]
self.K = np.max(self.z_obs)+1
# GCN weight matrices
self.W1 = W1
self.W2 = W2
self.W = sp.csr_matrix(self.W1.dot(self.W2))
self.cooc_matrix = self.X_obs.T.dot(self.X_obs).tolil()
self.cooc_constraint = None
self.structure_perturbations = []
self.feature_perturbations = []
self.influencer_nodes = []
self.potential_edges = []
self.verbose = verbose
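    # Illustrative construction sketch (argument values are assumptions):
    #   attacker = Nettack(adj, X_obs, z_obs, W1, W2, u=0, verbose=True)
    # where adj is the sparse adjacency matrix, X_obs the node attributes, z_obs the labels,
    # W1/W2 the surrogate GCN weights, and u the index of the node being attacked.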
def compute_cooccurrence_constraint(self, nodes):
"""
Co-occurrence constraint as described in the paper.
Parameters
----------
nodes: np.array
Nodes whose features are considered for change
Returns
-------
np.array [len(nodes), D], dtype bool
Binary matrix of dimension len(nodes) x D. A 1 in entry n,d indicates that
we are allowed to add feature d to the features of node n.
"""
words_graph = self.cooc_matrix.copy()
D = self.X_obs.shape[1]
words_graph.setdiag(0)
words_graph = (words_graph > 0)
word_degrees = np.sum(words_graph, axis=0).A1
inv_word_degrees = np.reciprocal(word_degrees.astype(float) + 1e-8)
sd = np.zeros([self.N])
for n in range(self.N):
n_idx = self.X_obs[n, :].nonzero()[1]
sd[n] = np.sum(inv_word_degrees[n_idx.tolist()])
scores_matrix = sp.lil_matrix((self.N, D))
for n in nodes:
common_words = words_graph.multiply(self.X_obs[n])
idegs = inv_word_degrees[common_words.nonzero()[1]]
nnz = common_words.nonzero()[0]
scores = np.array([idegs[nnz == ix].sum() for ix in range(D)])
scores_matrix[n] = scores
self.cooc_constraint = sp.csr_matrix(scores_matrix - 0.5 * sd[:, None] > 0)
def gradient_wrt_x(self, label):
"""
Compute the gradient of the logit belonging to the class of the input label with respect to the input features.
Parameters
----------
label: int
Class whose logits are of interest
Returns
-------
np.array [N, D] matrix containing the gradients.
"""
return self.adj_preprocessed.dot(self.adj_preprocessed)[self.u].T.dot(self.W[:, label].T)
def compute_logits(self):
"""
Compute the logits of the surrogate model, i.e. linearized GCN.
Returns
-------
np.array, [N, K]
The log probabilities for each node.
"""
return self.adj_preprocessed.dot(self.adj_preprocessed).dot(self.X_obs.dot(self.W))[self.u].toarray()[0]
def strongest_wrong_class(self, logits):
"""
Determine the incorrect class with largest logits.
Parameters
----------
logits: np.array, [N, K]
The input logits
Returns
-------
np.array, [N, L]
The indices of the wrong labels with the highest attached log probabilities.
"""
label_u_onehot = np.eye(self.K)[self.label_u]
return (logits - 1000*label_u_onehot).argmax()
def feature_scores(self):
"""
Compute feature scores for all possible feature changes.
"""
if self.cooc_constraint is None:
self.compute_cooccurrence_constraint(self.influencer_nodes)
logits = self.compute_logits()
best_wrong_class = self.strongest_wrong_class(logits)
gradient = self.gradient_wrt_x(self.label_u) - self.gradient_wrt_x(best_wrong_class)
surrogate_loss = logits[self.label_u] - logits[best_wrong_class]
gradients_flipped = (gradient * -1).tolil()
gradients_flipped[self.X_obs.nonzero()] *= -1
X_influencers = sp.lil_matrix(self.X_obs.shape)
X_influencers[self.influencer_nodes] = self.X_obs[self.influencer_nodes]
gradients_flipped = gradients_flipped.multiply((self.cooc_constraint + X_influencers) > 0)
nnz_ixs = np.array(gradients_flipped.nonzero()).T
sorting = np.argsort(gradients_flipped[tuple(nnz_ixs.T)]).A1
sorted_ixs = nnz_ixs[sorting]
grads = gradients_flipped[tuple(nnz_ixs[sorting].T)]
scores = surrogate_loss - grads
return sorted_ixs[::-1], scores.A1[::-1]
def struct_score(self, a_hat_uv, XW):
"""
Compute structure scores, cf. Eq. 15 in the paper
Parameters
----------
a_hat_uv: sp.sparse_matrix, shape [P,2]
Entries of matrix A_hat^2_u for each potential edge (see paper for explanation)
XW: sp.sparse_matrix, shape [N, K], dtype float
The class logits for each node.
Returns
-------
np.array [P,]
The struct score for every row in a_hat_uv
"""
logits = a_hat_uv.dot(XW)
label_onehot = np.eye(XW.shape[1])[self.label_u]
best_wrong_class_logits = (logits - 1000 * label_onehot).max(1)
logits_for_correct_class = logits[:,self.label_u]
struct_scores = logits_for_correct_class - best_wrong_class_logits
return struct_scores
def compute_XW(self):
"""
Shortcut to compute the dot product of X and W
Returns
-------
X.dot(W)
"""
return self.X_obs.dot(self.W)
def get_attacker_nodes(self, n=5, add_additional_nodes = False):
"""
Determine the influencer nodes to attack node i based on the weights W and the attributes X.
Parameters
----------
n: int, default: 5
The desired number of attacker nodes.
add_additional_nodes: bool, default: False
if True and the degree of node i (d_u) is < n, we select n-d_u additional attackers, which should
get connected to u afterwards (outside this function).
Returns
-------
np.array, shape [n,]:
The indices of the attacker nodes.
optional: np.array, shape [n - degree(n)]
if additional_nodes is True, we separately
return the additional attacker node indices
"""
assert n < self.N-1, "number of influencers cannot be >= number of nodes in the graph!"
neighbors = self.adj_no_selfloops[self.u].nonzero()[1]
assert self.u not in neighbors
potential_edges = np.column_stack((np.tile(self.u, len(neighbors)),neighbors)).astype("int32")
# The new A_hat_square_uv values that we would get if we removed the edge from u to each of the neighbors,
# respectively
a_hat_uv = self.compute_new_a_hat_uv(potential_edges)
XW = self.compute_XW()
# compute the struct scores for all neighbors
struct_scores = self.struct_score(a_hat_uv, XW).A1
if len(neighbors) >= n: # do we have enough neighbors for the number of desired influencers?
            influencer_nodes = neighbors[np.argsort(struct_scores)[:n]]
# -----------------------------------------------------------------------------------------------------
# CONDOR
# Simulator for diffractive single-particle imaging experiments with X-ray lasers
# http://xfel.icm.uu.se/condor/
# -----------------------------------------------------------------------------------------------------
# Copyright 2016 <NAME>, <NAME>, <NAME>
# Condor is distributed under the terms of the BSD 2-Clause License
# -----------------------------------------------------------------------------------------------------
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------------------------------
# General note:
# All variables are in SI units by default. Exceptions explicit by variable name.
# -----------------------------------------------------------------------------------------------------
from __future__ import print_function, absolute_import # Compatibility with python 2 and 3
import sys,os
try:
from collections.abc import Iterable ## Python >= 3.3
except ImportError:
from collections import Iterable ## Python < 3.3
sys.path.append("utils")
import numpy
import logging
logger = logging.getLogger(__name__)
import condor.utils.log
from condor.utils.log import log_and_raise_error,log_warning,log_info,log_debug
import condor.utils.resample
from condor.utils.variation import Variation
from condor.utils.pixelmask import PixelMask
from condor.utils.linalg import length
import condor.utils.testing
import condor.utils.scattering_vector
class Detector:
"""
Class for a photon area-detector
.. image:: images/detector_schematic.jpg
**Arguments:**
:distance (float): Distance from interaction point to detector plane
:pixel_size (float): Edge length of detector pixel (square shape)
**Keyword arguments:**
:cx (float): Horizontal beam position in unit pixel. If ``cx=None`` beam will be positioned in the center (default ``None``)
    :cy (float): Vertical beam position in unit pixel. If ``cy=None`` beam will be positioned in the center (default ``None``)
:center_variation (str): See :meth:`condor.detector.Detector.set_center_variation` (default ``None``)
:center_spread_x (float): See :meth:`condor.detector.Detector.set_center_variation` (default ``None``)
:center_spread_y (float): See :meth:`condor.detector.Detector.set_center_variation` (default ``None``)
:center_variation_n (int): See :meth:`condor.detector.Detector.set_center_variation` (default ``None``)
:noise (str): See :meth:`condor.detector.Detector.set_noise` (default ``None``)
:noise_spread (float): See :meth:`condor.detector.Detector.set_noise` (default ``None``)
:noise_filename (str): See :meth:`condor.detector.Detector.set_noise` (default ``None``)
:noise_dataset (str): See :meth:`condor.detector.Detector.set_noise` (default ``None``)
    :saturation_level (float): Value at which detector pixels saturate (default ``None``)
:binning (int): Pixel binning factor, intensies are integrated over square patches that have an area of ``binning`` x ``binning`` pixels (default ``None``)
:mask_CXI_bitmask (bool): If ``True`` the provided mask (``mask_dataset`` or ``mask``) is a CXI bitmask. For documentation on the implementation of CXI bitmasks see :class:`condor.utils.pixelmask.PixelMask` (default ``False``)
:solid_angle_correction (bool): Whether or not solid angle correction shall be applied (default ``True``)
*Choose one of the following options:*
==================== =============================================================================
``mask_CXI_bitmask`` valid pixels
==================== =============================================================================
``False`` ``1``
``True`` ``(pixels & condor.utils.pixelmask.PixelMask.PIXEL_IS_IN_MASK_DEFAULT) == 0``
==================== =============================================================================
**There are 3 alternative options to specify shape and mask of the detector**
*A) Parameters*
:nx (int): Number of pixels in *x* direction (not including a potential gap or hole) (default ``None``)
:ny (int): Number of pixels in *y* direction (not including a potential gap or hole) (default ``None``)
:x_gap_size_in_pixel (int): Size of central gap along *x* in unit pixel (default ``None``)
:y_gap_size_in_pixel (int): Size of central gap along *y* in unit pixel (default ``None``)
:hole_diameter_in_pixel (int): Diameter of central hole in unit pixel (default ``None``)
*B) HDF5 dataset for mask*
:mask_filename (str): Location of HDF5 file that contains dataset for mask (default ``None``)
:mask_dataset (str): HDF5 dataset (in the file specified by the argument ``mask_filename``) that contains the mask data. Toggle the option ``mask_CXI_bitmask`` for decoding options (default ``None``)
*C) Numpy array for mask*
:mask (array): 2D numpy integer array that defines the mask. Toggle ``mask_CXI_bitmask`` for decoding options (default ``None``)
"""
def __init__(self, distance, pixel_size,
x_gap_size_in_pixel=0, y_gap_size_in_pixel=0, hole_diameter_in_pixel=0, cx_hole=None, cy_hole=None,
noise=None, noise_spread=None, noise_variation_n=None, noise_filename=None, noise_dataset=None,
cx=None, cy=None, center_variation=None, center_spread_x=None, center_spread_y=None, center_variation_n=None,
saturation_level=None, mask=None, mask_filename=None, mask_dataset=None, mask_is_cxi_bitmask=False, solid_angle_correction=True,
nx=None, ny=None, binning=None):
self.distance = distance
self.pixel_size = float(pixel_size)
self._init_mask(mask=mask, mask_is_cxi_bitmask=mask_is_cxi_bitmask, mask_filename=mask_filename, mask_dataset=mask_dataset, nx=nx, ny=ny,
x_gap_size_in_pixel=x_gap_size_in_pixel, y_gap_size_in_pixel=y_gap_size_in_pixel, cx_hole=cx_hole, cy_hole=cy_hole, hole_diameter_in_pixel=hole_diameter_in_pixel)
self.cx_mean = cx if cx != 'middle' else None
self.cy_mean = cy if cy != 'middle' else None
self.set_center_variation(center_variation=center_variation,
center_spread_x=center_spread_x,
center_spread_y=center_spread_y,
center_variation_n=center_variation_n)
self.set_noise(noise=noise,
noise_spread=noise_spread,
noise_variation_n=noise_variation_n,
noise_filename=noise_filename,
noise_dataset=noise_dataset)
self.saturation_level = saturation_level
self.binning = binning
self.solid_angle_correction = solid_angle_correction
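    # Minimal usage sketch (illustrative values, not taken from any real setup): construct a
    # detector with a central hole, then query its mask and a per-pixel solid angle.
    # Assumes the package is importable as ``condor``, as in the docstrings below.
    #
    #   import condor
    #   det = condor.Detector(distance=0.74, pixel_size=75E-6,
    #                         nx=1024, ny=1024, hole_diameter_in_pixel=20)
    #   mask = det.get_mask(boolmask=True)                 # boolean mask, True = valid pixel
    #   omega = det.get_pixel_solid_angle(x_off=100., y_off=0.)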
def get_conf(self):
"""
Get configuration in form of a dictionary. Another identically configured Detector instance can be initialised by:
.. code-block:: python
conf = D0.get_conf() # D0: already existing Detector instance
D1 = condor.Detector(**conf) # D1: new Detector instance with the same configuration as D0
"""
conf = {}
conf["detector"] = {}
conf["detector"]["distance"] = self.distance
conf["detector"]["pixel_size"] = self.pixel_size
conf["detector"]["cx"] = self.cx_mean
conf["detector"]["cy"] = self.cy_mean
cvar = self._center_variation.get_conf()
conf["detector"]["center_variation"] = cvar["mode"]
conf["detector"]["center_spread_x"] = cvar["spread"][0]
conf["detector"]["center_spread_y"] = cvar["spread"][1]
conf["detector"]["center_variation_n"] = cvar["n"]
noise = self._noise.get_conf()
conf["detector"]["noise"] = noise["mode"]
conf["detector"]["noise_spread"] = noise["spread"]
conf["detector"]["noise_filename"] = self._noise_filename
conf["detector"]["noise_dataset"] = self._noise_dataset
conf["detector"]["saturation_level"] = self.saturation_level
conf["detector"]["mask"] = self._mask.copy()
conf["detector"]["mask_CXI_bitmask"] = True
conf["detector"]["solid_angle_correction"] = self.solid_angle_correction
return conf
def set_noise(self, noise=None, noise_spread=None, noise_variation_n=None, noise_filename=None, noise_dataset=None):
r"""
Set detector noise type and parameters (this method is called during initialisation)
Kwargs:
:noise (str): Noise added to the predicted intensities (default ``None``)
*Choose one of the following options:*
======================= ==================================================================
``noise`` Noise model
======================= ==================================================================
``None`` No noise
``'poisson'`` Poisson noise (*shot noise*)
``'normal'`` Normal (*Gaussian*) noise
``'uniform'`` Uniformly distributed values within spread limits
``'normal_poisson'`` Normal (*Gaussian*) noise on top of Poisson noise (*shot noise*)
``'file'`` Noise data from file
``'file_poisson'`` Noise data from file on top of Poisson noise (*shot noise*)
======================= ==================================================================
:noise_spread (float): Width (full width at half maximum) of the Gaussian or uniform noise distribution (default ``None``)
        .. note:: The argument ``noise_spread`` takes effect only in combination with ``noise='normal'``, ``'uniform'`` or ``'normal_poisson'``
:noise_filename (str): Location of the HDF5 file that contains the noise data (default ``None``)
:noise_dataset (str): HDF5 dataset (in the file specified by the argument ``noise_filename``) that contains the noise data (default ``None``)
        .. note:: The arguments ``noise_filename`` and ``noise_dataset`` take effect only in combination with ``noise='file'`` or ``'file_poisson'``
"""
if noise in ["file","file_poisson"]:
self._noise_filename = noise_filename
self._noise_dataset = noise_dataset
self._noise = Variation("poisson" if noise == "file_poisson" else None, noise_spread, noise_variation_n, number_of_dimensions=1)
else:
self._noise_filename = None
self._noise_dataset = None
self._noise = Variation(noise, noise_spread, noise_variation_n, number_of_dimensions=1)
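    # Configuration sketch for the noise models handled above (hypothetical file names):
    # Poisson noise needs no extra parameter, Gaussian/uniform noise uses ``noise_spread``,
    # and the file-based variants additionally need ``noise_filename``/``noise_dataset``.
    #
    #   det.set_noise(noise='poisson')
    #   det.set_noise(noise='normal', noise_spread=0.5)
    #   det.set_noise(noise='file_poisson', noise_filename='noise.h5', noise_dataset='data/noise')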
def set_center_variation(self, center_variation=None, center_spread_x=None, center_spread_y=None, center_variation_n=None):
"""
Set the variation of the beam center position (this method is called during initialisation)
Kwargs:
        :center_variation (str): Variation of the beam center position (default ``None``)
*Choose one of the following options:*
===================== ==============================================
``center_variation`` Variation model
===================== ==============================================
``None`` No variation
``'normal'`` Normal (*Gaussian*) random distribution
``'uniform'`` Uniform random distribution
``'range'`` Equispaced grid around mean center position
===================== ==============================================
:center_spread_x (float): Width of the distribution of center position in *x* [pixel] (default ``None``)
:center_spread_y (float): Width of the distribution of center position in *y* [pixel] (default ``None``)
.. note:: The arguments ``center_spread_y`` and ``center_spread_x`` take effect only in combination with ``center_variation='normal'``, ``'uniform'`` or ``'range'``
:center_variation_n (int): Number of samples within the specified range (default ``None``)
.. note:: The argument ``center_variation_n`` takes effect only in combination with ``center_variation='range'``
"""
self._center_variation = Variation(center_variation, [center_spread_x,center_spread_y], center_variation_n, number_of_dimensions=2)
def _init_mask(self, mask, mask_is_cxi_bitmask, mask_filename, mask_dataset, nx, ny, x_gap_size_in_pixel, y_gap_size_in_pixel, cx_hole, cy_hole, hole_diameter_in_pixel):
if mask is not None or (mask_filename is not None and mask_dataset is not None):
if mask is not None:
# Copy mask from array
self._mask = numpy.array(mask, dtype=numpy.uint16)
else:
# Read mask from file
import h5py
with h5py.File(mask_filename,"r") as f:
self._mask = numpy.array(f[mask_dataset][:,:], dtype=numpy.uint16)
if not mask_is_cxi_bitmask:
                # Convert mask to CXI bit format
self._mask = (self._mask == 0) * PixelMask.PIXEL_IS_MISSING
elif nx is not None and ny is not None:
# Initialise empty mask
self._mask = numpy.zeros(shape=(int(ny+y_gap_size_in_pixel), int(nx+x_gap_size_in_pixel)),dtype=numpy.uint16)
else:
log_and_raise_error(logger, r"Either 'mask' or 'nx' and 'ny' have to be specified.")
sys.exit(1)
self._nx = self._mask.shape[1]
self._ny = self._mask.shape[0]
# Mask out pixels in gaps
if y_gap_size_in_pixel > 0:
cy = int(numpy.ceil((self._ny-1)/2.))
gy = int(numpy.round(y_gap_size_in_pixel))
self._mask[cy-gy//2:cy-gy//2+gy,:] |= PixelMask.PIXEL_IS_MISSING
if x_gap_size_in_pixel > 0:
cx = int(numpy.ceil((self._nx-1)/2.))
gx = int(numpy.round(x_gap_size_in_pixel))
self._mask[:,cx-gx//2:cx-gx//2+gx] |= PixelMask.PIXEL_IS_MISSING
# Mask out pixels in hole
if hole_diameter_in_pixel > 0:
if cx_hole is None:
cx_hole = (self._nx-1)/2.
if cy_hole is None:
cy_hole = (self._ny-1)/2.
Y,X = numpy.indices((self._ny,self._nx), dtype=numpy.float64)
X = X-cx_hole
Y = Y-cy_hole
R = numpy.sqrt(X**2 + Y**2)
tmp = R<=hole_diameter_in_pixel/2.0
if tmp.sum() > 0:
self._mask[tmp] |= PixelMask.PIXEL_IS_MISSING
def get_mask(self,intensities=None, boolmask=False):
"""
Return mask. The mask has information about the status of each individual detector pixel. The output can be either a CXI bitmask (default) or a boolean mask
For further information and the full bitcode go to :class:`condor.utils.pixelmask.PixelMask`
Kwargs:
:intensities: Numpy array of photon intensities for masking saturated pixels (default ``None``)
:boolmask (bool): If ``True`` the output will be a boolean array. Mask values are converted to ``True`` if no bit is set and to ``False`` otherwise
"""
if intensities is not None:
if not condor.utils.testing.same_shape(intensities, self._mask):
log_and_raise_error(logger, "Intensities and mask do not have the same shape")
M = self._mask.copy()
if self.saturation_level is not None and intensities is not None:
M[intensities >= self.saturation_level] |= PixelMask.PIXEL_IS_SATURATED
if boolmask:
return numpy.array(M == 0,dtype="bool")
else:
return M
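    # Decoding sketch for the returned CXI bitmask, following the table in the class
    # docstring (``det`` and ``I`` are hypothetical detector and intensity objects):
    #
    #   M = det.get_mask(intensities=I)
    #   valid = (M & PixelMask.PIXEL_IS_IN_MASK_DEFAULT) == 0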
def get_cx_mean_value(self):
"""
Return *x*-coordinate of the mean beam center position
"""
if self.cx_mean is None:
return (self._nx-1) / 2.
else:
return self.cx_mean
def get_cy_mean_value(self):
"""
Return *y*-coordinate of the mean beam center position
"""
if self.cy_mean is None:
return (self._ny-1) / 2.
else:
return self.cy_mean
def get_next(self):
"""
Iterate the parameters of the Detector instance and return them as a dictionary
"""
O = {}
cx_mean = self.get_cx_mean_value()
cy_mean = self.get_cy_mean_value()
cx, cy = self._center_variation.get([cx_mean, cy_mean])
O["cx"] = cx
O["cy"] = cy
O["nx"] = self._nx
O["ny"] = self._ny
O["pixel_size"] = self.pixel_size
O["distance"] = self.distance
if self.binning is not None:
O["cx_xxx"] = condor.utils.resample.downsample_pos(cx, self._nx, self.binning)
O["cy_xxx"] = condor.utils.resample.downsample_pos(cy, self._ny, self.binning)
return O
def get_pixel_solid_angle(self, x_off=0., y_off=0.):
"""
Get the solid angle for a pixel at position ``x_off``, ``y_off`` with respect to the beam center
Kwargs:
:x_off: *x*-coordinate of the pixel position (center) in unit pixel with respect to the beam center (default 0.)
:y_off: *y*-coordinate of the pixel position (center) in unit pixel with respect to the beam center (default 0.)
"""
r_max = numpy.sqrt(x_off**2+y_off**2) * self.pixel_size
it = isinstance(r_max, Iterable)
if it:
r_max = r_max.max()
if r_max/self.distance < 0.0001:
# Small angle approximation (fast)
omega = self.pixel_size**2 / self.distance**2
if it:
                omega *= numpy.ones_like(r_max)
#!/usr/bin/env python3
import rospy
import tf2_ros
import tf2_geometry_msgs
from tf.transformations import quaternion_from_euler, euler_from_quaternion
from nav_msgs.msg import Odometry
from std_msgs.msg import Float64MultiArray, MultiArrayLayout, MultiArrayDimension
from visualization_msgs.msg import MarkerArray
from smarc_msgs.msg import ThrusterFeedback
from vision_msgs.msg import ObjectHypothesisWithPose, Detection2DArray, Detection2D
from sensor_msgs.msg import Imu
import numpy as np
from scipy.stats import norm
class ParticleFilter:
def __init__(self, robot_name, num_particles, num_states,
positional_process_noise, rotational_process_noise,
measurement_noise, sidescan_half_horizontal_beam_width):
self.robot_name = robot_name
self.num_particles = num_particles
self.num_states = num_states
self.positional_process_noise = positional_process_noise
self.rotational_process_noise = rotational_process_noise
self.measurement_noise = measurement_noise
self.sidescan_half_horizontal_beam_width = sidescan_half_horizontal_beam_width
self.target_frame = 'utm'
self.tf_buffer = tf2_ros.Buffer()
self.tf_listener = tf2_ros.TransformListener(self.tf_buffer)
self.particles = self._init_particles_for_tracking()
self.particles_msg = self._init_particles_msg()
self.particles_topic = '/{}/localization/particles'.format(
self.robot_name)
self.particles_pub = rospy.Publisher(self.particles_topic,
Float64MultiArray,
queue_size=5)
# Motion model related
self.thrusts = {1: 0, 2: 0}
self.thruster1_sub = self._setup_thruster_sub(1)
self.thruster2_sub = self._setup_thruster_sub(2)
self.coeff = .0005
self.imu = Imu()
self.imu_sub = self._setup_imu_sub()
# Measurement model related
self.weights = self._init_weights()
self.landmarks = self._read_landmarks()
self.num_landmarks = self.landmarks.shape[0]
self.measurements = np.array([[]])
self.measurement_sub = self._setup_measurement_sub()
self.has_new_measurements = False
# Update
self.dt = .1
self.timer = rospy.Timer(rospy.Duration(self.dt), self.run)
def _setup_measurement_sub(self):
obs_topic = '/{}/payload/sidescan/detection_hypothesis'.format(
self.robot_name)
obs_sub = rospy.Subscriber(obs_topic, Detection2DArray,
self._update_measurement)
return obs_sub
def _update_measurement(self, msg):
self.has_new_measurements = True
trans = self._wait_for_transform(from_frame=msg.header.frame_id,
to_frame='{}/base_link'.format(
self.robot_name))
measurements = []
for d in msg.detections:
for r in d.results:
pose = r.pose
pose_transformed = tf2_geometry_msgs.do_transform_pose(
pose, trans)
measurements.append([pose_transformed.pose.position.y])
self.measurements = np.array(measurements)
def _read_landmarks(self):
"""Wait for /{robot_name}/sim/marked_positions to publish its first
message, use it to initialize landmarks array."""
marked_pos_topic = '/{}/sim/marked_positions'.format(self.robot_name)
msg = rospy.wait_for_message(marked_pos_topic, MarkerArray)
landmarks = []
for marker in msg.markers:
landmarks.append([
marker.pose.position.x, marker.pose.position.y,
marker.pose.position.z
])
return np.array(landmarks)
def _setup_imu_sub(self):
imu_topic = '/{}/core/sbg_imu'.format(self.robot_name)
imu_sub = rospy.Subscriber(imu_topic, Imu, self._update_imu)
return imu_sub
def _update_imu(self, msg):
self.imu = msg
def _setup_thruster_sub(self, i):
topic = '/{}/core/thruster{}_fb'.format(self.robot_name, i)
thruster_sub = rospy.Subscriber(topic, ThrusterFeedback,
self._update_thrust, i)
return thruster_sub
def _update_thrust(self, msg, i):
self.thrusts[i] = msg.rpm.rpm
def _init_particles_msg(self):
dim0 = MultiArrayDimension(label='particle_index',
size=self.num_particles,
stride=self.num_particles * self.num_states)
dim1 = MultiArrayDimension(label='particle_state',
size=self.num_states,
stride=self.num_states)
layout = MultiArrayLayout(data_offset=0, dim=[dim0, dim1])
particle_msg = Float64MultiArray(layout=layout,
data=self.particles.flatten())
return particle_msg
def _wait_for_transform(self, from_frame, to_frame):
trans = None
while trans is None:
try:
trans = self.tf_buffer.lookup_transform(
to_frame, from_frame, rospy.Time(), rospy.Duration(1.0))
except (tf2_ros.LookupException, tf2_ros.ConnectivityException,
tf2_ros.ExtrapolationException) as error:
print('Failed to transform. Error: {}'.format(error))
return trans
def _init_particles_for_tracking(self):
particles = np.zeros((self.num_particles, self.num_states))
# Get init pose in utm
odom_topic = '/{}/sim/odom'.format(self.robot_name)
msg = rospy.wait_for_message(odom_topic, Odometry)
print('odom msg: {}'.format(msg))
pose = msg.pose
trans = self._wait_for_transform(from_frame=msg.header.frame_id,
to_frame=self.target_frame)
init_pose = tf2_geometry_msgs.do_transform_pose(pose, trans).pose
(init_roll, init_pitch, init_yaw) = euler_from_quaternion([
init_pose.orientation.x, init_pose.orientation.y,
init_pose.orientation.z, init_pose.orientation.w
])
mean_state = [
init_pose.position.x, init_pose.position.y, init_pose.position.z,
init_roll, init_pitch, init_yaw
]
particles = np.array(mean_state * self.num_particles).reshape(
self.num_particles, self.num_states)
#TODO: set spread at the proper place and to the proper value
particles += np.random.uniform(low=-.1, high=.1, size=particles.shape)
# Angles should be between (-pi, pi)
particles = self._normalize_angles(particles)
return particles
def _normalize_angles(self, particles):
particles[:, -3:] = (particles[:, -3:] + np.pi) % (2 * np.pi) - np.pi
return particles
def _init_weights(self):
        # Uniform weight per particle (dividing by num_states here would mismatch the particle count)
        return np.array([1.0 / self.num_particles] * self.num_particles)
def run(self, timer):
self.motion_model()
if self.has_new_measurements:
self.weights = self.measurement_model()
# print(self.weights)
self.particles = self.systematic_resampling()
self.weights = self._init_weights()
self.particles_msg.data = self.particles.flatten()
self.particles_pub.publish(self.particles_msg)
def motion_model(self):
thrust = (self.thrusts[1] + self.thrusts[2]) * self.coeff
linear_velocity = np.array([thrust, 0, 0]).reshape(3, 1)
(roll, pitch, yaw) = euler_from_quaternion([
self.imu.orientation.x, self.imu.orientation.y,
self.imu.orientation.z, self.imu.orientation.w
])
rotation = self._compute_rotation(roll, pitch, yaw)
velocity = np.matmul(rotation, linear_velocity)
self.particles[:, :3] += (velocity * self.dt).reshape(1, 3)
self.particles[:, -3:] = [roll, pitch, yaw]
# Diffusion
self.particles[:, :3] += np.random.randn(
self.num_particles, 3) * self.positional_process_noise
self.particles[:, -3:] += np.random.randn(
self.num_particles, 3) * self.rotational_process_noise
self.particles = self._normalize_angles(self.particles)
def _compute_rotation(self, roll, pitch, yaw):
rx = np.array([[1, 0, 0], [0, np.cos(roll), -np.sin(roll)],
[0, np.sin(roll), np.cos(roll)]])
ry = np.array([[np.cos(pitch), 0, np.sin(pitch)], [0, 1, 0],
[-np.sin(pitch), 0, np.cos(pitch)]])
rz = np.array([[np.cos(yaw), -np.sin(yaw), 0],
[np.sin(yaw), np.cos(yaw), 0], [0, 0, 1]])
rotation = np.matmul(rz, np.matmul(ry, rx))
return rotation
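    # Sanity-check sketch for the composition R = Rz(yaw) @ Ry(pitch) @ Rx(roll) used above
    # (illustrative only, not executed by the node): with zero roll/pitch, the body-frame
    # forward axis should map onto the horizontal heading.
    #
    #   R = self._compute_rotation(0., 0., np.pi / 2)
    #   forward = np.matmul(R, np.array([1., 0., 0.]).reshape(3, 1))
    #   # forward is approximately [0, 1, 0]^T, i.e. +y for a 90 degree yaw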
def measurement_model(self):
self.has_new_measurements = False
# prediction: (num_landmarks, num_particles)
prediction = self.predicted_measurement()
# likelihood: (num_measurements, num_landmarks, num_particles)
likelihood = np.stack([
norm(prediction,
self.measurement_noise).pdf(self.measurements[i, :])
for i in range(len(self.measurements))
])
# association: (num_measurements, num_particles)
# association = np.argmax(likelihood, axis=1)
ml_likelihood = np.max(likelihood, axis=1)
# assumes i.i.d. measurements
weights = np.sum(ml_likelihood, axis=0)
weights += np.finfo(float).eps
weights /= np.sum(weights)
return weights
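    # Shape illustration for the maximum-likelihood association above (made-up sizes):
    # the most likely landmark is kept per measurement and particle, and the results are
    # accumulated into one weight per particle before normalization.
    #
    #   likelihood = np.random.rand(2, 3, 100)        # (num_measurements, num_landmarks, num_particles)
    #   ml_likelihood = np.max(likelihood, axis=1)    # (num_measurements, num_particles)
    #   weights = np.sum(ml_likelihood, axis=0)       # (num_particles,)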
def predicted_measurement(self):
"""Compute predicted sidescan measurement for all landmarks from all
particles. Note that the sidescan measurement only consists of a range
measure in meters. Only landmarks within a certain threshold of
        angles are deemed observable; the others have their predicted measurement set
to inf.
Returns:
- dist: (num_landmarks, num_particles)
"""
# vectors pointing from particles to landmarks: (num_landmarks, num_particles, 3)
particle_to_landmark_vec = np.stack([
self.landmarks[i, :] - self.particles[:, :3]
for i in range(self.num_landmarks)
])
# distance between particles and landmarks: (num_landmarks, num_particles)
dist = np.linalg.norm(particle_to_landmark_vec, axis=2)
# convert particles to landmark vector into unit vectors
particle_to_landmark_vec_normalized = particle_to_landmark_vec / dist.reshape(
self.num_landmarks, self.num_particles, 1)
# compute heading
heading = np.stack([
np.cos(self.particles[:, -1]) * np.cos(self.particles[:, -2]),
np.sin(self.particles[:, -1]) * np.cos(self.particles[:, -2]),
            np.sin(self.particles[:, -2])
""" Bayesian Optimization implementation from the thesis by Willemsen """
from copy import deepcopy
from random import randint, shuffle
import itertools
import warnings
import time
from typing import Tuple
import numpy as np
from scipy.stats import norm
# BO imports
try:
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import ConstantKernel, RBF, Matern
from sklearn.exceptions import ConvergenceWarning
from skopt.sampler import Lhs
bayes_opt_present = True
except ImportError:
bayes_opt_present = False
from kernel_tuner.strategies import minimize
from kernel_tuner import util
supported_methods = ["poi", "ei", "lcb", "lcb-srinivas", "multi", "multi-advanced", "multi-fast"]
def generate_normalized_param_dicts(tune_params: dict, eps: float) -> Tuple[dict, dict]:
""" Generates normalization and denormalization dictionaries """
original_to_normalized = dict()
normalized_to_original = dict()
for param_name in tune_params.keys():
original_to_normalized_dict = dict()
normalized_to_original_dict = dict()
for value_index, value in enumerate(tune_params[param_name]):
normalized_value = eps * value_index + 0.5 * eps
normalized_to_original_dict[normalized_value] = value
original_to_normalized_dict[value] = normalized_value
original_to_normalized[param_name] = original_to_normalized_dict
normalized_to_original[param_name] = normalized_to_original_dict
return original_to_normalized, normalized_to_original
def normalize_parameter_space(param_space: list, tune_params: dict, normalized: dict) -> list:
""" Normalize the parameter space given a normalization dictionary """
keys = list(tune_params.keys())
param_space_normalized = list(tuple(normalized[keys[i]][v] for i, v in enumerate(params)) for params in param_space)
return param_space_normalized
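# Small illustration of the normalization helpers above (hypothetical tunable parameters):
# every parameter value is mapped to the centre of its own bin of width ``eps`` in [0, 1].
#
#   tune_params = {"block_size_x": [32, 64, 128], "tile_size": [1, 2]}
#   eps = 1.0 / 3                                   # illustrative bin width
#   to_norm, to_orig = generate_normalized_param_dicts(tune_params, eps)
#   space = list(itertools.product(*tune_params.values()))
#   normalized = normalize_parameter_space(space, tune_params, to_norm)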
def prune_parameter_space(parameter_space, tuning_options, tune_params, normalize_dict):
""" Pruning of the parameter space to remove dimensions that have a constant parameter """
pruned_tune_params_mask = list()
removed_tune_params = list()
param_names = list(tune_params.keys())
for index, key in enumerate(tune_params.keys()):
pruned_tune_params_mask.append(len(tune_params[key]) > 1)
if len(tune_params[key]) > 1:
removed_tune_params.append(None)
else:
value = tune_params[key][0]
normalized = normalize_dict[param_names[index]][value]
removed_tune_params.append(normalized)
if 'verbose' in tuning_options and tuning_options.verbose is True and len(tune_params.keys()) != sum(pruned_tune_params_mask):
print(f"Number of parameters (dimensions): {len(tune_params.keys())}, after pruning: {sum(pruned_tune_params_mask)}")
parameter_space = list(tuple(itertools.compress(param_config, pruned_tune_params_mask)) for param_config in parameter_space)
return parameter_space, removed_tune_params
def tune(runner, kernel_options, device_options, tuning_options):
""" Find the best performing kernel configuration in the parameter space
:params runner: A runner from kernel_tuner.runners
:type runner: kernel_tuner.runner
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: kernel_tuner.interface.Options
:param device_options: A dictionary with all options for the device
on which the kernel should be tuned.
:type device_options: kernel_tuner.interface.Options
:param tuning_options: A dictionary with all options regarding the tuning
process. Allows setting hyperparameters via the strategy_options key.
:type tuning_options: kernel_tuner.interface.Options
:returns: A list of dictionaries for executed kernel configurations and their
execution times. And a dictionary that contains a information
about the hardware/software environment on which the tuning took place.
:rtype: list(dict()), dict()
"""
max_fevals = tuning_options.strategy_options.get("max_fevals", 100)
prune_parameterspace = tuning_options.strategy_options.get("pruneparameterspace", True)
if not bayes_opt_present:
raise ImportError("Error: optional dependencies for Bayesian Optimization not installed, please install scikit-learn and scikit-optimize")
# epsilon for scaling should be the evenly spaced distance between the largest set of parameter options in an interval [0,1]
tune_params = tuning_options.tune_params
tuning_options["scaling"] = True
_, _, eps = minimize.get_bounds_x0_eps(tuning_options, runner.dev.max_threads)
# compute cartesian product of all tunable parameters
parameter_space = itertools.product(*tune_params.values())
# check for search space restrictions
if tuning_options.restrictions is not None:
tuning_options.verbose = False
parameter_space = filter(lambda p: util.config_valid(p, tuning_options, runner.dev.max_threads), parameter_space)
parameter_space = list(parameter_space)
if len(parameter_space) < 1:
raise ValueError("Empty parameterspace after restrictionscheck. Restrictionscheck is possibly too strict.")
if len(parameter_space) == 1:
raise ValueError(f"Only one configuration after restrictionscheck. Restrictionscheck is possibly too strict. Configuration: {parameter_space[0]}")
# normalize search space to [0,1]
normalize_dict, denormalize_dict = generate_normalized_param_dicts(tune_params, eps)
parameter_space = normalize_parameter_space(parameter_space, tune_params, normalize_dict)
# prune the parameter space to remove dimensions that have a constant parameter
if prune_parameterspace:
parameter_space, removed_tune_params = prune_parameter_space(parameter_space, tuning_options, tune_params, normalize_dict)
else:
parameter_space = list(parameter_space)
removed_tune_params = [None] * len(tune_params.keys())
# initialize and optimize
try:
bo = BayesianOptimization(parameter_space, removed_tune_params, kernel_options, tuning_options, normalize_dict, denormalize_dict, runner)
bo.optimize(max_fevals)
except util.StopCriterionReached as e:
if tuning_options.verbose:
print(e)
return bo.results, runner.dev.get_environment()
class BayesianOptimization():
def __init__(self, searchspace: list, removed_tune_params: list, kernel_options: dict, tuning_options: dict, normalize_dict: dict, denormalize_dict: dict,
runner, opt_direction='min'):
time_start = time.perf_counter_ns()
# supported hyperparameter values
self.supported_cov_kernels = ["constantrbf", "rbf", "matern32", "matern52"]
self.supported_methods = supported_methods
self.supported_sampling_methods = ["random", "lhs"]
self.supported_sampling_criterion = ["correlation", "ratio", "maximin", None]
def get_hyperparam(name: str, default, supported_values=list()):
value = tuning_options.strategy_options.get(name, default)
if len(supported_values) > 0 and value not in supported_values:
raise ValueError(f"'{name}' is set to {value}, but must be one of {supported_values}")
return value
# get hyperparameters
cov_kernel_name = get_hyperparam("covariancekernel", "matern32", self.supported_cov_kernels)
cov_kernel_lengthscale = get_hyperparam("covariancelengthscale", 1.5)
acquisition_function = get_hyperparam("method", "multi-advanced", self.supported_methods)
acq = acquisition_function
acq_params = get_hyperparam("methodparams", {})
multi_af_names = get_hyperparam("multi_af_names", ['ei', 'poi', 'lcb'])
self.multi_afs_discount_factor = get_hyperparam("multi_af_discount_factor", 0.65 if acq == 'multi' else 0.95)
self.multi_afs_required_improvement_factor = get_hyperparam("multi_afs_required_improvement_factor", 0.15 if acq == 'multi-advanced-precise' else 0.1)
self.num_initial_samples = get_hyperparam("popsize", 20)
self.sampling_method = get_hyperparam("samplingmethod", "lhs", self.supported_sampling_methods)
self.sampling_crit = get_hyperparam("samplingcriterion", 'maximin', self.supported_sampling_criterion)
self.sampling_iter = get_hyperparam("samplingiterations", 1000)
# set acquisition function hyperparameter defaults where missing
if 'explorationfactor' not in acq_params:
acq_params['explorationfactor'] = 'CV'
if 'zeta' not in acq_params:
acq_params['zeta'] = 1
if 'skip_duplicate_after' not in acq_params:
acq_params['skip_duplicate_after'] = 5
# set arguments
self.kernel_options = kernel_options
self.tuning_options = tuning_options
self.tune_params = tuning_options.tune_params
self.param_names = list(self.tune_params.keys())
self.normalized_dict = normalize_dict
self.denormalized_dict = denormalize_dict
self.runner = runner
self.max_threads = runner.dev.max_threads
self.log_timings = False
# set optimization constants
self.invalid_value = 1e20
self.opt_direction = opt_direction
if opt_direction == 'min':
self.worst_value = np.PINF
self.argopt = np.argmin
elif opt_direction == 'max':
self.worst_value = np.NINF
self.argopt = np.argmax
else:
raise ValueError("Invalid optimization direction '{}'".format(opt_direction))
# set the acquisition function and surrogate model
self.optimize = self.__optimize
self.af_name = acquisition_function
self.af_params = acq_params
self.multi_afs = list(self.get_af_by_name(af_name) for af_name in multi_af_names)
self.set_acquisition_function(acquisition_function)
self.set_surrogate_model(cov_kernel_name, cov_kernel_lengthscale)
# set remaining values
self.results = []
self.__searchspace = searchspace
self.removed_tune_params = removed_tune_params
self.searchspace_size = len(self.searchspace)
self.num_dimensions = len(self.dimensions())
self.__current_optimum = self.worst_value
self.cv_norm_maximum = None
self.fevals = 0
self.__visited_num = 0
self.__visited_valid_num = 0
self.__visited_searchspace_indices = [False] * self.searchspace_size
self.__observations = [np.NaN] * self.searchspace_size
self.__valid_observation_indices = [False] * self.searchspace_size
self.__valid_params = list()
self.__valid_observations = list()
self.unvisited_cache = self.unvisited()
time_setup = time.perf_counter_ns()
self.error_message_searchspace_fully_observed = "The search space has been fully observed"
# take initial sample
if self.num_initial_samples > 0:
self.initial_sample()
time_initial_sample = time.perf_counter_ns()
# print the timings
if self.log_timings:
time_taken_setup = round(time_setup - time_start, 3) / 1000
time_taken_initial_sample = round(time_initial_sample - time_setup, 3) / 1000
time_taken_total = round(time_initial_sample - time_start, 3) / 1000
print(f"Initialization | total time: {time_taken_total} | Setup: {time_taken_setup} | Initial sample: {time_taken_initial_sample}", flush=True)
@property
def searchspace(self):
return self.__searchspace
@property
def observations(self):
return self.__observations
@property
def current_optimum(self):
return self.__current_optimum
@current_optimum.setter
def current_optimum(self, value: float):
self.__current_optimum = value
def is_better_than(self, a: float, b: float) -> bool:
""" Determines which one is better depending on optimization direction """
return a < b if self.opt_direction == 'min' else a > b
def is_not_visited(self, index: int) -> bool:
""" Returns whether a searchspace index has not been visited """
return not self.__visited_searchspace_indices[index]
def is_valid(self, observation: float) -> bool:
""" Returns whether an observation is valid """
        # ``observation == np.NaN`` would always be False; NaN has to be checked with np.isnan
        return not (observation is None or observation == self.invalid_value or np.isnan(observation))
def get_af_by_name(self, name: str):
""" Get the basic acquisition functions by their name """
basic_af_names = ['ei', 'poi', 'lcb']
if name == 'ei':
return self.af_expected_improvement
elif name == 'poi':
return self.af_probability_of_improvement
elif name == 'lcb':
return self.af_lower_confidence_bound
raise ValueError(f"{name} not in {basic_af_names}")
def set_acquisition_function(self, acquisition_function: str):
""" Set the acquisition function """
if acquisition_function == 'poi':
self.__af = self.af_probability_of_improvement
elif acquisition_function == 'ei':
self.__af = self.af_expected_improvement
elif acquisition_function == 'lcb':
self.__af = self.af_lower_confidence_bound
elif acquisition_function == 'lcb-srinivas':
self.__af = self.af_lower_confidence_bound_srinivas
elif acquisition_function == 'random':
self.__af = self.af_random
elif acquisition_function == 'multi':
self.optimize = self.__optimize_multi
elif acquisition_function == 'multi-advanced':
self.optimize = self.__optimize_multi_advanced
elif acquisition_function == 'multi-fast':
self.optimize = self.__optimize_multi_fast
else:
raise ValueError("Acquisition function must be one of {}, is {}".format(self.supported_methods, acquisition_function))
def set_surrogate_model(self, cov_kernel_name: str, cov_kernel_lengthscale: float):
""" Set the surrogate model with a covariance function and lengthscale """
if cov_kernel_name == "constantrbf":
kernel = ConstantKernel(1.0, constant_value_bounds="fixed") * RBF(cov_kernel_lengthscale, length_scale_bounds="fixed")
elif cov_kernel_name == "rbf":
kernel = RBF(length_scale=cov_kernel_lengthscale, length_scale_bounds="fixed")
elif cov_kernel_name == "matern32":
kernel = Matern(length_scale=cov_kernel_lengthscale, nu=1.5, length_scale_bounds="fixed")
elif cov_kernel_name == "matern52":
kernel = Matern(length_scale=cov_kernel_lengthscale, nu=2.5, length_scale_bounds="fixed")
else:
raise ValueError("Acquisition function must be one of {}, is {}".format(self.supported_cov_kernels, cov_kernel_name))
self.__model = GaussianProcessRegressor(kernel=kernel, alpha=1e-10, normalize_y=True) # maybe change alpha to a higher value such as 1e-5?
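    # Standalone sketch of what the surrogate model provides (made-up data): a GP with a
    # fixed-lengthscale Matern kernel is fit on observed configurations and queried for a
    # predictive mean and standard deviation, which the acquisition functions consume.
    #
    #   gp = GaussianProcessRegressor(kernel=Matern(length_scale=1.5, nu=1.5,
    #                                               length_scale_bounds="fixed"),
    #                                 normalize_y=True)
    #   gp.fit([[0.1, 0.5], [0.9, 0.5]], [10.0, 12.0])
    #   mu, std = gp.predict([[0.5, 0.5]], return_std=True)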
def valid_params_observations(self) -> Tuple[list, list]:
""" Returns a list of valid observations and their parameter configurations """
# if you do this every iteration, better keep it as cache and update in update_after_evaluation
params = list()
observations = list()
for index, valid in enumerate(self.__valid_observation_indices):
if valid is True:
params.append(self.searchspace[index])
observations.append(self.observations[index])
return params, observations
def unvisited(self) -> list:
""" Returns a list of unvisited parameter configurations - attention: cached version exists! """
params = list(self.searchspace[index] for index, visited in enumerate(self.__visited_searchspace_indices) if visited is False)
return params
def find_param_config_index(self, param_config: tuple) -> int:
""" Find a parameter config index in the search space if it exists """
return self.searchspace.index(param_config)
def find_param_config_unvisited_index(self, param_config: tuple) -> int:
""" Find a parameter config index in the unvisited cache if it exists """
return self.unvisited_cache.index(param_config)
def normalize_param_config(self, param_config: tuple) -> tuple:
""" Normalizes a parameter configuration """
normalized = tuple(self.normalized_dict[self.param_names[index]][param_value] for index, param_value in enumerate(param_config))
return normalized
def denormalize_param_config(self, param_config: tuple) -> tuple:
""" Denormalizes a parameter configuration """
denormalized = tuple(self.denormalized_dict[self.param_names[index]][param_value] for index, param_value in enumerate(param_config))
return denormalized
def unprune_param_config(self, param_config: tuple) -> tuple:
""" In case of pruned dimensions, adds the removed dimensions back in the param config """
unpruned = list()
pruned_count = 0
for removed in self.removed_tune_params:
if removed is not None:
unpruned.append(removed)
else:
unpruned.append(param_config[pruned_count])
pruned_count += 1
return tuple(unpruned)
def update_after_evaluation(self, observation: float, index: int, param_config: tuple):
""" Adjust the visited and valid index records accordingly """
validity = self.is_valid(observation)
self.__visited_num += 1
self.__observations[index] = observation
self.__visited_searchspace_indices[index] = True
del self.unvisited_cache[self.find_param_config_unvisited_index(param_config)]
self.__valid_observation_indices[index] = validity
if validity is True:
self.__visited_valid_num += 1
self.__valid_params.append(param_config)
self.__valid_observations.append(observation)
if self.is_better_than(observation, self.current_optimum):
self.current_optimum = observation
def predict(self, x) -> Tuple[float, float]:
""" Returns a mean and standard deviation predicted by the surrogate model for the parameter configuration """
return self.__model.predict([x], return_std=True)
def predict_list(self, lst: list) -> Tuple[list, list, list]:
""" Returns a list of means and standard deviations predicted by the surrogate model for the parameter configurations, and separate lists of means and standard deviations """
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mu, std = self.__model.predict(lst, return_std=True)
return list(zip(mu, std)), mu, std
def fit_observations_to_model(self):
""" Update the model based on the current list of observations """
self.__model.fit(self.__valid_params, self.__valid_observations)
def evaluate_objective_function(self, param_config: tuple) -> float:
""" Evaluates the objective function """
param_config = self.unprune_param_config(param_config)
denormalized_param_config = self.denormalize_param_config(param_config)
if not util.config_valid(denormalized_param_config, self.tuning_options, self.max_threads):
return self.invalid_value
val = minimize._cost_func(param_config, self.kernel_options, self.tuning_options, self.runner, self.results)
self.fevals += 1
return val
def dimensions(self) -> list:
""" List of parameter values per parameter """
return self.tune_params.values()
def draw_random_sample(self) -> Tuple[list, int]:
""" Draw a random sample from the unvisited parameter configurations """
if len(self.unvisited_cache) < 1:
raise ValueError("Searchspace exhausted during random sample draw as no valid configurations were found")
index = randint(0, len(self.unvisited_cache) - 1) # NOSONAR
param_config = self.unvisited_cache[index]
actual_index = self.find_param_config_index(param_config)
return param_config, actual_index
def draw_latin_hypercube_samples(self, num_samples: int) -> list:
""" Draws an LHS-distributed sample from the search space """
if self.searchspace_size < num_samples:
raise ValueError("Can't sample more than the size of the search space")
if self.sampling_crit is None:
lhs = Lhs(lhs_type="centered", criterion=None)
else:
lhs = Lhs(lhs_type="classic", criterion=self.sampling_crit, iterations=self.sampling_iter)
param_configs = lhs.generate(self.dimensions(), num_samples)
indices = list()
normalized_param_configs = list()
for i in range(len(param_configs) - 1):
try:
param_config = self.normalize_param_config(param_configs[i])
index = self.find_param_config_index(param_config)
indices.append(index)
normalized_param_configs.append(param_config)
except ValueError:
""" Due to search space restrictions, the search space may not be an exact cartesian product of the tunable parameter values.
It is thus possible for LHS to generate a parameter combination that is not in the actual searchspace, which must be skipped. """
continue
return list(zip(normalized_param_configs, indices))
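    # Illustration of the LHS draw above (hypothetical dimensions): skopt's ``Lhs`` returns
    # one value per dimension for each sample; configurations that fall outside the
    # restricted search space are skipped because they cannot be looked up in the searchspace.
    #
    #   lhs = Lhs(lhs_type="centered", criterion=None)
    #   dims = [[32, 64, 128], [1, 2, 4]]
    #   samples = lhs.generate(dims, 5)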
def initial_sample(self):
""" Draws an initial sample using random sampling """
if self.num_initial_samples <= 0:
raise ValueError("At least one initial sample is required")
if self.sampling_method == 'lhs':
samples = self.draw_latin_hypercube_samples(self.num_initial_samples)
elif self.sampling_method == 'random':
samples = list()
else:
raise ValueError("Sampling method must be one of {}, is {}".format(self.supported_sampling_methods, self.sampling_method))
# collect the samples
collected_samples = 0
for params, index in samples:
observation = self.evaluate_objective_function(params)
self.update_after_evaluation(observation, index, params)
if self.is_valid(observation):
collected_samples += 1
# collect the remainder of the samples
while collected_samples < self.num_initial_samples:
params, index = self.draw_random_sample()
observation = self.evaluate_objective_function(params)
self.update_after_evaluation(observation, index, params)
# check for validity to avoid having no actual initial samples
if self.is_valid(observation):
collected_samples += 1
self.fit_observations_to_model()
_, _, std = self.predict_list(self.unvisited_cache)
self.initial_sample_mean = np.mean(self.__valid_observations)
# Alternatively:
# self.initial_sample_std = np.std(self.__valid_observations)
# self.initial_sample_mean = np.mean(predictions)
self.initial_std = np.mean(std)
self.cv_norm_maximum = self.initial_std
def contextual_variance(self, std: list):
""" Contextual improvement to decide explore / exploit, based on CI proposed by (Jasrasaria, 2018) """
if not self.af_params['explorationfactor'] == 'CV':
return None
if self.opt_direction == 'min':
if self.current_optimum == self.worst_value:
return 0.01
if self.current_optimum <= 0:
# doesn't work well for minimization beyond 0, should that even be a thing?
return abs(np.mean(std) / self.current_optimum)
improvement_over_initial_sample = self.initial_sample_mean / self.current_optimum
cv = np.mean(std) / improvement_over_initial_sample
# normalize if available
if self.cv_norm_maximum:
cv = cv / self.cv_norm_maximum
return cv
return np.mean(std) / self.current_optimum
def __optimize(self, max_fevals):
""" Find the next best candidate configuration(s), evaluate those and update the model accordingly """
while self.fevals < max_fevals:
if self.__visited_num >= self.searchspace_size:
raise ValueError(self.error_message_searchspace_fully_observed)
predictions, _, std = self.predict_list(self.unvisited_cache)
hyperparam = self.contextual_variance(std)
list_of_acquisition_values = self.__af(predictions, hyperparam)
# afterwards select the best AF value
best_af = self.argopt(list_of_acquisition_values)
candidate_params = self.unvisited_cache[best_af]
candidate_index = self.find_param_config_index(candidate_params)
observation = self.evaluate_objective_function(candidate_params)
self.update_after_evaluation(observation, candidate_index, candidate_params)
self.fit_observations_to_model()
return self.results
def __optimize_multi(self, max_fevals):
""" Optimize with a portfolio of multiple acquisition functions. Predictions are always only taken once. Skips AFs if they suggest X/max_evals duplicates in a row, prefers AF with best discounted average. """
if self.opt_direction != 'min':
raise ValueError(f"Optimization direction must be minimization ('min'), is {self.opt_direction}")
# calculate how many times an AF can suggest a duplicate candidate before the AF is skipped
# skip_duplicates_fraction = self.af_params['skip_duplicates_fraction']
# skip_if_duplicate_n_times = int(min(max(round(skip_duplicates_fraction * max_fevals), 3), max_fevals))
skip_if_duplicate_n_times = self.af_params['skip_duplicate_after']
discount_factor = self.multi_afs_discount_factor
# setup the registration of duplicates and runtimes
duplicate_count_template = [0 for _ in range(skip_if_duplicate_n_times)]
duplicate_candidate_af_count = list(deepcopy(duplicate_count_template) for _ in range(3))
skip_af_index = list()
af_runtimes = [0, 0, 0]
af_observations = [list(), list(), list()]
initial_sample_mean = np.mean(self.__valid_observations)
while self.fevals < max_fevals:
time_start = time.perf_counter_ns()
# the first acquisition function is never skipped, so that should be the best for the endgame (EI)
aqfs = self.multi_afs
predictions, _, std = self.predict_list(self.unvisited_cache)
hyperparam = self.contextual_variance(std)
if self.__visited_num >= self.searchspace_size:
raise ValueError(self.error_message_searchspace_fully_observed)
time_predictions = time.perf_counter_ns()
actual_candidate_params = list()
actual_candidate_indices = list()
actual_candidate_af_indices = list()
duplicate_candidate_af_indices = list()
duplicate_candidate_original_af_indices = list()
for af_index, af in enumerate(aqfs):
if af_index in skip_af_index:
continue
if self.__visited_num >= self.searchspace_size or self.fevals >= max_fevals:
break
timer_start = time.perf_counter()
list_of_acquisition_values = af(predictions, hyperparam)
best_af = self.argopt(list_of_acquisition_values)
time_taken = time.perf_counter() - timer_start
af_runtimes[af_index] += time_taken
is_duplicate = best_af in actual_candidate_indices
if not is_duplicate:
candidate_params = self.unvisited_cache[best_af]
actual_candidate_params.append(candidate_params)
actual_candidate_indices.append(best_af)
actual_candidate_af_indices.append(af_index)
# register whether the AF suggested a duplicate candidate
duplicate_candidate_af_count[af_index].pop(0)
duplicate_candidate_af_count[af_index].append(1 if is_duplicate else 0)
if is_duplicate:
# find the index of the AF that first registered the duplicate
original_duplicate_af_index = actual_candidate_af_indices[actual_candidate_indices.index(best_af)]
# register that AF as duplicate as well
duplicate_candidate_af_count[original_duplicate_af_index][-1] = 1
duplicate_candidate_af_indices.append(af_index)
duplicate_candidate_original_af_indices.append(original_duplicate_af_index)
time_afs = time.perf_counter_ns()
# evaluate the non-duplicate candidates
for index, af_index in enumerate(actual_candidate_af_indices):
candidate_params = actual_candidate_params[index]
candidate_index = self.find_param_config_index(candidate_params)
observation = self.evaluate_objective_function(candidate_params)
self.update_after_evaluation(observation, candidate_index, candidate_params)
if observation != self.invalid_value:
# we use the registered observations for maximization of the discounted reward
reg_observation = observation if self.opt_direction == 'min' else -1 * observation
af_observations[actual_candidate_af_indices[index]].append(reg_observation)
else:
reg_invalid_observation = initial_sample_mean if self.opt_direction == 'min' else -1 * initial_sample_mean
af_observations[actual_candidate_af_indices[index]].append(reg_invalid_observation)
for index, af_index in enumerate(duplicate_candidate_af_indices):
original_observation = af_observations[duplicate_candidate_original_af_indices[index]][-1]
af_observations[af_index].append(original_observation)
self.fit_observations_to_model()
time_eval = time.perf_counter_ns()
# assert that all observation lists of non-skipped acquisition functions are of the same length
non_skipped_af_indices = list(af_index for af_index, _ in enumerate(aqfs) if af_index not in skip_af_index)
assert all(len(af_observations[non_skipped_af_indices[0]]) == len(af_observations[af_index]) for af_index in non_skipped_af_indices)
# find the AFs elligble for being skipped
candidates_for_skip = list()
for af_index, count in enumerate(duplicate_candidate_af_count):
if sum(count) >= skip_if_duplicate_n_times and af_index not in skip_af_index:
candidates_for_skip.append(af_index)
            # do not skip the AF with the best (lowest) discounted observations
if len(candidates_for_skip) > 1:
candidates_for_skip_discounted = list(
sum(list(obs * discount_factor**(len(observations) - 1 - i) for i, obs in enumerate(observations)))
for af_index, observations in enumerate(af_observations) if af_index in candidates_for_skip)
af_not_to_skip = candidates_for_skip[np.argmin(candidates_for_skip_discounted)]
for af_index in candidates_for_skip:
if af_index == af_not_to_skip:
                        # do not skip the AF with the best discounted observations and give it a clean slate
duplicate_candidate_af_count[af_index] = deepcopy(duplicate_count_template)
continue
skip_af_index.append(af_index)
if len(skip_af_index) >= len(aqfs):
raise ValueError("There are no acquisition functions left! This should not happen...")
time_af_selection = time.perf_counter_ns()
# printing timings
if self.log_timings:
time_taken_predictions = round(time_predictions - time_start, 3) / 1000
time_taken_afs = round(time_afs - time_predictions, 3) / 1000
time_taken_eval = round(time_eval - time_afs, 3) / 1000
time_taken_af_selection = round(time_af_selection - time_eval, 3) / 1000
time_taken_total = round(time_af_selection - time_start, 3) / 1000
print(
f"({self.fevals}/{max_fevals}) Total time: {time_taken_total} | Predictions: {time_taken_predictions} | AFs: {time_taken_afs} | Eval: {time_taken_eval} | AF selection: {time_taken_af_selection}",
flush=True)
return self.results
def __optimize_multi_advanced(self, max_fevals, increase_precision=False):
""" Optimize with a portfolio of multiple acquisition functions. Predictions are only taken once, unless increase_precision is true. Skips AFs if they are consistently worse than the mean of discounted observations, promotes AFs if they are consistently better than this mean. """
if self.opt_direction != 'min':
raise ValueError(f"Optimization direction must be minimization ('min'), is {self.opt_direction}")
aqfs = self.multi_afs
discount_factor = self.multi_afs_discount_factor
required_improvement_factor = self.multi_afs_required_improvement_factor
required_improvement_worse = 1 + required_improvement_factor
required_improvement_better = 1 - required_improvement_factor
min_required_count = self.af_params['skip_duplicate_after']
skip_af_index = list()
single_af = len(aqfs) <= len(skip_af_index) + 1
af_observations = [list(), list(), list()]
af_performs_worse_count = [0, 0, 0]
af_performs_better_count = [0, 0, 0]
while self.fevals < max_fevals:
if single_af:
return self.__optimize(max_fevals)
if self.__visited_num >= self.searchspace_size:
raise ValueError(self.error_message_searchspace_fully_observed)
            observations_median = np.median(self.__valid_observations)
import numpy as np
import random
#import matplotlib.pyplot as plt
import cv2
import mxnet as mx
import os
import math
from mxnet.io import DataIter, DataBatch
class FileIter(DataIter):
def __init__(self, data_shapes, set_num, per_set_num, duplicate_num, ctx, hdfs_path, data_name="data", label_name="label"):
#duplicate_num <= set_num/2
super(FileIter, self).__init__()
self.batch_size = set_num*per_set_num
self.data_shapes = data_shapes
self.set_num = set_num
self.per_set_num = per_set_num
self.duplicate_num = duplicate_num
self.data_name = data_name
self.label_name = label_name
self.ctx = ctx
#self.data = mx.nd.zeros((self.batch_size, self.data_shapes[0], self.data_shapes[1], self.data_shapes[2]), self.ctx)
#self.label = mx.nd.zeros((self.batch_size, ), self.ctx)
self.train = mx.io.ImageRecordIter(
path_imgrec = hdfs_path + 'hangzhougongan_train.rec',
data_shape = self.data_shapes,
batch_size = self.batch_size,
#shuffle = 0,
round_batch = 0,
#scale = 0.007843137,
#mean_r = 127.5,
#preprocess_threads = 2,
)
def _shuffle(self):
random.shuffle(self.train_list)
@property
def provide_data(self):
return [(self.data_name, (self.batch_size, self.data_shapes[0], self.data_shapes[1], self.data_shapes[2]))]
@property
def provide_label(self):
return [(self.label_name, (self.batch_size, ))]
def get_total_list(self):
self.train.reset()
cur_num = 0
label_img = {}
while self.train.iter_next():
cur_num += 1
img_batch = self.train.getdata()[0].asnumpy()
label_batch = self.train.getlabel().asnumpy()
for i in range(len(label_batch)):
label = int(label_batch[i])
img = img_batch[i].astype(np.uint8)
label_img[str(label)+' '+str(cur_num)+' '+str(i)] = img
#if cur_num == 10:
# break
self.sorted_label_img = sorted(label_img.items(), key = lambda x:int(x[0].split(' ')[0])) #label = sorted_label_img[i][0] img = sorted_label_img[i][1]
def reset(self):
self.get_total_list()
#tmp_list[i] tuple -> list
self.tmp_list = []
for i in range(len(self.sorted_label_img)):
self.tmp_list.append([self.sorted_label_img[i][0], self.sorted_label_img[i][1]])
self.tmp_list[i].append(1)
def iter_next(self):
if self.get_label_idx():
return len(self.label_idx) >= self.set_num
else:
return False
def get_label_idx(self):
self.label_idx = {}
if self.tmp_list:
tmp = int(self.tmp_list[0][0].split(' ')[0]) #the first one
self.label_idx[tmp] = 0
for i in range(len(self.tmp_list)):
cur_label = int(self.tmp_list[i][0].split(' ')[0])
if tmp != cur_label:
tmp = cur_label
self.label_idx[cur_label] = i
return True
else:
return False
def get_train_list(self):
random_list = random.sample(self.label_idx.keys(), self.set_num) #label list
self.duplicate_clean_position = []
self.duplicate_noise_position = []
self.others_position = []
if self.duplicate_num==0:
random_list = random_list
else:
duplicate_list = random.sample(random_list, self.duplicate_num)
duplicate_tmp = 0
for i in range(len(random_list)):
if random_list[i] in duplicate_list:
for j in range(self.per_set_num):
self.duplicate_clean_position.append(i*self.per_set_num+j)
#continue
else:
if duplicate_tmp < len(duplicate_list):
random_list[i] = duplicate_list[duplicate_tmp]
for j in range(self.per_set_num):
self.duplicate_noise_position.append(i*self.per_set_num+j)
duplicate_tmp += 1
elif duplicate_tmp == len(duplicate_list):
for j in range(self.per_set_num):
self.others_position.append(i*self.per_set_num+j)
total_list = []
# len(self.label_idx) = 53950
for i in range(self.set_num):
if random_list[i] == max(self.label_idx.keys()): #the last one
per_random_list = [random.randint(self.label_idx[random_list[i]], len(self.tmp_list)-1) for _ in range(self.per_set_num)]
else:
k = 0
while not(random_list[i]+k+1 in self.label_idx.keys()):
k += 1
per_random_list = [random.randint(self.label_idx[random_list[i]], self.label_idx[random_list[i]+k+1]-1) for _ in range(self.per_set_num)]
total_list.extend(per_random_list)
self.train_list = total_list
#import pdb; pdb.set_trace()
def next(self):
if self.iter_next():
self.get_train_list()
data = np.ones((self.batch_size, self.data_shapes[0], self.data_shapes[1], self.data_shapes[2]), dtype='float32')
label = np.zeros((self.batch_size, ), dtype=np.float32)
#self._shuffle()
if self.duplicate_num==0:
for i in range(len(self.train_list)):
prob = np.random.uniform()
if prob <= 0.30:
data_origin = self.tmp_list[self.train_list[i]][1].astype('float32').transpose((1, 2, 0))
kernel, anchor = motion_blur(random.randint(15, 40), random.randint(20, 60))
data[i] = cv2.filter2D(data_origin, -1, kernel, anchor=anchor).transpose((2, 0, 1))
else:
data[i] = self.tmp_list[self.train_list[i]][1].astype('float32')
else:
for i in range(len(self.train_list)):
if i in self.duplicate_clean_position:
data[i] = self.tmp_list[self.train_list[i]][1].astype('float32')
elif i in self.duplicate_noise_position:
prob = np.random.uniform()
if prob <= 0.80:
data_origin = self.tmp_list[self.train_list[i]][1].astype('float32').transpose((1, 2, 0))
kernel, anchor = motion_blur(random.randint(20, 60), random.randint(20, 60))
data[i] = cv2.filter2D(data_origin, -1, kernel, anchor=anchor).transpose((2, 0, 1))
else:
data[i] = self.tmp_list[self.train_list[i]][1].astype('float32')
elif i in self.others_position:
prob = np.random.uniform()
if prob <= 0.30:
data_origin = self.tmp_list[self.train_list[i]][1].astype('float32').transpose((1, 2, 0))
kernel, anchor = motion_blur(random.randint(20, 60), random.randint(20, 60))
data[i] = cv2.filter2D(data_origin, -1, kernel, anchor=anchor).transpose((2, 0, 1))
else:
data[i] = self.tmp_list[self.train_list[i]][1].astype('float32')
occlusion_aug(self.batch_size, self.data_shapes, max_w=60, max_h=100,
min_prob=0.0, max_prob=0.3, img=data)
'''
for i in range(len(data)):
cv2.imwrite(str(i)+'.jpg', cv2.cvtColor(data[i].transpose((1, 2, 0)), cv2.COLOR_RGB2BGR))
#print i
import pdb; pdb.set_trace()
'''
for i in range(self.batch_size):
data_aug = data[i]
data_mean = data_aug
data_mean[0,:] = data_aug[0,:]-127.5
data_scale = data_mean/127.5
data[i] = data_scale
label[i] = int(self.tmp_list[self.train_list[i]][0].split(' ')[0])
self.data = [mx.nd.array(data)]
self.label = [mx.nd.array(label)]
#import pdb; pdb.set_trace()
self.train_list = list(set(self.train_list))
#delete samples which are trained already
del_num = 0
for i in range(len(self.train_list)):
self.tmp_list[self.train_list[i]][2] = 0
for i in range(len(self.tmp_list)):
if self.tmp_list[i-del_num][2] == 0:
del self.tmp_list[i-del_num]
del_num += 1
return DataBatch(data=self.data, label=self.label)
else:
raise StopIteration
def occlusion_aug(batch_size, img_shape, max_w, max_h, min_prob, max_prob, img):
shape = [batch_size] + list(img_shape)
channel_num = img_shape[1]
img_w = shape[3]
img_h = shape[2]
prob = np.random.uniform(min_prob, max_prob)
rand_num = int(prob * batch_size)
if rand_num <= 0:
return img
rand_index = np.random.choice(batch_size, rand_num, False)
rand_source = np.random.randint(0, 1000000, rand_num * 4)
x_rand = rand_source[0:rand_num] % img_w #np.random.randint(0, img_w, rand_num)
y_rand = rand_source[rand_num:2*rand_num] % img_h #np.random.randint(0, img_h, rand_num)
w_rand = rand_source[rand_num*2:3*rand_num] % max_w + 1 #np.random.randint(0, img_w, rand_num)
h_rand = rand_source[rand_num*3:4*rand_num] % max_h + 1#np.random.randint(0, img_h, rand_num)
indices = np.where(img_w - (x_rand + w_rand) < 0)
w_rand[indices] = img_w - x_rand[indices]
indices = np.where(img_h - (y_rand + h_rand) < 0)
h_rand[indices] = img_h - y_rand[indices]
for k in range(rand_num):
index = rand_index[k]
img[index][:,y_rand[k]:y_rand[k] + h_rand[k],x_rand[k]:x_rand[k]+w_rand[k]] = random.sample([0, 255], 1)[0]
return img
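# Illustrative sketch (not part of the original code): how occlusion_aug is called
# on a CHW float32 batch. The batch size and image shape are assumptions made for
# the example only; np and random come from this module's existing imports.
def _demo_occlusion_aug():
    batch_size, img_shape = 4, (3, 112, 96)  # assumed (C, H, W) layout
    batch = np.full((batch_size,) + img_shape, 127.5, dtype='float32')
    # paints random 0/255 rectangles on up to max_prob of the images, in place
    return occlusion_aug(batch_size, img_shape, max_w=60, max_h=100,
                         min_prob=0.0, max_prob=0.3, img=batch)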
def motion_blur(length, angle):
half = length / 2
EPS = np.finfo(float).eps
alpha = (angle - math.floor(angle / 180) * 180) /180 * math.pi
cosalpha = math.cos(alpha)
sinalpha = math.sin(alpha)
if cosalpha < 0:
xsign = -1
elif angle == 90:
xsign = 0
else:
xsign = 1
psfwdt = 1
# blur kernel size
sx = int(math.fabs(length * cosalpha + psfwdt * xsign - length * EPS))
sy = int(math.fabs(length * sinalpha + psfwdt - length * EPS))
psf1 = np.zeros((sy, sx))
# psf1 is getting small when (x,y) move from left-top to right-bottom
# at this moment (x,y) is moving from right-bottom to left-top
for i in range(0, sy):
for j in range(0, sx):
psf1[i][j] = i * math.fabs(cosalpha) - j * sinalpha
rad = math.sqrt(i*i + j*j)
if rad >= half and math.fabs(psf1[i][j]) <= psfwdt:
temp = half - math.fabs((j + psf1[i][j] * sinalpha) / cosalpha)
psf1[i][j] = math.sqrt(psf1[i][j] * psf1[i][j] + temp*temp)
psf1[i][j] = psfwdt + EPS - math.fabs(psf1[i][j])
if psf1[i][j] < 0:
psf1[i][j] = 0
# anchor is (0,0) when (x,y) is moving towards left-top
anchor = (0, 0)
# anchor is (width, heigth) when (x, y) is moving towards right-top
# flip kernel at this moment
if angle < 90 and angle > 0:
psf1 = np.fliplr(psf1)
anchor = (psf1.shape[1] - 1, 0)
elif angle > -90 and angle < 0: # moving towards right-bottom
psf1 = np.flipud(psf1)
psf1 = np.fliplr(psf1)
anchor = (psf1.shape[1] - 1, psf1.shape[0] - 1)
    elif angle < -90:  # moving towards left-bottom
        psf1 = np.flipud(psf1)
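# Illustrative sketch (not part of the original code): how next() above applies the
# motion-blur kernel to a CHW float32 image. It assumes the complete motion_blur
# implementation (truncated above) returns (psf, anchor), as its callers expect;
# cv2 and random are the module-level imports already used above.
def _demo_apply_motion_blur(img_chw):
    kernel, anchor = motion_blur(random.randint(15, 40), random.randint(20, 60))
    blurred_hwc = cv2.filter2D(img_chw.astype('float32').transpose((1, 2, 0)),
                               -1, kernel, anchor=anchor)
    return blurred_hwc.transpose((2, 0, 1))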
import os
from openl3.cli import run
import tempfile
import numpy as np
import shutil
import pytest
TEST_DIR = os.path.dirname(__file__)
TEST_AUDIO_DIR = os.path.join(TEST_DIR, 'data', 'audio')
TEST_IMAGE_DIR = os.path.join(TEST_DIR, 'data', 'image')
TEST_VIDEO_DIR = os.path.join(TEST_DIR, 'data', 'video')
# Test audio file paths
CHIRP_MONO_PATH = os.path.join(TEST_AUDIO_DIR, 'chirp_mono.wav')
CHIRP_STEREO_PATH = os.path.join(TEST_AUDIO_DIR, 'chirp_stereo.wav')
CHIRP_44K_PATH = os.path.join(TEST_AUDIO_DIR, 'chirp_44k.wav')
CHIRP_1S_PATH = os.path.join(TEST_AUDIO_DIR, 'chirp_1s.wav')
EMPTY_PATH = os.path.join(TEST_AUDIO_DIR, 'empty.wav')
SHORT_PATH = os.path.join(TEST_AUDIO_DIR, 'short.wav')
SILENCE_PATH = os.path.join(TEST_AUDIO_DIR, 'silence.wav')
# Test image file paths
DAISY_PATH = os.path.join(TEST_IMAGE_DIR, 'daisy.jpg')
BLANK_PATH = os.path.join(TEST_IMAGE_DIR, 'blank.png')
SMALL_PATH = os.path.join(TEST_IMAGE_DIR, 'smol.png')
# Test video file paths
BENTO_PATH = os.path.join(TEST_VIDEO_DIR, 'bento.mp4')
# Regression file paths
TEST_REG_DIR = os.path.join(TEST_DIR, 'data', 'regression')
REG_CHIRP_44K_PATH = os.path.join(TEST_REG_DIR, 'chirp_44k_{}.npz')
REG_CHIRP_44K_LINEAR_PATH = os.path.join(TEST_REG_DIR, 'chirp_44k_{}_linear.npz')
REG_DAISY_PATH = os.path.join(TEST_REG_DIR, 'daisy.npz')
REG_DAISY_LINEAR_PATH = os.path.join(TEST_REG_DIR, 'daisy_linear.npz')
REG_BENTO_AUDIO_PATH = os.path.join(TEST_REG_DIR, 'bento_audio_{}.npz')
REG_BENTO_AUDIO_LINEAR_PATH = os.path.join(TEST_REG_DIR, 'bento_audio_{}_linear.npz')
REG_BENTO_IMAGE_PATH = os.path.join(TEST_REG_DIR, 'bento_image_{}.npz')
REG_BENTO_IMAGE_LINEAR_PATH = os.path.join(TEST_REG_DIR, 'bento_image_{}_linear.npz')
@pytest.mark.parametrize("frontend", ['kapre', 'librosa'])
def test_audio_regression(capsys, frontend):
# test correct execution on test audio file (regression)
tempdir = tempfile.mkdtemp()
run('audio', CHIRP_44K_PATH, output_dir=tempdir, audio_frontend=frontend, verbose=True)
# check output file created
audio_outfile = os.path.join(tempdir, 'chirp_44k.npz')
assert os.path.isfile(audio_outfile)
# regression test
audio_data_reg = np.load(REG_CHIRP_44K_PATH.format(frontend))
audio_data_out = np.load(audio_outfile)
assert sorted(audio_data_out.files) == sorted(audio_data_reg.files) == sorted(
['embedding', 'timestamps'])
assert np.allclose(audio_data_out['timestamps'], audio_data_reg['timestamps'],
rtol=1e-05, atol=1e-05, equal_nan=False)
assert np.allclose(audio_data_out['embedding'], audio_data_reg['embedding'],
rtol=1e-05, atol=1e-05, equal_nan=False)
# SECOND regression test
run('audio', CHIRP_44K_PATH, output_dir=tempdir, suffix='linear', input_repr='linear',
content_type='env', audio_embedding_size=512, audio_center=False, audio_hop_size=0.5,
audio_frontend=frontend, verbose=False)
# check output file created
audio_outfile = os.path.join(tempdir, 'chirp_44k_linear.npz')
assert os.path.isfile(audio_outfile)
# regression test
audio_data_reg = np.load(REG_CHIRP_44K_LINEAR_PATH.format(frontend))
audio_data_out = np.load(audio_outfile)
assert sorted(audio_data_out.files) == sorted(audio_data_reg.files) == sorted(
['embedding', 'timestamps'])
assert np.allclose(audio_data_out['timestamps'], audio_data_reg['timestamps'],
rtol=1e-05, atol=1e-05, equal_nan=False)
assert np.allclose(audio_data_out['embedding'], audio_data_reg['embedding'],
rtol=1e-05, atol=1e-05, equal_nan=False)
# delete output file and temp folder
shutil.rmtree(tempdir)
def test_image_regression(capsys):
# test correct execution on test image file (regression)
tempdir = tempfile.mkdtemp()
run('image', DAISY_PATH, output_dir=tempdir, verbose=True)
# check output file created
image_outfile = os.path.join(tempdir, 'daisy.npz')
assert os.path.isfile(image_outfile)
# regression test
image_data_reg = np.load(REG_DAISY_PATH)
image_data_out = np.load(image_outfile)
assert sorted(image_data_out.files) == sorted(image_data_reg.files) == ['embedding']
assert np.allclose(image_data_out['embedding'], image_data_reg['embedding'],
rtol=1e-05, atol=1e-05, equal_nan=False)
# SECOND regression test
run('image', DAISY_PATH, output_dir=tempdir, suffix='linear', input_repr='linear',
content_type='env', image_embedding_size=512, verbose=False)
# check output file created
image_outfile = os.path.join(tempdir, 'daisy_linear.npz')
assert os.path.isfile(image_outfile)
# regression test
image_data_reg = np.load(REG_DAISY_LINEAR_PATH)
    image_data_out = np.load(image_outfile)
from __future__ import print_function
import os
import re
import sys
import logging
import numpy as np
from itertools import cycle
try: import pandas
except ImportError: pandas = None
try: import scipy.optimize as sciopt
except ImportError: sciopt = None
try: import matplotlib.pyplot as plt
except ImportError: plt = None
try: import bokeh.plotting as bp
except ImportError: bp = None
__all__ = ['MasterCurve', 'CurveFitter', 'mc_init_notebook',
'MODIFIED_POWER', 'POWER', 'PRONY', 'POLYNOMIAL',
'COBYLA', 'POWELL', 'FMIN']
MODIFIED_POWER = 'Modified Power'
POWER = 'Power'
PRONY = 'Prony'
POLYNOMIAL = 'Polynomial'
COBYLA = 'Cobyla'
POWELL = 'Powell'
FMIN = 'Fmin'
PHONY = 123456.789
EPS = np.finfo(float).eps
BASE = 10.
class Environment:
pass
environ = Environment()
environ.notebook = 0
def _loadcsv(filename):
"""Load the csv file written out by MasterCurve.dump"""
class CSVData:
data = {}
def get(self, temp):
return self.data[temp]
def __iter__(self):
return iter(self.data.items())
dtype = np.float64
array = np.array
cstack = np.column_stack
assert os.path.isfile(filename)
lines = open(filename).readlines()
for (i, line) in enumerate(lines):
if re.search('Version', line):
version = float(lines[i+1])
assert version > 1.
if re.search('Curve Fitter', line):
fitter = lines[i+1]
if re.search('Fit Error', line):
fiterr = float(lines[i+1])
if re.search('WLF', line):
wlf_coeffs = [float(x) for x in lines[i+1].split(',')]
assert len(wlf_coeffs) == 2
if re.search('Data', line):
j = i + 1
if re.search('Master Curve', line):
k = i + 1
d = CSVData()
desc = lines[j].split(',')
temps = array([float(x) for x in desc[2:]])
data = array([[None if not x.split() else float(x) for x in y.split(',')]
for y in lines[j+1:k-1]])
d.master = array([[float(x) for x in y.split(',')] for y in lines[k+1:]])
d.toat = data[:,0]
d.logtoat = data[:,1]
d.raw_master = cstack((d.toat, d.logtoat, array(data[:,2], dtype=dtype)))
d.ref_temp = temps[0]
d.temps = temps[1:]
for (i, temp) in enumerate(temps[1:]):
td = data[:,i+3]
idx = [j for (j,x) in enumerate(td) if x is not None]
a = array(td[idx], dtype=dtype)
if len(a) == 0:
continue
a = cstack((d.toat[idx], d.logtoat[idx], a))
d.data[temp] = a
return d
def log(x, base=BASE):
e = 2.718281828459045
if abs(base - 10.) < EPS:
return np.log10(x)
elif abs(base - 2.) < EPS:
return np.log2(x)
elif abs(base - e) < EPS:
return np.log(x)
else:
return np.log(x) / np.log(BASE)
def interp1d(xy, x, findx=False, clip=False):
"""Wrapper around numpy's interp
"""
xp = xy[:, 0]
yp = xy[:, 1]
if findx:
xp, yp = yp, xp
    xd = np.diff(xp)
    if np.allclose(-1, np.sign(xd)):
# descending curve, reverse it
xp, yp = xp[::-1], yp[::-1]
if not clip:
return np.interp(x, xp, yp)
yval = np.interp(x, xp, yp, left=PHONY, right=PHONY)
if abs(yval - PHONY) < 1.e-12:
return None
return yval
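# Illustrative sketch (not part of the original module): interp1d interpolates y at
# x by default; findx=True swaps the roles of x and y, and clip=True returns None
# for queries outside the tabulated range instead of clamping to the end values.
def _demo_interp1d():
    xy = np.array([[0.0, 0.0], [1.0, 10.0], [2.0, 20.0]])
    y_at_half = interp1d(xy, 0.5)             # -> 5.0
    x_at_15 = interp1d(xy, 15.0, findx=True)  # -> 1.5
    outside = interp1d(xy, 5.0, clip=True)    # -> None (5.0 lies outside [0, 2])
    return y_at_half, x_at_15, outside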
def islist(a):
return isinstance(a, (list, tuple, np.ndarray))
def multidim(a):
    try:
        if islist(a) and islist(a[0]):
            return True
        return False
    except IndexError:
        return False
def joinn(l, sep=',', num=False, end='\n', ffmt='.18f'):
if num:
realfmt = lambda r: '{0:{1}}'.format(float(r), ffmt)
l = [realfmt(x) if x is not None else '' for x in l]
if not multidim(l):
line = sep.join(l)
else:
line = '\n'.join(sep.join(s) for s in l)
return line + end
def bounding_box(curve):
"""Determine the box that bounds curve
Parameters
----------
curve : ndarray
curve[i, 0] is the x coordinate of the ith data point
curve[i, 1] is the y coordinate of the ith data point
Returns
-------
box : ndarray
        box[0] = [xmin, ymin]
        box[1] = [xmax, ymax]
"""
if curve[0, 1] > curve[-1, 1]:
# y values are decreasing from left to right
xmin, ymin = curve[-1, 0], curve[-1, 1]
xmax, ymax = curve[0, 0], curve[0, 1]
else:
xmin, ymin = curve[0, 0], curve[0, 1]
xmax, ymax = curve[-1, 0], curve[-1, 1]
return np.array([[xmin, ymin], [xmax, ymax]])
def area(x, y, yaxis=False):
if not yaxis:
return np.trapz(y, x)
return np.trapz(x, y)
COLORS = ['Blue', 'Red', 'Purple', 'Green', 'Orange', 'HotPink', 'Cyan',
'Magenta', 'Chocolate', 'Yellow', 'Black', 'DodgerBlue', 'DarkRed',
'DarkViolet', 'DarkGreen', 'OrangeRed', 'Teal', 'DarkSlateGray',
'RoyalBlue', 'Crimson', 'SeaGreen', 'Plum', 'DarkGoldenRod',
'MidnightBlue', 'DarkOliveGreen', 'DarkMagenta', 'DarkOrchid',
'DarkTurquoise', 'Lime', 'Turquoise', 'DarkCyan', 'Maroon']
def gen_colors(keys):
colors = {}
c = cycle(COLORS)
for key in keys:
colors[key] = next(c).lower()
return colors
def aslist(item):
if item is None:
return []
if not isinstance(item, (list, tuple, np.ndarray)):
        item = [item]
return [x for x in item]
class MasterCurve(object):
fiterr = None
def __init__(self, txy, ref_temp=75., apply_log=False, xfac=1., yfac=1.,
skip_temps=None, wlf_coeffs=None,
xvar='Time', xunits='min', yvar='Er', yunits='psi',
optimizer=FMIN, fitter=PRONY, optwlf=sciopt is not None,
**kwargs):
"""Initialize the master curve object
Parameters
----------
txy : array_like (n, 3)
txy[i] is the ith [Temp, X, Y]
ref_temp : real
Reference temperature
optimizer : str [FMIN]
The scipy.optimize optimizing procedure
optwlf : bool [True]
Optimize the WLF parameters
fitter : str [prony]
Name of CurveFitter
skip_temps : list [None]
Temperatures to skip
wlf_coeffs : list [None]
Initial guesses for C1 and C2
kwargs : dict
keywords [optional] to pass to fitter
"""
if pandas is None:
raise RuntimeError('master curve fitting requires pandas')
columns = ('Temp', 'X', 'Log[X]', 'Y')
txy = np.asarray(txy)
txy[:, -1] *= yfac
if apply_log:
txy[:, 1] *= xfac
logx = log(txy[:, 1])
txy = np.insert(txy, 2, logx, axis=1)
else:
x = (BASE ** txy[:, 1]) * xfac
logx = log(x)
txy = np.insert(txy, 1, x, axis=1)
txy = np.insert(txy, 2, logx, axis=1)
self.ref_temp = ref_temp
self.skip_temps = aslist(skip_temps)
self.df = pandas.DataFrame(txy, columns=columns)
self.wlf_coeffs = wlf_coeffs
self.optwlf = optwlf
self.optimizer = optimizer
self.kwds = dict(**kwargs)
cf = CurveFitter(fitter)
self._cf = {0: cf(**kwargs)}
self.xvar = xvar
self.yvar = yvar
        self.xunits = xunits
self.yunits = yunits
self._fit = 0
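    # Illustrative usage sketch (assumed, not taken from the original project):
    #   txy rows are [Temp, x, y]; x is interpreted as log10 data unless
    #   apply_log=True is passed.
    #   mc = MasterCurve(txy, ref_temp=75., fitter=PRONY, optimizer=FMIN)
    #   mc.fit()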
@property
def cf(self):
return self._cf.get(1, self._cf[0])
@cf.setter
def cf(self, item):
if item is not None:
self._cf[1] = item
def fit(self, wlf_coeffs=None, skip_temps=None, ref_temp=None, optimize=None,
fitter=None, **kwargs):
skip_temps = aslist(skip_temps)
skip_temps.extend(self.skip_temps)
if fitter is not None:
fitter = CurveFitter(fitter)(**kwargs)
self.cf = fitter
# make skip temps a list, if not already
df = self.df.copy()
for temp in skip_temps:
            df = df[~(np.abs(df['Temp'] - temp) < EPS)]  # tolerance assumed; the comparison is truncated in the source
from itertools import accumulate as _accumulate
from operator import add as _add
import numpy as _np
from scipy import signal as _signal
from scipy.stats import norm as _norm
def reshape(u_seq):
if (ndim:=u_seq.ndim) == 1:
u_seq = u_seq.reshape(len(u_seq), 1)
elif ndim >= 3:
raise NotImplementedError(f'{ndim} ndim is not supported')
return u_seq
def embedding_seq(u_seq, T, D):
idx = _np.arange(0,D,1)*T
return _np.array([u_seq[idx+i,:] for i in range(len(u_seq)-(D-1)*T)])
def embedding_seq_1dim(u_seq, T, D):
idx = _np.arange(0,D,1)*T
e_seq = _np.array([u_seq[idx+i,:] for i in range(len(u_seq)-(D-1)*T)])
return e_seq.reshape(len(e_seq), D)
def embedding(u_seq, T, D):
dim, length = u_seq.ndim, len(u_seq)
if len(u_seq.shape)==1:
u_seq = u_seq.reshape(length, dim)
idx = _np.arange(0,D,1)*T
e_seq = _np.array([u_seq[idx+i,:] for i in range(length-(D-1)*T)])
if u_seq.shape[1] == 1:
e_seq = e_seq.reshape(len(e_seq), D)
return e_seq
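# Illustrative sketch (not part of the original module): a delay embedding of a
# length-N series with delay T and dimension D has shape (N - (D - 1)*T, D).
def _demo_embedding():
    u = _np.sin(0.1 * _np.arange(100))
    e = embedding(u, T=2, D=3)
    assert e.shape == (96, 3)  # 100 - (3 - 1)*2 = 96 delay vectors
    return e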
def get_bottom(seq, threshold=float('inf')):
_, dim = seq.shape
lags = []
for i in range(dim):
us = seq[:, i]
min_idx = _signal.argrelmin(us, order=1)[0]
candidate = min_idx[us[min_idx]<=threshold]
if len(candidate):
lags.append(candidate[0])
else:
lags.append(None)
return tuple(lags)
def bartlett(seq, alpha=0.95):
_, dim = seq.shape
N = len(seq)
var = [_np.ones(dim)/N,
*[(1+2*rho)/N for rho in _accumulate(seq[1:,]**2, _add)]]
z = _norm.ppf(alpha)
    return z * _np.array(var)
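# Illustrative sketch (not part of the original module): bartlett() takes an
# (n_lags, dim) array of autocorrelation values and returns the z_alpha-scaled
# Bartlett variance terms, one row per lag. The decaying toy correlations below
# are made up for the example.
def _demo_bartlett():
    rho = _np.exp(-0.5 * _np.arange(20)).reshape(20, 1)
    bounds = bartlett(rho, alpha=0.95)
    assert bounds.shape == (20, 1)
    return bounds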