import cv2
import numpy as np
import time
from darty.gui import GUI
from darty.image_tools import Image_Tools
#print(cv2.__version__)
class Arrow_Detector:
ENV = {
'DETECTION_KERNEL_SIZE' : (100,100),
'DETECTION_RADIAL_STEP' : 10,
'DETECTION_KERNEL_THICKNESS' : 1,
'DETECTION_APEX_OFFSET' : 20, #20
'DETECTION_APEX_LINE_THICKNESS' : 20, #20
'DETECTION_APEX_LINE_THICKNESS_PEAK' : 10, #20
'APEX_CLIPPING_OFFSET' : 50,
'APEX_MARK_SIZE' : 10
}
def detectArrowState(self,IM_arrow):
lu = IM_arrow[0:IM_arrow.shape[0]//2,0:IM_arrow.shape[1]//2]
ru = IM_arrow[0:IM_arrow.shape[0]//2,IM_arrow.shape[1]//2:IM_arrow.shape[1]]
lb = IM_arrow[IM_arrow.shape[0]//2:IM_arrow.shape[0],0:IM_arrow.shape[1]//2]
rb = IM_arrow[IM_arrow.shape[0]//2:IM_arrow.shape[0],IM_arrow.shape[1]//2:IM_arrow.shape[1]]
verbs = [('l','u'),('r','u'),('l','b'),('r','b')]
stack = [lu,ru,lb,rb]
max = -1
maxIdx = 0
for i in range(len(stack)):
if np.sum(stack[i]) > max:
max = np.sum(stack[i])
maxIdx = i
#print(verbs[maxIdx])
return verbs[maxIdx]
def computeArrowOrientation(self,IM,arange,kernel):
max_contour_length = 0
max_angle = 0
max_contour = 0
max_img = 0
for i in arange:
kernel_rot = Image_Tools.rotateImage(kernel,i)
closed = cv2.morphologyEx(IM, cv2.MORPH_CLOSE, kernel_rot)
im2, contours, hierarchy = cv2.findContours(closed.copy(),cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
for j in range(len(contours)):
length = cv2.arcLength(contours[j],True)
if length > max_contour_length:
max_contour_length = length
max_angle = i
max_contour = contours[j]
max_img = closed
return max_contour_length,max_angle,max_contour,max_img
def _detectArrowLine(self,IM_closed,max_contour,xx,yy,ww,hh):
# Improve with fitting line
line_image = np.zeros(IM_closed.shape,"uint8")
line_image_peak = np.zeros(IM_closed.shape,"uint8")
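# --- Hedged standalone sketch (not part of the darty package) ---
# detectArrowState() above reduces to asking which image quadrant of the arrow
# mask carries the most mass; a synthetic mask makes that bookkeeping concrete.
_demo_mask = np.zeros((100, 100), np.uint8)
_demo_mask[60:90, 10:40] = 255                      # bright blob in the lower-left
_demo_quads = {('l', 'u'): _demo_mask[:50, :50], ('r', 'u'): _demo_mask[:50, 50:],
               ('l', 'b'): _demo_mask[50:, :50], ('r', 'b'): _demo_mask[50:, 50:]}
_demo_state = max(_demo_quads, key=lambda q: np.sum(_demo_quads[q]))   # -> ('l', 'b')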
# -*- coding: utf-8 -*-
from PIL import Image
import math
import numpy as np
import os
import random
import sys
import glob
import h5py
import tensorflow as tf
from .dataset_tools import *
def read_text(filename):
v = []
for l in open(filename, 'r'):
v.append(l.strip())
# v.append([x for x in l.strip().split(' ')])
return np.array(v)
class SE3PairwiseDataset(object):
# class for ScanNet or SfM dataset
def __init__(self, batch_size, offset_val=2, random_offset=False, out_width=320, out_height=240, crop_center=False, max_degree=0, max_scale=np.sqrt(2), warp_aug_mode='none', num_threads=8):
self.batch_size = batch_size
self.offset_val = offset_val
self.random_offset = random_offset
self.depth_factor = 0.001
self.far_depth_val = 1000
self.src_width = 640
self.src_height = 480
self.dst_width = out_width
self.dst_height = out_height
self.num_threads = num_threads
self.max_rad = np.pi / 180 * max_degree
self.max_scale_log = np.log(max_scale)
self.warp_aug_mode = warp_aug_mode
self.scale_table_path = '../../params/scale/scl-{:.2f}/rnd_table.npy'.format(max_scale)
self.ori_table_path = '../../params/ori/deg-{}/rnd_table.npy'.format(max_degree)
self.depth_thresh = 1.0
self.crop_center = crop_center
def set_files(self, root_dir, render_paths, max_img_num=-1):
if isinstance(render_paths, str):
render_paths = [render_paths]
num_seq = len(render_paths)
print('Number of sequences:{0}'.format(len(render_paths)))
# load from render_paths
photo_intrinsics = [None] * num_seq
depth_intrinsics = [None] * num_seq
cTws = [None] * num_seq
num_photos_per_seq = [None] * num_seq
photo_dirs = [None] * num_seq
depth_dirs = [None] * num_seq
for i, render_path in enumerate(render_paths):
calib = np.load(os.path.join(root_dir, render_path, 'calib/calib.npz'))
photo_intrinsics[i] = calib['photo_intrinsic']
depth_intrinsics[i] = calib['depth_intrinsic']
cTw = np.load(os.path.join(root_dir, render_path, 'pose/cTw.npy'))
num_photos = (len(cTw) // self.batch_size) * self.batch_size # guarantee to be divisible by batch size
if max_img_num > 0:
num_photos = min(num_photos, max_img_num)
print('#{} {} use {} / {} images'.format(i, render_path, num_photos, len(cTw)))
cTws[i] = cTw[:num_photos]
num_photos_per_seq[i] = num_photos
photo_dirs[i] = os.path.join(root_dir, render_path, 'photo')
depth_dirs[i] = os.path.join(root_dir, render_path, 'depth')
self.cTws_data = np.concatenate(cTws, axis=0)
self.cTws = tf.convert_to_tensor(self.cTws_data, dtype=tf.float32)
self.photo_intrinsics_data = np.array(photo_intrinsics)
self.photo_intrinsics = tf.convert_to_tensor(self.photo_intrinsics_data, dtype=tf.float32)
self.depth_intrinsics_data = np.array(depth_intrinsics)
self.depth_intrinsics = tf.convert_to_tensor(self.depth_intrinsics_data, dtype=tf.float32)
self.num_photos_per_seq_data = np.array(num_photos_per_seq, dtype=np.int32)
self.num_photos_per_seq = tf.convert_to_tensor(self.num_photos_per_seq_data, dtype=tf.int32)
self.seq_offsets_data = np.concatenate([np.zeros([1], dtype=np.int32), np.cumsum(self.num_photos_per_seq_data)])
self.seq_offsets = tf.convert_to_tensor(self.seq_offsets_data, tf.int32)
self.intrinsics_3x3_data = self.photo_intrinsics_data
photo_path_list = []
depth_path_list = []
seq_id_list = []
for i, (pdir, ddir) in enumerate(zip(photo_dirs, depth_dirs)):
numbers = sorted([int(os.path.splitext(x.name)[0]) for x in os.scandir(pdir)])
num_photos = self.num_photos_per_seq_data[i]
numbers = numbers[:num_photos]
photos = [os.path.join(pdir, '{}.jpg'.format(img_n)) for img_n in numbers]
depths = [os.path.join(ddir, '{}.png'.format(img_n)) for img_n in numbers]
seq_ids = [i] * len(numbers)
assert len(photos) == len(depths)
assert len(photos) == num_photos_per_seq[i]
photo_path_list += photos
depth_path_list += depths
seq_id_list += seq_ids
# set other members
self.photo_path_list_data = photo_path_list
self.depth_path_list_data = depth_path_list
self.seq_id_list_data = np.array(seq_id_list, dtype=np.int32)
self.photo_path_list = tf.convert_to_tensor(photo_path_list)
self.depth_path_list = tf.convert_to_tensor(depth_path_list)
self.seq_id_list = tf.convert_to_tensor(self.seq_id_list_data, dtype=tf.int32)
self.num_seq = num_seq
self.total_num_photos = len(photo_path_list)
if self.warp_aug_mode == 'table':
ori_table_path = os.path.join(root_dir, self.ori_table_path)
scale_table_path = os.path.join(root_dir, self.scale_table_path)
self.ori_random_table_data = self._load_random_table(ori_table_path, self.total_num_photos)
self.scale_random_table_data = self._load_random_table(scale_table_path, self.total_num_photos)
self.ori_random_table = tf.convert_to_tensor(self.ori_random_table_data, dtype=tf.float32)
self.scale_random_table = tf.convert_to_tensor(self.scale_random_table_data, dtype=tf.float32)
print('[ScanNet] #sep={}, #total={}'.format(self.num_seq, self.total_num_photos))
def get_dataset(self, shuffle=True, num_epoch=None, seed=None):
dataset = tf.data.Dataset.range(self.total_num_photos)
if shuffle:
dataset = dataset.shuffle(self.total_num_photos, seed=seed)
dataset = dataset.repeat(count=num_epoch)
dataset = dataset.map(lambda x: self.mapfn_read_and_decode(x), num_parallel_calls=self.num_threads)
dataset = dataset.map(self.mapfn_augment, num_parallel_calls=self.num_threads)
dataset = dataset.batch(self.batch_size)
return dataset
def mapfn_read_and_decode(self, tgt_idx):
tgt_idx = tf.cast(tgt_idx, tf.int32) # tf.int64->tf.int32
seq_idx = self.seq_id_list[tgt_idx]
file_idx = tgt_idx - self.seq_offsets[seq_idx]
num_photos = self.num_photos_per_seq[seq_idx]
if self.random_offset:
offset = tf.random_uniform((), -self.offset_val, self.offset_val, dtype=tf.int32)
else:
offset = self.offset_val # fixed value
offset = tf.clip_by_value(file_idx+offset, 0, num_photos-1) - file_idx
ref_idx = tgt_idx + offset
photo1 = self._load_photo(tgt_idx)
photo2 = self._load_photo(ref_idx)
depth1, valid_mask1 = self._load_depth(tgt_idx)
depth2, valid_mask2 = self._load_depth(ref_idx)
# pose
c1Tw = self.cTws[tgt_idx]
c2Tw = self.cTws[ref_idx]
c2Tc1, c1Tc2 = self._get_delta_pose(c1Tw, c2Tw)
intrinsics_3x3 = self.photo_intrinsics[seq_idx,:3,:3]
intrinsics_3x3.set_shape([3,3])
# warp params
print('WARP_AUG_MODE={} max_rad={}, max_scale_log={}'.format(self.warp_aug_mode, self.max_rad, self.max_scale_log))
if self.warp_aug_mode == 'none':
scales = tf.zeros([2], tf.float32)
oris = tf.zeros([2], tf.float32)
elif self.warp_aug_mode == 'fix':
scales = tf.constant([self.max_scale_log, self.max_scale_log], tf.float32)
oris = tf.constant([self.max_rad, self.max_rad], tf.float32)
elif self.warp_aug_mode == 'random':
scales = tf.random_uniform([2], minval=-self.max_scale_log, maxval=self.max_scale_log, dtype=tf.float32)
oris = tf.random_uniform([2], minval=-self.max_rad, maxval=self.max_rad, dtype=tf.float32)
elif self.warp_aug_mode == 'table':
scales = self.scale_random_table[tgt_idx]
oris = self.ori_random_table[tgt_idx]
else:
raise ValueError('Unknown warp_aug_mode: {}'.format(self.warp_aug_mode))
theta_params = tf.concat([scales, oris], axis=0)
use_aug = tf.constant(False) if self.warp_aug_mode == 'none' else tf.constant(True)
# add in-plane rotation
intheta_c2Rc1 = tf.py_func(get_inplane_rotation, [c2Tc1[:3,:3]], [tf.float32])
intheta_c1Rc2 = tf.py_func(get_inplane_rotation, [c1Tc2[:3,:3]], [tf.float32])
theta_params = tf.concat([theta_params, intheta_c2Rc1, intheta_c1Rc2], axis=0)
return photo1, photo2, depth1, depth2, valid_mask1, valid_mask2, c2Tc1, c1Tc2, c1Tw, c2Tw, intrinsics_3x3, intrinsics_3x3, theta_params, use_aug
def mapfn_augment(self, photo1, photo2, depth1, depth2, valid_mask1, valid_mask2, c2Tc1, c1Tc2, c1Tw, c2Tw, K1, K2, theta_params, use_aug):
dv1 = tf.concat([depth1, valid_mask1], axis=-1)
dv2 = tf.concat([depth2, valid_mask2], axis=-1)
# Crop center
if self.crop_center:
# image size = [480,640] --> [480,480]
assert self.src_width > self.src_height
x_offset = (self.src_width-self.src_height) // 2
new_height = new_width = self.src_height
photo1 = tf.slice(photo1, [0,x_offset,0], [-1, new_width, -1])
photo2 = tf.slice(photo2, [0,x_offset,0], [-1, new_width, -1])
dv1 = tf.slice(dv1, [0,x_offset,0], [-1, new_width, -1])
dv2 = tf.slice(dv2, [0,x_offset,0], [-1, new_width, -1])
# modify intrinsic matrix
K1 = fix_intrinsic_center(K1, tf.to_float(new_width)/2, tf.to_float(new_height)/2)
K2 = fix_intrinsic_center(K2, tf.to_float(new_width)/2, tf.to_float(new_height)/2)
dx = float(self.dst_width) / new_width
dy = float(self.dst_height) / new_height
else:
dx = float(self.dst_width) / self.src_width
dy = float(self.dst_height) / self.src_height
# Resizing
scale_T = self._make_scale_theta(dx, dy)
K1 = tf.matmul(scale_T, K1)
K2 = tf.matmul(scale_T, K2)
photo1 = tf.image.resize_images(photo1, (self.dst_height, self.dst_width))
photo2 = tf.image.resize_images(photo2, (self.dst_height, self.dst_width))
# do not use linear interpolation on valid_masks1
dv1 = tf.image.resize_images(dv1, (self.dst_height, self.dst_width),
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
dv2 = tf.image.resize_images(dv2, (self.dst_height, self.dst_width),
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
depth1 = tf.slice(dv1, [0,0,0],[-1,-1,1])
valid_mask1 = tf.slice(dv1, [0,0,1],[-1,-1,1])
depth2 = tf.slice(dv2, [0,0,0],[-1,-1,1])
valid_mask2 = tf.slice(dv2, [0,0,1],[-1,-1,1])
return photo1, photo2, depth1, depth2, valid_mask1, valid_mask2, c2Tc1, c1Tc2, c1Tw, c2Tw, K1, K2, theta_params, use_aug
def _load_photo(self, idx):
photo = tf.read_file(self.photo_path_list[idx])
photo = tf.image.decode_jpeg(photo, 1) # force to load as grayscale
# photo = tf.image.rgb_to_grayscale(photo)
photo = tf.cast(photo, tf.float32) / 255.0 # normalize
photo.set_shape((self.src_height, self.src_width, 1))
photo.set_shape((None, None, 1))
return photo
def _load_depth(self, idx):
depth = tf.read_file(self.depth_path_list[idx])
depth = tf.image.decode_png(depth, 1, dtype=tf.uint16) # force to load as grayscale
depth = tf.scalar_mul(self.depth_factor, tf.cast(depth, tf.float32))
is_zero = tf.equal(depth, tf.constant(0, dtype=tf.float32))
valid_mask = tf.cast(tf.logical_not(is_zero), tf.float32)
far_depth = tf.scalar_mul(self.far_depth_val, tf.ones_like(depth, dtype=tf.float32))
# pay attention when you visualize depth due to dynamic range (0~1000)
depth = tf.where(is_zero, far_depth, depth)
# depth.set_shape((self.src_height, self.src_width, 1))
depth.set_shape((None, None, 1))
return depth, valid_mask
def _get_delta_pose(self, c1Tw, c2Tw):
# cTw = world to camera pose [4x4 matrix]
# return = c2Tc1, which means c1 to c2 pose
c1Rw = tf.slice(c1Tw, [0,0], [3,3])
c2Rw = tf.slice(c2Tw, [0,0], [3,3])
c1Pw = tf.slice(c1Tw, [0,3], [3,1])
c2Pw = tf.slice(c2Tw, [0,3], [3,1])
wPc1 = -tf.matmul(c1Rw, c1Pw, transpose_a=True) # wPc = -wRc cPw
wPc2 = -tf.matmul(c2Rw, c2Pw, transpose_a=True) # wPc = -wRc cPw
c2Rc1 = tf.matmul(c2Rw, c1Rw, transpose_b=True) # c2Rc1 = c2Rw wRc1
c2Pc1 = tf.matmul(c2Rw, wPc1-wPc2) # c2Pc1 = c2Rw (wPc1-wPc2)
# c2Tc1 (4x4) =
# | c2Rc1 c2Pc1 |
# | 0 1 |
c2Tc1 = tf.concat([c2Rc1, c2Pc1], axis=1)
c2Tc1 = tf.concat([c2Tc1, tf.constant([[0,0,0,1]], dtype=tf.float32)], axis=0)
c1Tc2 = tf.matrix_inverse(c2Tc1)
return c2Tc1, c1Tc2
def _make_scale_theta(self, sx, sy):
# 3x3 matrix
theta = tf.stack(
[sx, 0, 0,
0, sy, 0,
0, 0, 1])
return tf.cast(tf.reshape(theta, [3,3]), tf.float32)
def _load_random_table(self, table_path, min_table_size):
if not os.path.exists(table_path):
raise ValueError('Cannot load random-table from {}'.format(table_path))
random_table = np.load(table_path) # [N, 2]
if len(random_table) < min_table_size:
raise ValueError('Shortage of table size, table size should be larger than {} but the actual size is {} in {}'.format(min_table_size, len(random_table), table_path))
print('load random table ({}) from {}'.format(random_table.shape, table_path))
return random_table
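# --- Hedged illustration (plain numpy, not part of either class) ---
# How seq_offsets maps a global frame index back to (sequence id, index inside
# that sequence), mirroring mapfn_read_and_decode() above. The real pipeline
# looks the sequence id up in seq_id_list; searchsorted is just an equivalent
# way to show the relationship, and the counts here are made up.
_demo_counts = np.array([5, 3, 4], dtype=np.int32)
_demo_offsets = np.concatenate([np.zeros([1], dtype=np.int32),
                                np.cumsum(_demo_counts)])            # [0, 5, 8, 12]
_demo_tgt = 6                                                        # global index
_demo_seq = int(np.searchsorted(_demo_offsets, _demo_tgt, side='right')) - 1  # -> 1
_demo_file = _demo_tgt - _demo_offsets[_demo_seq]                    # -> 1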
class SubsampleSE3PairwiseDataset(SE3PairwiseDataset):
def __init__(self, batch_size, offset_val=2, random_offset=False, out_width=320, out_height=240, max_degree=0, max_scale=np.sqrt(2), warp_aug_mode='none', num_threads=8):
self.batch_size = batch_size
self.offset_val = offset_val
self.random_offset = random_offset
self.depth_factor = 0.001
self.far_depth_val = 1000
self.src_width = 640
self.src_height = 480
self.dst_width = out_width
self.dst_height = out_height
self.num_threads = num_threads
self.max_rad = np.pi / 180 * max_degree
self.max_scale_log = np.log(max_scale)
self.warp_aug_mode = warp_aug_mode
self.scale_table_path = '../../params/scale/scl-{:.2f}/rnd_table.npy'.format(max_scale)
self.ori_table_path = '../../params/ori/deg-{}/rnd_table.npy'.format(max_degree)
self.crop_center = False
self.depth_thresh = 1.0
def set_files(self, root_dir, render_paths, max_img_num=-1):
if isinstance(render_paths, str):
render_paths = [render_paths]
num_seq = len(render_paths)
print('Number of sequences:{0}'.format(len(render_paths)))
# load from render_paths
photo_intrinsics = [None] * num_seq
depth_intrinsics = [None] * num_seq
cTws = [None] * num_seq
num_photos_per_seq = [None] * num_seq
photo_dirs = [None] * num_seq
depth_dirs = [None] * num_seq
photo_path_list = []
depth_path_list = []
seq_id_list = []
for i, render_path in enumerate(render_paths):
valid_numbers = read_text(os.path.join(root_dir, render_path, 'valid_number.txt')).astype(np.int32)
subsample_inds = np.where(valid_numbers%10==0)[0]
subsample_numbers = valid_numbers[subsample_inds]
calib = np.load(os.path.join(root_dir, render_path, 'calib/calib.npz'))
photo_intrinsics[i] = calib['photo_intrinsic']
depth_intrinsics[i] = calib['depth_intrinsic']
cTw = np.load(os.path.join(root_dir, render_path, 'pose/cTw.npy'))
cTws[i] = cTw[subsample_inds]
num_photos_per_seq[i] = len(cTws[i])
photos = [os.path.join(root_dir, render_path, 'photo/{}.jpg'.format(n)) for n in subsample_numbers]
depths = [os.path.join(root_dir, render_path, 'depth/{}.png'.format(n)) for n in subsample_numbers]
seq_ids = [i] * len(subsample_numbers)
assert len(photos) == len(depths)
assert len(photos) == num_photos_per_seq[i]
photo_path_list += photos
depth_path_list += depths
seq_id_list += seq_ids
self.cTws_data = np.concatenate(cTws, axis=0)
self.cTws = tf.convert_to_tensor(self.cTws_data, dtype=tf.float32)
self.photo_intrinsics_data = np.array(photo_intrinsics)
self.photo_intrinsics = tf.convert_to_tensor(self.photo_intrinsics_data, dtype=tf.float32)
self.depth_intrinsics_data = np.array(depth_intrinsics)
self.depth_intrinsics = tf.convert_to_tensor(self.depth_intrinsics_data, dtype=tf.float32)
self.num_photos_per_seq_data = np.array(num_photos_per_seq, dtype=np.int32)
#!/usr/bin/env python2
# CHAP - The Channel Annotation Package
#
# Copyright (c) 2016 - 2018 <NAME>, <NAME>, <NAME>, and
# <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
################################################################################
# CONFIGURATION
################################################################################
# load libraries:
import json # read in JSON files
import numpy as np # manipulate numeric vectors
from matplotlib import pyplot as pl # plotting facilities
import argparse # parse command line arguments
# get parameters from user input:
parser = argparse.ArgumentParser()
parser.add_argument(
"-filename",
nargs = "?",
const = "output.json",
default = "output.json")
parser.add_argument("-dpi",
nargs = "?",
const = 1200,
default = 1200,
type = int)
args = parser.parse_args()
################################################################################
# DATA READ-IN
################################################################################
# load output data from JSON file:
with open(args.filename) as data_file:
data = json.load(data_file)
################################################################################
# PATHWAY PROFILE PLOTS
################################################################################
# Radius Profile with Residue Positions
#-------------------------------------------------------------------------------
pl.figure("radius_profile")
pl.plot(
np.array(data["pathwayProfile"]["s"]),
np.array(data["pathwayProfile"]["radiusMean"]),
"k-")
pl.fill_between(
np.array(data["pathwayProfile"]["s"]),
np.array(data["pathwayProfile"]["radiusMin"]),
np.array(data["pathwayProfile"]["radiusMax"]),
facecolor = "#000000",
alpha = 0.1)
radius_sd = np.array(data["pathwayProfile"]["radiusSd"])
pl.fill_between(
np.array(data["pathwayProfile"]["s"]),
np.array(data["pathwayProfile"]["radiusMean"]) - radius_sd,
np.array(data["pathwayProfile"]["radiusMean"]) + radius_sd,
facecolor = "#000000",
alpha = 0.2)
pf = np.array(data["residueSummary"]["poreFacing"]["mean"]) > 0.5
pl.scatter(
np.array(data["residueSummary"]["s"]["mean"])[pf],
np.array(data["residueSummary"]["rho"]["mean"])[pf],
c = np.array(data["residueSummary"]["hydrophobicity"])[pf],
marker = "o",
cmap = "BrBG_r")
pl.clim(
-max(abs(np.array(data["residueSummary"]["hydrophobicity"]))),
max(abs(np.array(data["residueSummary"]["hydrophobicity"]))))
cbar = pl.colorbar()
cbar.ax.set_ylabel("Hydrophobicity (a.u.)")
pl.margins(x = 0)
pl.title("Time-Averaged Radius Profile")
pl.xlabel("s (nm)")
pl.ylabel("R (nm)")
pl.savefig(
"time_averaged_radius_profile.png",
dpi = args.dpi)
pl.close("radius_profile")
# Hydrophobicity Profile with Residue Positions
#-------------------------------------------------------------------------------
pl.figure("hydrophobicity_profile")
pl.plot(
np.array(data["pathwayProfile"]["s"]),
np.array(data["pathwayProfile"]["pfHydrophobicityMean"]),
"k-")
pl.fill_between(
np.array(data["pathwayProfile"]["s"]),
np.array(data["pathwayProfile"]["pfHydrophobicityMin"]),
np.array(data["pathwayProfile"]["pfHydrophobicityMax"]),
facecolor = "#000000",
alpha = 0.1)
hydrophobicity_sd = np.array(data["pathwayProfile"]["pfHydrophobicitySd"])
pl.fill_between(
np.array(data["pathwayProfile"]["s"]),
np.array(data["pathwayProfile"]["pfHydrophobicityMean"]) - hydrophobicity_sd,
np.array(data["pathwayProfile"]["pfHydrophobicityMean"]) + hydrophobicity_sd,
facecolor = "#000000",
alpha = 0.2)
pf = np.array(data["residueSummary"]["poreFacing"]["mean"]) > 0.5
pl.scatter(
np.array(data["residueSummary"]["s"]["mean"])[pf],
np.array(data["residueSummary"]["hydrophobicity"])[pf],
c = np.array(data["residueSummary"]["hydrophobicity"])[pf],
marker = "o",
cmap = "BrBG_r")
pl.clim(
-max(abs(np.array(data["residueSummary"]["hydrophobicity"]))),
max(abs(np.array(data["residueSummary"]["hydrophobicity"]))))
cbar = pl.colorbar()
cbar.ax.set_ylabel("Hydrophobicity (a.u.)")
pl.margins(x = 0)
pl.title("Time-Averaged Hydrophobicity Profile")
pl.xlabel("s (nm)")
pl.ylabel("H (a.u.)")
pl.savefig(
"time_averaged_hydrophobicity_profile.png",
dpi = args.dpi)
pl.close("hydrophobicity_profile")
# Solvent Number Density Profile
#-------------------------------------------------------------------------------
pl.figure("density_profile")
pl.axhline(
y = 33.3679,
linestyle = "dashed")
pl.plot(
np.array(data["pathwayProfile"]["s"]),
np.array(data["pathwayProfile"]["densityMean"]),
"k-")
pl.fill_between(
np.array(data["pathwayProfile"]["s"]),
np.array(data["pathwayProfile"]["densityMin"]),
np.array(data["pathwayProfile"]["densityMax"]),
facecolor = "#000000",
alpha = 0.1)
density_sd = np.array(data["pathwayProfile"]["densitySd"])
pl.fill_between(
np.array(data["pathwayProfile"]["s"]),
np.array(data["pathwayProfile"]["densityMean"]) - density_sd,
np.array(data["pathwayProfile"]["densityMean"]) + density_sd,
facecolor = "#000000",
alpha = 0.2)
pl.margins(x = 0)
pl.title("Time-Averaged Solvent Number Density Profile")
pl.xlabel("s (nm)")
pl.ylabel("n (nm$^{-3}$)")
pl.savefig(
"time_averaged_solvent_number_density_profile.png",
dpi = args.dpi)
pl.close("density_profile")
# Free Energy Profile
#-------------------------------------------------------------------------------
pl.figure("energy_profile")
pl.plot(
| np.array(data["pathwayProfile"]["s"]) | numpy.array |
import numpy as np
import open3d as o3d
import torch
import torch.nn as nn
import torch.nn.functional as F
import time
import cv2
# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
from tensorflow.keras.models import load_model
from tqdm import tqdm
from gpuinfo import GPUInfo
# data generator to fetch the h5 TSDF file; the argument receives the file number
class Generator():
def __init__(self):
import numpy as np
import pandas as pd
import h5py
def get_data(self, file_no):
import h5py
import numpy as np
filename = 'D:/DataSets/nyu/test/TSDF/' + str(file_no) + '.h5'
# getting 3d input from h5 files
h5 = h5py.File(filename, 'r')
input = np.array(h5['TSDF'])
input = np.reshape(input, (1, 1, 32, 32, 32))
h5.close()
inputs = np.array(input).tolist()
inputs = torch.FloatTensor(inputs)
return inputs
# while loading the model saved in training we must run this class
# this is needed always as the class was used during training
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv3d(1, 48, (5, 5, 5), padding=1)
self.conv1_1 = nn.Conv3d(1, 48, (3, 3, 3), padding=0)
self.pool = nn.MaxPool3d(2, stride=2)
self.conv2 = nn.Conv3d(96, 96, (5, 5, 5), padding=1)
self.conv2_2 = nn.Conv3d(96, 96, (3, 3, 3), padding=0)
self.pool1 = nn.MaxPool3d(2, stride=2)
self.conv3 = nn.Conv3d(192, 192, (5, 5, 5), padding=1)
self.conv3_1 = nn.Conv3d(192, 192, (3, 3, 3), padding=0)
self.pool2 = nn.MaxPool3d(2, stride=2)
self.ln1 = nn.Linear(in_features=3072, out_features=4096)
self.ln2 = nn.Linear(in_features=4096, out_features=1024)
self.out1 = nn.Linear(in_features=1024, out_features=30)
def forward(self, x):
# branch one
x_1 = self.conv1(x)
x_2 = self.conv1_1(x)
x_3 = torch.cat([x_2, x_1], dim=1)
x_4_4 = self.pool(x_3)
x_4_5 = self.conv2(x_4_4)
x_4_1 = self.conv2_2(x_4_4)
x_4 = torch.cat([x_4_1, x_4_5], dim=1)
x_4 = F.relu(x_4)
x_4 = self.pool1(x_4)
x_4_6 = self.conv3(x_4)
x_4_7 = self.conv3_1(x_4)
x_4 = torch.cat([x_4_6, x_4_7], dim=1)
x_4 = F.relu(x_4)
x_4 = self.pool2(x_4)
x_4 = x_4.view(-1, 3072)
x_4 = self.ln1(x_4)
x_4 = F.relu(x_4)
x_4 = self.ln2(x_4)
x_4 = F.relu(x_4)
x_4 = self.out1(x_4)
ret = x_4.view(-1, 30)
return ret
Net = torch.load('C:\\Users\\Use\\Downloads\\pca_30_points_relational_model_2 (1).pt', map_location=torch.device('cuda'))
model = load_model('C:\\Users\\Use\\Downloads\\architecture_3.h5')
Net.eval()
Net.cuda()
gen = Generator()
x = np.genfromtxt('D:\\DataSets\\ground_truth\\test\\joint_x.csv', delimiter=',')
y = np.genfromtxt('D:\\DataSets\\ground_truth\\test\\joint_y.csv', delimiter=',')
z = np.genfromtxt('D:\\DataSets\\ground_truth\\test\\joint_z.csv', delimiter=',')
x = x[:, [0, 3, 6, 9, 12, 15, 18, 21, 24, 26, 28, 30, 31, 32]]
y = y[:, [0, 3, 6, 9, 12, 15, 18, 21, 24, 26, 28, 30, 31, 32]]
z = z[:, [0, 3, 6, 9, 12, 15, 18, 21, 24, 26, 28, 30, 31, 32]]
point_names = np.array(['P1', 'P2', 'R1', 'R2', 'M1', 'M2', 'I1', 'I2', 'T1', 'T2', 'T3', 'W1', 'W2', 'C'])
# mask = np.zeros((5000, 5000, 3), np.uint8)
# for point in range(14):
# cv2.circle(mask, (int(1500 * (x[1, point]))+1000, int(-1500 * (y[1, point]))+2000), 10, [255, 255, 255], -1)
# cv2.putText(mask, f'{point}', (int(1500 * (x[1, point]))+1010, int(-1500 * (y[1, point]))+2010), cv2.FONT_HERSHEY_DUPLEX, 1, [255, 255, 255], 1)
# cv2.putText(mask, point_names[point], (int(1500 * (x[1, point]))+970, int(-1500 * (y[1, point]))+1980), cv2.FONT_HERSHEY_DUPLEX, 1, [255, 255, 255], 1)
# cv2.imwrite('mask.png', mask)
log = open("benchmark_time.txt", "w")
logg = open("benchmark_error.txt", "w")
log_time = np.array([])
rng = 8252
errors = np.zeros((rng, 14))
log.write('Using device cuda:0\n')
log.write(torch.cuda.get_device_name(0))
log.write('\nMemory total: 4.0 GB\n')
log.write(f'Memory allocated: {round(torch.cuda.memory_allocated(0)/1024**3,1)} GB\n')
log.write(f'Memory cached: {round(torch.cuda.memory_reserved(0)/1024**3,1)} GB\n')
log.write("\nTime in seconds for each iteration.\n")
logg.write("Percentage error for each point is.\n")
for instance in tqdm(range(rng)):
inputs = gen.get_data(instance)
inputs = inputs.cuda()
t = time.time()
outputs = Net(inputs)
k = outputs.cpu().detach().numpy()
predicted_xyz = model.predict(k)
elapsed_time = time.time() - t
elapsed_time = elapsed_time*1000
log.write(f'{instance:04}: {round(elapsed_time, 2):05} ms\n')
logg.write(f'{instance:04}: ')
log_time = np.append(log_time, elapsed_time)
predicted_xyz = predicted_xyz.reshape((14, 3))
xx = x[instance+1]
yy = y[instance+1]
zz = z[instance+1]
xyz = np.zeros((14, 3))
xyz[:, 0] = xx
xyz[:, 1] = yy
xyz[:, 2] = zz
error = np.array([])
for point in range(14):
logg.write(f'{round(np.linalg.norm(xyz[point] - predicted_xyz[point])*100, 4):07}, ')
errors[instance, point] = np.linalg.norm(xyz[point] - predicted_xyz[point])*100
avg = sum(errors[instance, :])/14
logg.write(f'Best: {round(np.min(errors[instance, :]), 4):07} %, Worst: {round(np.max(errors[instance, :]), 4):07} %\n')
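# Hedged aside (standalone illustration with synthetic data): the per-point
# loop above amounts to a row-wise Euclidean norm, which numpy can vectorise
# in a single call.
_gt_demo = np.zeros((14, 3))
_pred_demo = np.full((14, 3), 0.01)                                  # 1 cm off per axis
_err_cm_demo = np.linalg.norm(_gt_demo - _pred_demo, axis=1) * 100   # ~1.73 per joint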
import numpy as np
import matplotlib.pyplot as plt
def RandomPattern(xsize, ysize):
X, Y = np.meshgrid(np.arange(ysize), np.arange(xsize))
yco =X.flatten().astype('int')
np.random.shuffle(yco)
xco =Y.flatten().astype('int')
np.random.shuffle(xco)
return xco, yco
def NormalPattern(xsize, ysize):
X, Y = np.meshgrid(np.arange(ysize), np.arange(xsize))
yco =X.flatten().astype('int')
xco =Y.flatten().astype('int')
return xco, yco
def SnakeScan(xsize, ysize):
X, Y = np.meshgrid(np.arange(ysize), np.arange(xsize))
X[1::2, :] = X[1::2, ::-1]
yco =X.flatten().astype('int')
xco =Y.flatten().astype('int')
return xco, yco
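# Hedged usage sketch: on a 2 x 3 grid, SnakeScan visits row 0 left-to-right
# and row 1 right-to-left, so consecutive samples stay spatially adjacent.
_snake_x, _snake_y = SnakeScan(2, 3)
# _snake_x -> [0, 0, 0, 1, 1, 1]
# _snake_y -> [0, 1, 2, 2, 1, 0]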
def HilberPattern(xsize, ysize):
order = int(np.max(np.ceil([np.log2(xsize), np.log2(ysize)])))
"""
Tests for the algorithms.spectral submodule
"""
import numpy as np
import scipy
from scipy import fftpack
import numpy.testing as npt
import numpy.testing.decorators as dec
import nose.tools as nt
import nitime.algorithms as tsa
import nitime.utils as utils
def test_get_spectra():
"""
Testing spectral estimation
"""
methods = (None,
{"this_method": 'welch', "NFFT": 256, "Fs": 2 * np.pi},
{"this_method": 'welch', "NFFT": 1024, "Fs": 2 * np.pi})
for method in methods:
avg_pwr1 = []
avg_pwr2 = []
est_pwr1 = []
est_pwr2 = []
arsig1, _, _ = utils.ar_generator(N=2 ** 16) # needs to be that long
# for the answers to converge
arsig2, _, _ = utils.ar_generator(N=2 ** 16)
avg_pwr1.append((arsig1 ** 2).mean())
avg_pwr2.append((arsig2 ** 2).mean())
tseries = np.vstack([arsig1, arsig2])
f, c = tsa.get_spectra(tseries, method=method)
# \sum_{\omega} psd d\omega:
est_pwr1.append(np.sum(c[0, 0]) * (f[1] - f[0]))
est_pwr2.append(np.sum(c[1, 1]) * (f[1] - f[0]))
# Get it right within the order of magnitude:
npt.assert_array_almost_equal(est_pwr1, avg_pwr1, decimal=-1)
npt.assert_array_almost_equal(est_pwr2, avg_pwr2, decimal=-1)
def test_get_spectra_complex():
"""
Testing spectral estimation
"""
methods = (None,
{"this_method": 'welch', "NFFT": 256, "Fs": 2 * np.pi},
{"this_method": 'welch', "NFFT": 1024, "Fs": 2 * np.pi})
for method in methods:
avg_pwr1 = []
avg_pwr2 = []
est_pwr1 = []
est_pwr2 = []
# Make complex signals:
r, _, _ = utils.ar_generator(N=2 ** 16) # It needs to be that long for
# the answers to converge
c, _, _ = utils.ar_generator(N=2 ** 16)
arsig1 = r + c * scipy.sqrt(-1)
r, _, _ = utils.ar_generator(N=2 ** 16)
c, _, _ = utils.ar_generator(N=2 ** 16)
arsig2 = r + c * scipy.sqrt(-1)
avg_pwr1.append((arsig1 * arsig1.conjugate()).mean())
avg_pwr2.append((arsig2 * arsig2.conjugate()).mean())
tseries = np.vstack([arsig1, arsig2])
f, c = tsa.get_spectra(tseries, method=method)
# \sum_{\omega} psd d\omega:
est_pwr1.append(np.sum(c[0, 0]) * (f[1] - f[0]))
est_pwr2.append(np.sum(c[1, 1]) * (f[1] - f[0]))
# Get it right within the order of magnitude:
npt.assert_array_almost_equal(est_pwr1, avg_pwr1, decimal=-1)
npt.assert_array_almost_equal(est_pwr2, avg_pwr2, decimal=-1)
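def _parseval_sanity_check():
    """
    Hedged aside, not part of the nitime test suite: the power bookkeeping
    used above (summing the PSD times the frequency step to approximate the
    mean signal power) can be checked directly with a plain-numpy periodogram
    of white noise, where Parseval's theorem makes the relation exact.
    """
    x = np.random.randn(2 ** 14)
    nfft = x.shape[0]
    psd = np.abs(fftpack.fft(x)) ** 2 / nfft
    freqs = np.linspace(0, 2 * np.pi, nfft, endpoint=False)
    df = freqs[1] - freqs[0]
    est_pwr = np.sum(psd) * df / (2 * np.pi)
    npt.assert_array_almost_equal(est_pwr, (x ** 2).mean(), decimal=6)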
def test_get_spectra_unknown_method():
"""
Test that providing an unknown method to get_spectra rasies a ValueError
"""
tseries = np.array([[1, 2, 3], [4, 5, 6]])
npt.assert_raises(ValueError,
tsa.get_spectra, tseries, method=dict(this_method='foo'))
def test_periodogram():
"""Test some of the inputs to periodogram """
arsig, _, _ = utils.ar_generator(N=1024)
Sk = fftpack.fft(arsig)
f1, c1 = tsa.periodogram(arsig)
f2, c2 = tsa.periodogram(arsig, Sk=Sk)
npt.assert_equal(c1, c2)
# Check that providing a complex signal does the right thing
# (i.e. two-sided spectrum):
N = 1024
r, _, _ = utils.ar_generator(N=N)
c, _, _ = utils.ar_generator(N=N)
arsig = r + c * scipy.sqrt(-1)
f, c = tsa.periodogram(arsig)
npt.assert_equal(f.shape[0], N)
import re
import os
import math
import logging
logger = logging.getLogger(__name__)
import numpy as np
from scipy.ndimage.filters import median_filter
import scipy.interpolate as intp
import scipy.signal as sg
import scipy.optimize as opt
import astropy.io.fits as fits
from astropy.table import Table
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmap
import matplotlib.ticker as tck
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.figure import Figure
from ..echelle.trace import ApertureSet
from ..utils.onedarray import get_local_minima, get_edge_bin
from ..utils.regression import get_clip_mean
from ..utils.regression2d import polyfit2d, polyval2d
from .imageproc import table_to_array, array_to_table
def find_background2(data, mask, channels, apertureset_lst,
method='poly', scale='linear', scan_step=200,
xorder=2, yorder=2, maxiter=5, upper_clip=3, lower_clip=3,
extend=True, display=True, fig_file=None, reg_file=None):
"""Subtract the background for an input FITS image.
Args:
data (:class:`numpy.ndarray`): Input data image.
mask (:class:`numpy.ndarray`): Mask of input data image.
channels (list): List of channels as strings.
apertureset_lst (dict): Dict of :class:`~edrs.echelle.trace.ApertureSet`
at different channels.
method (str): Method of finding background light.
scale (str): Scale of the 2-D polynomial fitting. If 'log', fit the
polynomial in the logrithm scale.
scan_step (int): Steps of scan in pixels.
xorder (int): Order of 2D polynomial along the main dispersion
direction (only applicable if **method** = "poly").
yorder (int): Order of 2D polynomial along the cross-dispersion
direction (only applicable if **method** = "poly").
maxiter (int): Maximum number of iteration of 2D polynomial fitting
(only applicable if **method** = "poly").
upper_clip (float): Upper sigma clipping threshold (only applicable if
**method** = "poly").
lower_clip (float): Lower sigma clipping threshold (only applicable if
**method** = "poly").
extend (bool): Extend the grid to the whole CCD image if *True*.
display (bool): Display figures on the screen if *True*.
fig_file (str): Name of the output figure. No image file generated if
*None*.
reg_file (string): Name of the output DS9 region file. No file generated
if *None*.
Returns:
:class:`numpy.ndarray`: Image of background light.
"""
plot = (display or fig_file is not None)
plot_paper_fig = False
h, w = data.shape
meddata = median_filter(data, size=(3,3), mode='reflect')
xnodes, ynodes, znodes = [], [], []
# find the minimum and maximum aperture number
min_aper = min([min(apertureset_lst[ch].keys()) for ch in channels])
max_aper = max([max(apertureset_lst[ch].keys()) for ch in channels])
# generate the horizontal scan list
x_lst = np.arange(0, w-1, scan_step)
# add the last column to the list
if x_lst[-1] != w-1:
x_lst = np.append(x_lst, w-1)
# find intra-order pixels
_message_lst = ['Column, N (between), N (extend), N (removed), N (total)']
for x in x_lst:
xsection = meddata[:,x]
inter_aper = []
prev_newy = None
# loop for every aperture
for aper in range(min_aper, max_aper+1):
# for a new aperture, initialize the count of channel
count_channel = 0
for ich, channel in enumerate(channels):
# check every channel in this frame
if aper in apertureset_lst[channel]:
count_channel += 1
this_newy = apertureset_lst[channel][aper].position(x)
if count_channel == 1 and prev_newy is not None:
# this channel is the first channel in this aperture and
# there is a previous y
mid_newy = (prev_newy + this_newy)//2
i1 = min(h-1, max(0, int(prev_newy)))
i2 = min(h-1, max(0, int(this_newy)))
#if len(inter_aper)==0 or \
# abs(mid_newy - inter_aper[-1])>scan_step*0.7:
# if i2-i1>0:
if i2-i1>0:
mid_newy = i1 + xsection[i1:i2].argmin()
inter_aper.append(mid_newy)
prev_newy = this_newy
inter_aper = np.array(inter_aper)
# count how many nodes found between detected orders
n_nodes_inter = inter_aper.size
# if extend = True, expand the grid with polynomial fitting to
# cover the whole CCD area
n_nodes_extend = 0
if extend:
if x==2304:
_fig = plt.figure(dpi=150)
_ax = _fig.gca()
for _x in inter_aper:
_ax.axvline(x=_x,color='g', ls='--',lw=0.5, alpha=0.6)
_ax.plot(data[:, x],'b-',lw=0.5)
_fig2 = plt.figure(dpi=150)
_ax2 = _fig2.gca()
print(inter_aper)
coeff = np.polyfit(np.arange(inter_aper.size), inter_aper, deg=3)
if x== 2304:
_ax2.plot(np.arange(inter_aper.size), inter_aper,'go', alpha=0.6)
_newx = np.arange(0, inter_aper.size, 0.1)
_ax2.plot(_newx, np.polyval(coeff, _newx),'g-')
# find the points after the end of inter_aper
ii = inter_aper.size-1
new_y = inter_aper[-1]
while(new_y<h-1):
ii += 1
new_y = int(np.polyval(coeff,ii))
inter_aper = np.append(inter_aper,new_y)
n_nodes_extend += 1
# find the points before the beginning of order_mid
ii = 0
new_y = inter_aper[0]
while(new_y>0):
ii -= 1
new_y = int(np.polyval(coeff,ii))
inter_aper = np.insert(inter_aper,0,new_y)
n_nodes_extend += 1
if x==2304:
#for _x in np.polyval(coeff, np.arange(0,25)):
# _ax.axvline(x=_x, color='r',ls='--',lw=0.5)
#_newx = np.arange(0, 25)
#_ax2.plot(_newx, np.polyval(coeff, _newx), 'ro', alpha=0.6)
plt.show()
# remove those points with y<0 or y>h-1
m1 = inter_aper > 0
m2 = inter_aper < h-1
inter_aper = inter_aper[np.nonzero(m1*m2)[0]]
# filter those masked pixels
m = mask[inter_aper, x]==0
inter_aper = inter_aper[m]
# remove backward points
tmp = np.insert(inter_aper,0,0.)
m = np.diff(tmp)>0
inter_aper = inter_aper[np.nonzero(m)[0]]
# count how many nodes removed
n_nodes_removed = (n_nodes_inter + n_nodes_extend) - inter_aper.size
# pack infos into message list
_message_lst.append('| %6d | %6d | %6d | %6d | %6d |'%(
x, n_nodes_inter, n_nodes_extend, n_nodes_removed, inter_aper.size))
# pack all nodes
for y in inter_aper:
xnodes.append(x)
ynodes.append(y)
znodes.append(meddata[y,x])
# extrapolate
#if extrapolate:
if False:
_y0, _y1 = inter_aper[0], inter_aper[1]
newy = _y0 - (_y1 - _y0)
newz = meddata[_y0, x] - (meddata[_y1, x] - meddata[_y0, x])
xnodes.append(x)
ynodes.append(newy)
znodes.append(newz)
_y1, _y2 = inter_aper[-2], inter_aper[-1]
newy = _y2 + (_y2 - _y1)
newz = meddata[_y2, x] + (meddata[_y2, x] - meddata[_y1, x])
xnodes.append(x)
ynodes.append(newy)
znodes.append(newz)
# convert to numpy array
xnodes = np.array(xnodes)
ynodes = np.array(ynodes)
znodes = np.array(znodes)
# write to running log
_message_lst.append('Total: %4d'%xnodes.size)
logger.info((os.linesep+' ').join(_message_lst))
# if scale='log', filter the negative values
if scale=='log':
pmask = znodes > 0
znodes[~pmask] = znodes[pmask].min()
znodes = np.log10(znodes)
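# --- Hedged sketch (not the ..utils.regression2d implementation) ---
# The method='poly' path of this function (see the polyfit2d/polyval2d imports
# above) fits a 2-D polynomial to (xnodes, ynodes, znodes) with iterative sigma
# clipping controlled by xorder, yorder, maxiter, upper_clip and lower_clip.
# A minimal plain-numpy version of that idea looks like this:
from numpy.polynomial import polynomial as _P

def _clipped_poly2d_fit(xn, yn, zn, xorder=2, yorder=2,
                        maxiter=5, upper_clip=3., lower_clip=3.):
    mask = np.ones(zn.shape, dtype=bool)
    coeff = None
    for _ in range(maxiter):
        V = _P.polyvander2d(xn[mask], yn[mask], [xorder, yorder])
        coeff = np.linalg.lstsq(V, zn[mask], rcond=None)[0]
        resid = zn - _P.polyvander2d(xn, yn, [xorder, yorder]) @ coeff
        sigma = resid[mask].std()
        newmask = (resid < upper_clip * sigma) & (resid > -lower_clip * sigma)
        if (newmask == mask).all():
            break
        mask = newmask
    return coeff, mask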
import os, pdb
import sys
import time
import math
import cv2
import numpy as np
from matplotlib import pyplot as plt
import tensorflow as tf
#######################################################
# Auxiliary matrices used to solve DLT
Aux_M1 = np.array([
[ 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ],
[ 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ],
[ 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ],
[ 0 , 0 , 1 , 0 , 0 , 0 , 0 , 0 ],
[ 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ],
[ 0 , 0 , 0 , 0 , 1 , 0 , 0 , 0 ],
[ 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ],
[ 0 , 0 , 0 , 0 , 0 , 0 , 1 , 0 ]], dtype=np.float64)
Aux_M2 = np.array([
[ 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ],
[ 0 , 1 , 0 , 0 , 0 , 0 , 0 , 0 ],
[ 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ],
[ 0 , 0 , 0 , 1 , 0 , 0 , 0 , 0 ],
[ 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ],
[ 0 , 0 , 0 , 0 , 0 , 1 , 0 , 0 ],
[ 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ],
[ 0 , 0 , 0 , 0 , 0 , 0 , 0 , 1 ]], dtype=np.float64)
Aux_M3 = np.array([
[0],
[1],
[0],
[1],
[0],
[1],
[0],
[1]], dtype=np.float64)
Aux_M4 = np.array([
[-1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ],
[0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ],
[0 , 0 ,-1 , 0 , 0 , 0 , 0 , 0 ],
[0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ],
[0 , 0 , 0 , 0 ,-1 , 0 , 0 , 0 ],
[0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ],
[0 , 0 , 0 , 0 , 0 , 0 ,-1 , 0 ],
[0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ]], dtype=np.float64)
Aux_M5 = np.array([
[0 ,-1 , 0 , 0 , 0 , 0 , 0 , 0 ],
[0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ],
[0 , 0 , 0 ,-1 , 0 , 0 , 0 , 0 ],
[0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ],
[0 , 0 , 0 , 0 , 0 ,-1 , 0 , 0 ],
[0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ],
[0 , 0 , 0 , 0 , 0 , 0 , 0 ,-1 ],
[0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ]], dtype=np.float64)
Aux_M6 = np.array([
[-1 ],
[ 0 ],
[-1 ],
[ 0 ],
[-1 ],
[ 0 ],
[-1 ],
[ 0 ]], dtype=np.float64)
Aux_M71 = np.array([
[0 , 1 , 0 , 0 , 0 , 0 , 0 , 0 ],
[1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ],
[0 , 0 , 0 , 1 , 0 , 0 , 0 , 0 ],
[0 , 0 , 1 , 0 , 0 , 0 , 0 , 0 ],
[0 , 0 , 0 , 0 , 0 , 1 , 0 , 0 ],
[0 , 0 , 0 , 0 , 1 , 0 , 0 , 0 ],
[0 , 0 , 0 , 0 , 0 , 0 , 0 , 1 ],
[0 , 0 , 0 , 0 , 0 , 0 , 1 , 0 ]], dtype=np.float64)
Aux_M72 = np.array([
[1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ],
[-1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ],
[0 , 0 , 1 , 0 , 0 , 0 , 0 , 0 ],
[0 , 0 ,-1 , 0 , 0 , 0 , 0 , 0 ],
[0 , 0 , 0 , 0 , 1 , 0 , 0 , 0 ],
[0 , 0 , 0 , 0 ,-1 , 0 , 0 , 0 ],
[0 , 0 , 0 , 0 , 0 , 0 , 1 , 0 ],
[0 , 0 , 0 , 0 , 0 , 0 ,-1 , 0 ]], dtype=np.float64)
Aux_M8 = np.array([
[0 , 1 , 0 , 0 , 0 , 0 , 0 , 0 ],
[0 ,-1 , 0 , 0 , 0 , 0 , 0 , 0 ],
[0 , 0 , 0 , 1 , 0 , 0 , 0 , 0 ],
[0 , 0 , 0 ,-1 , 0 , 0 , 0 , 0 ],
[0 , 0 , 0 , 0 , 0 , 1 , 0 , 0 ],
[0 , 0 , 0 , 0 , 0 ,-1 , 0 , 0 ],
[0 , 0 , 0 , 0 , 0 , 0 , 0 , 1 ],
[0 , 0 , 0 , 0 , 0 , 0 , 0 ,-1 ]], dtype=np.float64)
Aux_Mb = np.array([
[0 ,-1 , 0 , 0 , 0 , 0 , 0 , 0 ],
[1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ],
[0 , 0 , 0 , -1 , 0 , 0 , 0 , 0 ],
[0 , 0 , 1 , 0 , 0 , 0 , 0 , 0 ],
[0 , 0 , 0 , 0 , 0 ,-1 , 0 , 0 ],
[0 , 0 , 0 , 0 , 1 , 0 , 0 , 0 ],
[0 , 0 , 0 , 0 , 0 , 0 , 0 ,-1 ],
[0 , 0 , 0 , 0 , 0 , 0 , 1 , 0 ]], dtype=np.float64)
########################################################
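# Hedged aside (plain-numpy sketch): the auxiliary matrices above are typically
# used to assemble the DLT linear system with batched matmuls; for a single set
# of 4 correspondences the same homography can be recovered with an SVD.
def _dlt_homography(src, dst):
    A = []
    for (x, y), (u, v) in zip(src, dst):
        A.append([x, y, 1, 0, 0, 0, -u * x, -u * y, -u])
        A.append([0, 0, 0, x, y, 1, -v * x, -v * y, -v])
    _, _, Vt = np.linalg.svd(np.asarray(A, dtype=np.float64))
    H = Vt[-1].reshape(3, 3)
    return H / H[2, 2]
# e.g. a pure translation by (2, 3):
# _dlt_homography([(0, 0), (1, 0), (1, 1), (0, 1)],
#                 [(2, 3), (3, 3), (3, 4), (2, 4)])
# ~ [[1, 0, 2], [0, 1, 3], [0, 0, 1]]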
_, term_width = os.popen('stty size', 'r').read().split()
term_width = int(term_width)
TOTAL_BAR_LENGTH = 65.
last_time = time.time()
begin_time = last_time
def progress_bar(current, total, msg=None):
global last_time, begin_time
if current == 0:
begin_time = time.time() # Reset for new bar.
cur_len = int(TOTAL_BAR_LENGTH*current/total)
rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1
sys.stdout.write(' [')
for i in range(cur_len):
sys.stdout.write('=')
sys.stdout.write('>')
for i in range(rest_len):
sys.stdout.write('.')
sys.stdout.write(']')
cur_time = time.time()
step_time = cur_time - last_time
last_time = cur_time
tot_time = cur_time - begin_time
L = []
L.append(' Step: %s' % format_time(step_time))
L.append(' | Tot: %s' % format_time(tot_time))
if msg:
L.append(' | ' + msg)
msg = ''.join(L)
sys.stdout.write(msg)
for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3):
sys.stdout.write(' ')
# Go back to the center of the bar.
for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2):
sys.stdout.write('\b')
sys.stdout.write(' %d/%d ' % (current+1, total))
if current < total-1:
sys.stdout.write('\r')
else:
sys.stdout.write('\n')
sys.stdout.flush()
return tot_time
def format_time(seconds):
days = int(seconds / 3600/24)
seconds = seconds - days*3600*24
hours = int(seconds / 3600)
seconds = seconds - hours*3600
minutes = int(seconds / 60)
seconds = seconds - minutes*60
secondsf = int(seconds)
seconds = seconds - secondsf
millis = int(seconds*1000)
f = ''
i = 1
if days > 0:
f += str(days) + 'D'
i += 1
if hours > 0 and i <= 2:
f += str(hours) + 'h'
i += 1
if minutes > 0 and i <= 2:
f += str(minutes) + 'm'
i += 1
if secondsf > 0 and i <= 2:
f += str(secondsf) + 's'
i += 1
if millis > 0 and i <= 2:
f += str(millis) + 'ms'
i += 1
if f == '':
f = '0ms'
return f
def save_correspondences_img(img1, img2, corr1, corr2, pred_corr2, results_dir, img_name):
""" Save pair of images with their correspondences into a single image. Used for report"""
# Draw prediction
copy_img2 = img2.copy()
copy_img1 = img1.copy()
cv2.polylines(copy_img2, np.int32([pred_corr2]), 1, (5, 225, 225),3)
point_color = (0,255,255)
line_color_set = [(255,102,255), (51,153,255), (102,255,255), (255,255,0), (102, 102, 244), (150, 202, 178), (153,240,142), (102,0,51), (51,51,0) ]
# Draw 4 points (ground truth)
full_stack_images = draw_matches(copy_img1, corr1, copy_img2 , corr2, '/tmp/tmp.jpg', color_set = line_color_set, show=False)
# Save image
visual_file_name = os.path.join(results_dir, img_name)
#cv2.putText(full_stack_images, 'RMSE %.2f'%h_loss,(800, 100), cv2.FONT_HERSHEY_SIMPLEX, 1,(0,0,255),2)
cv2.imwrite(visual_file_name, full_stack_images)
print('Wrote file %s' % visual_file_name)
def draw_matches(img1, kp1, img2, kp2, output_img_file=None, color_set=None, show=True):
"""Draws lines between matching keypoints of two images without matches.
This is a replacement for cv2.drawMatches
Places the images side by side in a new image and draws circles
around each keypoint, with line segments connecting matching pairs.
You can tweak the r, thickness, and figsize values as needed.
Args:
img1: An openCV image ndarray in a grayscale or color format.
kp1: A list of cv2.KeyPoint objects for img1.
img2: An openCV image ndarray of the same format and with the same
element type as img1.
kp2: A list of cv2.KeyPoint objects for img2.
color_set: The colors of the circles and connecting lines drawn on the images.
A 3-tuple for color images, a scalar for grayscale images. If None, these
values are randomly generated.
"""
# We're drawing them side by side. Get dimensions accordingly.
# Handle both color and grayscale images.
if len(img1.shape) == 3:
new_shape = (max(img1.shape[0], img2.shape[0]), img1.shape[1]+img2.shape[1], img1.shape[2])
elif len(img1.shape) == 2:
new_shape = (max(img1.shape[0], img2.shape[0]), img1.shape[1]+img2.shape[1])
new_img = np.zeros(new_shape, type(img1.flat[0]))
# Place images onto the new image.
new_img[0:img1.shape[0],0:img1.shape[1]] = img1
new_img[0:img2.shape[0],img1.shape[1]:img1.shape[1]+img2.shape[1]] = img2
# Draw lines between points
kp2_on_stack_image = (kp2 + np.array([img1.shape[1], 0])).astype(np.int32)
kp1 = kp1.astype(np.int32)
# kp2_on_stack_image[0:4,0:2]
line_color1 = (2, 10, 240)
line_color2 = (2, 10, 240)
# We want to make connections between points to make a square grid so first count the number of rows in the square grid.
grid_num_rows = int(np.sqrt(kp1.shape[0]))
if output_img_file is not None and grid_num_rows >= 3:
for i in range(grid_num_rows):
cv2.line(new_img, tuple(kp1[i*grid_num_rows]), tuple(kp1[i*grid_num_rows + (grid_num_rows-1)]), line_color1, 1, cv2.LINE_AA)
cv2.line(new_img, tuple(kp1[i]), tuple(kp1[i + (grid_num_rows-1)*grid_num_rows]), line_color1, 1, cv2.LINE_AA)
cv2.line(new_img, tuple(kp2_on_stack_image[i*grid_num_rows]), tuple(kp2_on_stack_image[i*grid_num_rows + (grid_num_rows-1)]), line_color2, 1, cv2.LINE_AA)
cv2.line(new_img, tuple(kp2_on_stack_image[i]), tuple(kp2_on_stack_image[i + (grid_num_rows-1)*grid_num_rows]), line_color2, 1, cv2.LINE_AA)
if output_img_file is not None and grid_num_rows == 2:
cv2.polylines(new_img, np.int32([kp2_on_stack_image]), 1, line_color2, 3)
cv2.polylines(new_img, np.int32([kp1]), 1, line_color1, 3)
import numpy as np
import pandas as pd
from scipy.stats import expon, uniform
import sys
sys.path.append('../../well_mixed')
from well_mixed_death_clock import (WellMixedSimulator,
WellMixedSimulationData, exponential_ccm, uniform_ccm,
normalised_g2_death_signal)
# Cell cycle parameters
tG1_fun = lambda beta, tG: beta * tG
tG2_fun = lambda beta, tG: (1 - beta) * tG
# normalised G2 death signal
f = normalised_g2_death_signal
coef = 1
Tdeath_fun = lambda eta, tG: eta * coef * tG
# Simulation parameters
tstart = 0
tend = np.inf
min_cell_count = 10
max_cell_count = 1000
num_iter = 100
initial_cell_count = 100
num_beta = 10
# Arguments to f
f_args = (coef,)
# Helper function
def run_g1_proportion_range_exponential_simulation(tG, eta, beta, seed=None):
# We create a random_state seeded with seed + 1 to sample the initial
# conditions in order to avoid correlations with the simulation.
if not seed is None:
random_state = np.random.RandomState(seed + 1)
else:
random_state = None
tG1 = tG1_fun(beta, tG)
tG2 = tG2_fun(beta, tG)
Tdeath = Tdeath_fun(eta, tG)
ccm = exponential_ccm
ccm_args = (tG1,)
# Initialise simulator
simulator = WellMixedSimulator(f, ccm, Tdeath, tG2, tstart, tend,
f_args, ccm_args, max_cell_count, min_cell_count)
# Generate initial conditions
tau_0 = np.zeros(initial_cell_count)
tbirth_0 = uniform.rvs(loc= - (tG1 + tG2), scale = tG1 + tG2,
size=initial_cell_count, random_state=random_state)
clone_0 = np.arange(initial_cell_count)
# Sample G1 durations until birth invariant is satisfied.
tG1_0 = []
for tbirth in tbirth_0:
candidate_tG1 = - np.inf
while not - tbirth - tG2 < candidate_tG1:
candidate_tG1 = expon.rvs(scale=tG1, random_state=random_state)
tG1_0.append(candidate_tG1)
tG1_0 = np.array(tG1_0)
import tensorflow as tf
import numpy as np
import time
from copy import deepcopy
from tensorflow.python.ops.parallel_for import gradients
from tensorflow.contrib import rnn
class ALPaCA:
def __init__(self, config, sess, graph=None, preprocess=None, f_nom=None):
self.config = deepcopy(config)
self.lr = config['lr']
self.formulation = config['formulation']
self.x_dim = config['x_dim']
self.y_dim = config['y_dim']
self.phi_dim = config['nn_layers'][-1] # Last layer
self.sigma_eps = self.config['sigma_eps']
self.updates_so_far = 0
self.sess = sess
self.graph = graph if graph is not None else tf.get_default_graph()
# y = K^T phi( preprocess(x) ) + f_nom(x)
self.preprocess = preprocess
self.f_nom = f_nom
arch_string = [str(config_layer) for config_layer in self.config['nn_layers']]
arch_string = '-'.join(arch_string)
self.model_name = self.formulation+'_'+str(time.time())+'_action='+self.config['action']+'_basis='+self.config['basis']+ \
'_nn-layers='+arch_string+'_activation='+self.config['activation']+'_lr='+str(self.lr)+ \
'_sigma-eps='+str(self.sigma_eps)+'_batch-size='+str(self.config['meta_batch_size'])+ \
'_num-input-points='+str(self.config['num_input_points'])+ \
'_data-horizon='+str(self.config['data_horizon'])+'_test-horizon='+str(self.config['test_horizon'])+'_row-length='+str(self.config['row_length'])
def construct_model(self):
with self.graph.as_default():
last_layer = self.config['nn_layers'][-1]
if isinstance(self.sigma_eps, list):
self.SigEps = tf.diag( np.array(self.sigma_eps) )
else:
self.SigEps = self.sigma_eps*tf.eye(self.y_dim)
self.SigEps = tf.reshape(self.SigEps, (1,1,self.y_dim,self.y_dim))
# try making it learnable
# self.SigEps = tf.get_variable('sigma_eps', initializer=self.SigEps )
# Prior Parameters of last layer
self.K = tf.get_variable('K_init',shape=[last_layer,self.y_dim]) #\bar{K}_0
self.L_asym = tf.get_variable('L_asym',shape=[last_layer,last_layer]) # cholesky decomp of \Lambda_0
self.L = self.L_asym @ tf.transpose(self.L_asym) # \Lambda_0
# x: query points (M, N_test, x_dim)
# y: target points (M, N_test, y_dim) ( what K^T phi(x) should approximate )
self.x = tf.placeholder(tf.float32, shape=[None,None,self.x_dim], name="x")
self.y = tf.placeholder(tf.float32, shape=[None,None,self.y_dim], name="y")
# Points used to compute posterior using BLR
# context_x: x points available for context (M, N_context, x_dim)
# context_y: y points available for context (M, N_context, y_dim)
self.context_x = tf.placeholder(tf.float32, shape=[None,None,self.x_dim], name="cx")
self.context_y = tf.placeholder(tf.float32, shape=[None,None,self.y_dim], name="cy")
# num_updates: number of context points from context_x,y to use when computing posterior. size (M,)
self.num_models = tf.shape(self.context_x)[0]
self.max_num_context = tf.shape(self.context_x)[1]*tf.ones((self.num_models,), dtype=tf.int32)
self.num_context = tf.placeholder_with_default(self.max_num_context, shape=(None,))
# Map input to feature space
with tf.variable_scope('phi',reuse=None):
# self.phi is (M, N_test, phi_dim)
if self.config['basis'] == 'lstm':
self.phi = tf.map_fn( lambda x: self.basis_lstm(x),
elems=self.x,
dtype=tf.float32)
else:
self.phi = tf.map_fn( lambda x: self.basis(x),
elems=self.x,
dtype=tf.float32)
# Map context input to feature space
with tf.variable_scope('phi', reuse=True):
# self.context_phi is (M, N_context, phi_dim)
if self.config['basis'] == 'lstm':
self.context_phi = tf.map_fn( lambda x: self.basis_lstm(x),
elems=self.context_x,
dtype=tf.float32)
else:
self.context_phi = tf.map_fn( lambda x: self.basis(x),
elems=self.context_x,
dtype=tf.float32)
# Evaluate f_nom if given, else use 0
self.f_nom_cx = tf.zeros_like(self.context_y)
self.f_nom_x = 0 #tf.zeros_like(self.y)
if self.f_nom is not None:
self.f_nom_cx = self.f_nom(self.context_x)
self.f_nom_x = self.f_nom(self.x)
# Subtract f_nom from context points before BLR
self.context_y_blr = self.context_y - self.f_nom_cx
# Compute posterior weights from context data
with tf.variable_scope('blr', reuse=None):
# posterior_K is (M, phi_dim, y_dim), posterior_L_inv is (M, phi_dim, phi_dim)
self.posterior_K, self.posterior_L_inv = tf.map_fn( lambda x: self.batch_blr(*x),
elems=(self.context_phi, self.context_y_blr, self.num_context),
dtype=(tf.float32, tf.float32) )
# Compute posterior predictive distribution, and evaluate targets self.y under this distribution
self.mu_pred, self.Sig_pred, self.predictive_nll = self.compute_pred_and_nll()
# The loss function is expectation of this predictive nll.
self.total_loss = tf.reduce_mean(self.predictive_nll)
tf.summary.scalar('total_loss', self.total_loss)
self.optimizer = tf.train.AdamOptimizer(self.lr)
global_step = tf.Variable(0, trainable=False, name='global_step')
self.train_op = self.optimizer.minimize(self.total_loss,global_step=global_step)
self.train_writer = tf.summary.FileWriter('summaries/train_'+self.model_name, self.sess.graph, flush_secs=10)
self.val_writer = tf.summary.FileWriter('summaries/val_'+self.model_name, self.sess.graph, flush_secs=10)
self.merged = tf.summary.merge_all()
self.saver = tf.train.Saver()
self.sess.run(tf.global_variables_initializer())
# ---- TF operations ---- #
def basis(self,x,name='basis'):
layer_sizes = self.config['nn_layers']
activations = {
'relu': tf.nn.relu,
'tanh': tf.nn.tanh,
'sigmoid': tf.nn.sigmoid
}
activation = activations[ self.config['activation'] ]
if self.preprocess is None:
inp = x
else:
inp = self.preprocess(x)
with tf.variable_scope(name,reuse=tf.AUTO_REUSE):
for units in layer_sizes:
inp = tf.layers.dense(inputs=inp, units=units,activation=activation)
return inp
def basis_lstm(self, x, name='basis_lstm'):
layer_sizes = self.config['nn_layers']
activations = {
'relu': tf.nn.relu,
'tanh': tf.nn.tanh,
'sigmoid': tf.nn.sigmoid
}
activation = activations[ self.config['activation'] ]
x = tf.expand_dims(x, axis=0)
with tf.variable_scope(name,reuse=tf.AUTO_REUSE):
cells = [rnn.LSTMCell(num_units=layer, activation=activation) for layer in layer_sizes]
stacked_cell = rnn.MultiRNNCell(cells)
outputs, state = tf.nn.dynamic_rnn(stacked_cell, x, dtype=tf.float32)
# outputs, state = tf.nn.dynamic_rnn(cell, x, sequence_length=tf.expand_dims(seq_len, axis=0), dtype=tf.float32)
return outputs[0,:,:]
def batch_blr(self,X,Y,num):
X = X[:num,:]
Y = Y[:num,:]
Ln_inv = tf.matrix_inverse(tf.transpose(X) @ X + self.L)
Kn = Ln_inv @ (tf.transpose(X) @ Y + self.L @ self.K)
return tf.cond( num > 0, lambda : (Kn,Ln_inv), lambda : (self.K, tf.linalg.inv(self.L)) )
def compute_pred_and_nll(self):
"""
Uses self.posterior_K and self.posterior_L_inv and self.f_nom_x to generate the posterior predictive.
Returns:
mu_pred = posterior predictive mean at query points self.x
shape (M, T, y_dim)
Sig_pred = posterior predictive variance at query points self.x
shape (M, T, y_dim, y_dim)
predictive_nll = negative log likelihood of self.y under the posterior predictive density
shape (M, T)
"""
mu_pred = batch_matmul(tf.matrix_transpose(self.posterior_K), self.phi) + self.f_nom_x
spread_fac = 1 + batch_quadform(self.posterior_L_inv, self.phi)
Sig_pred = tf.expand_dims(spread_fac, axis=-1)*tf.reshape(self.SigEps, (1,1,self.y_dim,self.y_dim))
# Score self.y under predictive distribution to obtain loss
with tf.variable_scope('loss', reuse=None):
logdet = self.y_dim*tf.log(spread_fac) + tf.linalg.logdet(self.SigEps)
Sig_pred_inv = tf.linalg.inv(Sig_pred)
quadf = batch_quadform(Sig_pred_inv, (self.y - mu_pred))
predictive_nll = tf.squeeze(logdet + quadf, axis=-1)
# log stuff for summaries
self.rmse_1 = tf.reduce_mean( tf.sqrt( tf.reduce_sum( tf.square(mu_pred - self.y)[:,0,:], axis=-1 ) ) )
self.mpv_1 = tf.reduce_mean( tf.matrix_determinant( Sig_pred[:,0,:,:]) )
tf.summary.scalar('RMSE_1step', self.rmse_1)
tf.summary.scalar('MPV_1step', self.mpv_1)
return mu_pred, Sig_pred, predictive_nll
# ---- Train and Test functions ------ #
def train(self, dataset, dataset_val, num_train_updates):
batch_size = self.config['meta_batch_size']
horizon = self.config['data_horizon']
test_horizon = self.config['test_horizon']
# minimize loss
for i in range(num_train_updates):
x, y = dataset.sample(n_funcs=batch_size, n_samples=horizon+test_horizon)
feed_dict = {
self.context_y: y[:,:horizon,:],
self.context_x: x[:,:horizon,:],
self.y: y[:,horizon:,:],
self.x: x[:,horizon:,:],
self.num_context: np.random.randint(horizon+1,size=batch_size)
}
summary, loss, _ = self.sess.run([self.merged,self.total_loss,self.train_op],feed_dict)
x_val, y_val = dataset_val.sample(n_funcs=batch_size, n_samples=horizon+test_horizon)
feed_dict_val = {
self.context_y: y_val[:,:horizon,:],
self.context_x: x_val[:,:horizon,:],
self.y: y_val[:,horizon:,:],
self.x: x_val[:,horizon:,:],
self.num_context: np.random.randint(horizon+1,size=batch_size)
}
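# --- Hedged aside (plain-numpy illustration, not part of the ALPaCA class) ---
# The per-task posterior update performed by batch_blr(), and the predictive
# spread factor used in compute_pred_and_nll(), written out with numpy;
# all shapes here are made up for the example.
#   Lambda_n = Phi^T Phi + Lambda_0
#   K_n      = Lambda_n^{-1} (Phi^T Y + Lambda_0 K_0)
#   spread   = 1 + phi*^T Lambda_n^{-1} phi*
def _blr_posterior_demo(phi_dim=32, y_dim=2, n_ctx=10, seed=0):
    rng = np.random.RandomState(seed)
    Phi = rng.normal(size=(n_ctx, phi_dim))        # context features
    Y = rng.normal(size=(n_ctx, y_dim))            # context targets
    K0 = np.zeros((phi_dim, y_dim))                # prior mean weights
    L0 = np.eye(phi_dim)                           # prior precision Lambda_0
    Ln_inv = np.linalg.inv(Phi.T @ Phi + L0)
    Kn = Ln_inv @ (Phi.T @ Y + L0 @ K0)            # posterior mean weights
    phi_star = rng.normal(size=phi_dim)            # a query feature vector
    mu_star = Kn.T @ phi_star                      # predictive mean
    spread = 1.0 + phi_star @ Ln_inv @ phi_star    # multiplies SigEps
    return mu_star, spread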
#!/usr/bin/env python
import numpy as np
import math
import commentjson as json
import os
import sys, os
from colorama import Fore, Back, Style
if len(sys.argv) < 2:
print('Usage: onshape-to-robot-pure-shape {STL file} [prefix=PureShapes]')
else:
fileName = sys.argv[1]
robotDir = os.path.dirname(fileName)
configFile = os.path.join(robotDir, 'config.json')
prefix = 'PureShapes'
if len(sys.argv) > 2:
prefix = sys.argv[2]
from .onshape_api.client import Client
client = Client(logging=False, creds=configFile)
parts = fileName.split('.')
parts[-1] = 'part'
partFileName = '.'.join(parts)
parts[-1] = 'scad'
scadFileName = '.'.join(parts)
f = open(partFileName, 'r')
part = json.load(f)
partid = part['partId']
result = client.get_sketches(part['documentId'], part['documentMicroversion'], part['elementId'], part['configuration'])
scad = "% scale(1000) import(\""+os.path.basename(fileName)+"\");\n"
sketchDatas = []
for sketch in result['sketches']:
if sketch['sketch'].startswith(prefix):
parts = sketch['sketch'].split(' ')
if len(parts) >= 2:
sketch['thickness'] = float(parts[1])
else:
print(Fore.RED + "ERROR: The sketch name should contain extrusion size (e.g \"PureShapes 5.3\")"
+ Style.RESET_ALL)
exit(0)
sketchDatas.append(sketch)
if len(sketchDatas):
print(Fore.GREEN + "* Found "+str(len(sketchDatas))+" PureShapes sketches" + Style.RESET_ALL)
for sketchData in sketchDatas:
# Retrieving sketch transform matrix
m = sketchData['transformMatrix']
mm = [m[0:4], m[4:8], m[8:12], m[12:16]]
mm[0][3] *= 1000
mm[1][3] *= 1000
mm[2][3] *= 1000
scad += "\n"
scad += "// Sketch "+sketchData['sketch']+"\n"
scad += 'multmatrix('+str(mm)+') {'+"\n"
scad += "thickness = %f;\n" % sketchData['thickness']
scad += "translate([0, 0, -thickness]) {\n"
boxes = {}
def boxSet(id, pointName, point):
if id not in boxes:
boxes[id] = {}
boxes[id][pointName] = point
for entry in sketchData['geomEntities']:
if entry['entityType'] == 'circle':
center = entry['center']
scad += " translate([%f, %f, 0]) {\n" % (center[0]*1000, center[1]*1000)
scad += " cylinder(r=%f,h=thickness);\n" % (entry['radius']*1000)
scad += " }\n"
if entry['entityType'] == 'point':
parts = entry['id'].split('.')
if len(parts) == 3:
if parts[1] == 'top' and parts[2] == 'start':
boxSet(parts[0], 'A', entry['point'])
if parts[1] == 'top' and parts[2] == 'end':
boxSet(parts[0], 'B', entry['point'])
if parts[1] == 'bottom' and parts[2] == 'start':
boxSet(parts[0], 'C', entry['point'])
if parts[1] == 'bottom' and parts[2] == 'end':
boxSet(parts[0], 'D', entry['point'])
for id in boxes:
if len(boxes[id]) == 4:
A, B = np.array(boxes[id]['A']), np.array(boxes[id]['B'])
"""
This is some test code to play around with generating surfaces from just Fourier magnitudes.
1. Read in rescaled data (scaled as per your MATLAB code)
2. Get the frequencies (these are equivalent to k, but I call them x just to make things confusing)
3. Generate random complex values with the correct magnitude
4. Find the location of the carrier wave
5. Shift over the k data so that the 0th mode lines up with the carrier with the max amp.
6. Restrict the frequency range to +/- 10 modes (for now)
7. Get B using the shifted k values and the restricted amplitudes
8. Plot results
"""
import numpy as np
import os
import glob
import matplotlib.pyplot as plt
from numpy.fft import fft, ifft
import NLS
import random as rand
subdirs = ['Aug1Data','Aug2Data','JulyData']
# Define something that will list directories that are not hidden
def listdirNH(path):
return glob.glob(os.path.join(path, '*'))
# Read in the data
j = 0
for sd in subdirs:
files = listdirNH(sd+'/Rescaled')
n = 0
for f in files:
datavals = np.transpose(np.loadtxt(f).view(float))
N = len(datavals[1])
x = datavals[0] # Frequencies
sly = datavals[1] # Magnitudes
ly = np.sqrt(sly*x)*0.01
L = 3600*3
k = (0.001*x)//(2*np.pi/L) # Gets me to milliHertz THIS IS THE K VECTOR
scalevals = np.sqrt(ly/2)
randvals = np.zeros(len(ly))
randpn = np.zeros(len(ly))
pn = [-1,1]
for l in range(len(ly)):
rand.seed(l) # Seed random number generator
randpn[l] = rand.choice(pn)
rand.seed(l+1)
value = rand.random()*rand.choice(pn) # Get a random value with a random sign
randvals[l] = value
ascale = randvals*scalevals # Scale the value
bscale = 1j*np.sqrt(ly-ascale**2)
#
# Copyright (c) 2020 IBM Corp.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# char_span.py
#
# Part of text_extensions_for_pandas
#
# Pandas extensions to support columns of spans with character offsets.
#
import textwrap
from typing import *
import numpy as np
import pandas as pd
from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries
from pandas.core.indexers import check_array_indexer
from pandas.api.types import is_bool_dtype
from memoized_property import memoized_property
# Internal imports
import text_extensions_for_pandas.jupyter as jupyter
def _check_same_text(o1, o2):
if not ((o1.target_text is o2.target_text) or (o1.target_text == o2.target_text)):
raise ValueError(
f"Spans are over different target text "
f"(got {o1.target_text} and {o2.target_text})"
)
class SpanOpMixin:
"""
Mixin class to define common operations between Span and SpanArray.
"""
def __add__(self, other) -> Union["Span", "SpanArray"]:
"""
Add a pair of spans and/or span arrays.
span1 + span2 == minimal span that covers both spans
:param other: Span or SpanArray
:return: minimal span (or array of spans) that covers both inputs.
"""
if isinstance(self, Span) and isinstance(other, Span):
# Span + *Span = Span
_check_same_text(self, other)
return Span(self.target_text, min(self.begin, other.begin),
max(self.end, other.end))
elif isinstance(self, (Span, SpanArray)) and isinstance(other, (Span, SpanArray)):
# SpanArray + *Span* = SpanArray
_check_same_text(self, other)
return SpanArray(self.target_text,
np.minimum(self.begin, other.begin),
np.maximum(self.end, other.end))
else:
raise TypeError(f"Unexpected combination of span types for add operation: "
f"{type(self)} and {type(other)}")
class Span(SpanOpMixin):
"""
Python object representation of a single span with character offsets; that
is, a single row of a `SpanArray`.
An offset of `Span.NULL_OFFSET_VALUE` (currently -1) indicates
"not a span" in the sense that NaN is "not a number".
"""
# Begin/end value that indicates "not a span" in the sense that NaN is
# "not a number".
NULL_OFFSET_VALUE = -1 # Type: int
def __init__(self, text: str, begin: int, end: int):
"""
Args:
text: target document text on which the span is defined
begin: Begin offset (inclusive) within `text`
end: End offset (exclusive, one past the last char) within `text`
"""
if Span.NULL_OFFSET_VALUE == begin:
if Span.NULL_OFFSET_VALUE != end:
raise ValueError("Begin offset with special 'null' value {} "
"must be paired with an end offset of {}",
Span.NULL_OFFSET_VALUE,
Span.NULL_OFFSET_VALUE)
elif begin < 0:
raise ValueError("begin must be >= 0")
elif end < 0:
raise ValueError("end must be >= 0")
elif end > len(text):
raise ValueError(f"end must be less than length of target string "
f"({end} > {len(text)}")
self._text = text
self._begin = begin
self._end = end
def __repr__(self) -> str:
if self.begin == Span.NULL_OFFSET_VALUE:
return "NA"
else:
return "[{}, {}): '{}'".format(self.begin, self.end,
textwrap.shorten(self.covered_text, 80))
def __eq__(self, other):
if isinstance(other, Span):
return (self.begin == other.begin
and self.end == other.end
and self.target_text == other.target_text)
elif isinstance(other, SpanArray):
return other == self
else:
# Different type ==> not equal
return False
def __hash__(self):
result = hash((self.target_text, self.begin, self.end))
return result
def __lt__(self, other):
"""
span1 < span2 if span1.end <= span2.begin
"""
# TODO: Should we compare target strings?
if isinstance(other, (Span, SpanArray)):
return self.end <= other.begin
else:
raise ValueError("Less-than relationship not defined for {} and {} "
"of types {} and {}"
"".format(self, other, type(self), type(other)))
def __gt__(self, other):
return other < self
def __le__(self, other):
return self < other or self == other
def __ge__(self, other):
return other <= self
@property
def begin(self):
return self._begin
@property
def end(self):
return self._end
@property
def target_text(self):
return self._text
@memoized_property
def covered_text(self):
"""
Returns the substring of `self.target_text` that this `Span`
represents.
"""
if Span.NULL_OFFSET_VALUE == self._begin:
return None
else:
return self.target_text[self.begin:self.end]
def overlaps(self, other: "text_extensions_for_pandas.Span"):
"""
:param other: Another Span or TokenSpan
:return: True if the two spans overlap. Also True if a zero-length
span is contained within the other.
"""
if self.begin == other.begin and self.end == other.end:
# Ensure that pairs of identical zero-length spans overlap.
return True
elif other.begin >= self.end:
return False # other completely to the right of self
elif other.end <= self.begin:
return False # other completely to the left of self
else: # other.begin < self.end and other.end >= self.begin
return True
def contains(self, other: "text_extensions_for_pandas.Span"):
"""
:param other: Another Span or TokenSpan
:return: True if `other` is entirely within the bounds of this span. Also
True if a zero-length span is contained within the other.
"""
return other.begin >= self.begin and other.end <= self.end
def context(self, num_chars: int = 40) -> str:
"""
Show the location of this span in the context of the target string.
:param num_chars: How many characters on either side to display
:return: A string in the form:
```<text before>[<text inside>]<text after>```
describing the text within and around the span.
"""
before_text = self.target_text[self.begin - num_chars:self.begin]
after_text = self.target_text[self.end:self.end + num_chars]
if self.begin > num_chars:
before_text = "..." + before_text
if self.end + num_chars < len(self.target_text):
after_text = after_text + "..."
return f"{before_text}[{self.covered_text}]{after_text}"
@pd.api.extensions.register_extension_dtype
class SpanDtype(pd.api.extensions.ExtensionDtype):
"""
Panda datatype for a span that represents a range of characters within a
target string.
"""
@property
def type(self):
# The type for a single row of a column of type Span
return Span
@property
def name(self) -> str:
"""A string representation of the dtype."""
return "SpanDtype"
@classmethod
def construct_from_string(cls, string: str):
"""
See docstring in `ExtensionDType` class in `pandas/core/dtypes/base.py`
for information about this method.
"""
if not isinstance(string, str):
raise TypeError(
f"'construct_from_string' expects a string, got {type(string)}"
)
# Upstream code uses exceptions as part of its normal control flow and
# will pass this method bogus class names.
if string == cls.__name__:
return cls()
else:
raise TypeError(
f"Cannot construct a '{cls.__name__}' from '{string}'")
@classmethod
def construct_array_type(cls):
"""
See docstring in `ExtensionDType` class in `pandas/core/dtypes/base.py`
for information about this method.
"""
return SpanArray
def __from_arrow__(self, extension_array):
"""
Convert the given extension array of type ArrowSpanType to a
SpanArray.
"""
from text_extensions_for_pandas.array.arrow_conversion import arrow_to_span
return arrow_to_span(extension_array)
class SpanArray(pd.api.extensions.ExtensionArray, SpanOpMixin):
"""
A Pandas `ExtensionArray` that represents a column of character-based spans
over a single target text.
Spans are represented as `[begin, end)` intervals, where `begin` and `end`
are character offsets into the target text.
"""
def __init__(self, text: str,
begins: Union[pd.Series, np.ndarray, Sequence[int]],
ends: Union[pd.Series, np.ndarray, Sequence[int]]):
"""
:param text: Target text from which the spans of this array are drawn
:param begins: Begin offsets of spans (closed)
:param ends: End offsets (open)
"""
if not isinstance(begins, (pd.Series, np.ndarray, list)):
raise TypeError(f"begins is of unsupported type {type(begins)}. "
f"Supported types are Series, ndarray and List[int].")
if not isinstance(ends, (pd.Series, np.ndarray, list)):
raise TypeError(f"ends is of unsupported type {type(ends)}. "
f"Supported types are Series, ndarray and List[int].")
begins = np.array(begins) if not isinstance(begins, np.ndarray) else begins
ends = np.array(ends) if not isinstance(ends, np.ndarray) else ends
if not np.issubdtype(begins.dtype, np.integer):
raise TypeError(f"Begins array is of dtype {begins.dtype}, "
f"which is not an integer type.")
if not np.issubdtype(ends.dtype, np.integer):
raise TypeError(f"Ends array is of dtype {begins.dtype}, "
f"which is not an integer type.")
self._text = text # Type: str
self._begins = begins # Type: np.ndarray
self._ends = ends # Type: np.ndarray
# Monotonically increasing version number for tracking changes and
# invalidating caches
self._version = 0 # Type: int
# Cached list of other SpanArrays that are exactly the same as this
# one. Each element is the result of calling id()
self._equivalent_arrays = [] # Type: List[int]
# Version numbers of elements in self._equivalent_arrays, to ensure that
# a change hasn't made the arrays no longer equal
self._equiv_array_versions = [] # Type: List[int]
# Cached hash value of this array
self._hash = None # Type: int
# Flag that tells whether to display details of offsets in Jupyter notebooks
self._repr_html_show_offsets = True # Type: bool
##########################################
# Overrides of superclass methods go here.
@property
def dtype(self) -> pd.api.extensions.ExtensionDtype:
"""
See docstring in `ExtensionArray` class in `pandas/core/arrays/base.py`
for information about this method.
"""
return SpanDtype()
def astype(self, dtype, copy=True):
"""
See docstring in `ExtensionArray` class in `pandas/core/arrays/base.py`
for information about this method.
"""
dtype = pd.api.types.pandas_dtype(dtype)
if isinstance(dtype, SpanDtype):
data = self.copy() if copy else self
elif isinstance(dtype, pd.StringDtype):
return dtype.construct_array_type()._from_sequence(self, copy=False)
else:
na_value = Span(
self.target_text, Span.NULL_OFFSET_VALUE, Span.NULL_OFFSET_VALUE
)
data = self.to_numpy(dtype=dtype, copy=copy, na_value=na_value)
return data
@property
def nbytes(self) -> int:
"""
See docstring in `ExtensionArray` class in `pandas/core/arrays/base.py`
for information about this method.
"""
return self._begins.nbytes + self._ends.nbytes + len(self._text.encode("utf-8"))
def __len__(self) -> int:
return len(self._begins)
def __getitem__(self, item) -> Union[Span, "SpanArray"]:
"""
See docstring in `ExtensionArray` class in `pandas/core/arrays/base.py`
for information about this method.
"""
if isinstance(item, int):
return Span(self._text, int(self._begins[item]),
int(self._ends[item]))
else:
# item not an int --> assume it's a numpy-compatible index
item = check_array_indexer(self, item)
return SpanArray(self._text,
self._begins[item],
self._ends[item])
def __setitem__(self, key: Union[int, np.ndarray], value: Any) -> None:
"""
See docstring in `ExtensionArray` class in `pandas/core/arrays/base.py`
for information about this method.
"""
key = check_array_indexer(self, key)
if isinstance(value, ABCSeries) and isinstance(value.dtype, SpanDtype):
value = value.values
if value is None or isinstance(value, Sequence) and len(value) == 0:
self._begins[key] = Span.NULL_OFFSET_VALUE
self._ends[key] = Span.NULL_OFFSET_VALUE
elif isinstance(value, Span) or \
((isinstance(key, slice) or
(isinstance(key, np.ndarray) and is_bool_dtype(key.dtype)))
and isinstance(value, SpanArray)):
self._begins[key] = value.begin
self._ends[key] = value.end
elif isinstance(key, np.ndarray) and len(value) > 0 and len(value) == len(key) and \
((isinstance(value, Sequence) and isinstance(value[0], Span)) or
isinstance(value, SpanArray)):
for k, v in zip(key, value):
self._begins[k] = v.begin
self._ends[k] = v.end
else:
raise ValueError(
f"Attempted to set element of SpanArray with "
f"an object of type {type(value)}")
# We just changed the contents of this array, so invalidate any cached
# results computed from those contents.
self.increment_version()
def __eq__(self, other):
"""
Pandas/Numpy-style array/series comparison function.
:param other: Second operand of a Pandas "==" comparison with the series
that wraps this TokenSpanArray.
:return: Returns a boolean mask indicating which rows match `other`.
"""
if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)):
# Rely on pandas to unbox and dispatch to us.
return NotImplemented
if isinstance(other, Span):
mask = np.full(len(self), True, dtype=np.bool)
mask[self.target_text != other.target_text] = False
mask[self.begin != other.begin] = False
mask[self.end != other.end] = False
return mask
elif isinstance(other, SpanArray):
if len(self) != len(other):
raise ValueError("Can't compare arrays of differing lengths "
"{} and {}".format(len(self), len(other)))
if self.target_text != other.target_text:
return np.zeros(self.begin.shape, dtype=np.bool)
return np.logical_and(
    self.begin == other.begin,
    self.end == other.end
)
else:
# TODO: Return False here once we're sure that this
# function is catching all the comparisons that really matter.
raise ValueError("Don't know how to compare objects of type "
"'{}' and '{}'".format(type(self), type(other)))
def __ne__(self, other):
return ~(self == other)
def __hash__(self):
if self._hash is None:
self._hash = hash((self._text, self._begins.tobytes(),
self._ends.tobytes()))
return self._hash
def equals(self, other: "SpanArray"):
"""
:param other: A second `SpanArray`
:return: True if both arrays have the same target text (can be a
different string object with the same contents) and the same spans
in the same order.
"""
if not isinstance(other, SpanArray):
raise TypeError(f"equals() not defined for arguments of type "
f"{type(other)}")
if self is other:
return True
# Check for cached result
if id(other) in self._equivalent_arrays:
cache_ix = self._equivalent_arrays.index(id(other))
else:
cache_ix = -1
if (cache_ix >= 0
and other.version == self._equiv_array_versions[cache_ix]):
# Cached "equal" result
return True
elif (self.target_text != other.target_text
or not np.array_equal(self.begin, other.begin)
or not np.array_equal(self.end, other.end)):
# "Not equal" result from slow path
if cache_ix >= 0:
del self._equivalent_arrays[cache_ix]
del self._equiv_array_versions[cache_ix]
return False
else:
# If we get here, self and other are equal, and we had to expend
# quite a bit of effort to figure that out.
# Cache the result so we don't have to do that again.
if cache_ix >= 0:
self._equiv_array_versions[cache_ix] = other.version
else:
self._equivalent_arrays.append(id(other))
self._equiv_array_versions.append(other.version)
return True
@classmethod
def _concat_same_type(
cls, to_concat: Sequence[pd.api.extensions.ExtensionArray]
) -> pd.api.extensions.ExtensionArray:
"""
See docstring in `ExtensionArray` class in `pandas/core/arrays/base.py`
for information about this method.
"""
text = {a.target_text for a in to_concat}
if len(text) != 1:
raise ValueError("Spans must all be over the same target text")
text = text.pop()
begins = np.concatenate([a.begin for a in to_concat])
ends = np.concatenate([a.end for a in to_concat])
return SpanArray(text, begins, ends)
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""
See docstring in `ExtensionArray` class in `pandas/core/arrays/base.py`
for information about this method.
"""
text = None
if isinstance(scalars, Span):
scalars = [scalars]
if isinstance(scalars, SpanArray):
text = scalars.target_text
begins = np.full(len(scalars), Span.NULL_OFFSET_VALUE, np.int)
ends = np.full(len(scalars), Span.NULL_OFFSET_VALUE, np.int)
i = 0
for s in scalars:
if not isinstance(s, Span):
raise ValueError(f"Can only convert a sequence of Span "
f"objects to a SpanArray. Found an "
f"object of type {type(s)}")
if text is None:
text = s.target_text
if s.target_text != text:
raise ValueError(
f"Mixing different target texts is not currently "
f"supported. Received two different strings:\n"
f"{text}\nand\n{s.target_text}")
begins[i] = s.begin
ends[i] = s.end
i += 1
return SpanArray(text, begins, ends)
@classmethod
def _from_factorized(cls, values, original):
"""
See docstring in `ExtensionArray` class in `pandas/core/arrays/base.py`
for information about this method.
"""
return cls._from_sequence(values)
def _values_for_factorize(self) -> Tuple[np.ndarray, Any]:
"""
See docstring in `ExtensionArray` class in `pandas/core/arrays/base.py`
for information about this method.
"""
na_value = Span(self.target_text, Span.NULL_OFFSET_VALUE,
Span.NULL_OFFSET_VALUE)
return self.astype(object), na_value
def isna(self) -> np.array:
"""
See docstring in `ExtensionArray` class in `pandas/core/arrays/base.py`
for information about this method.
"""
return np.equal(self._begins, Span.NULL_OFFSET_VALUE)
def copy(self) -> "SpanArray":
"""
See docstring in `ExtensionArray` class in `pandas/core/arrays/base.py`
for information about this method.
"""
ret = SpanArray(
self.target_text,
self.begin.copy(),
self.end.copy()
)
# TODO: Copy cached properties too
return ret
def take(
self, indices: Sequence[int], allow_fill: bool = False,
fill_value: Any = None
) -> "SpanArray":
"""
See docstring in `ExtensionArray` class in `pandas/core/arrays/base.py`
for information about this method.
"""
if allow_fill:
# From API docs: "[If allow_fill == True, then] negative values in
# `indices` indicate missing values. These values are set to
# `fill_value`. Any other negative values raise a ``ValueError``."
if fill_value is None or \
(np.isscalar(fill_value) and np.math.isnan(fill_value)):
# Replace with a "nan span"
fill_value = Span(
self.target_text,
Span.NULL_OFFSET_VALUE,
Span.NULL_OFFSET_VALUE)
elif not isinstance(fill_value, Span):
raise ValueError("Fill value must be Null, nan, or a Span "
"(was {})".format(fill_value))
else:
# Dummy fill value to keep code below happy
fill_value = Span(self.target_text, Span.NULL_OFFSET_VALUE,
Span.NULL_OFFSET_VALUE)
# Pandas' internal implementation of take() does most of the heavy
# lifting.
begins = pd.api.extensions.take(
self.begin, indices, allow_fill=allow_fill,
fill_value=fill_value.begin
)
ends = pd.api.extensions.take(
self.end, indices, allow_fill=allow_fill,
fill_value=fill_value.end
)
return SpanArray(self.target_text, begins, ends)
def __lt__(self, other):
"""
Pandas-style array/series comparison function.
:param other: Second operand of a Pandas "<" comparison with the series
that wraps this TokenSpanArray.
:return: Returns a boolean mask indicating which rows are less than
`other`. span1 < span2 if span1.end <= span2.begin.
"""
if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)):
# Rely on pandas to unbox and dispatch to us.
return NotImplemented
if isinstance(other, (SpanArray, Span)):
return self.end <= other.begin
else:
raise ValueError("'<' relationship not defined for {} and {} "
"of types {} and {}"
"".format(self, other, type(self), type(other)))
def __gt__(self, other):
if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)):
# Rely on pandas to unbox and dispatch to us.
return NotImplemented
if isinstance(other, (SpanArray, Span)):
return other.__lt__(self)
else:
raise ValueError("'>' relationship not defined for {} and {} "
"of types {} and {}"
"".format(self, other, type(self), type(other)))
def __le__(self, other):
# TODO: Figure out what the semantics of this operation should be.
raise NotImplementedError()
def __ge__(self, other):
# TODO: Figure out what the semantics of this operation should be.
raise NotImplementedError()
def _reduce(self, name, skipna=True, **kwargs):
"""
See docstring in `ExtensionArray` class in `pandas/core/arrays/base.py`
for information about this method.
"""
if name == "sum":
# Sum ==> combine, i.e. return the smallest span that contains all
# spans in the series
return Span(self.target_text, np.min(self.begin),
np.max(self.end))
else:
raise TypeError(f"'{name}' aggregation not supported on a series "
f"backed by a SpanArray")
####################################################
# Methods that don't override the superclass go here
@classmethod
def make_array(cls, o) -> "SpanArray":
"""
Make a `SpanArray` object out of any of several types of input.
:param o: a SpanArray object represented as a `pd.Series`, a list
of `TokenSpan` objects, or maybe just an actual `SpanArray`
(or `TokenSpanArray`) object.
:return: SpanArray version of `o`, which may be a pointer to `o` or
one of its fields.
"""
if isinstance(o, SpanArray):
return o
elif isinstance(o, pd.Series):
return cls.make_array(o.values)
elif isinstance(o, Iterable):
return cls._from_sequence(o)
@property
def target_text(self) -> str:
"""
Returns the common "document" text that the spans in this array
reference.
"""
return self._text
@property
def begin(self) -> np.ndarray:
return self._begins
@property
def end(self) -> np.ndarray:
return self._ends
@property
def version(self) -> int:
"""
:return: Monotonically increasing version number that changes every time
this array is modified. **NOTE:** This number might not change if a
caller obtains a pointer to an internal array and modifies it.
Callers who perform such modifications should call `increment_version()`
"""
return self._version
def increment_version(self):
"""
Manually increase the version counter of this array to indicate that
the array's contents have changed. Also invalidates any internal cached
data derived from the array's state.
"""
# Invalidate cached computation
self._equivalent_arrays = []
self._equiv_array_versions = []
self._hash = None
# Increment the counter
self._version += 1
def as_tuples(self) -> np.ndarray:
"""
Returns (begin, end) pairs as an array of tuples
"""
return np.concatenate(
(self.begin.reshape((-1, 1)), self.end.reshape((-1, 1))),
axis=1)
@property
def covered_text(self) -> np.ndarray:
"""
:return: an array of the substrings of `target_text` corresponding to
the spans in this array.
"""
# TODO: Vectorized version of this
text = self.target_text
# Need dtype=np.object so we can return nulls
result = np.zeros(len(self), dtype=np.object)
for i in range(len(self)):
if self._begins[i] == Span.NULL_OFFSET_VALUE:
# Null value at this index
result[i] = None
else:
result[i] = text[self._begins[i]:self._ends[i]]
return result
@memoized_property
def normalized_covered_text(self) -> np.ndarray:
"""
:return: A normalized version of the covered text of the spans in this
array. Currently "normalized" means "lowercase".
"""
# Currently we can't use np.char.lower directly because
# self.covered_text needs to be an object array, not a numpy string
# array, to allow for null values.
return np.vectorize(np.char.lower)(self.covered_text)
def as_frame(self) -> pd.DataFrame:
"""
Returns a dataframe representation of this column based on Python
atomic types.
"""
return pd.DataFrame({
"begin": self.begin,
"end": self.end,
"covered_text": self.covered_text
})
def overlaps(self, other: Union["SpanArray", Span]):
"""
:param other: Either a single span or an array of spans of the same
length as this one
:return: Numpy array containing a boolean mask of all entries that
overlap the corresponding element of `other`
"""
if not isinstance(other, (Span, SpanArray)):
raise TypeError(f"overlaps not defined for input type "
f"{type(other)}")
# Replicate the logic in Span.overlaps() with boolean masks
exact_equal_mask = np.logical_and(self.begin == other.begin,
self.end == other.end)
begin_ge_end_mask = other.begin >= self.end
end_le_begin_mask = other.end <= self.begin
# (self.begin == other.begin and self.end == other.end)
# or not (other.begin >= self.end or other.end <= self.begin)
return np.logical_or(exact_equal_mask,
np.logical_not(
np.logical_or(begin_ge_end_mask,
end_le_begin_mask)))
def contains(self, other: Union["SpanArray", Span]):
"""
:param other: Either a single span or an array of spans of the same
length as this one
:return: Numpy array containing a boolean mask of all entries that
contain the corresponding element of `other`
"""
if not isinstance(other, (Span, SpanArray)):
raise TypeError(f"contains not defined for input type "
f"{type(other)}")
# Replicate the logic in Span.contains() with boolean masks
begin_ge_begin_mask = other.begin >= self.begin
end_le_end_mask = other.end <= self.end
return np.logical_and(begin_ge_begin_mask, end_le_end_mask)
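# Minimal usage sketch (hypothetical text and offsets, not taken from the test suite):
#   arr = SpanArray("Hello world", [0, 6], [5, 11])
#   arr.covered_text                          # -> array(['Hello', 'world'], dtype=object)
#   arr.overlaps(Span("Hello world", 4, 7))   # -> array([ True,  True])
#   arr.contains(Span("Hello world", 6, 11))  # -> array([False,  True])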
import numpy as np
import matplotlib
matplotlib.use('agg')
from matplotlib import pyplot as plt
from matplotlib import dates as md
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
from sklearn.preprocessing import normalize, scale
import sklearn.metrics as metrics
import pickle
import stat_tools as st
import configparser
import os, sys, subprocess
from datetime import datetime, timezone, timedelta
from ast import literal_eval as le
import pytz
def localToUTCtimestamp(t, local_tz):
t_local = local_tz.localize(t, is_dst=None)
t_utc = t_local.astimezone(pytz.utc)
return t_utc.timestamp()
def UTCtimestampTolocal(ts, local_tz):
t_utc = datetime.fromtimestamp(ts,tz=pytz.timezone("UTC"))
t_local = t_utc.astimezone(local_tz)
return t_local
try:
try:
config_path = sys.argv[1]
except Exception:
config_path = "./config.conf"
cp = configparser.ConfigParser()
cp.read(config_path)
inpath=le(cp["paths"]["feature_path"])
GHI_path=le(cp["paths"]["GHI_path"])
forecast_path=le(cp["paths"]["forecast_path"])
lead_minutes=le(cp["forecast"]["lead_minutes"])
days=le(cp["forecast"]["days"])
#lead_minutes=[1,3,5,10,15,30,45];
#sensors = np.arange(99,100)
try:
sensors = le(cp["forecast"]["sensors"])
except Exception:
GHI_Coor = le(cp["GHI_sensors"]["GHI_Coor"]) #if sensor list isn't provided, forecast for all GHI points
sensors = range(0,len(GHI_Coor))
try:
forecast_timezone=pytz.timezone(cp["forecast"]["forecast_timezone"])
print("Using camera timezone: %s" % str(forecast_timezone))
except Exception:
forecast_timezone=pytz.timezone("utc")
print("Error processsing forecast timezone config, assuming UTC")
except KeyError as e:
print("Error loading config: %s" % e)
if not os.path.isdir(forecast_path):
try:
os.mkdir(forecast_path[:-1])
except:
print('Cannot create directory,', forecast_path[:-1])
plt.ioff() #Turn off interactive plotting for running automatically
for day in days:
MAE, MSE = [], []
MAE2, MSE2 = [], []
print("Predicting for " + day)
if not os.path.isdir(forecast_path+day[:8]):
try:
subprocess.call(['mkdir', forecast_path+day[:8]])
except:
print('Cannot create directory,',forecast_path+day[:8])
continue
if not os.path.isdir(forecast_path+day[:8] + "/plots"):
try:
os.mkdir(forecast_path+day[:8] + "/plots")
except:
print('Cannot create directory,', forecast_path+day[:8] + "/plots")
for forward in lead_minutes:
timestamp, DataX, DataY = {},{},{}
MAE_period, MSE_period = [], []
MAE2_period, MSE2_period = [], []
for sensor in sensors:
timestamp[sensor] = []
DataX[sensor] = []
DataY[sensor] = []
try:
x = np.genfromtxt(inpath+day[:8]+'/GHI'+str(sensor)+'.csv',delimiter=',',skip_header=1); # < ORIGINAL
#x = np.genfromtxt(inpath+'/GHI'+str(sensor)+'.csv',delimiter=','); # Temp change to allow running of old data in dhuang3
x = x[x[:,0]==forward]; #Take all rows where forecast period == forward
#if sensor == 26: # Temp added for 2018-09-22 test with location 99
# with np.load(GHI_path+day[:6]+'/GHI_'+str(99)+'.npz') as data: #
# ty, y = data['timestamp'], data['ghi'] #
#else: #
with np.load(GHI_path+day[:6]+'/GHI_'+str(sensor)+'.npz') as data: # < ORIGINAL
ty, y = data['timestamp'], data['ghi'] # < ORIGINAL
#ty -= 3600 #Add an hour (testing only!)
x = x[x[:,1]<=ty[-1]] #Take all "feature" elements where timestamp is less than last GHI timestamp
tx=x[:,1].copy(); #Create copy of feature timestamps
itx = ((tx-ty[0]+30)//60).astype(int) #Create array of relative time based on first GHI timestamp, add 30 secs, floor to minutes, convert to int
print("len(x): %i\tlen y: %i\n" % (len(tx), len(ty)))
try:
print("tx: %i\ty: %i\titx: %i\n" % (tx[0],ty[0],itx[0]))
except IndexError:
pass
x[:,1] = (y[itx]) #Select x values corresponding to times in itx
DataX[sensor] += [x[:,1:]] #Append timestamp and x values to DataX (does NOT copy forecast period "forward" column)
DataY[sensor] += [(y[itx + forward])] #Get future actual GHI
timestamp[sensor] += [tx];
DataX[sensor] = np.vstack(DataX[sensor])
import types
import numpy as np
import torch
import math
from torch import nn, optim
import torch.nn.functional as F
from torch.autograd.variable import Variable
CROSS_ENTROPY_ONE_HOT_WARNING = False
def to_categorical(y, num_classes=None, dtype='float32'):
"""Converts a class vector (integers) to binary class matrix.
E.g. for use with categorical_crossentropy.
# Arguments
y: class vector to be converted into a matrix
(integers from 0 to num_classes).
num_classes: total number of classes.
dtype: The data type expected by the input, as a string
(`float32`, `float64`, `int32`...)
# Returns
A binary matrix representation of the input. The classes axis
is placed last.
# Example
```python
# Consider an array of 5 labels out of a set of 3 classes {0, 1, 2}:
> labels
array([0, 2, 1, 2, 0])
# `to_categorical` converts this into a matrix with as many
# columns as there are classes. The number of rows
# stays the same.
> to_categorical(labels)
array([[ 1., 0., 0.],
[ 0., 0., 1.],
[ 0., 1., 0.],
[ 0., 0., 1.],
[ 1., 0., 0.]], dtype=float32)
```
"""
y = np.array(y, dtype='int')
input_shape = y.shape
if input_shape and input_shape[-1] == 1 and len(input_shape) > 1:
input_shape = tuple(input_shape[:-1])
y = y.ravel()
if not num_classes:
num_classes = np.max(y) + 1
n = y.shape[0]
categorical = np.zeros((n, num_classes), dtype=dtype)
categorical[np.arange(n), y] = 1
output_shape = input_shape + (num_classes,)
categorical = np.reshape(categorical, output_shape)
return categorical
def get_optimizer(optmizer_type, model_params, lr=0.1, pmomentum=0.9, pweight_decay=5e-4, palpha=0.9):
# Helper to rebuild the optimizer -> useful when changing the learning rate
if optmizer_type=="SGD":
return optim.SGD(filter(lambda p: p.requires_grad, model_params), lr=lr, momentum=pmomentum)
elif optmizer_type=="Adam":
return optim.Adam(filter(lambda p: p.requires_grad, model_params), lr=lr, weight_decay=pweight_decay)
elif optmizer_type=="RMSprop":
return optim.RMSprop(filter(lambda p: p.requires_grad, model_params), lr=lr, alpha=palpha)
assert False, 'No optimizers with that name!'
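# Example (hypothetical model): rebuild the optimizer with a new learning rate
#   optimizer = get_optimizer("SGD", model.parameters(), lr=0.01, pmomentum=0.9)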
def get_current_lr(optimizer):
for param_group in optimizer.param_groups:
return param_group['lr']
def anneal_lr_lineal(models, lr_init, total_epochs, current_epoch, optimizer_type, flag=True):
# flag tells us whether we actually want to anneal the learning rate for these models
if not flag: lr_new = lr_init
else: lr_new = -(lr_init/total_epochs) * current_epoch + lr_init
redes_resultado = []
for model in models:
redes_resultado.append(get_optimizer(optimizer_type, model.parameters(), lr=lr_new))
if len(redes_resultado) == 1: return lr_new, redes_resultado[0]
return lr_new, redes_resultado
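# Worked example: with lr_init=0.1 and total_epochs=100, the annealed rate at
# current_epoch=50 is -(0.1/100)*50 + 0.1 = 0.05, reaching 0 at epoch 100.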
def defrost_model_params(model):
# Function to unfreeze a network's parameters!
for param in model.parameters():
param.requires_grad = True
def simple_target_creator(samples, value):
"""
Function to create a vector used to assign the class of the different
samples during training, of size 'samples'.
The vector will be (samples x 1) filled with 'value'.
"""
return Variable(torch.ones(samples, 1)).type(torch.cuda.FloatTensor)*value
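# e.g. simple_target_creator(64, 1) -> (64, 1) CUDA tensor of ones  (targets for "real" samples)
#      simple_target_creator(64, 0) -> (64, 1) CUDA tensor of zeros (targets for "fake" samples)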
def train_simple_model(model, data, target, loss, optimizer, out_pos=-1, target_one_hot=False, net_type="convolutional", do_step=True, get_corrects=False):
# Losses: https://pytorch.org/docs/stable/nn.html
if(model.training==False): model.train()
if net_type == "fully-connected":
model_out = model.forward(Variable(data.float().view(data.shape[0], -1)))
elif net_type == "convolutional":
model_out = model.forward(Variable(data.float()))
# Some models return several outputs (e.g. a reshape layer, the
# logits, etc.). To know which output to use in the loss we take
# the one indicated by the out_pos parameter.
if type(model_out) is list or type(model_out) is tuple:
model_out = model_out[out_pos]
if target_one_hot: _, target = target.max(dim=1)
n_correct = (torch.max(model_out, 1)[1].view(target.size()) == target).sum().item()
# Compute the resulting loss
# Careful with one-hot encoding! https://discuss.pytorch.org/t/runtimeerror-multi-target-not-supported-newbie/10216/8
try: cost = loss(model_out, target)
except:
global CROSS_ENTROPY_ONE_HOT_WARNING
if not CROSS_ENTROPY_ONE_HOT_WARNING:
print("\nWARNING-INFO: Crossentropy not works with one hot target encoding!\n")
CROSS_ENTROPY_ONE_HOT_WARNING = True
cost = loss(model_out, target[:,0])
cost.backward()
if do_step:
# Update the weights and clear the gradients
optimizer.step()
optimizer.zero_grad()
if get_corrects: return n_correct, cost.item()
else: return cost.item()
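# Minimal usage sketch (hypothetical model, optimizer and data loader):
#   loss_fn = nn.CrossEntropyLoss()
#   for batch, target in train_loader:
#       cost = train_simple_model(model, batch.cuda(), target.cuda(), loss_fn, optimizer)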
def evaluate_accuracy_models_generator(models, data, max_data=0, topk=(1,), target_one_hot=False, net_type="convolutional"):
"""Computes the accuracy (sobre 1) over the k top predictions for the specified values of k"""
# With one model and topk=(1,5)  -> acc1, acc5
# With two models and topk=(1,5) -> m1_acc1, m1_acc5, m2_acc1, m2_acc5
with torch.no_grad():
if type(topk)==int:
maxk = topk
topk = (topk,)
else: maxk = max(topk)
correct_models, total_samples = [0]*len(models), 0
for batch_idx, (batch, target) in enumerate(data):
if target_one_hot: _, target = target.max(dim=1)
batch_size = target.size(0)
# Compute predictions for the test error of every model
# We have to run the forward pass for each model and see which classes it gets right
for model_indx, model in enumerate(models):
if(model.training==True): model.eval()
if net_type == "fully-connected":
model_out = model.forward(Variable(batch.float().view(batch.shape[0], -1).cuda()))
elif net_type == "convolutional":
model_out = model.forward(Variable(batch.float().cuda()))
else: assert False, "Please define your model type!"
# Some models return several outputs (e.g. a reshape layer, the
# logits, etc.), so the convention here is that the last output
# holds the model logits used for classification.
if type(model_out) is list or type(model_out) is tuple:
model_out = model_out[-1]
# Turn the logits into a prediction by taking the index with the
# highest value among the logit tuples
res_topk = np.array(topk_accuracy(model_out, target.cuda(), topk=topk))
correct_models[model_indx] += res_topk
total_samples += batch_size
if max_data != 0 and total_samples >= max_data: break
accuracies = []
for result_model in correct_models:
for topkres in result_model:
accuracies.append((topkres*1.0)/total_samples)
#accuracies = list(((np.array(correct_models) * 1.0) / total_samples))
if len(accuracies) == 1: return accuracies[0]
return accuracies
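# Example (hypothetical test loader): top-1 and top-5 accuracy for a single model
#   acc1, acc5 = evaluate_accuracy_models_generator([model], test_loader, topk=(1, 5))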
def evaluate_accuracy_loss_models_generator(models, data, loss, max_data=0, topk=(1,), target_one_hot=False, net_type="convolutional"):
"""Computes the accuracy (sobre 1) over the k top predictions for the specified values of k"""
# With one model and topk=(1,5)  -> acc1, acc5
# With two models and topk=(1,5) -> m1_acc1, m1_acc5, m2_acc1, m2_acc5
with torch.no_grad():
if type(topk)==int:
maxk = topk
topk = (topk,)
else: maxk = max(topk)
correct_models, loss_models, total_samples = [0]*len(models), [0]*len(models), 0
for batch_idx, (batch, target) in enumerate(data):
if target_one_hot: _, target = target.max(dim=1)
batch_size = target.size(0)
# Compute predictions for the test error of every model
# We have to run the forward pass for each model and see which classes it gets right
for model_indx, model in enumerate(models):
if(model.training==True): model.eval()
if net_type == "fully-connected":
model_out = model.forward(Variable(batch.float().view(batch.shape[0], -1).cuda()))
elif net_type == "convolutional":
model_out = model.forward(Variable(batch.float().cuda()))
else: assert False, "Please define your model type!"
# Some models return several outputs (e.g. a reshape layer, the
# logits, etc.), so the convention here is that the last output
# holds the model logits used for classification.
if type(model_out) is list or type(model_out) is tuple:
model_out = model_out[-1]
# Turn the logits into a prediction by taking the index with the
# highest value among the logit tuples
res_topk = np.array(topk_accuracy(model_out, target.cuda(), topk=topk))
correct_models[model_indx] += res_topk
try: cost = loss(model_out, target.cuda())
except:
global CROSS_ENTROPY_ONE_HOT_WARNING
if not CROSS_ENTROPY_ONE_HOT_WARNING:
print("\nWARNING-INFO: Crossentropy not works with one hot target encoding!\n")
CROSS_ENTROPY_ONE_HOT_WARNING = True
cost = loss(model_out, target[:,0])
loss_models[model_indx] += cost.item()
total_samples += batch_size
if max_data != 0 and total_samples >= max_data: break
"""
accuracies = []
for result_model in correct_models:
for topkres in result_model:
accuracies.append((topkres*1.0)/total_samples)
#accuracies = list(((np.array(correct_models) * 1.0) / total_samples))
if len(accuracies) == 1: return accuracies[0]
return accuracies
"""
accuracies, losses = [], []
for indx, result_model in enumerate(correct_models):
for topkres in result_model:
accuracies.append((topkres*1.0)/total_samples)
losses.append((loss_models[indx]*1.0)/total_samples)
#accuracies = list(((np.array(correct_models) * 1.0) / total_samples))
if len(accuracies) == 1: return accuracies[0], losses[0]
return accuracies[0], accuracies[1], losses[0]
#zipped = [a for a in zip(accuracies,losses)]
#return [item for sublist in zipped for item in sublist]
def evaluate_accuracy_models_data(models, X_data, y_data, batch_size=100, max_data=0, topk=(1,), net_type="convolutional"):
"""Computes the accuracy over the k top predictions for the specified values of k"""
# With one model and topk=(1,5)  -> acc1, acc5
# With two models and topk=(1,5) -> m1_acc1, m1_acc5, m2_acc1, m2_acc5
with torch.no_grad():
if type(topk)==int:
maxk = topk
topk = (topk,)
else: maxk = max(topk)
correct_models, total_samples = [0]*len(models), 0
total_samples = 0
while True:
# Make sure the batch_size does not run past the end of the data
if total_samples + batch_size >= len(X_data): batch_size = (len(X_data)) - total_samples
batch = X_data[total_samples:total_samples+batch_size]
target = y_data[total_samples:total_samples+batch_size]
# Compute predictions for the test error of every model
# We have to run the forward pass for each model and see which classes it gets right
for model_indx, model in enumerate(models):
if(model.training==True): model.eval()
if net_type == "fully-connected":
model_out = model.forward(Variable(batch.float().view(batch.shape[0], -1).cuda()))
elif net_type == "convolutional":
model_out = model.forward(Variable(batch.float().cuda()))
else: assert False, "Please define your model type!"
# Some models return several outputs (e.g. a reshape layer, the
# logits, etc.), so the convention here is that the last output
# holds the model logits used for classification.
if type(model_out) is list or type(model_out) is tuple:
model_out = model_out[-1]
# Turn the logits into a prediction by taking the index with the
# highest value among the logit tuples
res_topk = np.array(topk_accuracy(model_out, target.cuda(), topk=topk))
correct_models[model_indx] += res_topk
total_samples+=batch_size
if max_data != 0 and total_samples >= max_data or total_samples == len(X_data): break
accuracies = []
for result_model in correct_models:
for topkres in result_model:
accuracies.append((topkres*1.0)/total_samples)
#accuracies = list(((np.array(correct_models) * 1.0) / total_samples))
if len(accuracies) == 1: return accuracies[0]
return accuracies
def evaluate_accuracy_loss_models_data(models, X_data, y_data, loss, batch_size=100, max_data=0, topk=(1,), net_type="convolutional"):
"""Computes the accuracy over the k top predictions for the specified values of k"""
# With one model and topk=(1,5)  -> acc1, acc5
# With two models and topk=(1,5) -> m1_acc1, m1_acc5, m2_acc1, m2_acc5
with torch.no_grad():
if type(topk)==int:
maxk = topk
topk = (topk,)
else: maxk = max(topk)
correct_models, loss_models, total_samples = [0]*len(models), [0]*len(models), 0
total_samples = 0
while True:
# Make sure the batch_size does not run past the end of the data
if total_samples + batch_size >= len(X_data): batch_size = (len(X_data)) - total_samples
batch = X_data[total_samples:total_samples+batch_size]
target = y_data[total_samples:total_samples+batch_size]
# Compute predictions for the test error of every model
# We have to run the forward pass for each model and see which classes it gets right
for model_indx, model in enumerate(models):
if(model.training==True): model.eval()
#if(model.training==True): model.eval()
if net_type == "fully-connected":
model_out = model.forward(Variable(batch.float().view(batch.shape[0], -1).cuda()))
elif net_type == "convolutional":
model_out = model.forward(Variable(batch.float().cuda()))
else: assert False, "Please define your model type!"
# Some models return several outputs (e.g. a reshape layer, the
# logits, etc.), so the convention here is that the last output
# holds the model logits used for classification.
if type(model_out) is list or type(model_out) is tuple:
model_out = model_out[-1]
# Turn the logits into a prediction by taking the index with the
# highest value among the logit tuples
res_topk = np.array(topk_accuracy(model_out, target.cuda(), topk=topk))
correct_models[model_indx] += res_topk
try: cost = loss(model_out, target.cuda())
except:
global CROSS_ENTROPY_ONE_HOT_WARNING
if not CROSS_ENTROPY_ONE_HOT_WARNING:
print("\nWARNING-INFO: Crossentropy not works with one hot target encoding!\n")
CROSS_ENTROPY_ONE_HOT_WARNING = True
cost = loss(model_out, target[:,0])
loss_models[model_indx] += cost.item()
total_samples+=batch_size
if max_data != 0 and total_samples >= max_data or total_samples == len(X_data): break
accuracies, losses = [], []
for indx, result_model in enumerate(correct_models):
for topkres in result_model:
accuracies.append((topkres*1.0)/total_samples)
losses.append((loss_models[indx]*1.0)/total_samples)
#accuracies = list(((np.array(correct_models) * 1.0) / total_samples))
if len(accuracies) == 1: return accuracies[0], losses[0]
return accuracies[0], accuracies[1], losses[0]
#zipped = [a for a in zip(accuracies,losses)]
#return [item for sublist in zipped for item in sublist]
def evaluate_accuracy_model_predictions(model_out, y_data, batch_size=100, max_data=0, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
# With one model and topk=(1,5) -> acc1, acc5
# Only a single model output (model_out) can be passed!
with torch.no_grad():
if type(topk)==int:
maxk = topk
topk = (topk,)
else: maxk = max(topk)
# Some models return several outputs (e.g. a reshape layer, the
# logits, etc.), so the convention here is that the last output
# holds the model logits used for classification.
if type(model_out) is list or type(model_out) is tuple:
model_out = model_out[-1]
correct_models, total_samples = 0, 0
total_samples = 0
while True:
# Make sure the batch_size does not run past the end of the data
if total_samples + batch_size >= len(model_out): batch_size = (len(model_out)) - total_samples
batch_out = model_out[total_samples:total_samples+batch_size]
target = y_data[total_samples:total_samples+batch_size]
# Turn the logits into a prediction by taking the index with the
# highest value among the logit tuples
res_topk = np.array(topk_accuracy(batch_out, target.cuda(), topk=topk))
correct_models += res_topk
total_samples+=batch_size
if max_data != 0 and total_samples >= max_data or total_samples == len(model_out): break
return (correct_models*1.0 / total_samples)
def predictions_models_data(models, X_data, batch_size=100, net_type="convolutional"):
"""Computes the predictions for the specified data X_data"""
with torch.no_grad():
outs_models, total_samples = [torch.zeros(0,0).cuda()]*len(models), 0
total_samples = 0
while True:
# Make sure the batch_size does not run past the end of the data
if total_samples + batch_size >= len(X_data): batch_size = (len(X_data)) - total_samples
batch = X_data[total_samples:total_samples+batch_size]
# Compute predictions for every model
# We have to run the forward pass for each model and collect its outputs
for model_indx, model in enumerate(models):
if net_type == "fully-connected":
model_out = model.forward(Variable(batch.float().view(batch.shape[0], -1).cuda()))
elif net_type == "convolutional":
model_out = model.forward(Variable(batch.float().cuda()))
else: assert False, "Please define your model type!"
# Some models return several outputs (e.g. a reshape layer, the
# logits, etc.), so the convention here is that the last output
# holds the model logits used for classification.
if type(model_out) is list or type(model_out) is tuple:
model_out = model_out[-1]
outs_models[0]=torch.cat((outs_models[0], model_out))
total_samples+=batch_size
if total_samples == len(X_data): break
if len(outs_models) == 1: return outs_models[0]
return outs_models
# INPUTS: output have shape of [batch_size, category_count]
# and target in the shape of [batch_size] * there is only one true class for each sample
# topk is tuple of classes to be included in the precision
# topk have to a tuple so if you are giving one number, do not forget the comma
def topk_accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
#we do not need gradient calculation for those
with torch.no_grad():
#we will use biggest k, and calculate all precisions from 0 to k
maxk = max(topk)
batch_size = target.size(0)
#topk gives biggest maxk values on dimth dimension from output
#output was [batch_size, category_count], dim=1 so we will select biggest category scores for each batch
# input=maxk, so we will select maxk number of classes
#so result will be [batch_size,maxk]
#topk returns a tuple (values, indexes) of results
# we only need indexes(pred)
_, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
# then we transpose pred to be in shape of [maxk, batch_size]
pred = pred.t()
#we flatten target and then expand target to be like pred
# target [batch_size] becomes [1,batch_size]
# target [1,batch_size] expands to be [maxk, batch_size] by repeating same correct class answer maxk times.
# when you compare pred (indexes) with expanded target, you get 'correct' matrix in the shape of [maxk, batch_size] filled with 1 and 0 for correct and wrong class assignments
correct = pred.eq(target.view(1, -1).expand_as(pred))
""" correct=([[0, 0, 1, ..., 0, 0, 0],
[1, 0, 0, ..., 0, 0, 0],
[0, 0, 0, ..., 1, 0, 0],
[0, 0, 0, ..., 0, 0, 0],
[0, 1, 0, ..., 0, 0, 0]], device='cuda:0', dtype=torch.uint8) """
res = []
# then we look for each k summing 1s in the correct matrix for first k element.
for k in topk:
res.append(correct[:k].view(-1).float().sum(0, keepdim=True))
return res
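# Example (hypothetical logits): output of shape [batch, classes], target of shape [batch]
#   top1, top5 = topk_accuracy(output, target, topk=(1, 5))
# returns the number of samples whose true class falls within the top-1 / top-5 predictions.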
##########################################################################################################
##########################################################################################################
##########################################################################################################
def findLR(model, optimizer, criterion, trainloader, final_value=10, init_value=1e-8, verbose=1):
#https://medium.com/coinmonks/training-neural-networks-upto-10x-faster-3246d84caacd
'''
findLR plots the graph for the optimum learning rates for the model with the
corresponding dataset.
The technique is quite simple. For one epoch,
1. Start with a very small learning rate (around 1e-8) and increase the learning rate linearly.
2. Plot the loss at each step of LR.
3. Stop the learning rate finder when loss stops going down and starts increasing.
A graph is created with the x axis having learning rates and the y axis
having the losses.
Arguments:
1. model - (torch.nn.Module) The deep learning pytorch network.
2. optimizer: (torch.optim) The optimiser for the model eg: SGD,CrossEntropy etc
3. criterion: (torch.nn) The loss function that is used for the model.
4. trainloader: (torch.utils.data.DataLoader) The data loader that loads data in batches for input into model
5. final_value: (float) Final value of learning rate
6. init_value: (float) Starting learning rate.
Returns:
learning rates used and corresponding losses
'''
model.train() # setup model for training configuration
num = len(trainloader) - 1 # total number of batches
mult = (final_value / init_value) ** (1/num)
losses = []
lrs = []
best_loss = 0.
avg_loss = 0.
beta = 0.98 # the value for smooth losses
lr = init_value
for batch_num, (inputs, targets) in enumerate(trainloader):
if verbose==1: print("Testint LR: {}".format(lr))
optimizer.param_groups[0]['lr'] = lr
batch_num += 1 # for non zero value
inputs, targets = inputs.cuda(), targets.cuda() # convert to cuda for GPU usage
optimizer.zero_grad() # clear gradients
outputs = model(inputs) # forward pass
loss = criterion(outputs, targets.long().cuda()) # compute loss
#Compute the smoothed loss to create a clean graph
avg_loss = beta * avg_loss + (1-beta) *loss.item()
smoothed_loss = avg_loss / (1 - beta**batch_num)
#Record the best loss
if smoothed_loss < best_loss or batch_num==1:
best_loss = smoothed_loss
# append loss and learning rates for plotting
lrs.append(math.log10(lr))
losses.append(smoothed_loss)
# Stop if the loss is exploding
if batch_num > 1 and smoothed_loss > 4 * best_loss:
break
# backprop for next step
loss.backward()
optimizer.step()
# update learning rate
lr = mult*lr
#plt.xlabel('Learning Rates')
#plt.ylabel('Losses')
#plt.plot(lrs,losses)
#plt.show()
return lrs, losses
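# Usage sketch (hypothetical model/loader); losses are plotted against log10 learning rates:
#   lrs, losses = findLR(model, optimizer, criterion, trainloader)
#   plt.plot(lrs, losses)  # assumes matplotlib.pyplot is imported as plt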
def get_total_parameters(model):
return sum(p.numel() for p in model.parameters())
def get_trainable_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
##########################################################################################################
##########################################################################################################
##########################################################################################################
def train_discriminator(discriminator_net, discriminator_optimizer, real_data, fake_data, loss):
num_samples = real_data.size(0) # Number of samples in the batch
############################
# (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
###########################
# 1.1 ----> Train with real
# Reseteamos los gradientes
discriminator_optimizer.zero_grad()
discriminator_net.zero_grad()
# prediction on Real Data
prediction_real = discriminator_net(real_data)
# Calculate error and backpropagate
# Debemos tener en cuenta que son reales -> 1s
error_real = loss(prediction_real, simple_target_creator(num_samples, 1))
error_real.backward()
# 1.2 ----> Train on Fake Data
prediction_fake = discriminator_net(fake_data)
# Calculate error and backpropagate
# Debemos tener en cuenta que son falsos -> 0s
error_fake = loss(prediction_fake, simple_target_creator(num_samples, 0))
error_fake.backward()
# 1.3 Update weights with gradients of discriminator
discriminator_optimizer.step()
# Return error
return error_real.item() + error_fake.item()
def train_generator(discriminator_net, generator_optimizer, fake_data, loss):
num_samples = fake_data.size(0) # Number of samples in the batch
############################
# (2) Update G network: maximize log(D(G(z)))
###########################
# Reset gradients
generator_optimizer.zero_grad()
# Run our fake data through the discriminator so that we can
# then try to 'fool' it
prediction = discriminator_net(fake_data)
# Calculate error and backpropagate
# IMPORTANT -> We want the generator to learn to get its samples
# classified as real, so WE COMPUTE THE LOSS WITH 1s,
# as if they were real
error = loss(prediction, simple_target_creator(num_samples, 1))
error.backward()
# 3. Update the generator's weights and gradients
generator_optimizer.step()
# Return error
return error.item()
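# Illustrative sketch of how the two helpers above fit into one adversarial epoch.
# `noise_fn` (a latent sampler), the (data, label) dataloader and the use of .cuda() are assumptions,
# not definitions taken from this file.
def gan_train_epoch(generator_net, discriminator_net, g_optimizer, d_optimizer, loss, dataloader, noise_fn):
    d_errors, g_errors = [], []
    for real_batch, _ in dataloader:
        real_data = real_batch.cuda()
        num_samples = real_data.size(0)
        # 1) Train the discriminator on real data and on detached fake data
        fake_data = generator_net(noise_fn(num_samples)).detach()
        d_errors.append(train_discriminator(discriminator_net, d_optimizer, real_data, fake_data, loss))
        # 2) Train the generator to fool the (now fixed) discriminator
        fake_data = generator_net(noise_fn(num_samples))
        g_errors.append(train_generator(discriminator_net, g_optimizer, fake_data, loss))
    return sum(d_errors) / len(d_errors), sum(g_errors) / len(g_errors)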
def loss_fn_kd_kldivloss(outputs, teacher_outputs, labels, temperature, alpha=0.9):
"""
Compute the knowledge-distillation (KD) loss given outputs, labels.
"Hyperparameters": temperature and alpha
NOTE: the KL Divergence for PyTorch comparing the softmaxs of teacher
and student expects the input tensor to be log probabilities! See Issue #2
source: https://github.com/peterliht/knowledge-distillation-pytorch/blob/master/model/net.py
"""
alpha = alpha
T = temperature
KD_loss = nn.KLDivLoss()(F.log_softmax(outputs/T, dim=1),
F.softmax(teacher_outputs/T, dim=1)) * (alpha * T * T) + \
F.cross_entropy(outputs, labels) * (1. - alpha)
return KD_loss
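# Tiny runnable sketch of the KD loss above (illustrative only; batch size, number of classes and
# hyperparameters are arbitrary example values).
def _kd_loss_demo():
    student_logits = torch.randn(8, 10)
    teacher_logits = torch.randn(8, 10)
    labels = torch.randint(0, 10, (8,))
    return loss_fn_kd_kldivloss(student_logits, teacher_logits, labels, temperature=4.0, alpha=0.9)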
'''
mixup: BEYOND EMPIRICAL RISK MINIMIZATION: https://arxiv.org/abs/1710.09412
https://github.com/facebookresearch/mixup-cifar10
'''
def mixup_data(x, y, alpha=1.0, use_cuda=True):
'''Returns mixed inputs, pairs of targets, and lambda'''
if alpha > 0:
lam = np.random.beta(alpha, alpha)
else:
lam = 1
batch_size = x.size()[0]
if use_cuda:
index = torch.randperm(batch_size).cuda()
else:
index = torch.randperm(batch_size)
mixed_x = lam * x + (1 - lam) * x[index, :]
y_a, y_b = y, y[index]
return mixed_x, y_a, y_b, lam
def mixup_criterion(criterion, pred, y_a, y_b, lam):
return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)
### Usage example
# inputs, targets_a, targets_b, lam = mixup_data(batch_data, batch_target, alpha_mixup)
# inputs, targets_a, targets_b = map(Variable, (inputs, targets_a, targets_b))
# outputs = model(inputs)
# loss = mixup_criterion(loss_ce, outputs, targets_a, targets_b, lam)
# total_loss += loss.item()
''' ######################################################################## '''
''' ############################# CUTOUT ################################## '''
''' ######################################################################## '''
# https://github.com/uoguelph-mlrg/Cutout
# To use this while also using albumentations, add a separate transform, for example
# transforms_torchvision, and apply it through that as self.torchvision_transform(feature)
# There is an example in the LFW dataloader -> data_generator.py -> NPDatasetLFW
class Cutout(object):
"""Randomly mask out one or more patches from an image.
Args:
n_holes (int): Number of patches to cut out of each image.
length (int): The length (in pixels) of each square patch.
"""
def __init__(self, n_holes, length):
self.n_holes = n_holes
self.length = length
def __call__(self, img):
"""
Args:
img (Tensor): Tensor image of size (C, H, W).
Returns:
Tensor: Image with n_holes of dimension length x length cut out of it.
"""
h = img.size(1)
w = img.size(2)
mask = np.ones((h, w), np.float32)
for n in range(self.n_holes):
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 = np.clip(y + self.length // 2, 0, h)
x1 = np.clip(x - self.length // 2, 0, w)
x2 = np.clip(x + self.length // 2, 0, w)
mask[y1: y2, x1: x2] = 0.
mask = torch.from_numpy(mask)
mask = mask.expand_as(img)
img = img.cuda() * mask.cuda()
return img
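# Runnable sketch of the Cutout transform above (illustrative; note that __call__ moves the tensors
# to the GPU, so this requires CUDA to be available).
def _cutout_demo():
    img = torch.rand(3, 32, 32)
    return Cutout(n_holes=1, length=8)(img)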
class BatchCutout(object):
"""Randomly mask out one or more patches from an image.
Args:
n_holes (int): Number of patches to cut out of each image.
length (int): The length (in pixels) of each square patch.
"""
def __init__(self, n_holes, length):
self.n_holes = n_holes
self.length = length
def __call__(self, imgs):
"""
Args:
img (Tensor): Tensor image of size (Batch, C, H, W).
Returns:
Tensor: Images with n_holes of dimension length x length cut out of it.
"""
h = imgs.size(2)
w = imgs.size(3)
outputs = torch.empty(imgs.shape)
for index, img in enumerate(imgs):
mask = np.ones((h, w), np.float32)
for n in range(self.n_holes):
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 = | np.clip(y + self.length // 2, 0, h) | numpy.clip |
import numpy as np
import nibabel as nib
import pytest
from numpy.testing import assert_array_equal, assert_equal, assert_raises
from nilabels.tools.caliber.volumes_and_values import get_total_num_nonzero_voxels, get_num_voxels_from_labels_list, \
get_values_below_labels_list, get_volumes_per_label
def cube_shape(omega, center, side_length, background_intensity=0, foreground_intensity=100, dtype=np.uint8):
sky = background_intensity * np.ones(omega, dtype=dtype)
half_side_length = int(np.ceil(int(side_length / 2)))
for lx in range(-half_side_length, half_side_length + 1):
for ly in range(-half_side_length, half_side_length + 1):
for lz in range(-half_side_length, half_side_length + 1):
sky[center[0] + lx, center[1] + ly, center[2] + lz] = foreground_intensity
return sky
def test_volumes_and_values_total_num_voxels():
omega = [80, 80, 80]
cube_a = [[10, 60, 55], 11, 1]
cube_b = [[50, 55, 42], 17, 2]
cube_c = [[25, 20, 20], 19, 3]
cube_d = [[55, 16, 9], 9, 4]
sky = cube_shape(omega, center=cube_a[0], side_length=cube_a[1], foreground_intensity=cube_a[2]) + \
cube_shape(omega, center=cube_b[0], side_length=cube_b[1], foreground_intensity=cube_b[2]) + \
cube_shape(omega, center=cube_c[0], side_length=cube_c[1], foreground_intensity=cube_c[2]) + \
cube_shape(omega, center=cube_d[0], side_length=cube_d[1], foreground_intensity=cube_d[2])
im_segm = nib.Nifti1Image(sky, affine=np.eye(4))
num_voxels = get_total_num_nonzero_voxels(im_segm)
assert num_voxels == 11 ** 3 + 17 ** 3 + 19 ** 3 + 9 **3
num_voxels = get_total_num_nonzero_voxels(im_segm, list_labels_to_exclude=[2, 4])
assert_equal(num_voxels, 11 ** 3 + 19 ** 3)
def test_volumes_and_values_total_num_voxels_empty():
omega = [80, 80, 80]
im_segm = nib.Nifti1Image(np.zeros(omega), affine=np.eye(4))
num_voxels = get_total_num_nonzero_voxels(im_segm)
print(num_voxels)
assert_equal(num_voxels, 0)
def test_volumes_and_values_total_num_voxels_full():
omega = [80, 80, 80]
im_segm = nib.Nifti1Image(np.ones(omega), affine=np.eye(4))
num_voxels = get_total_num_nonzero_voxels(im_segm)
assert_equal(num_voxels, 80 ** 3)
def test_get_num_voxels_from_labels_list():
omega = [80, 80, 80]
cube_a = [[10, 60, 55], 11, 1]
cube_b = [[50, 55, 42], 15, 2]
cube_c = [[25, 20, 20], 13, 3]
cube_d = [[55, 16, 9], 7, 4]
sky = cube_shape(omega, center=cube_a[0], side_length=cube_a[1], foreground_intensity=cube_a[2]) + \
cube_shape(omega, center=cube_b[0], side_length=cube_b[1], foreground_intensity=cube_b[2]) + \
cube_shape(omega, center=cube_c[0], side_length=cube_c[1], foreground_intensity=cube_c[2]) + \
cube_shape(omega, center=cube_d[0], side_length=cube_d[1], foreground_intensity=cube_d[2])
im_segm = nib.Nifti1Image(sky, affine=np.eye(4))
num_voxels = get_num_voxels_from_labels_list(im_segm, labels_list=[1, 2, 3, 4])
print(num_voxels, [11 **3, 15 **3, 13 **3, 7 ** 3])
assert_array_equal(num_voxels, [11 **3, 15 **3, 13 **3, 7 ** 3])
num_voxels = get_num_voxels_from_labels_list(im_segm, labels_list=[1, [2, 3], 4])
print(num_voxels, [11 ** 3, 15 ** 3 + 13 ** 3, 7 ** 3])
assert_array_equal(num_voxels, [11 ** 3, 15 ** 3 + 13 ** 3, 7 ** 3])
num_voxels = get_num_voxels_from_labels_list(im_segm, labels_list=[[1, 2, 3], 4])
print(num_voxels, [11 ** 3 + 15 ** 3 + 13 ** 3, 7 ** 3])
assert_array_equal(num_voxels, [11 ** 3 + 15 ** 3 + 13 ** 3, 7 ** 3])
num_voxels = get_num_voxels_from_labels_list(im_segm, labels_list=[[1, 2, 3, 4]])
print(num_voxels, [11 ** 3 + 15 ** 3 + 13 ** 3 + 7 ** 3])
assert_array_equal(num_voxels, [11 ** 3 + 15 ** 3 + 13 ** 3 + 7 ** 3])
def test_get_num_voxels_from_labels_list_unexisting_labels():
omega = [80, 80, 80]
cube_a = [[10, 60, 55], 11, 1]
cube_b = [[50, 55, 42], 15, 2]
cube_c = [[25, 20, 20], 13, 3]
cube_d = [[55, 16, 9], 7, 4]
sky = cube_shape(omega, center=cube_a[0], side_length=cube_a[1], foreground_intensity=cube_a[2]) + \
cube_shape(omega, center=cube_b[0], side_length=cube_b[1], foreground_intensity=cube_b[2]) + \
cube_shape(omega, center=cube_c[0], side_length=cube_c[1], foreground_intensity=cube_c[2]) + \
cube_shape(omega, center=cube_d[0], side_length=cube_d[1], foreground_intensity=cube_d[2])
im_segm = nib.Nifti1Image(sky, affine=np.eye(4))
num_voxels = get_num_voxels_from_labels_list(im_segm, labels_list=[1, 2, 3, 5])
print(num_voxels, [11 ** 3, 15 ** 3, 13 ** 3, 0])
assert_array_equal(num_voxels, [11 ** 3, 15 ** 3, 13 ** 3, 0])
num_voxels = get_num_voxels_from_labels_list(im_segm, labels_list=[1, 2, [3, 5]])
print(num_voxels, [11 ** 3, 15 ** 3, 13 ** 3 + 0])
assert_array_equal(num_voxels, [11 ** 3, 15 ** 3, 13 ** 3 + 0])
num_voxels = get_num_voxels_from_labels_list(im_segm, labels_list=[[1, 2], [7, 8]])
print(num_voxels, [11 ** 3 + 15 ** 3, 0])
assert_array_equal(num_voxels, [11 ** 3 + 15 ** 3, 0])
num_voxels = get_num_voxels_from_labels_list(im_segm, labels_list=[[1, 2], [7, -8]])
print(num_voxels, [11 ** 3 + 15 ** 3, 0])
assert_array_equal(num_voxels, [11 ** 3 + 15 ** 3, 0])
def test_get_num_voxels_from_labels_list_wrong_input():
omega = [80, 80, 80]
cube_a_seg = [[10, 60, 55], 11, 1]
sky_s = cube_shape(omega, center=cube_a_seg[0], side_length=cube_a_seg[1], foreground_intensity=cube_a_seg[2])
im_segm = nib.Nifti1Image(sky_s, affine=np.eye(4))
with assert_raises(IOError):
get_num_voxels_from_labels_list(im_segm, [1, [2, 3], '3'])
def test_get_values_below_labels_list():
omega = [80, 80, 80]
cube_a_seg = [[10, 60, 55], 11, 1]
cube_b_seg = [[50, 55, 42], 15, 2]
cube_c_seg = [[25, 20, 20], 13, 3]
cube_d_seg = [[55, 16, 9], 7, 4]
cube_a_anat = [[10, 60, 55], 11, 1.5]
cube_b_anat = [[50, 55, 42], 15, 2.5]
cube_c_anat = [[25, 20, 20], 13, 3.5]
cube_d_anat = [[55, 16, 9], 7, 4.5]
sky_s = cube_shape(omega, center=cube_a_seg[0], side_length=cube_a_seg[1], foreground_intensity=cube_a_seg[2])
sky_s += cube_shape(omega, center=cube_b_seg[0], side_length=cube_b_seg[1], foreground_intensity=cube_b_seg[2])
sky_s += cube_shape(omega, center=cube_c_seg[0], side_length=cube_c_seg[1], foreground_intensity=cube_c_seg[2])
sky_s += cube_shape(omega, center=cube_d_seg[0], side_length=cube_d_seg[1], foreground_intensity=cube_d_seg[2])
sky_a = cube_shape(omega, center=cube_a_anat[0], side_length=cube_a_anat[1], foreground_intensity=cube_a_anat[2], dtype=np.float32)
sky_a += cube_shape(omega, center=cube_b_anat[0], side_length=cube_b_anat[1], foreground_intensity=cube_b_anat[2], dtype=np.float32)
sky_a += cube_shape(omega, center=cube_c_anat[0], side_length=cube_c_anat[1], foreground_intensity=cube_c_anat[2], dtype=np.float32)
sky_a += cube_shape(omega, center=cube_d_anat[0], side_length=cube_d_anat[1], foreground_intensity=cube_d_anat[2], dtype=np.float32)
im_segm = nib.Nifti1Image(sky_s, affine=np.eye(4))
im_anat = nib.Nifti1Image(sky_a, affine=np.eye(4))
assert im_segm.shape == im_anat.shape
labels_list = [[1, 2], [3, 4], 4]
vals_below = get_values_below_labels_list(im_segm, im_anat, labels_list)
assert_array_equal(vals_below[0], np.array([1.5, ] * (11**3) + [2.5] * (15**3)) )
assert_array_equal(vals_below[1], np.array([3.5, ] * (13**3) + [4.5] * (7**3)) )
assert_array_equal(vals_below[2], np.array([4.5] * (7 ** 3)))
def test_get_values_below_labels_list_wrong_input():
omega = [80, 80, 80]
cube_a_seg = [[10, 60, 55], 11, 1]
cube_a_anat = [[10, 60, 55], 11, 1.5]
sky_a = cube_shape(omega, center=cube_a_anat[0], side_length=cube_a_anat[1], foreground_intensity=cube_a_anat[2], dtype=np.float32)
sky_s = cube_shape(omega, center=cube_a_seg[0], side_length=cube_a_seg[1], foreground_intensity=cube_a_seg[2])
im_segm = nib.Nifti1Image(sky_s, affine=np.eye(4))
im_anat = nib.Nifti1Image(sky_a, affine=np.eye(4))
with assert_raises(IOError):
get_values_below_labels_list(im_segm, im_anat, [1, [2, 3], '3', '4'])
def test_get_volumes_per_label_ok_and_with_prior():
omega = [10, 10, 3]
data_test = np.zeros(omega)
data_test[:2, :2, :2] = 2
data_test[-3:, -3:, -2:] = 3
im_test = nib.Nifti1Image(data_test, affine=np.eye(4))
df_vol = get_volumes_per_label(im_test, [0, 2, 3, 4], labels_names=['bkg', 'wm', 'gm', 'nada'])
np.testing.assert_equal(df_vol.loc[0]['Region'], 'bkg')
np.testing.assert_equal(df_vol.loc[0]['Label'], 0)
np.testing.assert_equal(df_vol.loc[0]['Num voxels'], 274)
np.testing.assert_equal(df_vol.loc[0]['Volume'], 274)
np.testing.assert_almost_equal(df_vol.loc[0]['Vol over Tot'], 274 / float(18 + 8))
np.testing.assert_equal(df_vol.loc[1]['Region'], 'wm')
np.testing.assert_equal(df_vol.loc[1]['Label'], 2)
np.testing.assert_equal(df_vol.loc[1]['Num voxels'], 8)
np.testing.assert_equal(df_vol.loc[1]['Volume'], 8)
np.testing.assert_almost_equal(df_vol.loc[1]['Vol over Tot'], 8 / float(18 + 8))
np.testing.assert_equal(df_vol.loc[2]['Region'], 'gm')
np.testing.assert_equal(df_vol.loc[2]['Label'], 3)
np.testing.assert_equal(df_vol.loc[2]['Num voxels'], 18)
np.testing.assert_equal(df_vol.loc[2]['Volume'], 18)
np.testing.assert_almost_equal(df_vol.loc[2]['Vol over Tot'], 18 / float(18 + 8))
np.testing.assert_equal(df_vol.loc[3]['Region'], 'nada')
np.testing.assert_equal(df_vol.loc[3]['Label'], 4)
np.testing.assert_equal(df_vol.loc[3]['Num voxels'], 0)
np.testing.assert_equal(df_vol.loc[3]['Volume'], 0)
np.testing.assert_almost_equal(df_vol.loc[3]['Vol over Tot'], 0)
df_vol_prior = get_volumes_per_label(im_test, [0, 2, 3, 4], labels_names=['bkg', 'wm', 'gm', 'nada'],
tot_volume_prior=10)
np.testing.assert_almost_equal(df_vol_prior.loc[0]['Vol over Tot'], 27.4)
np.testing.assert_almost_equal(df_vol_prior.loc[1]['Vol over Tot'], 0.8)
np.testing.assert_almost_equal(df_vol_prior.loc[2]['Vol over Tot'], 1.8)
np.testing.assert_almost_equal(df_vol_prior.loc[3]['Vol over Tot'], 0)
def test_get_volumes_per_label_tot_labels():
omega = [10, 10, 3]
data_test = np.zeros(omega)
data_test[:2, :2, :2] = 2
data_test[-3:, -3:, -2:] = 3
im_test = nib.Nifti1Image(data_test, affine=np.eye(4))
df_vol_all = get_volumes_per_label(im_test, [0, 2, 3, 4], labels_names='all')
np.testing.assert_equal(df_vol_all.loc[0]['Region'], 'reg 0')
np.testing.assert_equal(df_vol_all.loc[0]['Label'], 0)
np.testing.assert_equal(df_vol_all.loc[0]['Num voxels'], 274)
np.testing.assert_equal(df_vol_all.loc[0]['Volume'], 274)
np.testing.assert_almost_equal(df_vol_all.loc[0]['Vol over Tot'], 274 / float(18 + 8))
df_vol_tot = get_volumes_per_label(im_test, [0, 2, 3, 4], labels_names='tot')
np.testing.assert_equal(df_vol_tot.loc['tot']['Num voxels'], 26)
np.testing.assert_equal(df_vol_tot.loc['tot']['Volume'], 26)
np.testing.assert_almost_equal(df_vol_tot.loc['tot']['Vol over Tot'], 1.0)
def test_get_volumes_per_label_inconsistent_labels_labels_names():
omega = [10, 10, 3]
data_test = np.zeros(omega)
data_test[:2, :2, :2] = 2
data_test[-3:, -3:, -2:] = 3
im_test = nib.Nifti1Image(data_test, affine=np.eye(4))
with np.testing.assert_raises(IOError):
get_volumes_per_label(im_test, [0, 2, 3, 4], labels_names=['a', 'b'])
def test_get_volumes_per_label_sublabels():
omega = [10, 10, 3]
data_test = np.zeros(omega)
data_test[:2, :2, :2] = 2
data_test[-3:, -3:, -2:] = 3
im_test = nib.Nifti1Image(data_test, affine=np.eye(4))
df_vol = get_volumes_per_label(im_test, [0, [2, 3], 4],
labels_names=['bkg', 'gm and wm', 'nada'])
| np.testing.assert_equal(df_vol.loc[0]['Region'], 'bkg') | numpy.testing.assert_equal |
'''
pyFIR - 2022
------------------------------------------------------------------------------
Python FIR filters for real-time convolution
------------------------------------------------------------------------------
MIT License
Copyright (c) 2022 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import numpy as np
from numpy.core.fromnumeric import partition
from numpy.core.numeric import Inf
from numpy.fft import fft, ifft
from time import time
class FIRfilter():
'''
method: 'overlap-save'(default) or 'overlap-add'
B: defines the block length
h: optional call, initializes the impulse response to be convolved
'''
def __init__(self, method="overlap-save", B=512, h=None, partition=None):
self.method = method
self.B = B # block size (audio input len, which will also be the output size)
self.NFFT = None # fft/ifft size
self.flagIRchanged = True # check if the IR changed or it's still the same
self.stored_h = h # save IR for comparison next frame (optional input)
self.left_overs = | np.zeros((B,)) | numpy.zeros |
# Time is being counted
import pygame, math, random, time
from pygame.locals import *
import PIL.Image as Image
import numpy as np
import random
import time
import torch
import torch.nn as nn
from torch.autograd import Variable
import os, sys
os.environ["SDL_VIDEODRIVER"] = "dummy"
def GameStart(model,options,optimizer,ceriterion):
num_games_played = 0
obs = options.observation
pygame.init()
bg = (255, 255, 255)
# bg = (20, 20, 50)
black = (0, 0, 0)
diff_bg = (255, 255, 0)
ww = 400
wh = 300
fenster = pygame.display.set_mode((ww, wh))
pygame.mouse.set_visible(0)
fenster.fill(bg)
pygame.display.set_caption("Don't hit the balls!!!")
pygame.display.update()
spieler = pygame.image.load("player_1.png")
# Startposition player
player_x = ww / 2
player_y = wh / 2
player = pygame.Rect(player_x, player_y, spieler.get_rect().width, spieler.get_rect().height)
# pygame.mixer.music.load("noise.mp3")
########################################################################################################################################################################
difficult = "Normal"
game = 1
reward = 1
while game == 1:
ball_rot = pygame.image.load("ball_rot.png")
rot_rect = pygame.Rect(random.randint(0, ww-ball_rot.get_rect().width), random.randint(0, wh-ball_rot.get_rect().height), ball_rot.get_rect().width, ball_rot.get_rect().height)
ball_gruen = pygame.image.load("ball_gruen.png")
gruen_rect = pygame.Rect(random.randint(0, ww-ball_gruen.get_rect().width), random.randint(0, wh-ball_gruen.get_rect().height), ball_gruen.get_rect().width, ball_gruen.get_rect().height)
ball_blau = pygame.image.load("ball_blau.png")
blau_rect = pygame.Rect(random.randint(0, ww-ball_blau.get_rect().width), random.randint(0, wh-ball_blau.get_rect().height), ball_blau.get_rect().width, ball_blau.get_rect().height)
# explosion = pygame.image.load("explosion.png")
angle_rot = random.randint(0, 360)
angle_gruen = random.randint(0, 360)
angle_blau = random.randint(0, 360)
bilder_baelle = [ball_rot, ball_gruen, ball_blau]
baelle = [rot_rect, gruen_rect, blau_rect]
angle_baelle = [angle_rot, angle_gruen, angle_blau]
#print(angle_rot)
#print(angle_gruen)
#print(angle_blau)
angle_player = 0
pr_player = "false"
pr_player_left = "false"
pr_player_right = "false"
mvsp = 3.5 # Movespeed
mvsp_baelle = 4
spawn_count = 150
zeit_zaehler = 0
clock = pygame.time.Clock()
fps = 50
time_count = 0
start = time.time()
x = 1
x2 = 0
end = 0
action = 5
# Main-loop
while x == 1:
time_count += 1
#print(angle_player)
if pr_player_left == "true":
angle_player += 5
if pr_player_right == "true":
angle_player -= 5
if pr_player == "true":
b = math.cos(math.radians(angle_player)) * mvsp # Computes the length of the side adjacent to the angle.
a = math.sin(math.radians(angle_player)) * mvsp # Computes the length of the side opposite the angle.
#if player.top >= 0 and player.bottom <= wh:
player.top += round(b)
#if player.left >= 0 and player.right <= ww:
player.left += round(a)
'''
We take image data here
and get inputs based on states (images,reward,terminal)
'''
# agent_ouput = model.forward(out)
# print(agent_ouput)
# result.save('i.png')
# exit()
pygame.display.update()
'''
Number of Actions: 6
0 : KeyUp + KeyLeft
1 : KeyUp + KeyRight
2 : KeyUp
3 : KeyLeft
4 : KeyRight
5 : DoNothing
'''
if action == 0:
pr_player_left = "true"
pr_player = "true"
pr_player_right = "false"
if action == 1:
pr_player_left = "false"
pr_player = "true"
pr_player_right = "true"
if action == 2:
pr_player_left = "false"
pr_player = "true"
pr_player_right = "false"
if action == 3:
pr_player_left = "true"
pr_player = "false"
pr_player_right = "false"
if action == 4:
pr_player_left = "false"
pr_player = "false"
pr_player_right = "true"
if action == 5:
pr_player_left = "false"
pr_player = "false"
pr_player_right = "false"
# BALLS MOVE HERE:######################################################
for i in range(len(baelle)):
zaehler = 0
if baelle[i].top <= 0 or baelle[i].bottom >= wh:
zaehler += 1
#print("hallo")
#print(angle_baelle)
#print(angle_baelle[i])
angle_baelle[i] = 360 - angle_baelle[i]
b = math.cos(math.radians(angle_baelle[i])) * mvsp_baelle # Computes the length of the side adjacent to the angle.
a = math.sin(math.radians(angle_baelle[i])) * mvsp_baelle
baelle[i].left += b
baelle[i].top += a
#print(b)
#print(a)
#print(angle_baelle[i])
#print()
if baelle[i].left <= 0 or baelle[i].right >= ww:
zaehler += 1
#print("hallo")
#print(angle_baelle[i])
angle_baelle[i] = 180 - angle_baelle[i]
b = math.cos(math.radians(angle_baelle[i])) * mvsp_baelle # Computes the length of the side adjacent to the angle.
a = math.sin(math.radians(angle_baelle[i])) * mvsp_baelle
baelle[i].left += b
baelle[i].top += a
#print(angle_baelle[i])
#print()
if zaehler == 0:
b = math.cos(math.radians(angle_baelle[i])) * mvsp_baelle # Computes the length of the side adjacent to the angle.
a = math.sin(math.radians(angle_baelle[i])) * mvsp_baelle
baelle[i].left += b
baelle[i].top += a
fenster.fill(bg)
# pygame.draw.rect(fenster,(0, 0, 0), (10,10, ww-20, wh-20),3)
player_rect = spieler.get_rect().center
player_neu = pygame.transform.rotate(spieler, angle_player-180)
player_neu.get_rect().center = player_rect
player_rect = spieler.get_rect()
player_center_neu = player_neu.get_rect().center
player_center_diff = (player.center[0]-player_center_neu[0], player.center[1]-player_center_neu[1])
for i in range(len(baelle)):
fenster.blit(bilder_baelle[i], baelle[i])
#fenster.blit(ball_rot, rot_rect)
#fenster.blit(ball_gruen, gruen_rect)
#fenster.blit(ball_blau, blau_rect)
fenster.blit(player_neu, player_center_diff)
zeit_zaehler += 1
if zeit_zaehler >= spawn_count:
baelle.append(pygame.Rect(random.randint(0, ww-ball_rot.get_rect().width), random.randint(0, wh-ball_rot.get_rect().height), ball_rot.get_rect().width, ball_rot.get_rect().height))
bilder_baelle.append(bilder_baelle[random.randint(0,2)])
angle_baelle.append(random.randint(0, 360))
#print("Hallo")
#mvsp_baelle += 0.25
#print(mvsp_baelle)
zeit_zaehler = 0
reward = 0
for element in baelle:
if player.colliderect(element):
# fenster.blit(explosion, (player.left-explosion.get_rect().width/2+12, player.top-explosion.get_rect().height/2+12))
pygame.display.update()
# pygame.mixer.music.play()
# time.sleep(1)
x = 0
end = 1
reward = -1 #min(-10,-(time_count*0.1))
player.left = ww/2 - player.width/2
player.top = wh/2 - player.height/2
pygame.display.update()
clock.tick(fps)
if not player.colliderect(20, 20, ww-40, wh-40):
x = 0
end = 1
reward = -1#min(-10,-(time_count*0.1))
player.left = ww/2 - player.width/2
player.top = wh/2 - player.height/2
pygame.event.pump()
image_data = pygame.surfarray.array3d(pygame.display.get_surface())
agent_input = Image.fromarray(image_data).resize((100,75)).convert(mode='L')
# agent_input.save("a.png")
# exit()
agent_input = np.asarray(agent_input).astype(np.float32)
# agent_input = Image.fromarray(np.asarray(image_data).astype(np.float32))
# agent_input = agent_input.rotate(-90, expand=1 ).resize((400,300)).convert('L')
agent_ouput,obs = step_model(agent_input,model,optimizer,ceriterion,obs,reward,action,options)
action = np.argmax(agent_ouput)
# print(action)
# print(obs)
pygame.display.update()
# End:
if end == 1:
x = 1
# reset reward
reward = 0
if model.epsilon > options.final_e:
delta = (options.init_e - options.final_e)/options.exploration
model.epsilon -= delta
num_games_played += 1
end = time.time()
print("time: " + str(round((end - start), 2)) + " seconds.")
if options.mode == "Train" and num_games_played % options.save_checkpoint_freq == 0:
print("saving model",num_games_played,model.epsilon)
torch.save(model.state_dict(),options.model_name)
if num_games_played == options.max_episode:
print("Max episodes reached! exiting.")
exit()
# while x == 1:
# for event in pygame.event.get():
# if event.type == KEYDOWN:
# x = 0
# fenster.fill((255, 255, 50))
# basicFont = pygame.font.SysFont(None, 100)#150)
# text = basicFont.render("You hit a ball. =(", True, black)
# text_time = text_subt.render("time: " + str(round(time_count/fps, 2)) + " seconds.", True, black)
# text_Esc = text_subt.render("Press any key to continue.", True, black)
# fenster.blit(text, (50, 100))
# fenster.blit(text_time, (75, 300))
# fenster.blit(text_Esc, (75, 500))
# pygame.display.update()
########################################################################################################################################################################
pygame.quit()
def step_model(agent_input,model,optimizer,ceriterion,obs,reward,prev_action,options):
# print("reward",reward,"obs",obs)
if reward < 0 :
terminal = True
else:
terminal = False
if obs > 0:
# if obs % 500 == 0:
# print(obs)
obs -= 1
action = model.get_action_randomly()
# saving previous action response
action_set = | np.zeros(model.actions, dtype=np.float32) | numpy.zeros |
import cv2
import os
import sys
import pcl
from math import copysign, log10
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.spatial import ConvexHull
import random
import math
import itertools
from sklearn.mixture import GaussianMixture
from sklearn.neighbors import KDTree
SYMMETRY_MEASURE_CLOUD_NORMALS_TRADEOFF= 0.2 # scaling factor for difference in normals wrt. difference in position for points,
# when computing difference between two point clouds.
# 0 => Only look at difference between point and its closest point
# Higher value => matching normals are more important than point distances
SMOOTHNESS_MEASURE_NUMBINS = 8 # Number of bins in histogram. We found 8 to work best quite consistently.
NNRADIUS = 0.004 # Used in Local Convexity and Smoothness measure for local neighborhood finding
dir_name=os.path.dirname(__file__)
image_path = os.path.join(dir_name, "object")
def get_image(filename):
path=os.path.join(image_path,filename)
if "depthcrop" in filename or 'maskcrop' in filename:
im = cv2.imread(path,0)
else:
im = cv2.imread(path)
return im
def apply_mask(mask,image):
i=image.copy()
i[mask == 0]=0
return i
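# The conversion below appears to be the widely used Kinect raw-disparity-to-metres calibration
# (constants -0.0030711016 and 3.3309495161); the factor of 4 suggests the stored depth values are
# raw disparities rescaled by 1/4. This note is an observation, not documented in the original file.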
def depth_to_meter(depth):
depth=depth.astype(float)
try:
return 1/((depth * 4 * -0.0030711016) + 3.3309495161)
except:
return 0.0
# just return mean of distances from points in cloud1 to their nearest neighbors in cloud2
def cloudAlignmentScoreDense(cloud1, cloud2):
tree = KDTree(cloud2)
N=cloud1.shape[0]
accum=0.0
result = tree.query(cloud1, k=1)
for i,(dist, ind) in enumerate(zip(*result)):
accum += dist[0]
return accum/N
def cloudAlignmentScoreDenseWithNormalsNormalized(cloud1, normals1, cloud2, normals2, relweight, dnormalize):
tree = KDTree(cloud2)
N=cloud1.shape[0]
accum=0.0
result = tree.query(cloud1, k=1)
for i,(dist, ind) in enumerate(zip(*result)):
accum += dist[0] / dnormalize
dot = np.dot(normals1[i],normals2[ind[0]])
accum += relweight*(1.0 - dot)
return accum/N
def calculate_compactness_3d(points):
max_length = np.max(points,axis=0)[0]
min_length = np.min(points,axis=0)[0]
return points.shape[0] / (max(max_length-min_length, 0.0000001)**2)
def calculate_symmetry_3d(points_np, normals, relweight=SYMMETRY_MEASURE_CLOUD_NORMALS_TRADEOFF):
mins=points_np.min(axis=0)
maxes=points_np.max(axis=0)
ranges = maxes - mins
ranges /= ranges.sum()
score=0.0
for i,vector in enumerate(np.array([[-1,1,1],[1,-1,1],[1,1,-1]])):
dest=points_np*vector
normdest=normals*vector
overlap = cloudAlignmentScoreDenseWithNormalsNormalized(points_np, normals, dest, normdest, relweight, ranges[i])\
+cloudAlignmentScoreDenseWithNormalsNormalized(dest, normdest, points_np, normals, relweight, ranges[i])
score += ranges[i]*overlap
return -score
def calculate_global_convexity_3d(points):
hull=ConvexHull(points)
overlap= cloudAlignmentScoreDense(points, hull.points[hull.vertices])
return -overlap
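# Runnable sketch (illustrative only): evaluate the measures above on a random point cloud.
# The unit-length random normals are a placeholder assumption; in practice normals are estimated
# from the cloud itself.
def _measures_demo(num_points=200):
    pts = np.random.rand(num_points, 3)
    normals = np.random.randn(num_points, 3)
    normals /= np.linalg.norm(normals, axis=1, keepdims=True)
    return {
        "compactness": calculate_compactness_3d(pts),
        "symmetry": calculate_symmetry_3d(pts, normals),
        "global_convexity": calculate_global_convexity_3d(pts),
    }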
def calculate_local_convexity_and_smoothness_3d(points, normals, NNradius=NNRADIUS, NUMBINS=SMOOTHNESS_MEASURE_NUMBINS):
tree = KDTree(points)
N=points.shape[0]
score=0.0
Hs=0.0
bins= | np.ones(NUMBINS) | numpy.ones |
import calendar
import datetime
import numpy as np
import pandas as pd
from pandas.util.testing import (assert_frame_equal, assert_series_equal,
assert_index_equal)
from numpy.testing import assert_allclose
import pytest
from pvlib.location import Location
from pvlib import solarposition, spa
from conftest import (requires_ephem, needs_pandas_0_17,
requires_spa_c, requires_numba)
# setup times and locations to be tested.
times = pd.date_range(start=datetime.datetime(2014,6,24),
end=datetime.datetime(2014,6,26), freq='15Min')
tus = Location(32.2, -111, 'US/Arizona', 700) # no DST issues possible
# In 2003, DST in US was from April 6 to October 26
golden_mst = Location(39.742476, -105.1786, 'MST', 1830.14) # no DST issues possible
golden = Location(39.742476, -105.1786, 'America/Denver', 1830.14) # DST issues possible
times_localized = times.tz_localize(tus.tz)
tol = 5
@pytest.fixture()
def expected_solpos():
return pd.DataFrame({'elevation': 39.872046,
'apparent_zenith': 50.111622,
'azimuth': 194.340241,
'apparent_elevation': 39.888378},
index=['2003-10-17T12:30:30Z'])
@pytest.fixture()
def expected_solpos_multi():
return pd.DataFrame({'elevation': [39.872046, 39.505196],
'apparent_zenith': [50.111622, 50.478260],
'azimuth': [194.340241, 194.311132],
'apparent_elevation': [39.888378, 39.521740]},
index=[['2003-10-17T12:30:30Z', '2003-10-18T12:30:30Z']])
# the physical tests are run at the same time as the NREL SPA test.
# pyephem reproduces the NREL result to 2 decimal places.
# this doesn't mean that one code is better than the other.
@requires_spa_c
def test_spa_c_physical(expected_solpos):
times = pd.date_range(datetime.datetime(2003,10,17,12,30,30),
periods=1, freq='D', tz=golden_mst.tz)
ephem_data = solarposition.spa_c(times, golden_mst.latitude,
golden_mst.longitude,
pressure=82000,
temperature=11)
expected_solpos.index = times
assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
@requires_spa_c
def test_spa_c_physical_dst(expected_solpos):
times = pd.date_range(datetime.datetime(2003,10,17,13,30,30),
periods=1, freq='D', tz=golden.tz)
ephem_data = solarposition.spa_c(times, golden.latitude,
golden.longitude,
pressure=82000,
temperature=11)
expected_solpos.index = times
assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
def test_spa_python_numpy_physical(expected_solpos):
times = pd.date_range(datetime.datetime(2003,10,17,12,30,30),
periods=1, freq='D', tz=golden_mst.tz)
ephem_data = solarposition.spa_python(times, golden_mst.latitude,
golden_mst.longitude,
pressure=82000,
temperature=11, delta_t=67,
atmos_refract=0.5667,
how='numpy')
expected_solpos.index = times
assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
def test_spa_python_numpy_physical_dst(expected_solpos):
times = pd.date_range(datetime.datetime(2003,10,17,13,30,30),
periods=1, freq='D', tz=golden.tz)
ephem_data = solarposition.spa_python(times, golden.latitude,
golden.longitude,
pressure=82000,
temperature=11, delta_t=67,
atmos_refract=0.5667,
how='numpy')
expected_solpos.index = times
assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
@requires_numba
def test_spa_python_numba_physical(expected_solpos):
times = pd.date_range(datetime.datetime(2003,10,17,12,30,30),
periods=1, freq='D', tz=golden_mst.tz)
ephem_data = solarposition.spa_python(times, golden_mst.latitude,
golden_mst.longitude,
pressure=82000,
temperature=11, delta_t=67,
atmos_refract=0.5667,
how='numba', numthreads=1)
expected_solpos.index = times
assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
@requires_numba
def test_spa_python_numba_physical_dst(expected_solpos):
times = pd.date_range(datetime.datetime(2003,10,17,13,30,30),
periods=1, freq='D', tz=golden.tz)
ephem_data = solarposition.spa_python(times, golden.latitude,
golden.longitude, pressure=82000,
temperature=11, delta_t=67,
atmos_refract=0.5667,
how='numba', numthreads=1)
expected_solpos.index = times
assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
@needs_pandas_0_17
def test_get_sun_rise_set_transit():
south = Location(-35.0, 0.0, tz='UTC')
times = pd.DatetimeIndex([datetime.datetime(1996, 7, 5, 0),
datetime.datetime(2004, 12, 4, 0)]
).tz_localize('UTC')
sunrise = pd.DatetimeIndex([datetime.datetime(1996, 7, 5, 7, 8, 15),
datetime.datetime(2004, 12, 4, 4, 38, 57)]
).tz_localize('UTC').tolist()
sunset = pd.DatetimeIndex([datetime.datetime(1996, 7, 5, 17, 1, 4),
datetime.datetime(2004, 12, 4, 19, 2, 2)]
).tz_localize('UTC').tolist()
result = solarposition.get_sun_rise_set_transit(times, south.latitude,
south.longitude,
delta_t=64.0)
frame = pd.DataFrame({'sunrise':sunrise, 'sunset':sunset}, index=times)
result_rounded = pd.DataFrame(index=result.index)
# need to iterate because to_datetime does not accept 2D data
# the rounding fails on pandas < 0.17
for col, data in result.iteritems():
result_rounded[col] = pd.to_datetime(
np.floor(data.values.astype(np.int64) / 1e9)*1e9, utc=True)
del result_rounded['transit']
assert_frame_equal(frame, result_rounded)
# tests from USNO
# Golden
golden = Location(39.0, -105.0, tz='MST')
times = pd.DatetimeIndex([datetime.datetime(2015, 1, 2),
datetime.datetime(2015, 8, 2),]
).tz_localize('MST')
sunrise = pd.DatetimeIndex([datetime.datetime(2015, 1, 2, 7, 19, 2),
datetime.datetime(2015, 8, 2, 5, 1, 26)
]).tz_localize('MST').tolist()
sunset = pd.DatetimeIndex([datetime.datetime(2015, 1, 2, 16, 49, 10),
datetime.datetime(2015, 8, 2, 19, 11, 31)
]).tz_localize('MST').tolist()
result = solarposition.get_sun_rise_set_transit(times, golden.latitude,
golden.longitude,
delta_t=64.0)
frame = pd.DataFrame({'sunrise':sunrise, 'sunset':sunset}, index=times)
result_rounded = pd.DataFrame(index=result.index)
# need to iterate because to_datetime does not accept 2D data
# the rounding fails on pandas < 0.17
for col, data in result.iteritems():
result_rounded[col] = (pd.to_datetime(
np.floor(data.values.astype(np.int64) / 1e9)*1e9, utc=True)
.tz_convert('MST'))
del result_rounded['transit']
assert_frame_equal(frame, result_rounded)
@requires_ephem
def test_pyephem_physical(expected_solpos):
times = pd.date_range(datetime.datetime(2003,10,17,12,30,30),
periods=1, freq='D', tz=golden_mst.tz)
ephem_data = solarposition.pyephem(times, golden_mst.latitude,
golden_mst.longitude, pressure=82000,
temperature=11)
expected_solpos.index = times
assert_frame_equal(expected_solpos.round(2),
ephem_data[expected_solpos.columns].round(2))
@requires_ephem
def test_pyephem_physical_dst(expected_solpos):
times = pd.date_range(datetime.datetime(2003,10,17,13,30,30), periods=1,
freq='D', tz=golden.tz)
ephem_data = solarposition.pyephem(times, golden.latitude,
golden.longitude, pressure=82000,
temperature=11)
expected_solpos.index = times
assert_frame_equal(expected_solpos.round(2),
ephem_data[expected_solpos.columns].round(2))
@requires_ephem
def test_calc_time():
import pytz
import math
# validation from USNO solar position calculator online
epoch = datetime.datetime(1970,1,1)
epoch_dt = pytz.utc.localize(epoch)
loc = tus
loc.pressure = 0
actual_time = pytz.timezone(loc.tz).localize(
datetime.datetime(2014, 10, 10, 8, 30))
lb = pytz.timezone(loc.tz).localize(datetime.datetime(2014, 10, 10, tol))
ub = pytz.timezone(loc.tz).localize(datetime.datetime(2014, 10, 10, 10))
alt = solarposition.calc_time(lb, ub, loc.latitude, loc.longitude,
'alt', math.radians(24.7))
az = solarposition.calc_time(lb, ub, loc.latitude, loc.longitude,
'az', math.radians(116.3))
actual_timestamp = (actual_time - epoch_dt).total_seconds()
assert_allclose((alt.replace(second=0, microsecond=0) -
epoch_dt).total_seconds(), actual_timestamp)
assert_allclose((az.replace(second=0, microsecond=0) -
epoch_dt).total_seconds(), actual_timestamp)
@requires_ephem
def test_earthsun_distance():
times = pd.date_range(datetime.datetime(2003,10,17,13,30,30),
periods=1, freq='D')
distance = solarposition.pyephem_earthsun_distance(times).values[0]
assert_allclose(1, distance, atol=0.1)
def test_ephemeris_physical(expected_solpos):
times = pd.date_range(datetime.datetime(2003,10,17,12,30,30),
periods=1, freq='D', tz=golden_mst.tz)
ephem_data = solarposition.ephemeris(times, golden_mst.latitude,
golden_mst.longitude,
pressure=82000,
temperature=11)
expected_solpos.index = times
expected_solpos = np.round(expected_solpos, 2)
ephem_data = np.round(ephem_data, 2)
assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
def test_ephemeris_physical_dst(expected_solpos):
times = pd.date_range(datetime.datetime(2003,10,17,13,30,30),
periods=1, freq='D', tz=golden.tz)
ephem_data = solarposition.ephemeris(times, golden.latitude,
golden.longitude, pressure=82000,
temperature=11)
expected_solpos.index = times
expected_solpos = np.round(expected_solpos, 2)
ephem_data = np.round(ephem_data, 2)
assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
def test_get_solarposition_error():
times = pd.date_range(datetime.datetime(2003,10,17,13,30,30),
periods=1, freq='D', tz=golden.tz)
with pytest.raises(ValueError):
ephem_data = solarposition.get_solarposition(times, golden.latitude,
golden.longitude,
pressure=82000,
temperature=11,
method='error this')
@pytest.mark.parametrize(
"pressure, expected", [
(82000, expected_solpos()),
(90000, pd.DataFrame(
np.array([[ 39.88997, 50.11003, 194.34024, 39.87205, 14.64151,
50.12795]]),
columns=['apparent_elevation', 'apparent_zenith', 'azimuth', 'elevation',
'equation_of_time', 'zenith'],
index=expected_solpos().index))
])
def test_get_solarposition_pressure(pressure, expected):
times = pd.date_range(datetime.datetime(2003,10,17,13,30,30),
periods=1, freq='D', tz=golden.tz)
ephem_data = solarposition.get_solarposition(times, golden.latitude,
golden.longitude,
pressure=pressure,
temperature=11)
this_expected = expected.copy()
this_expected.index = times
this_expected = np.round(this_expected, 5)
ephem_data = np.round(ephem_data, 5)
assert_frame_equal(this_expected, ephem_data[this_expected.columns])
@pytest.mark.parametrize(
"altitude, expected", [
(golden.altitude, expected_solpos()),
(2000, pd.DataFrame(
np.array([[ 39.88788, 50.11212, 194.34024, 39.87205, 14.64151,
50.12795]]),
columns=['apparent_elevation', 'apparent_zenith', 'azimuth', 'elevation',
'equation_of_time', 'zenith'],
index=expected_solpos().index))
])
def test_get_solarposition_altitude(altitude, expected):
times = pd.date_range(datetime.datetime(2003,10,17,13,30,30),
periods=1, freq='D', tz=golden.tz)
ephem_data = solarposition.get_solarposition(times, golden.latitude,
golden.longitude,
altitude=altitude,
temperature=11)
this_expected = expected.copy()
this_expected.index = times
this_expected = np.round(this_expected, 5)
ephem_data = np.round(ephem_data, 5)
assert_frame_equal(this_expected, ephem_data[this_expected.columns])
@pytest.mark.parametrize(
"delta_t, method, expected", [
(None, 'nrel_numpy', expected_solpos_multi()),
(67.0, 'nrel_numpy', expected_solpos_multi()),
pytest.mark.xfail(raises=ValueError, reason = 'spa.calculate_deltat not implemented for numba yet')
((None, 'nrel_numba', expected_solpos_multi())),
(67.0, 'nrel_numba', expected_solpos_multi())
])
def test_get_solarposition_deltat(delta_t, method, expected):
times = pd.date_range(datetime.datetime(2003,10,17,13,30,30),
periods=2, freq='D', tz=golden.tz)
ephem_data = solarposition.get_solarposition(times, golden.latitude,
golden.longitude,
pressure=82000,
delta_t=delta_t,
temperature=11,
method=method)
this_expected = expected.copy()
this_expected.index = times
this_expected = np.round(this_expected, 5)
ephem_data = np.round(ephem_data, 5)
assert_frame_equal(this_expected, ephem_data[this_expected.columns])
def test_get_solarposition_no_kwargs(expected_solpos):
times = pd.date_range(datetime.datetime(2003,10,17,13,30,30),
periods=1, freq='D', tz=golden.tz)
ephem_data = solarposition.get_solarposition(times, golden.latitude,
golden.longitude)
expected_solpos.index = times
expected_solpos = np.round(expected_solpos, 2)
ephem_data = np.round(ephem_data, 2)
assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
@requires_ephem
def test_get_solarposition_method_pyephem(expected_solpos):
times = pd.date_range(datetime.datetime(2003, 10, 17, 13, 30, 30),
periods=1, freq='D', tz=golden.tz)
ephem_data = solarposition.get_solarposition(times, golden.latitude,
golden.longitude,
method='pyephem')
expected_solpos.index = times
expected_solpos = np.round(expected_solpos, 2)
ephem_data = np.round(ephem_data, 2)
assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
def test_nrel_earthsun_distance():
times = pd.DatetimeIndex([datetime.datetime(2015, 1, 2),
datetime.datetime(2015, 8, 2),]
).tz_localize('MST')
result = solarposition.nrel_earthsun_distance(times, delta_t=64.0)
expected = pd.Series(np.array([0.983289204601, 1.01486146446]),
index=times)
assert_series_equal(expected, result)
times = datetime.datetime(2015, 1, 2)
result = solarposition.nrel_earthsun_distance(times, delta_t=64.0)
expected = pd.Series(np.array([0.983289204601]),
index=pd.DatetimeIndex([times, ]))
assert_series_equal(expected, result)
def test_equation_of_time():
times = pd.DatetimeIndex(start="1/1/2015 0:00", end="12/31/2015 23:00",
freq="H")
output = solarposition.spa_python(times, 37.8, -122.25, 100)
eot = output['equation_of_time']
eot_rng = eot.max() - eot.min() # range of values, around 30 minutes
eot_1 = solarposition.equation_of_time_spencer71(times.dayofyear)
eot_2 = solarposition.equation_of_time_pvcdrom(times.dayofyear)
assert np.allclose(eot_1 / eot_rng, eot / eot_rng, atol=0.3) # spencer
assert np.allclose(eot_2 / eot_rng, eot / eot_rng, atol=0.4) # pvcdrom
def test_declination():
times = pd.DatetimeIndex(start="1/1/2015 0:00", end="12/31/2015 23:00",
freq="H")
atmos_refract = 0.5667
delta_t = spa.calculate_deltat(times.year, times.month)
unixtime = np.array([calendar.timegm(t.timetuple()) for t in times])
_, _, declination = spa.solar_position(unixtime, 37.8, -122.25, 100,
1013.25, 25, delta_t, atmos_refract,
sst=True)
declination = np.deg2rad(declination)
declination_rng = declination.max() - declination.min()
declination_1 = solarposition.declination_cooper69(times.dayofyear)
declination_2 = solarposition.declination_spencer71(times.dayofyear)
a, b = declination_1 / declination_rng, declination / declination_rng
assert np.allclose(a, b, atol=0.03) # cooper
a, b = declination_2 / declination_rng, declination / declination_rng
assert np.allclose(a, b, atol=0.02) # spencer
def test_analytical_zenith():
times = pd.DatetimeIndex(start="1/1/2015 0:00", end="12/31/2015 23:00",
freq="H").tz_localize('Etc/GMT+8')
lat, lon = 37.8, -122.25
lat_rad = np.deg2rad(lat)
output = solarposition.spa_python(times, lat, lon, 100)
solar_zenith = np.deg2rad(output['zenith']) # spa
# spencer
eot = solarposition.equation_of_time_spencer71(times.dayofyear)
hour_angle = np.deg2rad(solarposition.hour_angle(times, lon, eot))
decl = solarposition.declination_spencer71(times.dayofyear)
zenith_1 = solarposition.solar_zenith_analytical(lat_rad, hour_angle, decl)
# pvcdrom and cooper
eot = solarposition.equation_of_time_pvcdrom(times.dayofyear)
hour_angle = np.deg2rad(solarposition.hour_angle(times, lon, eot))
decl = solarposition.declination_cooper69(times.dayofyear)
zenith_2 = solarposition.solar_zenith_analytical(lat_rad, hour_angle, decl)
assert | np.allclose(zenith_1, solar_zenith, atol=0.015) | numpy.allclose |
import subprocess
import matplotlib
matplotlib.rcParams['mathtext.fontset'] = 'cm'
matplotlib.rc('font', family='serif', serif='CMU Serif')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib import rc
#rc('text', usetex=True)
from polaris import viz, util
import numpy as np
from dipy.viz import window, actor
from dipy.data import get_sphere
import vtk
from tqdm import tqdm
import tifffile
import os
import logging
log = logging.getLogger('log')
class Spang:
"""
A Spang (short for spatio-angular density) is a representation of a
spatio-angular density f(r, s) stored as a 4D array of voxel values
and spherical harmonic coefficients [x, y, z, j]. A Spang object is
a discretized member of object space U.
"""
def __init__(self, f=np.zeros((3,3,3,15), dtype=np.float32),
vox_dim=(1,1,1), sphere=get_sphere('symmetric724')):
self.X = f.shape[0]
self.Y = f.shape[1]
self.Z = f.shape[2]
# Calculate band dimensions
self.lmax, mm = util.j2lm(f.shape[-1] - 1)
self.J = util.maxl2maxj(self.lmax)
# Fill the rest of the last l band with zeros
if f.shape[-1] != self.J:
temp = np.zeros((self.X, self.Y, self.Z, self.J))
temp[...,:f.shape[-1]] = f
self.f = temp
else:
self.f = f
self.vox_dim = vox_dim
self.sphere = sphere
self.sphere = sphere.subdivide()
self.N = len(self.sphere.theta)
self.calc_B()
def calc_B(self):
# Calculate odf to sh matrix
B = np.zeros((self.N, self.J))
for (n, j), x in np.ndenumerate(B):
l, m = util.j2lm(j)
B[n, j] = util.spZnm(l, m, self.sphere.theta[n], self.sphere.phi[n])
self.B = B
self.Binv = np.linalg.pinv(self.B, rcond=1e-15)
def density(self, norm=True):
if norm:
return self.f[...,0]/np.max(self.f[...,0])
else:
return self.f[...,0]
def gfa(self):
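# Generalized fractional anisotropy per voxel: sqrt(1 - c0**2 / sum_j c_j**2), where c_j are the
# spherical harmonic coefficients; 0 for an isotropic ODF, approaching 1 as the ODF sharpens.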
return np.nan_to_num(np.sqrt(1 - (self.f[...,0]**2)/np.sum(self.f**2, axis=-1)))
def op(self, xyz):
sft = util.xyz_sft(xyz, max_l=2)[1:]
return np.sqrt(4*np.pi/5)*np.einsum('ijkl,l->ijk', self.f[...,1:6], sft)
def tensor(self):
log.info("Calculating tensor fits")
M = np.load(os.path.join(os.path.dirname(__file__), 'harmonics/sh2tensor.npy'))
Di = np.einsum('ijkl,lm->ijkm', self.f[...,0:6], M)
D = np.zeros(self.f.shape[0:3]+(3,3), dtype=np.float32)
D[...,0,0] = Di[...,0]; D[...,0,1] = Di[...,3]; D[...,0,2] = Di[...,5];
D[...,1,0] = Di[...,3]; D[...,1,1] = Di[...,1]; D[...,1,2] = Di[...,4];
D[...,2,0] = Di[...,5]; D[...,2,1] = Di[...,4]; D[...,2,2] = Di[...,2];
eigs = np.linalg.eigh(D)
principal = eigs[1][...,-1]*eigs[1][...,-1]
return Di.astype(np.float32), principal.astype(np.float32)
def save_summary(self, filename='out.pdf', density_filter=None, mag=4,
mask=None, scale=1.0, keep_parallels=False, skip_n=1):
log.info('Generating ' + filename)
if density_filter is not None:
density_mask = self.density() > density_filter
mask = np.logical_or(mask, density_mask).astype(np.bool)
pos = (-0.05, 1.05, 0.5, 0.55) # Arrow and label positions
vmin = 0
vmax = 1
inches = 4
rows = 2
cols = 3
colormap = 'Reds'
widths = [1]*cols
heights = [1]*rows
M = np.max(self.f.shape)
x_frac = self.f.shape[0]/M
if density_filter is None:
filter_label = ''
else:
filter_label = '\n where density $>$ ' + str(density_filter)
if skip_n == 1:
skip_label = ''
else:
skip_label = '\n downsampled ' + str(skip_n) + '$\\times$'
col_labels = np.array([['ODF', 'Density', 'GFA'], ['Peak', 'Ellipsoid', 'Principal']])
f = plt.figure(figsize=(inches*np.sum(widths), inches*np.sum(heights)))
spec = gridspec.GridSpec(ncols=cols, nrows=rows, width_ratios=widths,
height_ratios=heights, hspace=0.1, wspace=0.075)
for row in range(rows):
for col in range(cols):
if col < 3:
yscale_label = None
if row == 0 and col == 0:
bar = True
bar_label = 'ODF radius' + skip_label + filter_label
colormap = 'Reds'
self.visualize(out_path='parallels/', zoom_start=1.7,
outer_box=False, axes=False,
clip_neg=False, azimuth=0, elevation=0,
n_frames=1, mag=mag, video=False, scale=scale,
interact=False, viz_type='ODF',
save_parallels=True, mask=mask, skip_n=skip_n)
if row == 1 and col == 1:
bar = False
bar_label = 'Principal' + skip_label + filter_label
self.visualize(out_path='parallels/', zoom_start=1.7,
outer_box=False, axes=False,
clip_neg=False, azimuth=0, elevation=0,
n_frames=1, mag=mag, video=False, scale=scale,
interact=False, viz_type='Ellipsoid',
save_parallels=True, mask=mask, skip_n=skip_n)
if row == 1 and col == 2:
bar = False
bar_label = 'Principal' + skip_label + filter_label
self.yscale = 1e-3*self.vox_dim[1]*self.f.shape[0]
yscale_label = '{:.2f}'.format(self.yscale) + ' $\mu$m'
self.visualize(out_path='parallels/', zoom_start=1.7,
outer_box=False, axes=False,
clip_neg=False, azimuth=0, elevation=0,
n_frames=1, mag=mag, video=False, scale=scale,
interact=False, viz_type='Principal',
save_parallels=True, mask=mask, skip_n=skip_n)
if row == 1 and col == 0:
bar = False
bar_label = 'Peak' + skip_label + filter_label
self.visualize(out_path='parallels/', zoom_start=1.7,
outer_box=False, axes=False,
clip_neg=False, azimuth=0, elevation=0,
n_frames=1, mag=mag, video=False, scale=scale,
interact=False, viz_type='Peak',
save_parallels=True, mask=mask, skip_n=skip_n)
if row == 0 and col == 1:
colormap = 'gray'
bar = True
bar_label = 'Density'
viz.plot_parallels(self.density(), out_path='parallels/', outer_box=False,
axes=False, clip_neg=False, azimuth=0,
elevation=0, scale=scale)
if row == 0 and col == 2:
colormap = 'gray'
bar = True
bar_label = 'GFA' + filter_label
viz.plot_parallels(self.gfa(), out_path='parallels/', outer_box=False,
axes=False, clip_neg=False, azimuth=0,
elevation=0, scale=scale, mask=mask)
viz.plot_images(['parallels/yz.tif', 'parallels/xy.tif', 'parallels/xz.tif'],
f, spec, row, col,
col_labels=col_labels, row_labels=None,
vmin=vmin, vmax=vmax, colormap=colormap,
rows=rows, cols=cols, x_frac=x_frac,
yscale_label=yscale_label, pos=pos, bar=bar, bar_label=bar_label)
#if not keep_parallels:
#subprocess.call(['rm', '-r', 'parallels'])
elif col == 3:
viz.plot_colorbar(f, spec, row, col, vmin, vmax, colormap)
log.info('Saving ' + filename)
f.savefig(filename, bbox_inches='tight')
def save_mips(self, filename='spang_mips.pdf'):
log.info('Writing '+filename)
col_labels = np.apply_along_axis(util.j2str, 1, np.arange(self.J)[:,None])[None,:]
viz.plot5d(filename, self.f[...,None], col_labels=col_labels)
def save_tiff(self, filename='sh.tif', data=None):
util.mkdir(filename)
if data is None:
data = self.f
log.info('Writing '+filename)
with tifffile.TiffWriter(filename, imagej=True) as tif:
if data.ndim == 4:
dat = np.moveaxis(data, [2, 3, 1, 0], [0, 1, 2, 3])
tif.save(dat[None,:,:,:,:].astype(np.float32)) # TZCYXS
elif data.ndim == 3:
d = np.moveaxis(data, [2, 1, 0], [0, 1, 2])
tif.save(d[None,:,None,:,:].astype(np.float32)) # TZCYXS
def read_tiff(self, filename):
log.info('Reading '+filename)
with tifffile.TiffFile(filename) as tf:
self.f = np.ascontiguousarray(np.moveaxis(tf.asarray(), [0, 1, 2, 3], [2, 3, 1, 0]))
self.X = self.f.shape[0]
self.Y = self.f.shape[1]
self.Z = self.f.shape[2]
def save_stats(self, folder='./', save_sh=False):
if not os.path.exists(folder):
os.makedirs(folder)
if save_sh:
self.save_tiff(filename=folder+'sh.tif', data=self.f)
self.save_tiff(filename=folder+'density.tif', data=self.density())
self.save_tiff(filename=folder+'gfa.tif', data=self.gfa())
def visualize(self, out_path='out/', outer_box=True, axes=True,
clip_neg=False, azimuth=0, elevation=0, n_frames=1, mag=1,
video=False, viz_type='ODF', mask=None, mask_roi=None,
skip_n=1, skip_n_roi=1, scale=1, roi_scale=1, zoom_start=1.0,
zoom_end=1.0, top_zoom=1, interact=False,
save_parallels=False, my_cam=None, compress=True, roi=None,
corner_text='', scalemap=None, titles_on=True,
scalebar_on=True, invert=False, flat=False, colormap='bwr',
global_cm=True, camtilt=False, axes_on=False, colors=None,
arrows=None, arrow_color=np.array([0,0,0]), linewidth=0.1,
mark_slices=None, shift=[0,0,0], profiles=[], markers=[],
marker_colors=[], marker_scale=1, normalize_glyphs=True,
gamma=1, density_max=1):
log.info('Preparing to render ' + out_path)
# Handle scalemap
if scalemap is None:
scalemap = util.ScaleMap(min=np.min(self.f[...,0]), max=np.max(self.f[...,0]))
# Prepare output
util.mkdir(out_path)
# Setup vtk renderers
renWin = vtk.vtkRenderWindow()
if not interact:
renWin.SetOffScreenRendering(1)
if isinstance(viz_type, str):
viz_type = [viz_type]
# Rows and columns
cols = len(viz_type)
if roi is None:
rows = 1
else:
rows = 2
renWin.SetSize(np.int(500*mag*cols), np.int(500*mag*rows))
# Select background color
if save_parallels:
bg_color = [1,1,1]
line_color = np.array([0,0,0])
line_bcolor = np.array([1,1,1])
else:
if not invert:
bg_color = [0,0,0]
line_color = np.array([1,1,1])
line_bcolor = np.array([0,0,0])
else:
bg_color = [1,1,1]
line_color = np.array([0,0,0])
line_bcolor = np.array([1,1,1])
# For each viz_type
rens = []
zoom_start = []
zoom_end = []
for row in range(rows):
for col in range(cols):
# Render
ren = window.Scene()
rens.append(ren)
if viz_type[col] == 'Density':
ren.background([0,0,0])
line_color = np.array([1,1,1])
else:
ren.background(bg_color)
ren.SetViewport(col/cols,(rows - row - 1)/rows,(col+1)/cols,(rows - row)/rows)
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Mask
if mask is None:
mask = np.ones((self.X, self.Y, self.Z), dtype=np.bool)
if mask_roi is None:
mask_roi = mask
# Main vs roi
if row == 0:
data = self.f
skip_mask = np.zeros(mask.shape, dtype=np.bool)
skip_mask[::skip_n,::skip_n,::skip_n] = 1
my_mask = np.logical_and(mask, skip_mask)
scale = scale
scalemap = scalemap
if np.sum(my_mask) == 0:
my_mask[0,0,0] = True
else:
data = self.f[roi[0][0]:roi[1][0], roi[0][1]:roi[1][1], roi[0][2]:roi[1][2], :]
roi_mask = mask_roi[roi[0][0]:roi[1][0], roi[0][1]:roi[1][1], roi[0][2]:roi[1][2]]
skip_mask = np.zeros(roi_mask.shape, dtype=np.bool)
skip_mask[::skip_n_roi,::skip_n_roi,::skip_n_roi] = 1
my_mask = np.logical_and(roi_mask, skip_mask)
scale = roi_scale
scalemap = scalemap
# Add visuals to renderer
if viz_type[col] == "ODF":
renWin.SetMultiSamples(4)
log.info('Rendering '+str(np.sum(my_mask)) + ' ODFs')
fodf_spheres = viz.odf_sparse(data, self.Binv, sphere=self.sphere,
scale=skip_n*scale*0.5, norm=False,
colormap=colormap, mask=my_mask,
global_cm=global_cm, scalemap=scalemap,
odf_sphere=False, flat=flat, normalize=normalize_glyphs)
ren.add(fodf_spheres)
elif viz_type[col] == "ODF Sphere":
renWin.SetMultiSamples(4)
log.info('Rendering '+str(np.sum(my_mask)) + ' ODFs')
fodf_spheres = viz.odf_sparse(data, self.Binv, sphere=self.sphere,
scale=skip_n*scale*0.5, norm=False,
colormap=colormap, mask=my_mask,
global_cm=global_cm, scalemap=scalemap,
odf_sphere=True, flat=flat)
ren.add(fodf_spheres)
elif viz_type[col] == "Ellipsoid":
renWin.SetMultiSamples(4)
log.info('Warning: scaling is not implemented for ellipsoids')
log.info('Rendering '+str(np.sum(my_mask)) + ' ellipsoids')
fodf_peaks = viz.tensor_slicer_sparse(data,
sphere=self.sphere,
scale=skip_n*scale*0.5,
mask=my_mask)
ren.add(fodf_peaks)
elif viz_type[col] == "Peak":
renWin.SetMultiSamples(4)
log.info('Rendering '+str(np.sum(my_mask)) + ' peaks')
fodf_peaks = viz.peak_slicer_sparse(data, self.Binv, self.sphere.vertices,
linewidth=linewidth, scale=skip_n*scale*0.5, colors=colors,
mask=my_mask, scalemap=scalemap, normalize=normalize_glyphs)
# fodf_peaks.GetProperty().LightingOn()
# fodf_peaks.GetProperty().SetDiffuse(0.4) # Doesn't work (VTK bug I think)
# fodf_peaks.GetProperty().SetAmbient(0.15)
# fodf_peaks.GetProperty().SetSpecular(0)
# fodf_peaks.GetProperty().SetSpecularPower(0)
ren.add(fodf_peaks)
elif viz_type[col] == "Principal":
log.info('Warning: scaling is not implemented for principals')
log.info('Rendering '+str(np.sum(my_mask)) + ' principals')
fodf_peaks = viz.principal_slicer_sparse(data, self.Binv, self.sphere.vertices,
scale=skip_n*scale*0.5,
mask=my_mask)
ren.add(fodf_peaks)
elif viz_type[col] == "Density":
renWin.SetMultiSamples(0) # Must be zero for smooth
# renWin.SetAAFrames(4) # Slow antialiasing for volume renders
log.info('Rendering density')
gamma_corr = np.where(data[...,0]>0, data[...,0]**gamma, data[...,0])
scalemap.max = density_max*scalemap.max**gamma
volume = viz.density_slicer(gamma_corr, scalemap)
ren.add(volume)
X = np.float(data.shape[0]) - shift[0]
Y = np.float(data.shape[1]) - shift[1]
Z = np.float(data.shape[2]) - shift[2]
# Titles
if row == 0 and titles_on:
viz.add_text(ren, viz_type[col], 0.5, 0.96, mag)
# Scale bar
if col == cols - 1 and not save_parallels and scalebar_on:
yscale = 1e-3*self.vox_dim[1]*data.shape[1]
yscale_label = '{:.2g}'.format(yscale) + ' um'
viz.add_text(ren, yscale_label, 0.5, 0.03, mag)
viz.draw_scale_bar(ren, X, Y, Z, [1,1,1])
# Corner text
if row == rows - 1 and col == 0 and titles_on:
viz.add_text(ren, corner_text, 0.03, 0.03, mag, ha='left')
# Draw boxes
Nmax = np.max([X, Y, Z])
if outer_box:
if row == 0:
viz.draw_outer_box(ren, np.array([[0,0,0],[X,Y,Z]]) - 0.5, line_color)
if row == 1:
viz.draw_outer_box(ren, np.array([[0,0,0],[X,Y,Z]]) - 0.5, [0,1,1])
# Add colored axes
if axes:
viz.draw_axes(ren, np.array([[0,0,0], [X,Y,Z]]) - 0.5)
# Add custom arrows
if arrows is not None:
for i in range(arrows.shape[0]):
viz.draw_single_arrow(ren, arrows[i,0,:], arrows[i,1,:], color=arrow_color)
viz.draw_unlit_line(ren, [np.array([arrows[i,0,:],[X/2,Y/2,Z/2]])], [arrow_color], lw=0.3, scale=1.0)
# Draw roi box
if row == 0 and roi is not None:
maxROI = np.max([roi[1][0] - roi[0][0], roi[1][1] - roi[0][1], roi[1][2] - roi[0][2]])
maxXYZ = np.max([self.X, self.Y, self.Z])
viz.draw_outer_box(ren, roi, [0,1,1], lw=0.3*maxXYZ/maxROI)
viz.draw_axes(ren, roi, lw=0.3*maxXYZ/maxROI)
# Draw marked slices
if mark_slices is not None:
for slicen in mark_slices:
md = np.max((X, Z))
frac = slicen/data.shape[1]
rr = 0.83*md
t1 = 0
t2 = np.pi/2
t3 = np.pi
t4 = 3*np.pi/2
points = [np.array([[X/2+rr*np.cos(t1),frac*Y,Z/2+rr*np.sin(t1)],
[X/2+rr*np.cos(t2),frac*Y,Z/2+rr*np.sin(t2)],
[X/2+rr*np.cos(t3),frac*Y,Z/2+rr*np.sin(t3)],
[X/2+rr*np.cos(t4),frac*Y,Z/2+rr*np.sin(t4)],
[X/2+rr*np.cos(t1),frac*Y,Z/2+rr*np.sin(t1)],
[X/2+rr*np.cos(t2),frac*Y,Z/2+rr*np.sin(t2)]])]
viz.draw_unlit_line(ren, points, 6*[line_color+0.6], lw=0.3, scale=1.0)
# Draw markers
for i, marker in enumerate(markers):
# Draw sphere
source = vtk.vtkSphereSource()
source.SetCenter(marker)
source.SetRadius(marker_scale)
source.SetThetaResolution(30)
source.SetPhiResolution(30)
# mapper
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(source.GetOutputPort())
# actor
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(marker_colors[i,:])
actor.GetProperty().SetLighting(0)
ren.AddActor(actor)
# Draw profile lines
colors = np.array([[1,0,0],[0,1,0],[0,0,1],[1,1,0],[0,1,1]])
for i, profile in enumerate(profiles):
n_seg = profile.shape[0]
viz.draw_unlit_line(ren, [profile], n_seg*[colors[i,:]], lw=0.5, scale=1.0)
# Draw sphere
source = vtk.vtkSphereSource()
source.SetCenter(profile[0])
source.SetRadius(1)
source.SetThetaResolution(30)
source.SetPhiResolution(30)
# mapper
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(source.GetOutputPort())
# actor
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# actor.GetProperty().SetColor(colors[i,:])
actor.GetProperty().SetLighting(0)
# assign actor to the renderer
ren.AddActor(actor)
# Setup cameras
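# Pull the camera back far enough that a bounding sphere of radius Rmax fits inside a
# ~30 degree field of view (half-angle pi/12), plus an edge offset that depends on the
# volume's aspect ratio.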
Rmax = np.linalg.norm([Z/2, X/2, Y/2])
Rcam_rad = Rmax/np.tan(np.pi/12)
Ntmax = np.max([X, Y])
ZZ = Z
if ZZ > Ntmax:
Rcam_edge = np.max([X/2, Y/2])
else:
Rcam_edge = np.min([X/2, Y/2])
Rcam = Rcam_edge + Rcam_rad
if my_cam is None:
cam = ren.GetActiveCamera()
if camtilt:
cam.SetPosition(((X-1)/2, (Y-1)/2, (Z-1)/2 + Rcam))
cam.SetViewUp((-1, 0, 1))
if axes_on:
max_dim = np.max((X, Z))
viz.draw_unlit_line(ren, [np.array([[(X- max_dim)/2,Y/2,Z/2],[X/2,Y/2,+Z/2],[X/2,Y/2,(Z + max_dim)/2]])], 3*[line_color], lw=max_dim/250, scale=1.0)
else:
cam.SetPosition(((X-1)/2 + Rcam, (Y-1)/2, (Z-1)/2))
cam.SetViewUp((0, 0, 1))
cam.SetFocalPoint(((X-1)/2, (Y-1)/2, (Z-1)/2))
#ren.reset_camera()
else:
ren.set_camera(*my_cam)
ren.azimuth(azimuth)
ren.elevation(elevation)
# Set zooming
if save_parallels:
zoom_start.append(1.7)
zoom_end.append(1.7)
else:
if row == 0:
zoom_start.append(1.3*top_zoom)
zoom_end.append(1.3*top_zoom)
else:
zoom_start.append(1.3)
zoom_end.append(1.3)
# Setup writer
writer = vtk.vtkTIFFWriter()
if not compress:
writer.SetCompressionToNoCompression()
# Execute renders
az = 90
naz = np.ceil(360/n_frames)
log.info('Rendering ' + out_path)
if save_parallels:
# Parallel rendering for summaries
filenames = ['yz', 'xy', 'xz']
zooms = [zoom_start[0], 1.0, 1.0]
azs = [90, -90, 0]
els = [0, 0, 90]
ren.projection(proj_type='parallel')
ren.reset_camera()
for i in tqdm(range(3)):
ren.zoom(zooms[i])
ren.azimuth(azs[i])
ren.elevation(els[i])
ren.reset_clipping_range()
renderLarge = vtk.vtkRenderLargeImage()
renderLarge.SetMagnification(1)
renderLarge.SetInput(ren)
renderLarge.Update()
writer.SetInputConnection(renderLarge.GetOutputPort())
writer.SetFileName(out_path + filenames[i] + '.tif')
writer.Write()
else:
# Rendering for movies
for j, ren in enumerate(rens):
ren.zoom(zoom_start[j])
for i in tqdm(range(n_frames)):
for j, ren in enumerate(rens):
ren.zoom(1 + ((zoom_end[j] - zoom_start[j])/n_frames))
ren.azimuth(az)
ren.reset_clipping_range()
renderLarge = vtk.vtkRenderLargeImage()
renderLarge.SetMagnification(1)
renderLarge.SetInput(ren)
renderLarge.Update()
writer.SetInputConnection(renderLarge.GetOutputPort())
if n_frames != 1:
writer.SetFileName(out_path + str(i).zfill(3) + '.tif')
else:
writer.SetFileName(out_path + '.tif')
writer.Write()
az = naz
# Interactive
if interact:
window.show(ren)
# Generate video (requires ffmpeg)
if video:
log.info('Generating video from frames')
fps = np.ceil(n_frames/12)
# Frames are written as numbered TIFFs above, so read the .tif sequence here.
subprocess.call(['ffmpeg', '-nostdin', '-y', '-framerate', str(fps),
'-loglevel', 'panic', '-i', out_path+'%03d'+'.tif',
'-pix_fmt', 'yuvj420p', '-vcodec', 'mjpeg',
out_path[:-1]+'.avi'])
# subprocess.call(['rm', '-r', out_path])
return my_cam
def vis_profiles(self, filename, profilesi, dx=0.13, prof_type='density'):
from scipy.interpolate import interpn
out = []
xpos_out = []
for profilei in profilesi:
grid = (np.arange(self.X), np.arange(self.Y), np.arange(self.Z))
N = profilei.shape[0]
diffs = profilei[1:N] - profilei[0:N-1]
dirs = diffs/np.linalg.norm(diffs, axis=1, keepdims=True)
if prof_type == 'density':
out.append(interpn(grid, self.f[...,0], profilei))
ylabel = 'Normalized density'
ylim = [0,1]
# Calculate x positions
xpos = np.zeros(N) # N
xpos[1:] = np.linalg.norm(profilei[1:,:] - profilei[0:-1,:], axis=-1)
xpos = np.cumsum(xpos)*dx
elif prof_type == 'gfa':
out.append(interpn(grid, self.gfa(), profilei))
ylabel = 'GFA'
ylim = [0,1]
# Calculate x positions
xpos = np.zeros(N)
"""
"""
import sys
import os
import json
import logging
import copy
import numpy as np
import pandas as pd
from collections import OrderedDict, defaultdict
from collections.abc import Iterable
from tabulate import tabulate
from monty.json import MontyEncoder
from monty.string import list_strings, is_string
from monty.termcolor import cprint
from monty.bisect import find_le
from pymatgen.core.periodic_table import Element
from abipy.tools.plotting import add_fig_kwargs, get_ax_fig_plt
from pseudo_dojo.refdata.deltafactor import df_database, df_compute
from pseudo_dojo.refdata.gbrv import gbrv_database
from pseudo_dojo.refdata.lantanides.database import raren_database
from pseudo_dojo.util.dojo_eos import EOS
logger = logging.getLogger(__name__)
def dojo_dfact_results(pseudo, num_sites, volumes, etotals):
"""
This function computes the deltafactor and returns the dictionary to be inserted
in the dojoreport file.
Args:
pseudo: Pseudopotential object.
num_sites: Number of sites in unit cell
volumes: List with unit cell volumes in Ang**3
etotals: List of total energies in eV.
Return:
(dojo_entry, eos_fit)
where dojo_entry is the Dictionary with results to be inserted in the djrepo file.
eos_fit is the object storing the results of the EOS fit.
"""
nan = float('NaN')
dojo_entry = dict(
etotals=list(etotals),
volumes=list(volumes),
num_sites=num_sites,
dfact_meV=nan,
dfactprime_meV=nan,
v0=nan,
b0=nan,
b0_GPa=nan,
b1=nan,
)
volumes, etotals = np.asarray(volumes), np.asarray(etotals)
eos_fit = None
try:
# Use same fit as the one employed for the deltafactor.
eos_fit = EOS.DeltaFactor().fit(volumes/num_sites, etotals/num_sites)
# Get reference results (Wien2K).
wien2k = df_database(pseudo.xc).get_entry(pseudo.symbol)
# Compute deltafactor estimator.
dfact = df_compute(wien2k.v0, wien2k.b0_GPa, wien2k.b1,
eos_fit.v0, eos_fit.b0_GPa, eos_fit.b1, b0_GPa=True)
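# Df' (dfactprime) renormalizes Df by the reference product V_ref*B_ref = 30 Ang**3 * 100 GPa
# relative to this pseudo's own V0*B0, which makes values more comparable across elements.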
dfactprime_meV = dfact * (30 * 100) / (eos_fit.v0 * eos_fit.b0_GPa)
dfres = {
"dfact_meV": dfact,
"dfactprime_meV": dfactprime_meV,
"v0": eos_fit.v0,
"b0": eos_fit.b0,
"b0_GPa": eos_fit.b0_GPa,
"b1": eos_fit.b1,
}
for k, v in dfres.items():
v = v if not isinstance(v, complex) else nan
dfres[k] = v
dojo_entry.update(dfres)
except EOS.Error as exc:
dojo_entry["_exceptions"] = str(exc)
return dojo_entry, eos_fit
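# A minimal, self-contained sketch (not part of the original API) of how the function
# above is typically driven; the volumes/energies are made-up placeholders, a real
# workflow takes them from converged equation-of-state calculations.
def _example_dfact_usage(pseudo):
    volumes = [18.2, 19.1, 20.0, 20.9, 21.8]                  # unit cell volumes in Ang**3
    etotals = [-155.10, -155.42, -155.53, -155.41, -155.18]   # total energies in eV
    entry, eos_fit = dojo_dfact_results(pseudo, num_sites=1, volumes=volumes, etotals=etotals)
    return entry["dfact_meV"], eos_fit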
def dojo_gbrv_results(pseudo, struct_type, num_sites, volumes, etotals):
"""
This function computes the GBRV results and returns the dictionary
to be inserted in the dojoreport file.
Args:
pseudo: Pseudopotential object.
struct_type: "fcc" or "bcc"
num_sites: Number of sites in unit cell
volumes: List with unit cell volumes in Ang**3
etotals: List of total energies in eV.
Return:
(dojo_entry, eos_fit)
where dojo_entry is the Dictionary with results to be inserted in the djrepo file.
eos_fit is the object storing the results of the EOS fit.
"""
# Read etotals and fit E(V) with a parabola to find the minimum
assert len(etotals) == len(volumes)
dojo_entry = dict(
volumes=list(volumes),
etotals=list(etotals),
num_sites=num_sites,
)
eos_fit = None
try:
eos_fit = EOS.Quadratic().fit(volumes, etotals)
except EOS.Error as exc:
dojo_entry["_exceptions"] = str(exc)
return dojo_entry, eos_fit
# Function to compute cubic a0 from primitive v0 (depends on struct_type)
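# The primitive fcc/bcc cell contains one atom while the conventional cubic cell
# contains 4 (fcc) or 2 (bcc) atoms, hence a0**3 = 4*v0 for fcc and a0**3 = 2*v0 for bcc.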
vol2a = {"fcc": lambda vol: (4 * vol) ** (1/3.),
"bcc": lambda vol: (2 * vol) ** (1/3.),
}[struct_type]
a0 = vol2a(eos_fit.v0)
dojo_entry.update(dict(
v0=eos_fit.v0,
b0=eos_fit.b0,
#b1=eos_fit.b1, # infinity
a0=a0,
struct_type=struct_type
))
db = gbrv_database(pseudo.xc)
ref = db.get_entry(pseudo.symbol, stype=struct_type)
pawabs_err = a0 - ref.gbrv_paw
pawrel_err = 100 * (a0 - ref.gbrv_paw) / ref.gbrv_paw
# AE results for P and Hg are missing.
if ref.ae is not None:
abs_err = a0 - ref.ae
rel_err = 100 * (a0 - ref.ae) / ref.ae
else:
# Use GBRV_PAW as reference.
abs_err = pawabs_err
rel_err = pawrel_err
print("for GBRV struct_type: ", struct_type, "a0= ", a0, "Angstrom")
print("AE - THIS: abs_err = %f, rel_err = %f %%" % (abs_err, rel_err))
print("GBRV-PAW - THIS: abs_err = %f, rel_err = %f %%" % (pawabs_err, pawrel_err))
dojo_entry["a0_abs_err"] = abs_err
dojo_entry["a0_rel_err"] = rel_err
return dojo_entry, eos_fit
class DojoReportError(Exception):
"""Exception raised by DoJoReport."""
class DojoReport(dict):
"""
Dict-like object with the validation results.
This object is usually created via the class methods:
DojoReport.from_file and DojoReport.empty_from_pseudo.
{
"version": "1.0"
"symbol": "H",
"pseudo_type": "NC",
"md5": "13198abb7506a840b7d46ef46b54d789",
"ppgen_hints": {
"low": {"ecut": 30.0, "pawecutdg": 30.0},
"normal": {"ecut": 34.0, "pawecutdg": 34.0},
"high": {"ecut": 39.0, "pawecutdg": 39.0}
},
"hints": {
"low": {"ecut": 30.0, "pawecutdg": 30.0},
"normal": {"ecut": 34.0, "pawecutdg": 34.0},
"high": {"ecut": 39.0, "pawecutdg": 39.0},
},
"ecuts": [29.0, 31.0, 33.0],
"deltafactor": {}
"gbrv_bcc": {},
"gbrv_fcc": {},
"ghosts": []
"phonons": []
}
"ecut": 32.0
"pawecutdg": 64.0,
"b0": 0.06400805819081799,
"b0_GPa": 10.255221080448488,
"b1": 2.6449207740813594,
"dfact_meV": 0.2774768889565598,
"dfactprime_meV": 4.701668998922405,
"etotals": []
"num_sites": 4,
"v0": 17.264380250637252,
"volumes": [],
"""
# List of dojo_trials. Remember to update the list if you add a new test to the DOJO_REPORT
ALL_TRIALS = [
"deltafactor",
"gbrv_bcc",
"gbrv_fcc",
"phgamma",
"ghosts",
]
# Add trials done with SOC.
ALL_TRIALS += [n + "_soc" for n in ALL_TRIALS]
_TRIALS2KEY = {
"deltafactor": "dfact_meV",
"gbrv_bcc": "a0_rel_err",
"gbrv_fcc": "a0_rel_err",
"phgamma": "all",
#"ghosts": "all",
}
# We use three different level of accuracy.
ALL_ACCURACIES = ("low", "normal", "high")
ACC2COLOR = {"low": "yellow", "normal": "green", "high": "red"}
# Tolerances on the deltafactor prime (in eV) used for the hints.
#ATOLS = (0.5, 0.1, 0.02)
ATOLS = (0.5, 0.3, 0.1)
# For noble gasses:
#ATOLS = (1.0, 0.2, 0.04)
# Version of the DojoReport.
LAST_VERSION = "1.0"
Error = DojoReportError
@classmethod
def from_file(cls, filepath):
"""Read the DojoReport from file."""
filepath = os.path.abspath(filepath)
with open(filepath, "rt") as fh:
d = json.load(fh)
new = cls(**d)
new.path = filepath
# TODO
#new.xc = XcFunc.from_dict(new["xc"])
return new
@classmethod
def empty_from_pseudo(cls, pseudo, ppgen_hints, devel=False):
"""
Initialize an empty `DojoReport` from the pseudo and an initial guess
for the cutoff energies in Hartree
Args:
pseudo: :class:`Pseudo` object.
ppgen_hints: Initial hints on the cutoff energy provided by the pp generator.
Dictionary [accuracy]["ecut"] --> ecut_value
"""
# Build initial list of cutoff energies for tests.
#dense_right = np.arange(ppgen_ecut, ppgen_ecut + 6*2, step=2)
#dense_left = np.arange(max(ppgen_ecut-6, 2), ppgen_ecut, step=2)
#coarse_high = np.arange(ppgen_ecut + 15, ppgen_ecut + 35, step=5)
new = cls()
estart = ppgen_hints["high"]["ecut"]
dense_right = np.linspace(estart - 10, estart + 10, num=11)
ecuts = list(dense_right) + [dense_right[-1] + 8, dense_right[-1] + 10,]
# devel is for tuning the pseudo, only two cutoffs
# development run: few, relatively high ecut calculations
if devel: ecuts = [estart, estart + 2]
if pseudo.isnc:
pseudo_type = "NC"
elif pseudo.ispaw:
pseudo_type = "PAW"
else:
raise TypeError("Neither NC nor PAW pseudo!")
new.update(
basename=pseudo.basename,
version=cls.LAST_VERSION,
symbol=pseudo.symbol,
pseudo_type=pseudo_type,
xc=pseudo.xc.as_dict(),
md5=pseudo.compute_md5(),
ppgen_hints=ppgen_hints,
ecuts=ecuts,
)
new.path = pseudo.djrepo_path
return new
# TODO Remove
@classmethod
def from_hints(cls, ppgen_ecut, symbol):
"""
Initialize an empty DojoReport from the initial guesses for
the cutoff energy in Hartree
Args:
ppgen_ecut: tuple(3) cutoff energies for the 3 accuracy levels.
symbol: Chemical symbol.
"""
dense_right = np.arange(ppgen_ecut, ppgen_ecut + 6*2, step=2)
# -*- coding: utf-8 -*-
"""coronasense_analysis.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1SptFyUf_Y4y1APZxBY-ZteB3q3mcQkPE
"""
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import matplotlib
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.cbook as cbook
from matplotlib import cm
from matplotlib.colors import Normalize
from scipy.interpolate import interpn
df = pd.read_csv('data.csv')
df['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
df = df.set_index(pd.DatetimeIndex(df['timestamp']))
df = df[df['timestamp'] >= pd.Timestamp('2020/08/30')]
df = df[df['timestamp'] <= pd.Timestamp('2020/10/03')]
df = df[df['obj_score'] <= 39]
df = df[df['obj_score'] >= 33.5]
df['obj_score'] = df['obj_score'] + 0.1
print(df['timestamp'])
print(df.index)
def density_scatter( x , y, ax = None, sort = True, bins = 15, **kwargs ) :
if ax is None :
fig , ax = plt.subplots()
data , x_e, y_e = np.histogram2d( x, y, bins = bins, density = True)
z = interpn( ( 0.5*(x_e[1:] + x_e[:-1]) , 0.5*(y_e[1:]+y_e[:-1]) ) , data , np.vstack([x,y]).T , method = "splinef2d", bounds_error = False)
z[np.where(np.isnan(z))] = 0.0
if sort :
idx = z.argsort()
x, y, z = x[idx], y[idx], z[idx]
ax.scatter( x, y, c=z, s=2.5, **kwargs )
norm = Normalize(vmin = np.min(z), vmax = np.max(z))
#cbar = fig.colorbar(cm.ScalarMappable(norm = norm), ax=ax)
#cbar.ax.set_ylabel('Density')
return ax
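# Example (sketch with synthetic data):
#   density_scatter(np.random.randn(500), np.random.randn(500), bins=20)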
fig, ax = plt.subplots(figsize=(6, 2.5), dpi=180)
plt.plot(df['timestamp'], df['obj_score'],'.', alpha=0.1, label='Forehead temperature')
df_mean = df.resample('D').apply({'obj_score':'mean'})
df_std = df.resample('D').apply({'obj_score':'std'})
plt.plot(df_mean.index+pd.Timedelta('0.5 day'), df_mean['obj_score'], label='Average over 24h')
plt.fill_between(df_mean.index+pd.Timedelta('0.5 day'), df_mean['obj_score'] - df_std['obj_score']/2, df_mean['obj_score'] + df_std['obj_score']/2,
color='gray', alpha=1.0)
# Set title and labels for axes
ax.set( ylabel="Forehead temp. (deg. C)", xlabel="Time (days)")
# Rotate tick marks on x-axis
#plt.setp(ax.get_xticklabels(), rotation=0)
frame1 = plt.gca()
#frame1.axes.xaxis.set_ticks([])
#ax.set_legend['Forehead temperature', 'Average over a day']
ax.legend()
print(len(df))
print("avg {} std {}".format(df['obj_score'].mean(), df['obj_score'].std()))
fig.subplots_adjust(bottom=0.2)
mean_raw = df['obj_score'].mean()
plt.tight_layout()
ax.set_ylim(33,39)
years = mdates.YearLocator() # every year
months = mdates.MonthLocator() # every month
days = mdates.DayLocator() # every month
years_fmt = mdates.DateFormatter('%Y')
ax.xaxis.set_major_locator(months)
ax.xaxis.set_minor_locator(days)
# format the coords message box
#ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
# rotates and right aligns the x labels, and moves the bottom of the
# axes up to make room for them
#fig.autofmt_xdate()
plt.savefig('all_data.png')
groups = df.groupby(['rfid_uid']).std()['obj_score']
print('Personal mean {}, personal std {}'.format(groups.mean(), groups.std()))
print("Location A")
df_filtered = df[df['machine_id'].isin([4428])]
print("len {} avg {} std {}".format(len(df_filtered), df_filtered['obj_score'].mean(), df_filtered['obj_score'].std()))
groups = df_filtered.groupby(['rfid_uid']).std()['obj_score']
print('Personal mean {}, personal std {}'.format(groups.mean(), groups.std()))
print("Location B")
df_filtered = df[df['machine_id'].isin([2952,3075,3690,3813,3936,4059,4182,4305])]
groups = df_filtered.groupby(['rfid_uid']).std()['obj_score']
print('Personal mean {}, personal std {}'.format(groups.mean(), groups.std()))
print("len {} avg {} std {}".format(len(df_filtered), df_filtered['obj_score'].mean(), df_filtered['obj_score'].std()))
print("Location C")
df_filtered = df[df['machine_id'].isin([6396])]
groups = df_filtered.groupby(['rfid_uid']).std()['obj_score']
print('Personal mean {}, personal std {}'.format(groups.mean(), groups.std()))
print("len {} avg {} std {}".format(len(df_filtered), df_filtered['obj_score'].mean(), df_filtered['obj_score'].std()))
more_than_37 = df['obj_score']
print(len(more_than_37[more_than_37 > more_than_37.mean() + 3*more_than_37.std()]))
df_filtered_notouch = df[df['meteo_realtemp'] > 0]
df_filtered_notouch = df_filtered_notouch[df_filtered_notouch['machine_id'].isin([3075, 3936, 4059, 5781, 4428, 5535, 7134, 2706, 5904, 6396])]
plt.rcParams["figure.dpi"] = 180
fig, ax = plt.subplots(figsize=(6, 2.5), dpi=180)
df.hist(column=['obj_score'], bins=20, figsize=(6, 3), ax = plt.gca())
ax.set( ylabel="# Measurements", xlabel="Forehead temperature (degrees C)")
ax.set_title("")
ax.set_xlim(33,39)
# x coordinates for the lines
xcoords = [37.81]
# colors for the lines
colors = ['r']
for xc,c in zip(xcoords,colors):
plt.axvline(x=xc, label='Fever threshold (μ+3σ = {})'.format(xc), c=c)
fig.subplots_adjust(bottom=0.2)
plt.legend()
plt.tight_layout()
plt.savefig('hist_all.png')
fig, ax = plt.subplots(figsize=(6, 2.5), dpi=180)
df_outside = df[df['meteo_realtemp'] > 0]
df_outside = df_outside[df_outside['machine_id'].isin([3075, 3936, 4059, 5781, 4428, 5535, 7134, 2706, 5904, 6396])]
linear_regressor = LinearRegression()
linear_regressor.fit(df_outside['meteo_realtemp'].values.reshape(-1, 1), df_outside['obj_score'].values.reshape(-1, 1))
X = np.linspace(df_outside['meteo_realtemp'].min(), df_outside['meteo_realtemp'].max()).reshape(-1, 1)
Y_pred = linear_regressor.predict(X)
score_r2 = linear_regressor.score(df_outside['meteo_realtemp'].values.reshape(-1, 1), df_outside['obj_score'].values.reshape(-1, 1))
print('R2 = ',score_r2)
ax.set(xlabel="Outside temperature (deg. C)",
ylabel="Forehead temp. (deg. C)")
density_scatter(df_outside['meteo_realtemp'], df_outside['obj_score'], ax=ax,label='Forehead temperature')
plt.plot(X, Y_pred, 'red', label=r'Linear fit $R^2={:.2f}$'.format(score_r2))
#df_ambient = df.resample('D').apply({'amb_temp':'mean'})
#plt.plot(df_mean.index, df_ambient['amb_temp'], label='Ambient temperature')
# Rotate tick marks on x-axis
plt.setp(ax.get_xticklabels(), rotation=45)
#ax.set_legend['Forehead temperature', 'Average over a day']
ax.legend()
print(len(df_outside))
#plt.title("Effect of outside temperature on forehead temperature")
more_than_37 = df_outside['obj_score']
print(len(more_than_37[more_than_37 > more_than_37.mean() + 3*more_than_37.std()]))
plt.tight_layout()
plt.savefig('outside_forehead.png')
fig, ax = plt.subplots(figsize=(6, 2.5), dpi=180)
df_outside = df[df['meteo_realtemp'] > 0]
df_outside = df_outside[df_outside['machine_id'].isin([3075, 3936, 4059, 5781, 4428, 5535, 7134, 2706, 5904, 6396])]
linear_regressor = LinearRegression()
linear_regressor.fit(df_outside['meteo_realtemp'].values.reshape(-1, 1), df_outside['obj_score'].values.reshape(-1, 1))
X = np.linspace(df_outside['meteo_realtemp'].min(), df_outside['meteo_realtemp'].max()).reshape(-1, 1)
Y_pred = linear_regressor.predict(X)
score_r2 = linear_regressor.score(df_outside['meteo_realtemp'].values.reshape(-1, 1), df_outside['obj_score'].values.reshape(-1, 1))
print('R2 = ',score_r2)
ax.set(xlabel="Outside temperature (degrees C)",
ylabel="Forehead temp. (deg. C)")
curve = linear_regressor.predict(df_outside['meteo_realtemp'].values.reshape(-1, 1))[:,0]
density_scatter(df_outside['meteo_realtemp'], df_outside['obj_score']-curve+mean_raw,ax = ax, label='Forehead temperature')
ax.set_ylim(33,39)
#df_ambient = df.resample('D').apply({'amb_temp':'mean'})
#plt.plot(df_mean.index, df_ambient['amb_temp'], label='Ambient temperature')
# Rotate tick marks on x-axis
plt.setp(ax.get_xticklabels(), rotation=45)
#ax.set_legend['Forehead temperature', 'Average over a day']
print(len(df_outside))
#plt.title("Measurements corrected from outside temperature model")
new_df = df_outside.copy()
new_df['obj_score'] = new_df['obj_score']-curve+mean_raw
more_than_37 = df_outside['obj_score']-curve+mean_raw
print(len(more_than_37[more_than_37 > more_than_37.mean() + 2*more_than_37.std()]))
plt.tight_layout()
plt.savefig('outside_forehead_corr.png')
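# The same detrending step (fit a linear model of forehead temperature against a
# covariate, subtract the fitted curve, re-centre on the raw mean) is repeated below
# for ambient temperature and time of day. A helper capturing it could look like this
# sketch (not used by the original analysis):
def detrend_against(frame, covariate_col, target_col='obj_score', center=None):
    reg = LinearRegression()
    x = frame[covariate_col].values.reshape(-1, 1)
    y = frame[target_col].values.reshape(-1, 1)
    reg.fit(x, y)
    curve = reg.predict(x)[:, 0]
    if center is None:
        center = frame[target_col].mean()
    return frame[target_col] - curve + center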
fig, ax = plt.subplots(figsize=(6, 2.5), dpi=180)
df_ambient = df[df['amb_temp'] > 0]
linear_regressor = LinearRegression()
linear_regressor.fit(df_ambient['amb_temp'].values.reshape(-1, 1), df_ambient['obj_score'].values.reshape(-1, 1))
X = np.linspace(df_ambient['amb_temp'].min(), df_ambient['amb_temp'].max()).reshape(-1, 1)
Y_pred = linear_regressor.predict(X)
score_r2 = linear_regressor.score(df_ambient['amb_temp'].values.reshape(-1, 1), df_ambient['obj_score'].values.reshape(-1, 1))
print('R2 = ',score_r2)
ax.set(xlabel="Ambient temperature (degrees C)",
ylabel="Forehead temp. (deg. C)")
ax.set_ylim(33,39)
density_scatter(df_ambient['amb_temp'], df_ambient['obj_score'],ax = ax, label='Forehead temperature')
plt.plot(X, Y_pred, 'red', label=r'Linear fit $R^2={:.2f}$'.format(score_r2))
#df_ambient = df.resample('D').apply({'amb_temp':'mean'})
#plt.plot(df_mean.index, df_ambient['amb_temp'], label='Ambient temperature')
# Rotate tick marks on x-axis
plt.setp(ax.get_xticklabels(), rotation=45)
#ax.set_legend['Forehead temperature', 'Average over a day']
ax.legend()
#plt.title("Effect of ambient temperature on forehead temperature")
print(len(df_ambient))
plt.tight_layout()
plt.savefig('ambient_forehead.png')
fig, ax = plt.subplots(figsize=(6, 2.5), dpi=180)
df_ambient = df[df['amb_temp'] > 0]
linear_regressor = LinearRegression()
linear_regressor.fit(df_ambient['amb_temp'].values.reshape(-1, 1), df_ambient['obj_score'].values.reshape(-1, 1))
X = np.linspace(df_ambient['amb_temp'].min(), df_ambient['amb_temp'].max()).reshape(-1, 1)
Y_pred = linear_regressor.predict(X)
score_r2 = linear_regressor.score(df_ambient['amb_temp'].values.reshape(-1, 1), df_ambient['obj_score'].values.reshape(-1, 1))
print('R2 = ',score_r2)
ax.set(xlabel="Ambient temperature (degrees C)",
ylabel="Forehead temp. (deg. C)")
curve = linear_regressor.predict(df_ambient['amb_temp'].values.reshape(-1, 1))[:,0]
ax.set_ylim(33,39)
density_scatter(df_ambient['amb_temp'], df_ambient['obj_score']-curve+mean_raw,ax = ax, label='Forehead temperature')
#df_ambient = df.resample('D').apply({'amb_temp':'mean'})
#plt.plot(df_mean.index, df_ambient['amb_temp'], label='Ambient temperature')
# Rotate tick marks on x-axis
plt.setp(ax.get_xticklabels(), rotation=45)
#ax.set_legend['Forehead temperature', 'Average over a day']
#plt.title("Measurements corrected from ambient temperature model")
more_than_37 = df_ambient['obj_score']-curve+mean_raw
print(len(more_than_37[more_than_37 > more_than_37.mean() + 2*more_than_37.std()]))
plt.tight_layout()
plt.savefig('ambient_forehead_corr.png')
fig, ax = plt.subplots(figsize=(6, 2.5), dpi=180)
df_ambient = new_df[new_df['amb_temp'] > 0]
linear_regressor = LinearRegression()
linear_regressor.fit(df_ambient['amb_temp'].values.reshape(-1, 1), df_ambient['obj_score'].values.reshape(-1, 1))
X = np.linspace(df_ambient['amb_temp'].min(), df_ambient['amb_temp'].max()).reshape(-1, 1)
Y_pred = linear_regressor.predict(X)
score_r2 = linear_regressor.score(df_ambient['amb_temp'].values.reshape(-1, 1), df_ambient['obj_score'].values.reshape(-1, 1))
print('R2 = ',score_r2)
ax.set(xlabel="Ambient temperature (degrees C)",
ylabel="Forehead temp. (degrees C)")
curve = linear_regressor.predict(df_ambient['amb_temp'].values.reshape(-1, 1))[:,0]
ax.set_ylim(33,39)
density_scatter(df_ambient['amb_temp'], df_ambient['obj_score']-curve+mean_raw,ax=ax, label='Forehead temperature')
#plt.plot(df_ambient['amb_temp'], df_ambient['obj_score'],'.', alpha=0.1, label='Forehead temperature')
#plt.plot(X, Y_pred, 'red', label=r'Linear fit $R^2={:.2f}$'.format(score_r2))
#df_ambient = df.resample('D').apply({'amb_temp':'mean'})
#plt.plot(df_mean.index, df_ambient['amb_temp'], label='Ambient temperature')
# Rotate tick marks on x-axis
plt.setp(ax.get_xticklabels(), rotation=45)
#ax.set_legend['Forehead temperature', 'Average over a day']
ax.legend()
#plt.title("Measurements corrected from outside + ambient models")
more_than_37 = df_ambient['obj_score']-curve+mean_raw
new_df_all = df_ambient.copy()
new_df_all['obj_score'] = new_df_all['obj_score']-curve+mean_raw
print(len(df_ambient))
print(len(new_df_all))
print(len(more_than_37[more_than_37 > more_than_37.mean() + 2*more_than_37.std()]))
plt.tight_layout()
plt.savefig('foreheah_both_corr.png')
fig, ax = plt.subplots(figsize=(6, 2.5), dpi=180)
df_hours = df
df_hours['hours'] = df_hours.index.hour
linear_regressor = make_pipeline(
PolynomialFeatures(degree=2),
LinearRegression()
)
linear_regressor.fit(df_hours['hours'].values.reshape(-1, 1), df_hours['obj_score'].values.reshape(-1, 1))
X = np.linspace(df_hours['hours'].min(), df_hours['hours'].max()).reshape(-1, 1)
Y_pred = linear_regressor.predict(X)
score_r2 = linear_regressor.score(df_hours['hours'].values.reshape(-1, 1), df_hours['obj_score'].values.reshape(-1, 1))
print('R2 = ',score_r2)
ax.set_ylim(33,39)
ax.set(xlabel="Hours in the day (GMT)",
ylabel="Forehead temp. (deg. C)")
by_hour = df_hours.groupby(df_hours.index.hour+2).mean()
by_hour_std = df_hours.groupby(df_hours.index.hour+2).std()
density_scatter(df_hours.index.hour, df_hours['obj_score'],ax=ax,label='Forehead temperature')
plt.plot(by_hour.index, by_hour['obj_score'],'-', alpha=1.0, label='Temperature average')
plt.fill_between(by_hour.index, by_hour['obj_score'] - by_hour_std['obj_score']/2, by_hour['obj_score'] + by_hour_std['obj_score']/2,
color='gray', alpha=0.3)
plt.xlim(4.1,21)
plt.plot(X, Y_pred, 'red', label=r'Polynomial fit $R^2={:.2f}$'.format(score_r2))
#df_ambient = df.resample('D').apply({'amb_temp':'mean'})
#plt.plot(df_mean.index, df_ambient['amb_temp'], label='Ambient temperature')
# Rotate tick marks on x-axis
plt.setp(ax.get_xticklabels(), rotation=45)
#ax.set_legend['Forehead temperature', 'Average over a day']
ax.legend()
print(len(df_hours))
#plt.title("Effect of time in the day on forehead temperature")
#more_than_37 = df_outside['obj_score']
#print(len(more_than_37[more_than_37 > more_than_37.mean() + 2*more_than_37.std()]))
plt.tight_layout()
plt.savefig('timeday.png')
fig, ax = plt.subplots(figsize=(6, 2.5), dpi=180)
df_hours = new_df_all
df_hours['hours'] = df_hours.index.hour
linear_regressor = make_pipeline(
PolynomialFeatures(degree=2),
LinearRegression()
)
linear_regressor.fit(df_hours['hours'].values.reshape(-1, 1), df_hours['obj_score'].values.reshape(-1, 1))
X = np.linspace(df_hours['hours'].min(), df_hours['hours'].max()).reshape(-1, 1)
Y_pred = linear_regressor.predict(X)
score_r2 = linear_regressor.score(df_hours['hours'].values.reshape(-1, 1), df_hours['obj_score'].values.reshape(-1, 1))
print('R2 = ',score_r2)
ax.set_ylim(33,39)
curve = linear_regressor.predict(df_hours['hours'].values.reshape(-1, 1))[:,0]
by_hour = df_hours.groupby(df_hours.index.hour+2).mean()
by_hour_std = df_hours.groupby(df_hours.index.hour+2).std()
df_hours['obj_score'] = df_hours['obj_score']-curve+df_hours['obj_score'].mean()
plt.plot(df_hours['timestamp'], df_hours['obj_score'],'.', alpha=0.1, label='Forehead temperature')
df_mean = df_hours.resample('D').apply({'obj_score':'mean'})
df_std = df_hours.resample('D').apply({'obj_score':'std'})
plt.plot(df_mean.index+pd.Timedelta('0.5 day'), df_mean['obj_score'], label='Average over 24h')
plt.fill_between(df_mean.index+pd.Timedelta('0.5 day'), df_mean['obj_score'] - df_std['obj_score']/2, df_mean['obj_score'] + df_std['obj_score']/2,
color='gray', alpha=1.0)
# Set title and labels for axes
ax.set( ylabel="Forehead temp. (deg. C)", xlabel="Time (days)")
frame1 = plt.gca()
frame1.axes.xaxis.set_ticks([])
#plt.plot(df_hours['amb_temp'], df_hours['obj_score']-curve+df_hours['obj_score'].mean(),'.', alpha=0.1, label='Forehead temperature')
#plt.plot(df_hours.index.hour, df_hours['obj_score'],'.', alpha=0.1, label='Forehead temperature')
#plt.plot(by_hour.index, by_hour['obj_score'],'-', alpha=1.0, label='Temperature average')
#plt.fill_between(by_hour.index, by_hour['obj_score'] - by_hour_std['obj_score']/2, by_hour['obj_score'] + by_hour_std['obj_score']/2,
# color='gray', alpha=0.3)
#plt.xlim(4.1,21)
#plt.plot(X, Y_pred, 'red', label=r'Polynomial fit $R^2={:.2f}$'.format(score_r2))
#df_ambient = df.resample('D').apply({'amb_temp':'mean'})
#plt.plot(df_mean.index, df_ambient['amb_temp'], label='Ambient temperature')
# Rotate tick marks on x-axis
plt.setp(ax.get_xticklabels(), rotation=45)
#ax.set_legend['Forehead temperature', 'Average over a day']
ax.legend()
print(len(df_hours))
#plt.title("Correction from outside + ambient + seasonal models")
more_than_37 = df_hours['obj_score']
fig.subplots_adjust(bottom=0.2)
ax.set_ylim(33,39)
years = mdates.YearLocator() # every year
months = mdates.MonthLocator() # every month
days = mdates.DayLocator() # every month
years_fmt = mdates.DateFormatter('%Y')
ax.xaxis.set_major_locator(months)
ax.xaxis.set_minor_locator(days)
plt.tight_layout()
print(len(more_than_37[more_than_37 > more_than_37.mean() + 2*more_than_37.std()]))
plt.savefig('corrected_outside_ambient_seasonal.png')
print("avg {} std {}".format(df_hours['obj_score'].mean(), df_hours['obj_score'].std()))
groups = df_hours.groupby(['rfid_uid']).std()['obj_score']
print('Personal mean {}, personal std {}'.format(groups.mean(), groups.std()))
print("Location A")
df_filtered = df_hours[df_hours['machine_id'].isin([4428])]
print("len {} avg {} std {}".format(len(df_filtered), df_filtered['obj_score'].mean(), df_filtered['obj_score'].std()))
groups = df_filtered.groupby(['rfid_uid']).std()['obj_score']
print('Personal mean {}, personal std {}'.format(groups.mean(), groups.std()))
print("Location B")
df_filtered = df_hours[df_hours['machine_id'].isin([2952, 3075, 3690, 3813, 3936, 4059, 4182, 4305])]
groups = df_filtered.groupby(['rfid_uid']).std()['obj_score']
print('Personal mean {}, personal std {}'.format(groups.mean(), groups.std()))
print("len {} avg {} std {}".format(len(df_filtered), df_filtered['obj_score'].mean(), df_filtered['obj_score'].std()))
print("Location C")
df_filtered = df_hours[df_hours['machine_id'].isin([6396])]
groups = df_filtered.groupby(['rfid_uid']).std()['obj_score']
print('Personal mean {}, personal std {}'.format(groups.mean(), groups.std()))
print("len {} avg {} std {}".format(len(df_filtered), df_filtered['obj_score'].mean(), df_filtered['obj_score'].std()))
more_than_37 = df_filtered ['obj_score']
print(len(more_than_37[more_than_37 > more_than_37.mean() + 3*more_than_37.std()]))
plt.rcParams["figure.dpi"] = 180
fig, ax = plt.subplots(figsize=(6, 2.5), dpi=180)
df_hours.hist(column=['obj_score'], bins=20, figsize=(6, 3), ax = plt.gca())
ax.set( ylabel="# Measurements", xlabel="Forehead temperature (degrees C)")
ax.set_title("")
ax.set_xlim(33,39)
# x coordinates for the lines
xcoords = [37.38]
# colors for the lines
colors = ['r']
for xc,c in zip(xcoords,colors):
plt.axvline(x=xc, label='Fever threshold (μ+3σ = {})'.format(xc), c=c)
fig.subplots_adjust(bottom=0.2)
plt.legend()
plt.tight_layout()
plt.savefig('hist_corr.png')
print(len(df_filtered_notouch))
print(len(df_hours['obj_score']))
data1 = np.asarray(df_filtered_notouch['obj_score'])
data2 = np.asarray(df_hours['obj_score'])
mean = np.mean([data1, data2], axis=0)
#
#
# 0=================================0
# | Kernel Point Convolutions |
# 0=================================0
#
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Class handling the visualization
#
# ----------------------------------------------------------------------------------------------------------------------
#
# <NAME> - 11/06/2018
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Imports and global variables
# \**********************************/
#
# Basic libs
import torch
import os
os.environ.update(OMP_NUM_THREADS='1',
OPENBLAS_NUM_THREADS='1',
NUMEXPR_NUM_THREADS='1',
MKL_NUM_THREADS='1',)
import numpy as np
from sklearn.neighbors import KDTree
from os import makedirs, remove, rename, listdir
from os.path import exists, join
import time
from mayavi import mlab
import sys
from models.blocks import KPConv
# PLY reader
from utils.ply import write_ply, read_ply
# Configuration class
from utils.config import Config, bcolors
# ----------------------------------------------------------------------------------------------------------------------
#
# Visualizer Class
# \*******************/
#
class ModelVisualizer:
# Initialization methods
# ------------------------------------------------------------------------------------------------------------------
def __init__(self, net, config, chkp_path, on_gpu=True):
"""
Initialize the visualizer and reload a previously trained model from a checkpoint
:param net: network object
:param config: configuration object
:param chkp_path: path to the checkpoint that needs to be loaded
:param on_gpu: run the visualization on GPU or CPU
"""
############
# Parameters
############
# Choose to train on CPU or GPU
if on_gpu and torch.cuda.is_available():
self.device = torch.device("cuda:0")
else:
self.device = torch.device("cpu")
net.to(self.device)
##########################
# Load previous checkpoint
##########################
checkpoint = torch.load(chkp_path)
new_dict = {}
for k, v in checkpoint['model_state_dict'].items():
if 'blocs' in k:
k = k.replace('blocs', 'blocks')
new_dict[k] = v
net.load_state_dict(new_dict)
self.epoch = checkpoint['epoch']
net.eval()
print("\nModel state restored from {:s}.".format(chkp_path))
return
# Main visualization methods
# ------------------------------------------------------------------------------------------------------------------
def top_relu_activations(self, model, dataset, relu_idx=0, top_num=5):
"""
Run the model on the test dataset to find which input points most strongly activate each neuron of a ReLU layer
:param model: model used at training
:param dataset: dataset used at training
:param relu_idx: which features are to be visualized
:param top_num: how many top candidates are kept per feature
"""
#####################################
# First choose the visualized feature
#####################################
# List all relu ops
all_ops = [op for op in tf.get_default_graph().get_operations() if op.name.startswith('KernelPointNetwork')
and op.name.endswith('LeakyRelu')]
# List all possible Relu indices
print('\nPossible Relu indices:')
for i, t in enumerate(all_ops):
print(i, ': ', t.name)
# Print the chosen one
if relu_idx is not None:
features_tensor = all_ops[relu_idx].outputs[0]
else:
relu_idx = int(input('Choose a Relu index: '))
features_tensor = all_ops[relu_idx].outputs[0]
# Get parameters
layer_idx = int(features_tensor.name.split('/')[1][6:])
if 'strided' in all_ops[relu_idx].name and not ('strided' in all_ops[relu_idx+1].name):
layer_idx += 1
features_dim = int(features_tensor.shape[1])
radius = model.config.first_subsampling_dl * model.config.density_parameter * (2 ** layer_idx)
print('You chose to compute the output of operation named:\n' + all_ops[relu_idx].name)
print('\nIt contains {:d} features.'.format(int(features_tensor.shape[1])))
print('\n****************************************************************************')
#######################
# Initialize containers
#######################
# Initialize containers
self.top_features = -np.ones((top_num, features_dim))
self.top_classes = -np.ones((top_num, features_dim), dtype=np.int32)
self.saving = model.config.saving
# Testing parameters
num_votes = 3
# Create visu folder
self.visu_path = None
self.fmt_str = None
if model.config.saving:
self.visu_path = join('visu',
'visu_' + model.saving_path.split('/')[-1],
'top_activations',
'Relu{:02d}'.format(relu_idx))
self.fmt_str = 'f{:04d}_top{:02d}.ply'
if not exists(self.visu_path):
makedirs(self.visu_path)
# *******************
# Network predictions
# *******************
mean_dt = np.zeros(2)
last_display = time.time()
for v in range(num_votes):
# Run model on all test examples
# ******************************
# Initialise iterator with test data
if model.config.dataset.startswith('S3DIS'):
self.sess.run(dataset.val_init_op)
else:
self.sess.run(dataset.test_init_op)
count = 0
while True:
try:
if model.config.dataset.startswith('ShapeNetPart'):
if model.config.dataset.split('_')[1] == 'multi':
label_op = model.inputs['super_labels']
else:
label_op = model.inputs['point_labels']
elif model.config.dataset.startswith('S3DIS'):
label_op = model.inputs['point_labels']
elif model.config.dataset.startswith('Scannet'):
label_op = model.inputs['point_labels']
elif model.config.dataset.startswith('ModelNet40'):
label_op = model.inputs['labels']
else:
raise ValueError('Unsupported dataset')
# Run one step of the model
t = [time.time()]
ops = (all_ops[-1].outputs[0],
features_tensor,
label_op,
model.inputs['points'],
model.inputs['pools'],
model.inputs['in_batches'])
_, stacked_features, labels, all_points, all_pools, in_batches = self.sess.run(ops, {model.dropout_prob: 1.0})
t += [time.time()]
count += in_batches.shape[0]
# Stack all batches
max_ind = np.max(in_batches)
stacked_batches = []
for b_i, b in enumerate(in_batches):
stacked_batches += [b[b < max_ind - 0.5]*0+b_i]
stacked_batches = np.hstack(stacked_batches)
# Find batches at wanted layer
for l in range(model.config.num_layers - 1):
if l >= layer_idx:
break
stacked_batches = stacked_batches[all_pools[l][:, 0]]
# Get each example and update top_activations
for b_i, b in enumerate(in_batches):
b = b[b < max_ind - 0.5]
in_points = all_points[0][b]
features = stacked_features[stacked_batches == b_i]
points = all_points[layer_idx][stacked_batches == b_i]
if model.config.dataset in ['ShapeNetPart_multi', 'ModelNet40_classif']:
l = labels[b_i]
else:
l = np.argmax(np.bincount(labels[b]))
self.update_top_activations(features, labels[b_i], points, in_points, radius)
# Average timing
t += [time.time()]
mean_dt = 0.95 * mean_dt + 0.05 * (np.array(t[1:]) - np.array(t[:-1]))
# Display
if (t[-1] - last_display) > 1.0:
last_display = t[-1]
if model.config.dataset.startswith('S3DIS'):
completed = count / (model.config.validation_size * model.config.batch_num)
else:
completed = count / dataset.num_test
message = 'Vote {:d} : {:.1f}% (timings : {:4.2f} {:4.2f})'
print(message.format(v,
100 * completed,
1000 * (mean_dt[0]),
1000 * (mean_dt[1])))
#class_names = np.array([dataset.label_to_names[i] for i in range(dataset.num_classes)])
#print(class_names[self.top_classes[:, :20]].T)
except tf.errors.OutOfRangeError:
break
return relu_idx
def update_top_activations(self, features, label, l_points, input_points, radius, max_computed=60):
top_num = self.top_features.shape[0]
# Compute top indice for each feature
max_indices = np.argmax(features, axis=0)
# get top_point neighborhoods
for features_i, idx in enumerate(max_indices[:max_computed]):
if features[idx, features_i] <= self.top_features[-1, features_i]:
continue
if label in self.top_classes[:, features_i]:
ind0 = np.where(self.top_classes[:, features_i] == label)[0][0]
if features[idx, features_i] <= self.top_features[ind0, features_i]:
continue
elif ind0 < top_num - 1:
self.top_features[ind0:-1, features_i] = self.top_features[ind0+1:, features_i]
self.top_classes[ind0:-1, features_i] = self.top_classes[ind0+1:, features_i]
for next_i in range(ind0 + 1, top_num):
old_f = join(self.visu_path, self.fmt_str.format(features_i, next_i + 1))
new_f = join(self.visu_path, self.fmt_str.format(features_i, next_i))
if exists(old_f):
if exists(new_f):
remove(new_f)
rename(old_f, new_f)
# Find indice where new top should be placed
top_i = np.where(features[idx, features_i] > self.top_features[:, features_i])[0][0]
# Update top features
if top_i < top_num - 1:
self.top_features[top_i + 1:, features_i] = self.top_features[top_i:-1, features_i]
self.top_features[top_i, features_i] = features[idx, features_i]
self.top_classes[top_i + 1:, features_i] = self.top_classes[top_i:-1, features_i]
self.top_classes[top_i, features_i] = label
# Find in which batch lays the point
if self.saving:
# Get inputs
l_features = features[:, features_i]
point = l_points[idx, :]
dist = np.linalg.norm(input_points - point, axis=1)
influence = (radius - dist) / radius
# Project response on input cloud
if l_points.shape[0] == input_points.shape[0]:
responses = l_features
else:
tree = KDTree(l_points, leaf_size=50)
nn_k = min(l_points.shape[0], 10)
interp_dists, interp_inds = tree.query(input_points, nn_k, return_distance=True)
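# Tukey (biweight) interpolation weights: (1 - (d/radius)**2)**2 inside the radius, 0 outside.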
tukeys = np.square(1 - np.square(interp_dists / radius))
tukeys[interp_dists > radius] = 0
responses = np.sum(l_features[interp_inds] * tukeys, axis=1)
# Handle last examples
for next_i in range(top_num - 1, top_i, -1):
old_f = join(self.visu_path, self.fmt_str.format(features_i, next_i))
new_f = join(self.visu_path, self.fmt_str.format(features_i, next_i + 1))
if exists(old_f):
if exists(new_f):
remove(new_f)
rename(old_f, new_f)
# Save
filename = join(self.visu_path, self.fmt_str.format(features_i, top_i + 1))
write_ply(filename,
[input_points, influence, responses],
['x', 'y', 'z', 'influence', 'responses'])
def show_deformable_kernels_old(self, model, dataset, deform_idx=0):
##########################################
# First choose the visualized deformations
##########################################
# List all deformation ops
all_ops = [op for op in tf.get_default_graph().get_operations() if op.name.startswith('KernelPointNetwork')
and op.name.endswith('deformed_KP')]
print('\nPossible deformed indices:')
for i, t in enumerate(all_ops):
print(i, ': ', t.name)
# Chosen deformations
deformed_KP_tensor = all_ops[deform_idx].outputs[0]
# Layer index
layer_idx = int(all_ops[deform_idx].name.split('/')[1].split('_')[-1])
# Original kernel point positions
KP_vars = [v for v in tf.global_variables() if 'kernel_points' in v.name]
tmp = np.array(all_ops[deform_idx].name.split('/'))
test = []
for v in KP_vars:
cmp = np.array(v.name.split('/'))
l = min(len(cmp), len(tmp))
cmp = cmp[:l]
tmp = tmp[:l]
test += [np.sum(cmp == tmp)]
chosen_KP = np.argmax(test)
print('You chose to visualize the output of operation named: ' + all_ops[deform_idx].name)
print('\n****************************************************************************')
# Run model on all test examples
# ******************************
# Initialise iterator with test data
if model.config.dataset.startswith('S3DIS'):
self.sess.run(dataset.val_init_op)
else:
self.sess.run(dataset.test_init_op)
count = 0
while True:
try:
# Run one step of the model
t = [time.time()]
ops = (deformed_KP_tensor,
model.inputs['points'],
model.inputs['features'],
model.inputs['pools'],
model.inputs['in_batches'],
KP_vars)
stacked_deformed_KP, \
all_points, \
all_colors, \
all_pools, \
in_batches, \
original_KPs = self.sess.run(ops, {model.dropout_prob: 1.0})
t += [time.time()]
count += in_batches.shape[0]
# Stack all batches
max_ind = np.max(in_batches)
stacked_batches = []
for b_i, b in enumerate(in_batches):
stacked_batches += [b[b < max_ind - 0.5] * 0 + b_i]
stacked_batches = np.hstack(stacked_batches)
# Find batches at wanted layer
for l in range(model.config.num_layers - 1):
if l >= layer_idx:
break
stacked_batches = stacked_batches[all_pools[l][:, 0]]
# Get each example and update top_activations
in_points = []
in_colors = []
deformed_KP = []
points = []
lookuptrees = []
for b_i, b in enumerate(in_batches):
b = b[b < max_ind - 0.5]
in_points += [all_points[0][b]]
deformed_KP += [stacked_deformed_KP[stacked_batches == b_i]]
points += [all_points[layer_idx][stacked_batches == b_i]]
lookuptrees += [KDTree(points[-1])]
if all_colors.shape[1] == 4:
in_colors += [all_colors[b, 1:]]
else:
in_colors += [None]
print('New batch size : ', len(in_batches))
###########################
# Interactive visualization
###########################
# Create figure for features
fig1 = mlab.figure('Features', bgcolor=(1.0, 1.0, 1.0), size=(1280, 920))
fig1.scene.parallel_projection = False
# Indices
global obj_i, point_i, plots, offsets, p_scale, show_in_p, aim_point
p_scale = 0.03
obj_i = 0
point_i = 0
plots = {}
offsets = False
show_in_p = 2
aim_point = np.zeros((1, 3))
def picker_callback(picker):
""" Picker callback: this get called when on pick events.
"""
global plots, aim_point
if 'in_points' in plots:
if plots['in_points'].actor.actor._vtk_obj in [o._vtk_obj for o in picker.actors]:
point_rez = plots['in_points'].glyph.glyph_source.glyph_source.output.points.to_array().shape[0]
new_point_i = int(np.floor(picker.point_id / point_rez))
if new_point_i < len(plots['in_points'].mlab_source.points):
# Get closest point in the layer we are interested in
aim_point = plots['in_points'].mlab_source.points[new_point_i:new_point_i + 1]
update_scene()
if 'points' in plots:
if plots['points'].actor.actor._vtk_obj in [o._vtk_obj for o in picker.actors]:
point_rez = plots['points'].glyph.glyph_source.glyph_source.output.points.to_array().shape[0]
new_point_i = int(np.floor(picker.point_id / point_rez))
if new_point_i < len(plots['points'].mlab_source.points):
# Get closest point in the layer we are interested in
aim_point = plots['points'].mlab_source.points[new_point_i:new_point_i + 1]
update_scene()
def update_scene():
global plots, offsets, p_scale, show_in_p, aim_point, point_i
# Get the current view
v = mlab.view()
roll = mlab.roll()
# clear figure
for key in plots.keys():
plots[key].remove()
plots = {}
# Plot new data feature
p = points[obj_i]
# Rescale points for visu
p = (p * 1.5 / model.config.in_radius)
# Show point cloud
if show_in_p <= 1:
plots['points'] = mlab.points3d(p[:, 0],
p[:, 1],
p[:, 2],
resolution=8,
scale_factor=p_scale,
scale_mode='none',
color=(0, 1, 1),
figure=fig1)
if show_in_p >= 1:
# Get points and colors
in_p = in_points[obj_i]
in_p = (in_p * 1.5 / model.config.in_radius)
# Color point cloud if possible
in_c = in_colors[obj_i]
if in_c is not None:
# Primitives
scalars = np.arange(len(in_p)) # Key point: set an integer for each point
# Define color table (including alpha), which must be uint8 and [0,255]
colors = np.hstack((in_c, np.ones_like(in_c[:, :1])))
colors = (colors * 255).astype(np.uint8)
plots['in_points'] = mlab.points3d(in_p[:, 0],
in_p[:, 1],
in_p[:, 2],
scalars,
resolution=8,
scale_factor=p_scale*0.8,
scale_mode='none',
figure=fig1)
plots['in_points'].module_manager.scalar_lut_manager.lut.table = colors
else:
plots['in_points'] = mlab.points3d(in_p[:, 0],
in_p[:, 1],
in_p[:, 2],
resolution=8,
scale_factor=p_scale*0.8,
scale_mode='none',
figure=fig1)
# Get KP locations
rescaled_aim_point = aim_point * model.config.in_radius / 1.5
point_i = lookuptrees[obj_i].query(rescaled_aim_point, return_distance=False)[0][0]
if offsets:
KP = points[obj_i][point_i] + deformed_KP[obj_i][point_i]
scals = np.ones_like(KP[:, 0])
else:
KP = points[obj_i][point_i] + original_KPs[chosen_KP]
scals = np.zeros_like(KP[:, 0])
KP = (KP * 1.5 / model.config.in_radius)
plots['KP'] = mlab.points3d(KP[:, 0],
KP[:, 1],
KP[:, 2],
scals,
colormap='autumn',
resolution=8,
scale_factor=1.2*p_scale,
scale_mode='none',
vmin=0,
vmax=1,
figure=fig1)
if True:
plots['center'] = mlab.points3d(p[point_i, 0],
p[point_i, 1],
p[point_i, 2],
scale_factor=1.1*p_scale,
scale_mode='none',
color=(0, 1, 0),
figure=fig1)
# New title
plots['title'] = mlab.title(str(obj_i), color=(0, 0, 0), size=0.3, height=0.01)
text = '<--- (press g for previous)' + 50 * ' ' + '(press h for next) --->'
plots['text'] = mlab.text(0.01, 0.01, text, color=(0, 0, 0), width=0.98)
plots['orient'] = mlab.orientation_axes()
# Set the saved view
mlab.view(*v)
mlab.roll(roll)
return
def animate_kernel():
global plots, offsets, p_scale, show_in_p
# Get KP locations
KP_def = points[obj_i][point_i] + deformed_KP[obj_i][point_i]
KP_def = (KP_def * 1.5 / model.config.in_radius)
KP_def_color = (1, 0, 0)
KP_rigid = points[obj_i][point_i] + original_KPs[chosen_KP]
KP_rigid = (KP_rigid * 1.5 / model.config.in_radius)
KP_rigid_color = (1, 0.7, 0)
if offsets:
t_list = np.linspace(0, 1, 150, dtype=np.float32)
else:
t_list = np.linspace(1, 0, 150, dtype=np.float32)
@mlab.animate(delay=10)
def anim():
for t in t_list:
plots['KP'].mlab_source.set(x=t * KP_def[:, 0] + (1 - t) * KP_rigid[:, 0],
y=t * KP_def[:, 1] + (1 - t) * KP_rigid[:, 1],
z=t * KP_def[:, 2] + (1 - t) * KP_rigid[:, 2],
scalars=t * np.ones_like(KP_def[:, 0]))
yield
anim()
return
def keyboard_callback(vtk_obj, event):
global obj_i, point_i, offsets, p_scale, show_in_p
if vtk_obj.GetKeyCode() in ['b', 'B']:
p_scale /= 1.5
update_scene()
elif vtk_obj.GetKeyCode() in ['n', 'N']:
p_scale *= 1.5
update_scene()
if vtk_obj.GetKeyCode() in ['g', 'G']:
obj_i = (obj_i - 1) % len(deformed_KP)
point_i = 0
update_scene()
elif vtk_obj.GetKeyCode() in ['h', 'H']:
obj_i = (obj_i + 1) % len(deformed_KP)
point_i = 0
update_scene()
elif vtk_obj.GetKeyCode() in ['k', 'K']:
offsets = not offsets
animate_kernel()
elif vtk_obj.GetKeyCode() in ['z', 'Z']:
show_in_p = (show_in_p + 1) % 3
update_scene()
elif vtk_obj.GetKeyCode() in ['0']:
print('Saving')
# Find a new name
file_i = 0
file_name = 'KP_{:03d}.ply'.format(file_i)
files = [f for f in listdir('KP_clouds') if f.endswith('.ply')]
while file_name in files:
file_i += 1
file_name = 'KP_{:03d}.ply'.format(file_i)
KP_deform = points[obj_i][point_i] + deformed_KP[obj_i][point_i]
KP_normal = points[obj_i][point_i] + original_KPs[chosen_KP]
# Save
write_ply(join('KP_clouds', file_name),
[in_points[obj_i], in_colors[obj_i]],
['x', 'y', 'z', 'red', 'green', 'blue'])
write_ply(join('KP_clouds', 'KP_{:03d}_deform.ply'.format(file_i)),
[KP_deform],
['x', 'y', 'z'])
write_ply(join('KP_clouds', 'KP_{:03d}_normal.ply'.format(file_i)),
[KP_normal],
['x', 'y', 'z'])
print('OK')
return
# Draw a first plot
pick_func = fig1.on_mouse_pick(picker_callback)
pick_func.tolerance = 0.01
update_scene()
fig1.scene.interactor.add_observer('KeyPressEvent', keyboard_callback)
mlab.show()
except tf.errors.OutOfRangeError:
break
def show_effective_recep_field(self, net, loader, config, f_idx=0):
##########################################
# First choose the visualized deformations
##########################################
blocks = {}
named_blocks = [(m_name, m) for m_name, m in net.named_modules()
if len(m_name.split('.')) == 2 and m_name.split('.')[0].endswith('_blocks')]
chosen_block = named_blocks[-1][0]
for mi, (m_name, m) in enumerate(named_blocks):
c1 = bcolors.OKBLUE
c2 = bcolors.BOLD
ce = bcolors.ENDC
print('{:}{:}{:s}{:}{:} {:s}'.format(c1, c2, m_name, ce, ce, m.__repr__()))
blocks[m_name] = m
if mi == f_idx:
chosen_block = m_name
print('\nChoose which block output you want to visualize by entering the block name in blue')
override_block = input('Block name: ')
if len(override_block) > 0:
chosen_block = override_block
print('{:}{:}{:s}{:}{:} {:s}'.format(c1, c2, chosen_block, ce, ce, blocks[chosen_block].__repr__()))
features_dim = blocks[chosen_block].out_dim
# Fix all the trainable variables in the network (is it needed in eval mode?)
print('\n*************************************\n')
for p_name, param in net.named_parameters():
if param.requires_grad:
param.requires_grad = False
print('\n*************************************\n')
# Create modulation variable that requires grad
input_modulations = torch.nn.Parameter(torch.zeros((200000, 1),
dtype=torch.float32),
requires_grad=True)
print('\n*************************************\n')
for p_name, param in net.named_parameters():
if param.requires_grad:
print(p_name, param.shape)
print('\n*************************************\n')
# Create ERF loss
# Create ERF optimizer
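# Assumption: the original leaves the ERF loss/optimizer creation unimplemented here;
# plain SGD over the input modulations only is one plausible choice.
ERF_optimizer = torch.optim.SGD([input_modulations], lr=1.0)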
global plots, p_scale, show_in_p, remove_h, aim_point
aim_point = np.zeros((1, 3), dtype=np.float32)
remove_h = 1.05
p_scale = 0.1
plots = {}
show_in_p = False
global points, in_points, grad_values, chosen_point, in_colors
points = None
in_points = np.zeros((0, 3))
grad_values = None
chosen_point = None
in_colors = None
###########################
# Interactive visualization
###########################
# Create figure for features
fig1 = mlab.figure('Features', bgcolor=(0.5, 0.5, 0.5), size=(640, 480))
fig1.scene.parallel_projection = False
# Indices
def update_ERF(only_points=False):
global points, in_points, grad_values, chosen_point, aim_point, in_colors
# Generate clouds until we effectively changed
batch = None
if only_points:
# get a new batch (index does not matter given our input pipeline)
for batch in loader:
if batch.points[0].shape[0] != in_points.shape[0]:
break
sum_grads = 0
if only_points:
num_tries = 1
else:
num_tries = 10
#################################################
# Apply ERF optim to the same batch several times
#################################################
if 'cuda' in self.device.type:
batch.to(self.device)
for test_i in range(num_tries):
print('Updating ERF {:.0f}%'.format((test_i + 1) * 100 / num_tries))
rand_f_i = np.random.randint(features_dim)
# Reset input modulation variable
torch.nn.init.zeros_(input_modulations)
reset_op = input_modulations_var.assign(tf.zeros_like(input_modulations_var))
self.sess.run(reset_op)
# zero the parameter gradients
ERF_optimizer.zero_grad()
# Forward pass
outputs = net(batch, config)
loss = net.ERF_loss(outputs)
# Backward
loss.backward()
# Get result from hook here?
ERF_optimizer.step()
torch.cuda.synchronize(self.device)
# Forward pass
outputs = net(batch, config)
original_KP = deform_convs[deform_idx].kernel_points.cpu().detach().numpy()
stacked_deformed_KP = deform_convs[deform_idx].deformed_KP.cpu().detach().numpy()
count += batch.lengths[0].shape[0]
if 'cuda' in self.device.type:
torch.cuda.synchronize(self.device)
# Reset input modulation variable
reset_op = input_modulations_var.assign(tf.zeros_like(input_modulations_var))
self.sess.run(reset_op)
# Apply gradient to input modulations
t = [time.time()]
ops = (ERF_train_op,
chosen_i_tf,
input_modulations_var,
model.inputs['points'],
model.inputs['features'],
model.inputs['pools'],
model.inputs['in_batches'])
feed_dict = {aimed_coordinates: aim_point,
chosen_f_tf: rand_f_i,
model.dropout_prob: 1.0}
_, chosen_i, new_mods, all_points, all_colors, all_pools, in_batches = self.sess.run(ops, feed_dict)
t += [time.time()]
# Get the new value of the modulations
sum_grads += np.abs(self.sess.run(input_modulations_var))
grad = sum_grads / num_tries
# Stack all batches
max_ind = np.max(in_batches)
stacked_batches = []
for b_i, b in enumerate(in_batches):
stacked_batches += [b[b < max_ind - 0.5] * 0 + b_i]
stacked_batches = np.hstack(stacked_batches)
# Find batches at wanted layer
for l in range(model.config.num_layers - 1):
if l >= layer_idx:
break
stacked_batches = stacked_batches[all_pools[l][:, 0]]
# Get each example and update top_activations
for b_i, b in enumerate(in_batches):
b = b[b < max_ind - 0.5]
in_points = all_points[0][b]
in_colors = all_colors[b, 1:]
points = all_points[layer_idx][stacked_batches == b_i]
grad_values = grad[b]
chosen_point = all_points[layer_idx][chosen_i]
def update_scene():
global plots, p_scale, show_in_p, remove_h
global points, in_points, grad_values, chosen_point
# Get the current view
v = mlab.view()
roll = mlab.roll()
# clear figure
for key in plots.keys():
plots[key].remove()
plots = {}
# Plot new data feature
in_p = in_points
p = points
p0 = chosen_point
responses = 100 * np.abs(np.ravel(grad_values))
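            # responses: scaled absolute input modulations, used as the ERF magnitude at each input point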
#xresponses = responses ** (1/2)
# Remove roof
if 0.0 < remove_h < 1.0:
floor_h = np.min(in_p[:, 2])
ceil_h = np.max(in_p[:, 2])
threshold = floor_h + (ceil_h - floor_h) * remove_h
responses = responses[in_p[:, 2] < threshold]
in_p = in_p[in_p[:, 2] < threshold]
p = p[p[:, 2] < threshold]
# Rescale responses
min_response, max_response = np.min(responses), np.max(responses)
# Show point cloud
if show_in_p:
plots['points'] = mlab.points3d(p[:, 0],
p[:, 1],
p[:, 2],
resolution=8,
scale_factor=p_scale,
scale_mode='none',
color=(0, 1, 1),
figure=fig1)
plots['in_points'] = mlab.points3d(in_p[:, 0],
in_p[:, 1],
in_p[:, 2],
responses,
resolution=8,
scale_factor=p_scale * 0.8,
scale_mode='none',
vmin=0.1,
vmax=1.5,
figure=fig1)
plots['center'] = mlab.points3d(p0[0],
p0[1],
p0[2],
scale_factor=1.5 * p_scale,
scale_mode='none',
color=(0, 0, 0),
figure=fig1)
# New title
plots['title'] = mlab.title(str(int(100*remove_h)) + '%', color=(0, 0, 0), size=0.3, height=0.01)
text = '<--- (press g to remove ceiling)' + 50 * ' ' + '(press h to add ceiling) --->'
plots['text'] = mlab.text(0.01, 0.01, text, color=(0, 0, 0), width=0.98)
plots['orient'] = mlab.orientation_axes()
# Set the saved view
mlab.view(*v)
mlab.roll(roll)
return
        def picker_callback(picker):
            """ Picker callback: this gets called on pick events.
"""
global plots, aim_point, in_points
if plots['in_points'].actor.actor._vtk_obj in [o._vtk_obj for o in picker.actors]:
point_rez = plots['in_points'].glyph.glyph_source.glyph_source.output.points.to_array().shape[0]
new_point_i = int(np.floor(picker.point_id / point_rez))
if new_point_i < len(plots['in_points'].mlab_source.points):
# Get closest point in the layer we are interested in
aim_point = plots['in_points'].mlab_source.points[new_point_i:new_point_i + 1]
update_ERF()
update_scene()
def keyboard_callback(vtk_obj, event):
global remove_h, p_scale, show_in_p
global in_points, grad_values, chosen_point, in_colors
print(vtk_obj.GetKeyCode())
if vtk_obj.GetKeyCode() in ['b', 'B']:
p_scale /= 1.5
update_scene()
elif vtk_obj.GetKeyCode() in ['n', 'N']:
p_scale *= 1.5
update_scene()
if vtk_obj.GetKeyCode() in ['g', 'G']:
if remove_h > 0.0:
remove_h -= 0.1
update_scene()
elif vtk_obj.GetKeyCode() in ['h', 'H']:
if remove_h < 1.0:
remove_h += 0.1
update_ERF()
update_scene()
elif vtk_obj.GetKeyCode() in ['z', 'Z']:
show_in_p = not show_in_p
update_scene()
elif vtk_obj.GetKeyCode() in ['x', 'X']:
# Reset potentials
dataset.potentials['ERF'] = []
dataset.min_potentials['ERF'] = []
for i, tree in enumerate(dataset.input_trees['test']):
dataset.potentials['ERF'] += [np.random.rand(tree.data.shape[0]) * 1e-3]
dataset.min_potentials['ERF'] += [float(np.min(dataset.potentials['ERF'][-1]))]
# Update figure
update_ERF(only_points=True)
update_scene()
elif vtk_obj.GetKeyCode() in ['0']:
print('Saving')
# Find a new name
file_i = 0
file_name = 'ERF_{:03d}.ply'.format(file_i)
files = [f for f in listdir('ERF_clouds') if f.endswith('.ply')]
while file_name in files:
file_i += 1
file_name = 'ERF_{:03d}.ply'.format(file_i)
# Save
responses = 100 * np.abs(np.ravel(grad_values))
write_ply(join('ERF_clouds', file_name),
[in_points, in_colors, responses],
['x', 'y', 'z', 'red', 'green', 'blue', 'erf'])
write_ply(join('ERF_clouds', 'ERF_{:03d}_center.ply'.format(file_i)),
[chosen_point.reshape([1, -1])],
['x', 'y', 'z'])
print('OK')
return
# Draw a first plot
pick_func = fig1.on_mouse_pick(picker_callback)
pick_func.tolerance = 0.01
update_ERF(only_points=True)
update_scene()
fig1.scene.interactor.add_observer('KeyPressEvent', keyboard_callback)
mlab.show()
return
def show_deformable_kernels(self, net, loader, config, deform_idx=0):
"""
Show some inference with deformable kernels
"""
##########################################
# First choose the visualized deformations
##########################################
print('\nList of the deformable convolution available (chosen one highlighted in green)')
fmt_str = ' {:}{:2d} > KPConv(r={:.3f}, Din={:d}, Dout={:d}){:}'
deform_convs = []
for m in net.modules():
if isinstance(m, KPConv) and m.deformable:
if len(deform_convs) == deform_idx:
color = bcolors.OKGREEN
else:
color = bcolors.FAIL
print(fmt_str.format(color, len(deform_convs), m.radius, m.in_channels, m.out_channels, bcolors.ENDC))
deform_convs.append(m)
################
# Initialization
################
print('\n****************************************************\n')
# Loop variables
t0 = time.time()
t = [time.time()]
last_display = time.time()
mean_dt = np.zeros(1)
count = 0
# Start training loop
for epoch in range(config.max_epoch):
for batch in loader:
##################
# Processing batch
##################
# New time
t = t[-1:]
t += [time.time()]
if 'cuda' in self.device.type:
batch.to(self.device)
# Forward pass
outputs = net(batch, config)
original_KP = deform_convs[deform_idx].kernel_points.cpu().detach().numpy()
stacked_deformed_KP = deform_convs[deform_idx].deformed_KP.cpu().detach().numpy()
count += batch.lengths[0].shape[0]
if 'cuda' in self.device.type:
torch.cuda.synchronize(self.device)
# Find layer
l = None
for i, p in enumerate(batch.points):
if p.shape[0] == stacked_deformed_KP.shape[0]:
l = i
t += [time.time()]
# Get data
in_points = []
in_colors = []
deformed_KP = []
points = []
lookuptrees = []
i0 = 0
for b_i, length in enumerate(batch.lengths[0]):
in_points.append(batch.points[0][i0:i0 + length].cpu().detach().numpy())
if batch.features.shape[1] == 4:
in_colors.append(batch.features[i0:i0 + length, 1:].cpu().detach().numpy())
else:
in_colors.append(None)
i0 += length
i0 = 0
for b_i, length in enumerate(batch.lengths[l]):
points.append(batch.points[l][i0:i0 + length].cpu().detach().numpy())
deformed_KP.append(stacked_deformed_KP[i0:i0 + length])
lookuptrees.append(KDTree(points[-1]))
i0 += length
###########################
# Interactive visualization
###########################
# Create figure for features
fig1 = mlab.figure('Deformations', bgcolor=(1.0, 1.0, 1.0), size=(1280, 920))
fig1.scene.parallel_projection = False
# Indices
global obj_i, point_i, plots, offsets, p_scale, show_in_p, aim_point
p_scale = 0.03
obj_i = 0
point_i = 0
plots = {}
offsets = False
show_in_p = 2
aim_point = np.zeros((1, 3))
        def picker_callback(picker):
            """ Picker callback: this gets called on pick events.
"""
global plots, aim_point
if 'in_points' in plots:
if plots['in_points'].actor.actor._vtk_obj in [o._vtk_obj for o in picker.actors]:
point_rez = plots['in_points'].glyph.glyph_source.glyph_source.output.points.to_array().shape[0]
new_point_i = int(np.floor(picker.point_id / point_rez))
if new_point_i < len(plots['in_points'].mlab_source.points):
# Get closest point in the layer we are interested in
aim_point = plots['in_points'].mlab_source.points[new_point_i:new_point_i + 1]
update_scene()
if 'points' in plots:
if plots['points'].actor.actor._vtk_obj in [o._vtk_obj for o in picker.actors]:
point_rez = plots['points'].glyph.glyph_source.glyph_source.output.points.to_array().shape[0]
new_point_i = int(np.floor(picker.point_id / point_rez))
if new_point_i < len(plots['points'].mlab_source.points):
# Get closest point in the layer we are interested in
aim_point = plots['points'].mlab_source.points[new_point_i:new_point_i + 1]
update_scene()
def update_scene():
global plots, offsets, p_scale, show_in_p, aim_point, point_i
# Get the current view
v = mlab.view()
roll = mlab.roll()
# clear figure
for key in plots.keys():
plots[key].remove()
plots = {}
# Plot new data feature
p = points[obj_i]
# Rescale points for visu
p = (p * 1.5 / config.in_radius)
# Show point cloud
if show_in_p <= 1:
plots['points'] = mlab.points3d(p[:, 0],
p[:, 1],
p[:, 2],
resolution=8,
scale_factor=p_scale,
scale_mode='none',
color=(0, 1, 1),
figure=fig1)
if show_in_p >= 1:
# Get points and colors
in_p = in_points[obj_i]
in_p = (in_p * 1.5 / config.in_radius)
# Color point cloud if possible
in_c = in_colors[obj_i]
if in_c is not None:
# Primitives
scalars = np.arange(len(in_p)) # Key point: set an integer for each point
# Define color table (including alpha), which must be uint8 and [0,255]
colors = np.hstack((in_c, np.ones_like(in_c[:, :1])))
colors = (colors * 255).astype(np.uint8)
plots['in_points'] = mlab.points3d(in_p[:, 0],
in_p[:, 1],
in_p[:, 2],
scalars,
resolution=8,
scale_factor=p_scale*0.8,
scale_mode='none',
figure=fig1)
plots['in_points'].module_manager.scalar_lut_manager.lut.table = colors
else:
plots['in_points'] = mlab.points3d(in_p[:, 0],
in_p[:, 1],
in_p[:, 2],
resolution=8,
scale_factor=p_scale*0.8,
scale_mode='none',
figure=fig1)
# Get KP locations
rescaled_aim_point = aim_point * config.in_radius / 1.5
point_i = lookuptrees[obj_i].query(rescaled_aim_point, return_distance=False)[0][0]
if offsets:
KP = points[obj_i][point_i] + deformed_KP[obj_i][point_i]
scals = np.ones_like(KP[:, 0])
else:
KP = points[obj_i][point_i] + original_KP
scals = np.zeros_like(KP[:, 0])
radius_debug = deform_convs[deform_idx].radius
extent_debug = radius_debug / config.conv_radius * config.KP_extent
repulse_debug = radius_debug / config.conv_radius * config.repulse_extent
print('radius = {:.3f} ext = {:.3f} repuls = {:.3f}'.format(radius_debug,
extent_debug,
repulse_debug))
max_reaches = []
for KP_debug in deformed_KP[obj_i]:
r_max = np.max(np.linalg.norm(KP_debug, axis=1))
max_reach = (r_max + extent_debug) / (radius_debug / config.conv_radius)
max_reaches.append(max_reach)
print('max reach over all deformed kernels = ', np.max(max_reaches))
print('{:^13s} {:^13s} {:^13s}'.format('nn_dist', 'center_dist', 'max_reach'))
KP_debug = KP - points[obj_i][point_i]
for kkp in KP_debug:
rrr = np.linalg.norm(kkp)
nnn = np.sort(np.linalg.norm(KP_debug - kkp, axis=1))[1]
max_reach = (rrr + extent_debug) / (radius_debug / config.conv_radius)
print('{:^13.3f} {:^13.3f} {:^13.3f}'.format(nnn, rrr, max_reach))
print('\n****************************************************\n')
KP = (KP * 1.5 / config.in_radius)
plots['KP'] = mlab.points3d(KP[:, 0],
KP[:, 1],
KP[:, 2],
scals,
colormap='autumn',
resolution=8,
scale_factor=1.2*p_scale,
scale_mode='none',
vmin=0,
vmax=1,
figure=fig1)
if True:
plots['center'] = mlab.points3d(p[point_i, 0],
p[point_i, 1],
p[point_i, 2],
scale_factor=1.1*p_scale,
scale_mode='none',
color=(0, 1, 0),
figure=fig1)
# New title
plots['title'] = mlab.title(str(obj_i), color=(0, 0, 0), size=0.3, height=0.01)
text = '<--- (press g for previous)' + 50 * ' ' + '(press h for next) --->'
plots['text'] = mlab.text(0.01, 0.01, text, color=(0, 0, 0), width=0.98)
plots['orient'] = mlab.orientation_axes()
# Set the saved view
mlab.view(*v)
mlab.roll(roll)
return
def animate_kernel():
global plots, offsets, p_scale, show_in_p
# Get KP locations
KP_def = points[obj_i][point_i] + deformed_KP[obj_i][point_i]
KP_def = (KP_def * 1.5 / config.in_radius)
KP_def_color = (1, 0, 0)
KP_rigid = points[obj_i][point_i] + original_KP
KP_rigid = (KP_rigid * 1.5 / config.in_radius)
KP_rigid_color = (1, 0.7, 0)
if offsets:
t_list = np.linspace(0, 1, 150, dtype=np.float32)
else:
t_list = np.linspace(1, 0, 150, dtype=np.float32)
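            # Animate a linear interpolation of the plotted kernel points between rigid and deformed positions.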
@mlab.animate(delay=10)
def anim():
for t in t_list:
plots['KP'].mlab_source.set(x=t * KP_def[:, 0] + (1 - t) * KP_rigid[:, 0],
y=t * KP_def[:, 1] + (1 - t) * KP_rigid[:, 1],
z=t * KP_def[:, 2] + (1 - t) * KP_rigid[:, 2],
scalars=t * np.ones_like(KP_def[:, 0]))
yield
anim()
return
def keyboard_callback(vtk_obj, event):
global obj_i, point_i, offsets, p_scale, show_in_p
if vtk_obj.GetKeyCode() in ['b', 'B']:
p_scale /= 1.5
update_scene()
elif vtk_obj.GetKeyCode() in ['n', 'N']:
p_scale *= 1.5
update_scene()
if vtk_obj.GetKeyCode() in ['g', 'G']:
obj_i = (obj_i - 1) % len(deformed_KP)
point_i = 0
update_scene()
elif vtk_obj.GetKeyCode() in ['h', 'H']:
obj_i = (obj_i + 1) % len(deformed_KP)
point_i = 0
update_scene()
elif vtk_obj.GetKeyCode() in ['k', 'K']:
offsets = not offsets
animate_kernel()
elif vtk_obj.GetKeyCode() in ['z', 'Z']:
show_in_p = (show_in_p + 1) % 3
update_scene()
elif vtk_obj.GetKeyCode() in ['0']:
print('Saving')
# Find a new name
file_i = 0
file_name = 'KP_{:03d}.ply'.format(file_i)
files = [f for f in listdir('KP_clouds') if f.endswith('.ply')]
while file_name in files:
file_i += 1
file_name = 'KP_{:03d}.ply'.format(file_i)
KP_deform = points[obj_i][point_i] + deformed_KP[obj_i][point_i]
KP_normal = points[obj_i][point_i] + original_KP
# Save
write_ply(join('KP_clouds', file_name),
[in_points[obj_i], in_colors[obj_i]],
['x', 'y', 'z', 'red', 'green', 'blue'])
write_ply(join('KP_clouds', 'KP_{:03d}_deform.ply'.format(file_i)),
[KP_deform],
['x', 'y', 'z'])
write_ply(join('KP_clouds', 'KP_{:03d}_normal.ply'.format(file_i)),
[KP_normal],
['x', 'y', 'z'])
print('OK')
return
# Draw a first plot
pick_func = fig1.on_mouse_pick(picker_callback)
pick_func.tolerance = 0.01
update_scene()
fig1.scene.interactor.add_observer('KeyPressEvent', keyboard_callback)
mlab.show()
return
@staticmethod
def show_activation(path, relu_idx=0, save_video=False):
"""
        This function shows the saved input point clouds that maximize the activations. You can also load the files
        directly in visualization software such as CloudCompare.
        In the case of relu_idx = 0 in gaussian mode, the associated filter is also shown. This function can only
        show the filters for the last saved epoch.
"""
################
# Find the files
################
# Check visu folder
visu_path = join('visu',
'visu_' + path.split('/')[-1],
'top_activations',
'Relu{:02d}'.format(relu_idx))
if not exists(visu_path):
message = 'Relu {:d} activations of the model {:s} not found.'
raise ValueError(message.format(relu_idx, path.split('/')[-1]))
# Get the list of files
feature_files = np.sort([f for f in listdir(visu_path) if f.endswith('.ply')])
if len(feature_files) == 0:
message = 'Relu {:d} activations of the model {:s} not found.'
raise ValueError(message.format(relu_idx, path.split('/')[-1]))
# Load mode
config = Config()
config.load(path)
mode = config.convolution_mode
#################
# Get activations
#################
all_points = []
all_responses = []
for file in feature_files:
# Load points
data = read_ply(join(visu_path, file))
all_points += [np.vstack((data['x'], data['y'], data['z'])).T]
all_responses += [data['responses']]
###########################
# Interactive visualization
###########################
# Create figure for features
fig1 = mlab.figure('Features', bgcolor=(0.5, 0.5, 0.5), size=(640, 480))
fig1.scene.parallel_projection = False
# Indices
global file_i
file_i = 0
def update_scene():
# clear figure
mlab.clf(fig1)
# Plot new data feature
points = all_points[file_i]
responses = all_responses[file_i]
            min_response, max_response = np.min(responses), np.max(responses)
# coding=utf-8
# Copyright 2019 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for trax.rl.ppo."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import gin
import gym
import jax
from jax import random as jax_random
import numpy as np
from tensorflow import test
from tensorflow.io import gfile
from trax import layers
from trax import models
from trax.rl import ppo
from trax.rl import serialization_utils
from trax.shapes import ShapeDtype
from trax.supervised import inputs
from trax.supervised import trainer_lib
class PpoTest(test.TestCase):
def setUp(self):
super(PpoTest, self).setUp()
self.rng_key = trainer_lib.init_random_number_generators(0)
def test_get_policy_model_files(self):
output_dir = self.get_temp_dir()
def write_policy_model_file(epoch):
with gfile.GFile(
ppo.get_policy_model_file_from_epoch(output_dir, epoch), 'w') as f:
f.write('some data')
epochs = [200, 100, 300]
# 300, 200, 100
expected_policy_model_files = [
output_dir + '/model-000300.pkl',
output_dir + '/model-000200.pkl',
output_dir + '/model-000100.pkl',
]
for epoch in epochs:
write_policy_model_file(epoch)
policy_model_files = ppo.get_policy_model_files(output_dir)
self.assertEqual(expected_policy_model_files, policy_model_files)
gfile.rmtree(output_dir)
def test_get_epoch_from_policy_model_file(self):
self.assertEqual(0,
ppo.get_epoch_from_policy_model_file('model-000000.pkl'))
self.assertEqual(123456,
ppo.get_epoch_from_policy_model_file('model-123456.pkl'))
def test_get_policy_model_file_from_epoch(self):
self.assertEqual('/tmp/model-000000.pkl',
ppo.get_policy_model_file_from_epoch('/tmp', 0))
self.assertEqual('/tmp/model-123456.pkl',
ppo.get_policy_model_file_from_epoch('/tmp', 123456))
def test_policy_and_value_net(self):
observation_shape = (3, 4, 5)
batch_observation_shape = (1, 1) + observation_shape
n_actions = 2
n_controls = 3
pnv_model = ppo.policy_and_value_net(
n_controls=n_controls,
n_actions=n_actions,
vocab_size=None,
bottom_layers_fn=lambda: [layers.Flatten(n_axes_to_keep=2)],
two_towers=True,
)
input_signature = ShapeDtype(batch_observation_shape)
_, _ = pnv_model.init(input_signature)
batch = 2
time_steps = 10
batch_of_observations = np.random.uniform(
size=(batch, time_steps) + observation_shape)
pnv_output = pnv_model(batch_of_observations)
# Output is a list, first is probab of actions and the next is value output.
self.assertEqual(2, len(pnv_output))
self.assertEqual(
(batch, time_steps * n_controls, n_actions), pnv_output[0].shape)
self.assertEqual((batch, time_steps * n_controls), pnv_output[1].shape)
def test_pad_trajectories(self):
observation_shape = (2, 3, 4)
trajectories = []
n_trajectories = 7
n_actions = 10
# Time-steps are between [min_allowable_time_step, max_allowable_time_step]
max_allowable_time_step = 19
min_allowable_time_step = 5
# The actual max we see in the data.
max_time_step = -1
# Bucket length.
bucket_length = 15
# Make `n_trajectories` random trajectories.
for i in range(n_trajectories):
time_steps = np.random.randint(min_allowable_time_step,
max_allowable_time_step + 1)
if time_steps > max_time_step:
max_time_step = time_steps
observations = np.random.randint(
0, 255, size=(time_steps + 1,) + observation_shape).astype(np.uint8)
rewards = np.random.uniform(size=(time_steps,)).astype(np.float32)
actions = np.random.randint(
0, n_actions, size=(time_steps,)).astype(np.int32)
infos = {
'a': np.random.uniform(size=(time_steps,)).astype(np.float32),
'b': np.random.uniform(size=(time_steps,)).astype(np.float32)
}
trajectories.append((observations, rewards, actions, infos))
# Now pad these trajectories.
padded_trajectories = ppo.pad_trajectories(
trajectories, boundary=bucket_length)
# Expected padding.
i = 1
while i * bucket_length < max_time_step:
i += 1
expected_padding = i * bucket_length
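    # i.e. the smallest multiple of the bucket length that is at least max_time_step.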
# Get the padded objects.
(pad_lengths, reward_mask, padded_observations, padded_actions,
padded_rewards, padded_infos) = padded_trajectories
# Expectations on the padded shapes.
self.assertEqual(padded_observations.shape, (
n_trajectories,
expected_padding + 1,
) + observation_shape)
self.assertEqual(padded_actions.shape, (n_trajectories, expected_padding))
self.assertEqual(padded_rewards.shape, (n_trajectories, expected_padding))
self.assertEqual(reward_mask.shape, (n_trajectories, expected_padding))
self.assertEqual(padded_infos['a'].shape,
(n_trajectories, expected_padding))
self.assertEqual(padded_infos['b'].shape,
(n_trajectories, expected_padding))
# Assert that the padding lengths and reward mask are consistent.
self.assertAllEqual(
np.full((n_trajectories,), expected_padding),
np.array(np.sum(reward_mask, axis=1)) + pad_lengths)
def test_rewards_to_go(self):
rewards = np.array([
[1, 2, 4, 8, 16, 32, 64, 128],
[1, 1, 1, 1, 1, 1, 1, 1],
])
rewards_mask = np.array([
[1, 1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 0],
])
gamma = 0.5
rewards_to_go = ppo.rewards_to_go(rewards, rewards_mask, gamma)
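    # Masked rewards-to-go satisfy r2g[t] = r[t] + gamma * r2g[t + 1], computed backwards from the
    # last unmasked step, e.g. first row: 16, then 8 + 0.5*16 = 16, 4 + 0.5*16 = 12, 2 + 0.5*12 = 8, 1 + 0.5*8 = 5.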
self.assertAllEqual(
np.array([
[5, 8, 12, 16, 16, 0, 0, 0],
[1.984375, 1.96875, 1.9375, 1.875, 1.75, 1.5, 1.0, 0],
]), rewards_to_go)
def test_rewards_to_go_really_long_sequences(self):
T = 1200 # pylint: disable=invalid-name
rewards = np.random.uniform(1e-3, 1e-2, (1, T))
# Make a mask, clear out a fixed number `L` of 1s from the end.
L = 36 # pylint: disable=invalid-name
assert L < T
rewards_mask = np.ones_like(rewards)
rewards_mask[0, L:] = 0
gamma = 0.94
actual_r2g = ppo.rewards_to_go(rewards, rewards_mask, gamma).reshape(-1)
# Let's compute r2g the slow way.
masked_rewards = (rewards_mask * rewards).reshape(-1)
expected_r2g = np.zeros_like(masked_rewards)
for t in range(T):
for j in range(t, T):
expected_r2g[t] += (gamma**(j - t)) * masked_rewards[j]
self.assertAllClose(expected_r2g, actual_r2g)
def test_value_loss(self):
rewards = np.array([
[1, 2, 4, 8, 16, 32, 64, 128],
[1, 1, 1, 1, 1, 1, 1, 1],
])
rewards_mask = np.array([
[1, 1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 0],
])
gamma = 0.5
epsilon = 0.1
# Random observations and a value function that returns a constant value.
# NOTE: Observations have an extra time-step.
B, T = rewards.shape # pylint: disable=invalid-name
observation_shape = (210, 160, 3) # atari pong
random_observations = np.random.uniform(size=(B, T + 1) + observation_shape)
def value_net_apply(observations, params, rng=None):
del params, rng
# pylint: disable=invalid-name
B, T_p_1, OBS = (observations.shape[0], observations.shape[1],
observations.shape[2:])
del OBS
return np.ones((B, T_p_1))
# pylint: enable=invalid-name
value_prediction = value_net_apply(random_observations, [])
with jax.disable_jit():
(value_loss, _) = ppo.value_loss_given_predictions(
value_prediction,
rewards,
rewards_mask,
gamma,
epsilon)
self.assertNear(53.3637084961, value_loss, 1e-6)
def test_deltas(self):
rewards = np.array([
[1, 2, 4, 8, 16, 32, 64, 128],
[1, 1, 1, 1, 1, 1, 1, 1],
])
rewards_mask = np.array([
[1, 1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 0],
])
B, T = rewards.shape # pylint: disable=invalid-name
# Say, all predicted values are 1.
predicted_values = np.ones((B, T + 1))
gamma = 1.0
td_residuals = ppo.deltas(predicted_values, rewards, rewards_mask, gamma)
# With V(s) being the same for all s, td_residuals should be
# equal to the rewards + (\gamma - 1)*v(s), masked in the right places.
truncated_pv = predicted_values[:, :-1]
masked_rewards = rewards * rewards_mask
expected_residuals = (masked_rewards +
(gamma - 1) * truncated_pv) * rewards_mask
self.assertAllEqual(expected_residuals, td_residuals)
gamma = 0.5
td_residuals = ppo.deltas(predicted_values, rewards, rewards_mask, gamma)
expected_residuals = (masked_rewards +
(gamma - 1) * truncated_pv) * rewards_mask
self.assertAllEqual(expected_residuals, td_residuals)
def test_gae_advantages(self):
td_deltas = np.array([
[1, 2, 4, 8, 16, 32, 64, 128],
[1, 1, 1, 1, 1, 1, 1, 1],
])
rewards_mask = np.array([
[1, 1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 0],
])
gamma = 0.5
lambda_ = 1.0
expected_gae_advantages = np.array([
[5, 8, 12, 16, 16, 0, 0, 0],
[1.984375, 1.96875, 1.9375, 1.875, 1.75, 1.5, 1.0, 0],
])
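    # GAE is a discounted sum of the TD deltas with factor gamma * lambda, so the parameter pairs
    # (0.5, 1.0) and (1.0, 0.5) used below yield identical advantages.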
gae_advantages = ppo.gae_advantages(td_deltas * rewards_mask, rewards_mask,
lambda_, gamma)
self.assertAllEqual(expected_gae_advantages, gae_advantages)
gamma = 1.0
lambda_ = 0.5
gae_advantages = ppo.gae_advantages(td_deltas * rewards_mask, rewards_mask,
lambda_, gamma)
self.assertAllEqual(expected_gae_advantages, gae_advantages)
def test_chosen_probabs(self):
# Shape (2, 2, 3)
probab_observations = np.array(
[[[0.1, 0.2, 0.7], [0.4, 0.1, 0.5]],
[[0.3, 0.1, 0.6], [0.1, 0.1, 0.8]]]
)
# Shape (2, 2, 1)
actions = np.array([[1, 2], [0, 1]])
chosen_probabs = ppo.chosen_probabs(probab_observations, actions)
self.assertAllEqual(
np.array([[0.2, 0.5], [0.3, 0.1]]), chosen_probabs)
def test_compute_probab_ratios(self):
p_old = np.array([[
[np.log(0.1), np.log(0.2), np.log(0.6), np.log(0.1)],
[np.log(0.4), np.log(0.1), np.log(0.4), np.log(0.1)],
[np.log(0.3), np.log(0.1), np.log(0.5), np.log(0.1)],
[np.log(0.1), np.log(0.2), np.log(0.6), np.log(0.1)],
], [
[np.log(0.3), np.log(0.1), np.log(0.5), np.log(0.1)],
[np.log(0.1), np.log(0.1), np.log(0.4), np.log(0.4)],
[np.log(0.3), np.log(0.1), np.log(0.5), np.log(0.1)],
[np.log(0.1), np.log(0.2), np.log(0.6), np.log(0.1)],
]])
p_new = np.array([[
[np.log(0.3), np.log(0.1), np.log(0.5), np.log(0.1)],
[np.log(0.4), np.log(0.1), np.log(0.1), np.log(0.3)],
[np.log(0.1), np.log(0.2), np.log(0.1), np.log(0.6)],
[np.log(0.3), np.log(0.1), np.log(0.5), np.log(0.1)],
], [
[np.log(0.1), np.log(0.2), np.log(0.1), np.log(0.6)],
[np.log(0.1), np.log(0.1), np.log(0.2), np.log(0.6)],
[np.log(0.3), np.log(0.1), np.log(0.3), np.log(0.3)],
[np.log(0.1), np.log(0.2), np.log(0.1), np.log(0.6)],
]])
actions = np.array([[1, 2, 0, 1], [0, 3, 3, 0]])
mask = np.array([[1, 1, 0, 0], [1, 1, 1, 0]])
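    # Each ratio is p_new(action) / p_old(action) at the chosen action, zeroed where masked.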
probab_ratios = ppo.compute_probab_ratios(p_new, p_old, actions, mask)
self.assertAllClose(
np.array([
[0.1 / 0.2, 0.1 / 0.4, 0.0, 0.0],
[0.1 / 0.3, 0.6 / 0.4, 0.3 / 0.1, 0.0],
]), probab_ratios)
def test_clipped_probab_ratios(self):
probab_ratios = np.array([
[1.5, 1.0, 0.5, 0.7],
[2.5, 2.0, 0.1, 1.0],
])
clipped_probab_ratios = ppo.clipped_probab_ratios(probab_ratios, 0.1)
self.assertAllClose(
np.array([
[1.1, 1.0, 0.9, 0.9],
[1.1, 1.1, 0.9, 1.0],
]), clipped_probab_ratios)
def test_clipped_objective(self):
probab_ratios = np.array([
[1.5, 2.0, 0.5, 0.7],
[2.5, 2.0, 0.1, 1.0],
])
advantages = np.array([
[0.1, -0.1, 0.5, 0.7],
[2.0, -2.0, 2.0, 2.0],
])
mask = np.array([[1, 1, 0, 0], [1, 1, 1, 0]])
epsilon = 0.1
clipped_probab_ratios = np.array([
[1.1, 1.1, 0.9, 0.9],
[1.1, 1.1, 0.9, 1.0],
])
unused_advantages_x_probab_ratios = np.array([
[0.15, -0.2, 0.25, 0.49],
[5.00, -4.0, 0.20, 2.00]
])
unused_advantages_x_clipped_probab_ratios = np.array([
[0.11, -0.11, 0.45, 0.63],
[2.20, -2.20, .80, 2.00]
])
unused_minimums = np.array([
[0.11, -0.2, 0.25, 0.49],
[2.20, -4.0, 0.20, 2.00]
])
# minimums * mask
objective = np.array([
[0.11, -0.2, 0.0, 0.],
[2.20, -4.0, 0.2, 0.]
])
# Assert that we computed things correctly in this test.
self.assertAllClose(
np.minimum(probab_ratios * advantages,
clipped_probab_ratios * advantages) * mask,
objective)
self.assertAllClose(
objective,
ppo.clipped_objective(probab_ratios, advantages, mask, epsilon))
def test_combined_loss(self):
B, T, A, OBS = 2, 10, 2, (28, 28, 3) # pylint: disable=invalid-name
batch_observation_shape = (1, 1) + OBS
net = ppo.policy_and_value_net(
n_controls=1,
n_actions=A,
vocab_size=None,
bottom_layers_fn=lambda: [layers.Flatten(n_axes_to_keep=2)],
two_towers=True,
)
input_signature = ShapeDtype(batch_observation_shape)
old_params, _ = net.init(input_signature)
new_params, state = net.init(input_signature)
# Generate a batch of observations.
observations = np.random.uniform(size=(B, T + 1) + OBS)
actions = np.random.randint(0, A, size=(B, T + 1))
rewards = np.random.uniform(0, 1, size=(B, T))
mask = np.ones_like(rewards)
# Just test that this computes at all.
(new_log_probabs, value_predictions_new) = (
net(observations, weights=new_params, state=state))
(old_log_probabs, value_predictions_old) = (
net(observations, weights=old_params, state=state))
gamma = 0.99
lambda_ = 0.95
epsilon = 0.2
value_weight = 1.0
entropy_weight = 0.01
nontrainable_params = {
'gamma': gamma,
'lambda': lambda_,
'epsilon': epsilon,
'value_weight': value_weight,
'entropy_weight': entropy_weight,
}
rewards_to_actions = np.eye(value_predictions_old.shape[1])
(value_loss_1, _) = ppo.value_loss_given_predictions(
value_predictions_new, rewards, mask, gamma=gamma,
value_prediction_old=value_predictions_old, epsilon=epsilon)
(ppo_loss_1, _) = ppo.ppo_loss_given_predictions(
new_log_probabs,
old_log_probabs,
value_predictions_old,
actions,
rewards_to_actions,
rewards,
mask,
gamma=gamma,
lambda_=lambda_,
epsilon=epsilon)
(combined_loss, (ppo_loss_2, value_loss_2, entropy_bonus), _, state) = (
ppo.combined_loss(new_params,
old_log_probabs,
value_predictions_old,
net,
observations,
actions,
rewards_to_actions,
rewards,
mask,
nontrainable_params=nontrainable_params,
state=state)
)
# Test that these compute at all and are self consistent.
self.assertGreater(entropy_bonus, 0.0)
self.assertNear(value_loss_1, value_loss_2, 1e-6)
self.assertNear(ppo_loss_1, ppo_loss_2, 1e-6)
self.assertNear(
combined_loss,
ppo_loss_2 + (value_weight * value_loss_2) -
(entropy_weight * entropy_bonus),
1e-6
)
def test_masked_entropy(self):
# (2, 4+1, 4)
log_probs = np.array([[
[np.log(0.1), np.log(0.2), np.log(0.6), np.log(0.1)],
[np.log(0.4), np.log(0.1), np.log(0.4), np.log(0.1)],
[np.log(0.3), np.log(0.1), np.log(0.5), np.log(0.1)],
[np.log(0.1), np.log(0.2), np.log(0.6), np.log(0.1)],
[np.log(0.3), np.log(0.1), np.log(0.5), np.log(0.1)],
], [
[np.log(0.3), np.log(0.1), np.log(0.5), np.log(0.1)],
[np.log(0.1), np.log(0.1), np.log(0.4), np.log(0.4)],
[np.log(0.3), np.log(0.1), np.log(0.5), np.log(0.1)],
[np.log(0.1), np.log(0.2), np.log(0.6), np.log(0.1)],
[np.log(0.3), np.log(0.1), np.log(0.5), np.log(0.1)],
]])
# (2, 4)
mask = np.array([
[1, 1, 0, 0, 0],
[1, 1, 1, 0, 0]
])
def plp(p):
      return p * np.log(p)
import numpy as np
from numpy.linalg import matrix_power, matrix_rank, inv
__all__ = ['ctrb_mat', 'ctrb_index', 'ctrb_indices', 'ctrb_trans_mat',
'obsv_mat', 'obsv_indices', 'obsv_index']
def _check_ab(A: np.ndarray, B: np.ndarray):
if A.shape[0] != A.shape[1]:
raise ValueError('matrix A should be square.')
if B.shape[0] != A.shape[1]:
raise ValueError('matrix B should have the same row number as matrix A')
def _check_ac(A: np.ndarray, C: np.ndarray):
if A.shape[0] != A.shape[1]:
raise ValueError('matrix A should be square.')
if C.shape[1] != A.shape[1]:
raise ValueError('matrix C should have the same column number as matrix A')
def ctrb_mat(A: np.ndarray, B: np.ndarray):
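    """Controllability matrix Qc = [B, AB, A^2 B, ..., A^(n-1) B], shape (n, n*p)."""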
_check_ab(A, B)
n = A.shape[0]
p = B.shape[1]
q = np.empty((n, n * p))
for i in range(n):
q[:, i * p: i * p + p] = matrix_power(A, i) @ B
return q
def obsv_mat(A: np.ndarray, C: np.ndarray):
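    """Observability matrix Qo = [C; CA; ...; CA^(n-1)], computed as ctrb_mat(A.T, C.T).T."""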
_check_ac(A, C)
return ctrb_mat(A.T, C.T).T
def _adjust_qc_order(Qc):
p = Qc.shape[1] // Qc.shape[0]
Qc_ = Qc[:, 0::p]
for i in range(1, p):
q = Qc[:, i::p]
Qc_ = np.concatenate((Qc_, q), axis=1)
return Qc_
def ctrb_indices(A: np.ndarray, B: np.ndarray):
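    """Controllability indices: greedily keep linearly independent columns of Qc, then count,
    for each input column of B, how many of its columns were kept (zero counts are trimmed)."""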
try:
Qc = ctrb_mat(A, B)
except ValueError as e:
raise ValueError('wrong shape of input matrices') from e
    mat_indices = np.zeros(Qc.shape[1], dtype=bool)
rank = 0
n = A.shape[0]
for i in range(Qc.shape[1]):
mat_indices[i] = True
r = matrix_rank(Qc[:, mat_indices])
if r <= rank:
mat_indices[i] = False
else:
rank = r
if rank == n:
break
controllability_indices = np.zeros(n)
p = B.shape[1]
for i in range(p):
controllability_indices[i] = np.sum(mat_indices[i: n: p])
    return np.trim_zeros(controllability_indices).astype(int)
def _adjust_qo_order(Qo):
return _adjust_qc_order(Qo.T).T
def obsv_indices(A: np.ndarray, C: np.ndarray):
return ctrb_indices(A.T, C.T)
def ctrb_index(A: np.ndarray, B: np.ndarray):
return np.max(ctrb_indices(A, B))
def obsv_index(A: np.ndarray, C: np.ndarray):
return np.max(obsv_indices(A, C))
def ctrb_trans_mat(A: np.ndarray, B: np.ndarray):
_check_ab(A, B)
if B.shape[1] == 1:
        T = np.empty(A.shape)
from __future__ import division, print_function
import glob
import numpy as np
from scipy import interpolate as interp
from scipy.ndimage import filters as filter
try:
from enterprise.pulsar import Pulsar
ent_present = True
except ImportError:
ent_present = False
fyr = 1./31536000.
# from Kristina
def getMax2d(samples1, samples2, weights=None, smooth=True, bins=[40, 40],
x_range=None, y_range=None, logx=False, logy=False, logz=False):
""" Function to return the maximum likelihood values by interpolating over
a two dimensional histogram made of two sets of samples.
Parameters
----------
samples1, samples2 : array or list
Arrays or lists from which to find two dimensional maximum likelihood
values.
weights : array of floats
Weights to use in histogram.
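    smooth : bool, optional
        A value of True smooths the binned histogram before interpolating.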
bins : list of ints
List of 2 integers which dictates number of bins for samples1 and
samples2.
x_range : tuple, optional
Range of samples1
y_range : tuple, optional
Range of samples2
    logx : bool, optional
        A value of True uses a log10 scale for samples1.
    logy : bool, optional
        A value of True uses a log10 scale for samples2.
logz : bool, optional
A value of True indicates that the z axis is in log10.
"""
if x_range is None:
xmin = np.amin(samples1)
xmax = np.amax(samples1)
else:
xmin = x_range[0]
xmax = x_range[1]
if y_range is None:
ymin = np.amin(samples2)
ymax = np.amax(samples2)
else:
ymin = y_range[0]
ymax = y_range[1]
if logx:
bins[0] = np.logspace(np.log10(xmin), np.log10(xmax), bins[0])
if logy:
bins[1] = np.logspace(np.log10(ymin), np.log10(ymax), bins[1])
hist2d,xedges,yedges = np.histogram2d(samples1, samples2, weights=weights,
bins=bins,
range=[[xmin,xmax],[ymin,ymax]])
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1] ]
if logz:
        hist2d = np.where(hist2d >= 0, hist2d, 1)
"""
Functions for geometrical image transformation and warping.
"""
import warnings
import numpy as np
import scipy.ndimage.interpolation
try:
import skimage.transform
scikit_image_not_found = False
except ImportError: # pragma: no cover
warnings.warn("scikit-image could not be imported. Image rotation will use scipy",
ImportWarning)
scikit_image_not_found = True # pragma: no cover
from sunpy.util.exceptions import SunpyUserWarning
__all__ = ['affine_transform']
def affine_transform(image, rmatrix, order=3, scale=1.0, image_center=None,
recenter=False, missing=0.0, use_scipy=False):
"""
Rotates, shifts and scales an image using :func:`skimage.transform.warp`,
or :func:`scipy.ndimage.interpolation.affine_transform` if specified. Falls
back to the scipy function if scikit-image can't be imported.
Parameters
----------
image : `numpy.ndarray`
2D Image to be rotated.
rmatrix : 2x2
Linear transformation rotation matrix.
order : int 0-5
Interpolation order to be used. When using scikit-image this parameter
is passed into :func:`skimage.transform.warp` (e.g., 3 corresponds to
bi-cubic interpolation).
When using scipy it is passed into
:func:`scipy.ndimage.interpolation.affine_transform` where it controls
the order of the spline.
Default: 3
scale : float
A scale factor for the image. Default is no scaling.
image_center : tuple
The point in the image to rotate around (axis of rotation).
Default: center of the array.
recenter : bool or array-like
Move the axis of rotation to the center of the array or recenter coords.
Default: True, recenter to the center of the array.
missing : float
The value to replace any missing data after the transformation.
use_scipy : bool
Force use of :func:`scipy.ndimage.interpolation.affine_transform`.
Will set all NaNs in image to zero before doing the transform.
Default: False, unless scikit-image can't be imported
Returns
-------
out : New rotated, scaled and translated image.
Notes
-----
This algorithm uses an affine transformation as opposed to a polynomial
geometrical transformation, which by default is :func:`skimage.transform.warp`.
One can specify using :func:`scipy.ndimage.interpolation.affine_transform` as
an alternative affine transformation. The two transformations use different
algorithms and thus do not give identical output.
When using for :func:`skimage.transform.warp` with order >= 4 or using
:func:`scipy.ndimage.interpolation.affine_transform` at all, NaN values will
replaced with zero prior to rotation. No attempt is made to retain the NaN
values.
Input arrays with integer data are cast to float64 and can be re-cast using
:func:`numpy.ndarray.astype` if desired.
    Although this function is analogous to IDL's rot() function, it does not
    use the same algorithm as the IDL rot() function.
IDL's rot() calls the `POLY_2D <http://www.harrisgeospatial.com/docs/poly_2d.html>`_
method to calculate the inverse mapping of original to target pixel
coordinates. This is a polynomial geometrical transformation.
Then optionally it uses a bicubic convolution interpolation
algorithm to map the original to target pixel values.
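
    Examples
    --------
    A minimal sketch (assumed values; rotates ``image`` by 0.1 radians about its
    center)::

        import numpy as np
        c, s = np.cos(0.1), np.sin(0.1)
        rotated = affine_transform(image, np.array([[c, -s], [s, c]]))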
"""
rmatrix = rmatrix / scale
array_center = (np.array(image.shape)[::-1]-1)/2.0
# Make sure the image center is an array and is where it's supposed to be
if image_center is not None:
image_center = np.asanyarray(image_center)
else:
image_center = array_center
# Determine center of rotation based on use (or not) of the recenter keyword
if recenter:
rot_center = array_center
else:
rot_center = image_center
displacement = np.dot(rmatrix, rot_center)
shift = image_center - displacement
if use_scipy or scikit_image_not_found:
if np.any(np.isnan(image)):
warnings.warn("Setting NaNs to 0 for SciPy rotation.", SunpyUserWarning)
# Transform the image using the scipy affine transform
rotated_image = scipy.ndimage.interpolation.affine_transform(
np.nan_to_num(image).T, rmatrix, offset=shift, order=order,
mode='constant', cval=missing).T
else:
# Make the rotation matrix 3x3 to include translation of the image
        skmatrix = np.zeros((3, 3))
import torch
import numpy as np
import math
from tqdm import tqdm
import data
from param import *
def get_2city_distance(n1, n2):
x1, y1, x2, y2 = n1[0], n1[1], n2[0], n2[1]
if isinstance(n1, torch.Tensor):
return torch.sqrt((x2 - x1).pow(2) + (y2 - y1).pow(2))
elif isinstance(n1, (list, np.ndarray)):
return math.sqrt(pow(x2 - x1, 2) + pow(y2 - y1, 2))
else:
raise TypeError
class Env:
data = None
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
def __init__(self, cfg):
'''
        batch / task_n / server_load : sizes and load factor taken from cfg
        alpha / beta / gama : reward-related coefficients taken from cfg
'''
self.batch = cfg.batch
self.task_n = cfg.task_n
self.server_load = cfg.server_load
self.alpha = cfg.alphaa
self.beta = cfg.beta
self.gama = cfg.gama
def get_nodes(self, seed=None, task_n=100):
'''
        return samples:(task_n, 7) = [CPU, IO, Band, Memory, priority, time_out, time_use]
'''
if seed is not None:
np.random.seed(seed)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# Randomly generate (max_length) task
multi_param = (15 * task_n) / self.server_load
# [CPU, IO, Band, Memory]
resource = np.random.rand(task_n, 4) * 2 / self.server_load
task_priority = np.random.randint(5, size=(task_n, 1))
time_use = np.random.randint(low=10, high=21, size=(task_n, 1))
time_out = np.random.randint(low=round(multi_param * 0.8), high=round(multi_param * 1.2),
size=(task_n, 1))
samples = np.concatenate((resource, task_priority, time_out, time_use), axis=-1)
samples = torch.tensor(samples, dtype=torch.float32, device=device)
return samples
def get_batch_nodes(self, n_samples):
'''
return nodes:(batch,task_n,6)
'''
if Env.data is not None:
return Env.data
# [n_samples, task_n, 6]
samples = []
part_samples = []
print("start generate data")
for _ in tqdm(range(n_samples // 10)):
# [task_n, 6]
instance = data.get_instance(self.task_n)
part_samples.append(instance)
for _ in range(10):
samples.extend(part_samples)
samples = torch.tensor(samples, dtype=torch.float32)
print("generate data done")
Env.data = samples
return samples
def stack_l_fast(self, inputs, tours):
"""
        *** this function is a faster version of stack_l! ***
        inputs: (batch, task_n, 7), task features
        tours: (batch, task_n), predicted task order
d: (batch, task_n, 7)
"""
inputs_cpu = inputs.cpu()
# [batch_size, 6]
result_list = []
for task_list, idx_list in zip(inputs_cpu, tours):
result = self.get_reward(task_list, idx_list)
result_list.append(result)
result_list = np.array(result_list)
batch_reward = result_list[:, 0]
return torch.tensor(batch_reward, dtype=torch.float32), np.mean(result_list, axis=0)
def get_reward(self, task_list, idx_list):
task_list = np.array(task_list)
self.task_n = len(task_list)
task_priority_max = 0
for i in range(self.task_n):
task_priority_max = max(task_priority_max, task_list[i][PRIORITY_IDX])
task_priority_sum = 0
for idx in range(self.task_n):
i = idx_list[idx]
task_priority = task_list[i][PRIORITY_IDX]
task_priority = (task_priority / task_priority_max) * (1 - idx / self.task_n)
task_priority_sum += task_priority
cpu = 0
time_use = 0
waiting_time = 0
server_run_map = []
server_remain = np.array([1, 1, 1])
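        # The server starts with unit capacity in each tracked resource dimension; tasks are
        # dispatched in the predicted order and queue whenever any resource is insufficient.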
for idx in idx_list:
task = task_list[idx]
need = task[:RESOURCE_NUM]
while server_remain[0] < need[0] or server_remain[1] < need[1] or \
server_remain[2] < need[2]:
server_run_map = np.array(server_run_map)
                time_use += 1  # update the elapsed time
cpu += 1 - server_remain[0]
server_run_map[:, -1] -= 1
                while len(server_run_map) > 0:  # remove finished tasks
min_task_idx = np.argmin(server_run_map, axis=0)[-1]
min_task = server_run_map[min_task_idx]
min_need = min_task[:RESOURCE_NUM]
min_time = min_task[-1]
if min_time > 0:
break
                    server_remain = np.add(server_remain, min_need)  # restore the freed capacity
                    server_run_map = np.delete(server_run_map, min_task_idx, axis=0)  # remove the task
            # resources are now sufficient, dispatch the task directly
if len(server_run_map) == 0:
server_run_map = np.array([task])
else:
server_run_map = np.row_stack((server_run_map, task))
waiting_time += task[RELEASE_TIME_IDX] + time_use
            server_remain = np.subtract(server_remain, need)  # update the server's remaining capacity
        # run the remaining tasks to completion
while len(server_run_map) > 0:
cpu = np.sum(server_run_map, axis=0)[0]
            server_run_map = np.array(server_run_map)
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2019 <NAME> <<EMAIL>>
#
# Distributed under terms of the MIT license.
import os
import gzip
import numpy as np
import logging
from recordclass import recordclass
from collections import defaultdict
from transformers import AutoTokenizer
from pytorch_pretrained_bert import BertTokenizer
from deep_morphology.data.base_data import BaseDataset, Vocab, DataFields
SentenceProbeFields = recordclass(
'SentenceProbeFields',
['sentence', 'sentence_len', 'target_idx', 'label']
)
class SentenceTokenPairFields(DataFields):
_fields = ('raw_sentence', 'idx1', 'idx2', 'token_starts',
'subwords', 'sentence_subword_len', 'label')
_alias = {
'tgt': 'label',
'input': 'subwords',
'input_len': 'sentence_subword_len'
}
_needs_vocab = ('label', 'subwords', )
class WordOnlyFields(DataFields):
_fields = ('sentence', 'target_word', 'target_word_len', 'target_idx',
'label')
_alias = {
'input': 'target_word',
'input_len': 'target_word_len',
'src_len': 'target_word_len',
'tgt': 'label',
}
_needs_vocab = ('target_word', 'label')
class EmbeddingOnlyFields(DataFields):
_fields = ('sentence', 'target_word', 'target_word_idx', 'label')
_alias = {
'tgt': 'label',
'src': 'target_word',
}
_needs_vocab = ('label', )
class EmbeddingOnlyPairFields(DataFields):
_fields = (
'left_sentence', 'left_target_word', 'left_target_idx',
'right_sentence', 'right_target_word', 'right_target_idx',
'label',
)
_alias = {
'tgt': 'label',
}
_needs_vocab = ('label', )
class BERTProberFields(DataFields):
_fields = (
'sentence', 'tokens', 'target', 'idx',
'sentence_len', 'target_idx', 'label',
)
_alias = {'tgt': 'label'}
_needs_vocab = ('label', )
class TokenInSequenceProberFields(DataFields):
_fields = (
'raw_sentence', 'raw_target', 'raw_idx',
'tokens', 'num_tokens', 'target_idx', 'label', 'token_starts',
)
_alias = {
'tgt': 'label',
# 'src_len': 'num_tokens',
'input_len': 'num_tokens'}
# token_starts needs a vocabulary because we manually set PAD=1000
_needs_vocab = ('tokens', 'label', 'token_starts')
_needs_constants = ('tokens', )
class MidSequenceProberFields(DataFields):
_fields = (
'raw_sentence', 'raw_target', 'raw_idx',
'input', 'input_len', 'target_idx', 'label', 'target_ids',
)
_alias = {'tgt': 'label', 'src_len': 'input_len'}
_needs_vocab = ('input', 'label', 'target_ids')
_needs_constants = ('input', )
class SequenceClassificationWithSubwordsDataFields(DataFields):
_fields = (
'raw_sentence', 'labels',
'sentence_len', 'subwords', 'sentence_subword_len', 'token_starts',
)
_alias = {'input': 'subwords',
'input_len': 'sentence_subword_len',
'tgt': 'labels'}
_needs_vocab = ('labels', )
class SentencePairFields(DataFields):
_fields = (
'left_sentence', 'left_sentence_len',
'left_target_word', 'left_target_idx',
'right_sentence', 'right_sentence_len',
'right_target_word', 'right_target_idx',
'label',
)
_alias = {'tgt': 'label'}
_needs_vocab = ('label', )
class WordOnlySentencePairFields(DataFields):
_fields = (
'left_sentence', 'left_target_word',
'left_target_word_len', 'left_target_idx',
'right_sentence', 'right_target_word',
'right_target_word_len', 'right_target_idx',
'label',
)
_alias = {'tgt': 'label'}
_needs_vocab = ('left_target_word', 'right_target_word', 'label', )
class BERTSentencePairFields(DataFields):
_fields = (
'left_sentence', 'left_tokens', 'left_sentence_len',
'left_target_word', 'left_target_first', 'left_target_last',
'right_sentence', 'right_tokens', 'right_sentence_len',
'right_target_word', 'right_target_first', 'right_target_last',
'label',
)
_alias = {'tgt': 'label'}
_needs_vocab = ('label', )
class Embedding:
def __init__(self, embedding_file, filter=None):
self.filter_ = filter
if embedding_file.endswith('.gz'):
with gzip.open(embedding_file, 'rt') as f:
self.load_stream(f)
else:
with open(embedding_file, 'rt') as f:
self.load_stream(f)
def load_stream(self, stream):
self.mtx = []
self.vocab = {}
for line in stream:
fd = line.strip().split(" ")
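            # a two-field line is the "<vocab_size> <dim>" header of the word2vec/fastText
            # text format; skip it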
if len(fd) == 2:
continue
word = fd[0]
if self.filter_ and word not in self.filter_:
continue
self.vocab[word] = len(self.mtx)
self.mtx.append(list(map(float, fd[1:])))
self.mtx = np.array(self.mtx)
def __len__(self):
return self.mtx.shape[0]
def __getitem__(self, key):
if key not in self.vocab:
return self.mtx[0]
return self.mtx[self.vocab[key]]
@property
def embedding_dim(self):
return self.mtx.shape[1]
class EmbeddingProberDataset(BaseDataset):
unlabeled_data_class = 'UnlabeledEmbeddingProberDataset'
constants = []
data_recordclass = EmbeddingOnlyFields
def load_or_create_vocabs(self):
vocab_pre = os.path.join(self.config.experiment_dir, 'vocab_')
needs_vocab = getattr(self.data_recordclass, '_needs_vocab',
self.data_recordclass._fields)
self.vocabs = self.data_recordclass()
for field in needs_vocab:
vocab_fn = getattr(self.config, 'vocab_{}'.format(field),
vocab_pre+field)
if field == 'label':
constants = []
else:
constants = ['SOS', 'EOS', 'PAD', 'UNK']
if os.path.exists(vocab_fn):
setattr(self.vocabs, field, Vocab(file=vocab_fn, frozen=True))
else:
setattr(self.vocabs, field, Vocab(constants=constants))
def to_idx(self):
vocab = set(r.target_word for r in self.raw)
if self.config.embedding == 'discover':
language = self.config.train_file.split("/")[-2]
emb_fn = os.path.join(os.environ['HOME'], 'resources',
'fasttext', language, 'common.vec')
self.config.embedding = emb_fn
else:
emb_fn = self.config.embedding
self.embedding = Embedding(emb_fn, filter=vocab)
self.embedding_size = self.embedding.embedding_dim
if getattr(self.config, 'permute_embedding', False):
self.embedding.mtx = np.random.permutation(self.embedding.mtx)
if getattr(self.config, 'randomize_embedding', False):
self.embedding.mtx = np.random.random(self.embedding.mtx.shape)
word_vecs = []
labels = []
for r in self.raw:
word_vecs.append(self.embedding[r.target_word])
labels.append(self.vocabs.label[r.label])
self.mtx = EmbeddingOnlyFields(
target_word=word_vecs,
label=labels
)
def extract_sample_from_line(self, line):
fd = line.rstrip("\n").split("\t")
sent, target, idx = fd[:3]
if len(fd) > 3:
label = fd[3]
else:
label = None
return EmbeddingOnlyFields(
sentence=sent,
target_word=target,
target_word_idx=int(idx),
label=label
)
def print_sample(self, sample, stream):
stream.write("{}\t{}\t{}\t{}\n".format(
sample.sentence, sample.target_word,
sample.target_word_idx, sample.label
))
def decode(self, model_output):
for i, sample in enumerate(self.raw):
output = model_output[i].argmax().item()
sample.label = self.vocabs.label.inv_lookup(output)
def batched_iter(self, batch_size):
starts = list(range(0, len(self), batch_size))
if self.is_unlabeled is False and self.config.shuffle_batches:
np.random.shuffle(starts)
for start in starts:
end = start + batch_size
yield EmbeddingOnlyFields(
target_word=self.mtx.target_word[start:end],
label=self.mtx.label[start:end]
)
class UnlabeledEmbeddingProberDataset(EmbeddingProberDataset):
pass
class EmbeddingPairDataset(BaseDataset):
unlabeled_data_class = 'UnlabeledEmbeddingPairDataset'
constants = []
data_recordclass = EmbeddingOnlyPairFields
def load_or_create_vocabs(self):
vocab_pre = os.path.join(self.config.experiment_dir, 'vocab_')
self.vocabs = self.data_recordclass()
for field in ('left_target_word', 'label'):
vocab_fn = getattr(self.config, 'vocab_{}'.format(field),
vocab_pre+field)
constants = []
if os.path.exists(vocab_fn):
setattr(self.vocabs, field, Vocab(file=vocab_fn, frozen=True))
else:
setattr(self.vocabs, field, Vocab(constants=constants))
self.vocabs.right_target_word = self.vocabs.left_target_word
def to_idx(self):
vocab = set(r.left_target_word for r in self.raw) | \
set(r.right_target_word for r in self.raw)
if self.config.embedding == 'discover':
language = self.config.train_file.split("/")[-2]
emb_fn = os.path.join(os.environ['HOME'], 'resources',
'fasttext', language, 'common.vec')
self.config.embedding = emb_fn
else:
emb_fn = self.config.embedding
self.embedding = Embedding(emb_fn, filter=vocab)
self.embedding_size = self.embedding.embedding_dim
left_vecs = []
right_vecs = []
labels = []
for r in self.raw:
left_vecs.append(self.embedding[r.left_target_word])
right_vecs.append(self.embedding[r.right_target_word])
labels.append(self.vocabs.label[r.label])
self.mtx = EmbeddingOnlyPairFields(
left_target_word=left_vecs,
right_target_word=right_vecs,
label=labels,
)
def extract_sample_from_line(self, line):
fd = line.rstrip("\n").split("\t")
if len(fd) > 6:
label = fd[6]
else:
label = None
return EmbeddingOnlyPairFields(
left_sentence=fd[0],
left_target_word=fd[1],
left_target_idx=fd[2],
right_sentence=fd[3],
right_target_word=fd[4],
right_target_idx=fd[5],
label=label
)
def decode(self, model_output):
for i, sample in enumerate(self.raw):
output = model_output[i].argmax().item()
sample.label = self.vocabs.label.inv_lookup(output)
def print_sample(self, sample, stream):
stream.write("{}\n".format("\t".join(map(str, (
sample.left_sentence,
sample.left_target_word,
sample.left_target_idx,
sample.right_sentence,
sample.right_target_word,
sample.right_target_idx,
sample.label)
))))
def batched_iter(self, batch_size):
starts = list(range(0, len(self), batch_size))
if self.is_unlabeled is False and self.config.shuffle_batches:
np.random.shuffle(starts)
for start in starts:
end = start + batch_size
yield EmbeddingOnlyPairFields(
left_target_word=self.mtx.left_target_word[start:end],
right_target_word=self.mtx.right_target_word[start:end],
label=self.mtx.label[start:end]
)
class UnlabeledEmbeddingPairDataset(EmbeddingPairDataset):
pass
class ELMOSentencePairDataset(BaseDataset):
data_recordclass = SentencePairFields
unlabeled_data_class = 'UnlabeledELMOSentencePairDataset'
constants = []
# FIXME this is a copy of WordOnlySentenceProberDataset's method
# should be removed along with recordclass
def load_or_create_vocabs(self):
# FIXME this should be init or more like nowhere
self.tgt_field_idx = -1
vocab_pre = os.path.join(self.config.experiment_dir, 'vocab_')
needs_vocab = getattr(self.data_recordclass, '_needs_vocab',
self.data_recordclass._fields)
self.vocabs = self.data_recordclass()
for field in needs_vocab:
vocab_fn = getattr(self.config, 'vocab_{}'.format(field),
vocab_pre+field)
if field == 'label':
constants = []
else:
constants = ['SOS', 'EOS', 'PAD', 'UNK']
if os.path.exists(vocab_fn):
setattr(self.vocabs, field, Vocab(file=vocab_fn, frozen=True))
else:
setattr(self.vocabs, field, Vocab(constants=constants))
def extract_sample_from_line(self, line):
fd = line.rstrip("\n").split("\t")
left_sen = fd[0].split(" ")
right_sen = fd[3].split(" ")
lidx = int(fd[2])
ridx = int(fd[5])
assert left_sen[lidx] == fd[1]
assert right_sen[ridx] == fd[4]
if len(fd) > 6:
label = fd[6]
else:
label = None
return SentencePairFields(
left_sentence=left_sen,
left_sentence_len=len(left_sen),
left_target_word=left_sen[lidx],
left_target_idx=lidx,
right_sentence=right_sen,
right_sentence_len=len(right_sen),
right_target_word=right_sen[ridx],
right_target_idx=ridx,
label=label
)
def to_idx(self):
mtx = SentencePairFields.initialize_all(list)
for sample in self.raw:
for field, value in sample._asdict().items():
if field == 'label':
mtx.label.append(self.vocabs.label[value])
else:
getattr(mtx, field).append(value)
self.mtx = mtx
def batched_iter(self, batch_size):
starts = list(range(0, len(self), batch_size))
if self.is_unlabeled is False and self.config.shuffle_batches:
np.random.shuffle(starts)
PAD = '<pad>'
for start in starts:
self._start = start
end = min(start + batch_size, len(self.raw))
batch = SentencePairFields.initialize_all(list)
# pad left sentences
maxlen = max(self.mtx.left_sentence_len[start:end])
sents = [self.mtx.left_sentence[i] +
[PAD] * (maxlen - self.mtx.left_sentence_len[i])
for i in range(start, end)]
batch.left_sentence = sents
batch.left_target_idx = self.mtx.left_target_idx[start:end]
# pad right sentences
maxlen = max(self.mtx.right_sentence_len[start:end])
sents = [self.mtx.right_sentence[i] +
[PAD] * (maxlen - self.mtx.right_sentence_len[i])
for i in range(start, end)]
batch.right_sentence = sents
batch.right_target_idx = self.mtx.right_target_idx[start:end]
batch.label = self.mtx.label[start:end]
yield batch
def decode(self, model_output):
for i, sample in enumerate(self.raw):
output = model_output[i].argmax().item()
sample.label = self.vocabs.label.inv_lookup(output)
def print_sample(self, sample, stream):
stream.write("{}\n".format("\t".join(map(str, (
" ".join(sample.left_sentence),
sample.left_target_word,
sample.left_target_idx,
" ".join(sample.right_sentence),
sample.right_target_word,
sample.right_target_idx,
sample.label)
))))
class UnlabeledELMOSentencePairDataset(ELMOSentencePairDataset):
pass
class BERTSentencePairDataset(ELMOSentencePairDataset):
unlabeled_data_class = 'UnlabeledBERTSentencePairDataset'
def __init__(self, config, stream_or_file, **kwargs):
model_name = getattr(config, 'bert_model', 'bert-base-multilingual-cased')
if 'bert_tokenizer' in globals():
self.tokenizer = globals()['bert_tokenizer']
else:
self.tokenizer = BertTokenizer.from_pretrained(
model_name, do_lower_case=False)
globals()['bert_tokenizer'] = self.tokenizer
super().__init__(config, stream_or_file, **kwargs)
def extract_sample_from_line(self, line):
fd = line.rstrip("\n").split("\t")
left_sen, left_first, left_last = self.parse_sentence(fd[:3])
right_sen, right_first, right_last = self.parse_sentence(fd[3:6])
if len(fd) > 6:
label = fd[6]
else:
label = None
return BERTSentencePairFields(
left_sentence=fd[0],
left_tokens=left_sen,
left_sentence_len=len(left_sen),
left_target_word=fd[1],
left_target_first=left_first,
left_target_last=left_last,
right_sentence=fd[3],
right_tokens=right_sen,
right_sentence_len=len(right_sen),
right_target_word=fd[4],
right_target_first=right_first,
right_target_last=right_last,
label=label,
)
def parse_sentence(self, fields):
sent, target, idx = fields
idx = int(idx)
bert_tokens = ['[CLS]']
for i, t in enumerate(sent.split(" ")):
bt = self.tokenizer.tokenize(t)
if i == idx:
first = len(bert_tokens)
last = len(bert_tokens) + len(bt) - 1
bert_tokens.extend(bt)
bert_tokens.append('[SEP]')
return bert_tokens, first, last
def to_idx(self):
self.mtx = BERTSentencePairFields.initialize_all(list)
for sample in self.raw:
# left fields
self.mtx.left_sentence_len.append(sample.left_sentence_len)
tok_idx = self.tokenizer.convert_tokens_to_ids(sample.left_tokens)
self.mtx.left_tokens.append(tok_idx)
self.mtx.left_target_first.append(sample.left_target_first)
self.mtx.left_target_last.append(sample.left_target_last)
# right fields
self.mtx.right_sentence_len.append(sample.right_sentence_len)
tok_idx = self.tokenizer.convert_tokens_to_ids(sample.right_tokens)
self.mtx.right_tokens.append(tok_idx)
self.mtx.right_target_first.append(sample.right_target_first)
self.mtx.right_target_last.append(sample.right_target_last)
# label if labeled
if sample.label is None:
self.mtx.label.append(None)
else:
self.mtx.label.append(self.vocabs.label[sample.label])
def __len__(self):
return len(self.raw)
def batched_iter(self, batch_size):
starts = list(range(0, len(self), batch_size))
if self.is_unlabeled is False and self.config.shuffle_batches:
np.random.shuffle(starts)
PAD = 0
for start in starts:
self._start = start
end = min(start + batch_size, len(self.raw))
batch = BERTSentencePairFields.initialize_all(list)
# pad left sentences
maxlen = max(self.mtx.left_sentence_len[start:end])
sents = [self.mtx.left_tokens[i] +
[PAD] * (maxlen - self.mtx.left_sentence_len[i])
for i in range(start, end)]
batch.left_tokens = sents
batch.left_sentence_len = self.mtx.left_sentence_len[start:end]
batch.left_target_first = self.mtx.left_target_first[start:end]
batch.left_target_last = self.mtx.left_target_last[start:end]
# pad right sentences
maxlen = max(self.mtx.right_sentence_len[start:end])
sents = [self.mtx.right_tokens[i] +
[PAD] * (maxlen - self.mtx.right_sentence_len[i])
for i in range(start, end)]
batch.right_tokens = sents
batch.right_sentence_len = self.mtx.right_sentence_len[start:end]
batch.right_target_first = self.mtx.right_target_first[start:end]
batch.right_target_last = self.mtx.right_target_last[start:end]
batch.label = self.mtx.label[start:end]
yield batch
def print_sample(self, sample, stream):
stream.write("{}\n".format("\t".join(map(str, (
sample.left_sentence,
sample.left_target_word,
sample.left_target_first,
sample.right_sentence,
sample.right_target_word,
sample.right_target_first,
sample.label)
))))
class UnlabeledBERTSentencePairDataset(BERTSentencePairDataset):
pass
class WordOnlySentenceProberDataset(BaseDataset):
data_recordclass = WordOnlyFields
unlabeled_data_class = 'UnlabeledWordOnlySentenceProberDataset'
constants = []
def load_or_create_vocabs(self):
vocab_pre = os.path.join(self.config.experiment_dir, 'vocab_')
needs_vocab = getattr(self.data_recordclass, '_needs_vocab',
self.data_recordclass._fields)
self.vocabs = self.data_recordclass()
for field in needs_vocab:
vocab_fn = getattr(self.config, 'vocab_{}'.format(field),
vocab_pre+field)
if field == 'label':
constants = []
else:
constants = ['SOS', 'EOS', 'PAD', 'UNK']
if os.path.exists(vocab_fn):
setattr(self.vocabs, field, Vocab(file=vocab_fn, frozen=True))
else:
setattr(self.vocabs, field, Vocab(constants=constants))
def extract_sample_from_line(self, line):
fd = line.rstrip("\n").split("\t")
if len(fd) > 3:
sent, target, idx, label = fd[:4]
else:
sent, target, idx = fd[:3]
label = None
idx = int(idx)
return WordOnlyFields(
sentence=sent,
target_word=target,
target_idx=idx,
target_word_len=len(target),
label=label,
)
def to_idx(self):
words = []
lens = []
labels = []
if self.config.use_global_padding:
maxlen = self.get_max_seqlen()
longer = sum(s.target_word_len > maxlen for s in self.raw)
if longer > 0:
logging.warning('{} elements longer than maxlen'.format(longer))
for sample in self.raw:
idx = list(self.vocabs.target_word[c] for c in sample.target_word)
if self.config.use_global_padding:
idx = idx[:maxlen-2]
idx = [self.vocabs.target_word.SOS] + \
idx + [self.vocabs.target_word.EOS]
idx = idx + [self.vocabs.target_word.PAD] * (maxlen - len(idx))
lens.append(maxlen)
else:
idx = [self.vocabs.target_word.SOS] + \
idx + [self.vocabs.target_word.EOS]
lens.append(len(idx))
words.append(idx)
labels.append(self.vocabs.label[sample.label])
self.mtx = WordOnlyFields(
target_word=words, target_word_len=lens, label=labels
)
def print_sample(self, sample, stream):
stream.write("{}\t{}\t{}\t{}\n".format(
sample.sentence, sample.target_word,
sample.target_idx, sample.label
))
def decode(self, model_output):
for i, sample in enumerate(self.raw):
output = model_output[i].argmax().item()
sample.label = self.vocabs.label.inv_lookup(output)
def __len__(self):
return len(self.raw)
def get_max_seqlen(self):
if hasattr(self.config, 'max_seqlen'):
return self.config.max_seqlen
return max(s.target_word_len for s in self.raw) + 2
class UnlabeledWordOnlySentenceProberDataset(WordOnlySentenceProberDataset):
def is_unlabeled(self):
return True
class BERTRandomTokenizer:
def __init__(self, tokenizer, keep_until=106, mix_initial_and_cont=False):
self.bert_tokenizer = tokenizer
start_rand = keep_until
bert_size = len(self.bert_tokenizer.vocab)
self.bert2rand = {}
if mix_initial_and_cont:
rand_range = np.arange(start_rand, bert_size)
np.random.shuffle(rand_range)
full_range = np.concatenate((np.arange(start_rand), rand_range))
for tok, idx in self.bert_tokenizer.vocab.items():
j = full_range[idx]
self.bert2rand[tok] = self.bert_tokenizer.ids_to_tokens[j]
else:
continuation = []
initial = []
for tok, idx in self.bert_tokenizer.vocab.items():
if idx < start_rand:
continue
if tok.startswith('##'):
continuation.append(tok)
else:
initial.append(tok)
crand = np.array(continuation)
np.random.shuffle(crand)
import time
import numpy as np
def _t(x):
return np.transpose(x)
def _m(A, B):
return np.matmul(A, B)
class Sigmoid:
def __init__(self):
self.last_o = 1
def __call__(self, x):
self.last_o = (1.0 / (1.0 + np.exp(-x)))
return self.last_o
def grad(self):
return self.last_o * (1 - self.last_o)
class MeanSquaredError:
def __init__(self):
self.dh = 1
self.last_diff = 1
def __call__(self, h, y):
self.last_diff = h - y
return 1 / 2 * np.mean(np.square(self.last_diff))
def grad(self):
return self.last_diff
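# Hedged usage sketch (added for illustration, not in the original script):
# one forward pass through the activation and loss defined above, on toy values.
_sig = Sigmoid()
_mse = MeanSquaredError()
_h = _sig(np.array([0.0, 2.0]))         # -> approx [0.5, 0.881]
_l = _mse(_h, np.array([0.0, 1.0]))     # 0.5 * mean((h - y)**2)
_dl = _mse.grad() * _sig.grad()         # chain rule: dL/dh * dh/dx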
class Dense:
def __init__(self, W, b, a_obj):
self.W = W
self.b = b
self.a = a_obj()
self.dW = np.zeros_like(self.W)
self.db = np.zeros_like(self.b)
from __future__ import print_function
from PIL import Image
import os
import os.path
import numpy as np
import sys
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
import torch.utils.data as data
def unpickle(file):
with open(file, 'rb') as fo:
dict = pickle.load(fo)
return dict
class IMAGENET32(data.Dataset):
def __init__(self, root, train=True,
transform=None, lbl_range = (0,1000), id_range=(1,11), debug=False):
self.root = root
self.transform = transform
self.train = train # training set or test set
self.lbl_range = lbl_range
self.id_range = id_range
self.data = []
self.targets = []
if self.train:
for idx in range(id_range[0], id_range[1]):
if lbl_range[1] == 1002:
x, y = unpickle(os.path.join(self.root, 'Imagenet32_train/train_batch_py2_') + str(idx))
else:
x, y = self.loaddata(os.path.join(self.root, 'Imagenet32_train/train_data_batch_') + str(idx))
if lbl_range[1] == 1001:
#dump data with protocol 2
with open(os.path.join(self.root, 'Imagenet32_train/train_batch_py2_') + str(idx), 'wb') as fo:
pickle.dump((x,y), fo, 2)
self.data.append(x)
self.targets.extend(y)
print ("loaded:", idx)
else:
x, y = self.loaddata(os.path.join(self.root, 'Imagenet32_val/val_data'))
self.data.append(x)
self.targets.extend(y)
self.data = np.vstack(self.data).reshape(-1, 3, 32, 32)
self.data = self.data.transpose((0, 2, 3, 1))
self.targets = [y - 1 for y in self.targets]
if lbl_range[0] > 0 or lbl_range[1]<1000:
_data = self.data
_targets = self.targets
self.data = []
self.targets = []
for i in range(_data.shape[0]):
if _targets[i] >= lbl_range[0] and _targets[i]<lbl_range[1]:
self.data.append(_data[i])
self.targets.append(_targets[i])
self.data = np.stack(self.data)
"""
The main module of nimbus that sets up the Bayesian formalism.
Classes:
Kilonova_Inference
"""
__author__ = '<NAME>'
import numpy as np
from scipy.stats import norm, truncnorm
from scipy.integrate import quad
from scipy.special import expit
from multiprocessing import Pool
from functools import partial
class Kilonova_Inference():
"""
Initializes utility functions for inference and defines the model.
Attributes
----------
lc_model_funcs : array-like
The array whose elements are band-specific functions that define the
light-curve evolution as a function of time.
nullevent_mlim_pdf : func
The function that evaluates the pdf for the observed upper limits when
the event is either not in the observed fields or is terrestrial.
Usage
-----
kne_inf = Kilonova_Inference(lc_model_funcs, nullevent_mlim_pdf)
"""
def __init__(self, lc_model_funcs, nullevent_mlim_pdf):
print("Initializing inference framework...")
self.lc_model_funcs = lc_model_funcs
self.nbands = len(lc_model_funcs)
self.nullevent_mlim_pdf = nullevent_mlim_pdf
def lc_model_powerlaw(self, M_0, gamma, t_0, t):
"""
Returns the absolute magnitude evolution as a power law.
Parameters
----------
M_0 : float
The peak absolute magnitude of the light curve.
gamma : float
Power law index for the light curve decay.
t_0 : float
Initial time of the event.
t : float or array
Array of observation times.
Returns
-------
M : float or array
Absolute magnitude light curve as a function of time (same shape as
t).
"""
return (M_0 * pow(t_0/t, gamma))
def lc_model_linear(self, M_0, alpha, t_0, t):
"""
Returns the absolute magnitude evolution as a linear decay/rise.
Parameters
----------
M_0 : float
The peak absolute magnitude of the light curve.
alpha : float
Linear decay/rise index for the light curve.
t_0 : float
Initial time of the event.
t : float or array
Array of observation times.
Returns
-------
M : float or array
Absolute magnitude light curve as a function of time (same shape as
t).
"""
return M_0 + alpha*(t-t_0)
def M_to_m(self, M, distance):
"""
Returns the apparent magnitude using a distance and absolute
magnitude.
Parameters
----------
M : float or array
Absolute magnitude of object.
distance : float or array
Distance of the object (must have same size as M).
Returns
-------
m : float or array
Apparent magnitude of the object (same size as M or distance).
"""
return (M + 5 * np.log10(distance * 1e6) - 5)
def dlim(self, mlim, M):
"""
Returns the limiting distance for a model with absolute magnitude M
and limiting magnitude mlim.
Parameters
----------
mlim : float or array
Limiting magnitude from observations.
M : float or array
Absolute magnitude from model (must have same shape as mlim).
Returns
-------
dlim : float or array (same shape as mlim)
Limiting distance for given parameters.
"""
return 10**((mlim - M)/5.) * 10 * 1e-6
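# Worked example (illustrative, not part of the original class): with
# M = -16 and distance = 100 Mpc, M_to_m gives -16 + 5*log10(1e8) - 5 = 19,
# and dlim(19, -16) = 10**(35/5) * 10 * 1e-6 = 100 Mpc, i.e. dlim recovers the
# distance at which a source of that absolute magnitude hits the limiting magnitude.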
def create_distance_dist(self, mu_f, sigma_f):
"""
Returns a truncated normal distribution as the distance distribution.
Parameters
----------
mu_f : float
Mean of the distance distribution.
sigma_f : float
Standard deviation of the distance distribution.
Returns
-------
distance_dist : scipy.stats.rv_continuous.pdf object
The probability density function of the truncated normal
distribution.
"""
#set min,max distances as 0 Mpc, 4000 Mpc
a = (0. - mu_f)/sigma_f
b = (4000. - mu_f)/sigma_f
return truncnorm(a, b, mu_f, sigma_f)
def calc_expit_argument(self,d_lim,maglim_err=0.1):
"""
Returns a logistic/expit function that accounts for errors in the
measurement of limiting magnitudes.
Parameters
----------
d_lim : float
Limiting distance corresponding to the observed limiting
magnitude.
maglim_err : float
Error in the limiting magnitude measurement (default=0.1 mag).
Returns
-------
expit_func : func
Logitic function based on errors in the limiting magnitude.
"""
if maglim_err==0.:
maglim_err = 0.1
dlow = d_lim*10**-(3*maglim_err/5) # set dlow at 3-sigma
dmid = d_lim*10**-(maglim_err/5) # set dmid at 1-sigma
a = np.log(0.021/0.979)/(dlow - dmid)
b = -1.0*dmid
return lambda x : expit(a*(x + b))
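# Example behaviour (illustrative numbers): for d_lim = 100 Mpc and
# maglim_err = 0.1 the returned function is ~0.02 at ~87 Mpc (3-sigma inside
# the limit), 0.5 at ~95.5 Mpc, and approaches 1 beyond d_lim, i.e. a smoothed
# step in distance that replaces a hard detection cut-off.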
def calc_likelihood_integral(self, M, expit_func, dist_samples,
mlow, mhigh):
"""
Returns the single observation likelihood integral evaluated using
posterior samples drawn from the distance distribution.
"""
dist_samples_survey = dist_samples[(dist_samples>self.dlim(mlow,M))
&(dist_samples<=self.dlim(mhigh,M))]
dist_samples_high = dist_samples[dist_samples>self.dlim(mhigh,M)]
N_samples_survey = len(dist_samples_survey)
N_samples_high = len(dist_samples_high)
N_total = N_samples_survey + N_samples_high
if (N_samples_survey==0)&(N_samples_high!=0):
return 1./(mhigh-mlow)
elif (N_samples_survey!=0)&(N_samples_high==0):
return np.sum((1./(
np.vectorize(
self.M_to_m)(M, dist_samples_survey) -mlow))*\
np.vectorize(expit_func)
import numpy as np
import numpy.random as npr
import time#, timer
from . import gelmanrubin as gr
#reload(gr)
#import python_models as mc
#import models_c as mc
import multiprocessing as mp
def calcModel(nchains, functype, myfuncs, pedit, nextp, iortholist, funcx, cummodels, numparams, j, iblock=None, chains=None):
'''
Compute model light curve by combining model components. Also returns correlated noise parameters.
'''
#Build final model from model components
ymodels = np.ones((nchains, fit[j].nobj))
noisepars = [[] for i in range(nchains)]
k = 0
if chains is None:
chains = range(nchains)
if iblock is None:
iblock = range(cummodels[j],cummodels[j+1])
for i in range(cummodels[j],cummodels[j+1]):
if iblock.__contains__(i):
for n in chains:
if functype[i] == 'ortho':
#MODIFY COPY OF nextp ONLY
pedit[n,iortholist] = myfuncs[i](pedit[n,iortholist], funcx[i], fit[j].etc[k])
elif (functype[i] == 'ipmap') or (functype[i] == 'spline'):
ymodels[n] *= myfuncs[i](pedit[n,numparams[i]:numparams[i+1]], funcx[i], ymodels[n])
elif functype[i] == 'posoffset':
# Record change in Position 0 => cannot orthogonalize position parameters
ymodels[n] *= myfuncs[i](nextp[n,numparams[i]:numparams[i+1]], funcx[i], fit[j].etc[k])
elif hasattr(fit[j], 'timebins') and (functype[i] == 'ecl/tr'
or functype[i] == 'ramp'
or functype[i] == 'sinusoidal'):
# Average over high-resolution model
hiresmodel = myfuncs[i](pedit[n,numparams[i]:numparams[i+1]], funcx[i], fit[j].etc[k])
if len(fit[j].timebins) == fit[j].nobj:
for tb in range(len(fit[j].timebins)):
ymodels[n,tb] *= np.mean(hiresmodel[fit[j].timebins[tb]])
else:
for tb in range(len(fit[j].timebinsuc)):
ymodels[n,tb] *= np.mean(hiresmodel[fit[j].timebinsuc[tb]])
elif functype[i] == 'noise':
noisepars[n] = pedit[n,numparams[i]:numparams[i+1]]
else:
ymodels[n] *= myfuncs[i](pedit[n,numparams[i]:numparams[i+1]], funcx[i], fit[j].etc[k])
k += 1
return ymodels, noisepars
# Calculate chi^2
def calcChisq(y, sigma, ymodels, nchains, nextp, j, noisepars, isrednoise, wavelet, noisefunc, chains=None):
'''
Compute chi-squared with priors.
'''
if chains is None:
chains = range(nchains)
chi2 = np.zeros(nchains)
for n in chains:
if isrednoise == False:
#chi2[n] = mc.chisq(ymodels[n], y, sigma)
chi2[n] += np.sum((ymodels[n] - y)**2 / sigma**2)
else:
chi2[n] = noisefunc(noisepars[n], ymodels[n]-y, wavelet)
# Apply prior, if one exists
if len(fit[j].ipriors) > 0:
pbar = fit[j].priorvals[:,0] #prior mean
psigma = np.zeros(len(pbar)) #prior standard deviation
# Determine psigma based on which side of asymmetric Gaussian nextp is on
for i in range(len(fit[j].ipriors)):
if nextp[n,fit[j].ipriors[i]] < pbar[i]:
psigma[i] = fit[j].priorvals[i,1]
else:
psigma[i] = fit[j].priorvals[i,2]
#chi2[n] += fit[j].nobj*((nextp[n,fit[j].ipriors[i]] - pbar[i])/psigma[i])**2
chi2[n] += ((nextp[n,fit[j].ipriors[i]] - pbar[i])/psigma[i])**2
return chi2
def demc_block(y, pars, pmin, pmax, stepsize, numit, sigma, numparams, cummodels, functype, myfuncs, funcx, iortholist, fits, gamma=None, isGR=True, ncpu=1):
"""
This function uses a differential evolution Markov chain with block updating to assess uncertainties.
PARAMETERS
----------
y: Array containing dependent data
Params: Array of initial guess for parameters
#Pmin: Array of parameter minimum values
#Pmax: Array of parameter maximum values
stepsize: Array of 1-sigma change in parameter per iteration
Numit: Number of iterations to perform
Sigma: Standard deviation of data noise in y
Numparams: Number of parameters for each model
Cummodels: Cumulative number of models used
Functype: Define function type (eclipse, ramp, ip, etc), see models.py
Myfuncs: Pointers to model functions
Funcx: Array of x-axis values for myfuncs
fit: List of fit objects
gamma: Multiplication factor in parameter differential, establishes acceptance rate
OUTPUTS
-------
This function returns an array of the best fitting parameters,
an array of all parameters over all iterations, and numaccept.
REFERENCES
----------
<NAME>. <NAME>, "Genetic algorithms and Markov Chain Monte Carlo: Differential Evolution Markov Chain makes Bayesian computing easy," Biometrics, 2006.
HISTORY
-------
Adapted from mcmc.py
<NAME>, UChicago August 2012
"""
global fit
fit = fits
params = np.copy(pars)
nchains, nump = params.shape
nextp = np.copy(params) #Proposed parameters
bestp = np.copy(params[0]) #Best-fit parameters
pedit = np.copy(params) #Editable parameters
numaccept = 0
allparams = np.zeros((nump, nchains, numit))
inotfixed = np.where(stepsize != 0)[0]
ishare = np.where(stepsize < 0)[0]
#ifree = np.where(stepsize > 0)[0]
outside = np.zeros((nchains, nump))
numevents = len(fit)
intsteps = np.min((numit/5,1e5))
isrednoise = False
wavelet = None
noisefunc = None
#UPDATE PARAMTER(S) EQUAL TO OTHER PARAMETER(S)
if (ishare.size > 0):
for s in range(ishare.size):
params[:,ishare[s]] = params[:,int(abs(stepsize[ishare[s]])-1)]
#Define blocks
blocks = []
for j in range(numevents):
#Build list of blocks
blocks = np.concatenate((blocks, fit[j].blocks))
for i in range(cummodels[j],cummodels[j+1]):
if functype[i] == 'noise':
# Set up for modified chi-squared calculation using correlated noise
isrednoise = True
wavelet = fit[j].etc[k]
noisefunc = myfuncs[i]
blocks = blocks.astype(int)
iblocks = []
eps = []
numblocks = blocks.max() + 1
numbp = np.zeros(numblocks, dtype=int)  # integer counts, used below as an array shape
ifree = [[] for i in range(numblocks)]
for b in range(numblocks):
#Map block indices
whereb = np.where(blocks == b)[0]
iblocks.append(whereb)
#Locate indices of free parameters in each block
for w in whereb:
ifree[b] = np.concatenate((ifree[b],numparams[w]+np.where(stepsize[numparams[w]:numparams[w+1]] > 0)[0])).astype(int)
#Calculate number of free parameters per block
numbp[b] += len(ifree[b])
eps.append(npr.normal(0, stepsize[ifree[b]]/100., [numit,numbp[b]]))
print("Number of free parameters per block:")
print(numbp)
numa = np.zeros(numblocks)
if gamma is None:
gamma = 2.38/np.sqrt(2.*numbp)
print("gamma:")
print(gamma)
#Calc chi-squared for model type using current params
currchisq = np.zeros(nchains)
currmodel = [[] for i in range(numevents)]
for j in range(numevents):
currmodel[j], noisepars = calcModel(nchains, functype, myfuncs, pedit, params, iortholist[j],
funcx, cummodels, numparams, j)
currchisq += calcChisq(y[j], sigma[j], currmodel[j], nchains, params, j, noisepars, isrednoise, wavelet, noisefunc)
bestchisq = currchisq[0]
#GENERATE RANDOM NUMBERS FOR MCMC
numnotfixed = len(inotfixed)
unif = npr.rand(numit,nchains)
randchains = npr.randint(0,nchains,[numit,nchains,2])
#START TIMER
clock = timer.Timer(numit,progress = np.arange(0.05,1.01,0.05))
#Run Differential Evolution Monte Carlo algorithm 'numit' times
for m in range(numit):
#Select next event (block) to update
b = m % numblocks
#Remove model component(s) that are taking a step
pedit = np.copy(params)
"""
Created on Mon Aug 25 13:17:03 2014
@author: anthony
"""
import time
from multiprocessing import Pool
import matplotlib.pyplot as plt
import numpy as np
import scipy.interpolate as interp
from .cp_tools import cp_loglikelihood
from .cp_tools import cp_loglikelihood_proj
from .cp_tools import cp_model
from .cp_tools import mas2rad
from .cp_tools import project_cps
from .cp_tools import rad2mas
def phase_binary_flux(u, v, wavel, p, return_cvis=False):
"""Calculate the phases observed by an array on a binary star
----------------------------------------------------------------
p: 3-component vector (+2 optional), the binary "parameters":
- p[0] = sep (mas)
- p[1] = PA (deg) E of N.
- p[2] = flux (primary is assumed to be 1)
optional:
- p[2:] = contrast ratio for several wavelengths that we want
to calculate the cps over
- u,v: baseline coordinates (meters)
- wavel: wavelength (meters)
----------------------------------------------------------------"""
p = np.array(p)
# relative locations
th = (p[1] + 90.0) * np.pi / 180.0
ddec = mas2rad(p[0] * np.sin(th))
dra = -mas2rad(p[0] * np.cos(th))
# decompose into two "luminosities"
# but first, a little trick so this works whether
# p is a single value or a list of contrasts
spec = p[2:]
if len(spec) == 1:
spec = spec[0]
l2 = spec
l1 = 1 - l2
# phase-factor
output_shape = list(u.shape)
output_shape[-1] = np.size(wavel)
phi = np.zeros(output_shape, dtype=complex)
phi.real = np.cos(-2 * np.pi * (u * dra + v * ddec) / wavel)
phi.imag = np.sin(-2 * np.pi * (u * dra + v * ddec) / wavel)
cvis = l1 + l2 * phi
phase = np.angle(cvis, deg=True)
if return_cvis:
return cvis
else:
return np.mod(phase + 10980.0, 360.0) - 180.0
# =========================================================================
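# Hedged usage sketch (values made up for illustration): visibility phases of a
# 40 mas binary at PA 30 deg with a 10:1 flux ratio, on two baselines sampled
# at three wavelengths.
u_ex = np.tile(np.array([[30.0], [-12.5]]), (1, 3))    # metres
v_ex = np.tile(np.array([[45.0], [60.0]]), (1, 3))
wav_ex = np.array([1.55e-6, 1.60e-6, 1.65e-6])          # metres
phase_ex = phase_binary_flux(u_ex, v_ex, wav_ex, [40.0, 30.0, 0.1])  # degrees in (-180, 180]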
def cp_model_flux(params, u, v, wavels, model="constant"):
"""Function to model closure phases. Takes a parameter list, u,v triangles and range of wavelengths.
Allows fitting of a model to contrast vs wavelength.
Models for contrast ratio:
constant (contrast is constant with wavelength, default)
linear (params[2,3]=contrast ratios at end wavelengths),
free (params[2:]=contrast ratios).
ndof (the wavelength channels are evenly spaced cubic interpolations in params[2:])
polynomial (of the form Sum[n] params[n+2]*(wavelength*1e6)**n )
NOTE: This doesn't allow for nonzero size of each component!"""
nwav = wavels.size
if model == "constant":
cons = np.repeat(params[2], nwav)
#!/usr/bin/env python
####################################################################
### This is the PYTHON version of program 5.2 from page 171 of #
### "Modeling Infectious Disease in humans and animals" #
### by Keeling & Rohani. #
### #
### It is the simple SIR epidemic with corrected term-time #
### forcing of the transmission rate. #
### Note: setting beta1 too high can cause numerical difficulties. #
####################################################################
###################################
### Written by <NAME> #
### <EMAIL> (work) #
### <EMAIL> #
###################################
import scipy.integrate as spi
import numpy as np
import pylab as pl
Beta0 = 17 / 13.0
Beta1 = [0.25]
gamma = 1 / 13.0
mu = 1 / (50 * 365.0)
S0 = 1 / 17.0
I0 = 1e-4
Years = 10
MaxTime = 365.0
TS = 1.0
def term(t):
t = np.mod(t, 365)
if t < 6 or (100 < t < 115) or (200 < t < 251) or (300 < t < 307) or (356 < t <= 365):
Term = -1
else:
Term = 1
return Term
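# e.g. term(50) -> 1 (school in session) while term(3) or term(105) -> -1
# (holiday periods); FORCED_ODE below integrates with diff_eqs over the holiday
# windows and with diff_eqs2 during term time, so the transmission rate is
# lowered in holidays and raised in term time.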
### This code can also be used to generate bifurcation diagrams, by setting
### beta1 equal to a vector of seasonality rates. The bifurcation diagram is
### constructed using extrapolated initial conditions. Try:
# (Beta0,Beta1,gamma,mu,S0,I0,Years, MaxTime)=(17/13.,np.arange(0.00,0.301,0.001),1/13.,1./(50*365),1/17.,1e-4,20,365.)
ND = Years * MaxTime
INPUT = np.array((S0, I0, 1 - S0 - I0))
def diff_eqs(INP, t):
"""The main set of equations"""
Y = np.zeros((3))
V = INP
beta = beta0 - beta1
Y[0] = mu - beta * V[0] * V[1] - mu * V[0]
Y[1] = beta * V[0] * V[1] - mu * V[1] - gamma * V[1]
Y[2] = gamma * V[1] - mu * V[2]
return Y # For odeint
def diff_eqs2(INP, t):
"""The main set of equations"""
Y = np.zeros((3))
V = INP
beta = beta0 + beta1
Y[0] = mu - beta * V[0] * V[1] - mu * V[0]
Y[1] = beta * V[0] * V[1] - mu * V[1] - gamma * V[1]
Y[2] = gamma * V[1] - mu * V[2]
return Y # For odeint
def FORCED_ODE(diff_eqs, INPUT, beta0, beta1):
"""Calculates the differential rates used in the integration."""
RES = np.zeros((3))
for Year in range(Years):
t_start = Year * 365.0 + 1
t_end = Year * 365.0 + 6.0
t_inc = TS
t_range = np.arange(t_start, t_end + t_inc, t_inc)
RES2 = spi.odeint(diff_eqs, INPUT, t_range)
RES = np.vstack((RES, RES2))
INPUT = RES2[-1]
t_start = Year * 365.0 + 7
t_end = Year * 365.0 + 100
t_inc = TS
t_range = np.arange(t_start, t_end + t_inc, t_inc)
RES2 = spi.odeint(diff_eqs2, INPUT, t_range)
RES = np.vstack((RES, RES2))
INPUT = RES2[-1]
t_start = Year * 365.0 + 101
t_end = Year * 365.0 + 115
t_inc = TS
t_range = np.arange(t_start, t_end + t_inc, t_inc)
RES2 = spi.odeint(diff_eqs, INPUT, t_range)
RES = np.vstack((RES, RES2))
INPUT = RES2[-1]
t_start = Year * 365.0 + 116
t_end = Year * 365.0 + 200
t_inc = TS
t_range = np.arange(t_start, t_end + t_inc, t_inc)
RES2 = spi.odeint(diff_eqs2, INPUT, t_range)
RES = np.vstack((RES, RES2))
INPUT = RES2[-1]
t_start = Year * 365.0 + 201
t_end = Year * 365.0 + 251
t_inc = TS
t_range = np.arange(t_start, t_end + t_inc, t_inc)
RES2 = spi.odeint(diff_eqs, INPUT, t_range)
RES = np.vstack((RES, RES2))
INPUT = RES2[-1]
t_start = Year * 365.0 + 252
t_end = Year * 365.0 + 300
t_inc = TS
t_range = np.arange(t_start, t_end + t_inc, t_inc)
RES2 = spi.odeint(diff_eqs2, INPUT, t_range)
RES = np.vstack((RES, RES2))
INPUT = RES2[-1]
t_start = Year * 365.0 + 301
t_end = Year * 365.0 + 307
t_inc = TS
t_range = np.arange(t_start, t_end + t_inc, t_inc)
RES2 = spi.odeint(diff_eqs, INPUT, t_range)
RES = np.vstack((RES, RES2))
INPUT = RES2[-1]
t_start = Year * 365.0 + 308
t_end = Year * 365.0 + 356
t_inc = TS
t_range = np.arange(t_start, t_end + t_inc, t_inc)
RES2 = spi.odeint(diff_eqs2, INPUT, t_range)
RES = np.vstack((RES, RES2))
INPUT = RES2[-1]
t_start = Year * 365.0 + 357
t_end = Year * 365.0 + 365
t_inc = TS
t_range = np.arange(t_start, t_end + t_inc, t_inc)
RES2 = spi.odeint(diff_eqs, INPUT, t_range)
RES = np.vstack((RES, RES2))
import numpy as np
def car2sph(vec, axis = 0):
"""transform a cartesian vector into spherical coordinates"""
result = np.zeros(vec.shape)
result[0,:] = np.linalg.norm(vec, axis = axis) # r component
#result[1,:] = np.arctan(vec[1,:] / vec[0,:]) # phi component
result[1,:] = np.arctan2(vec[1,:] , vec[0,:]) # phi component
result[2,:] = np.arccos(vec[2,:] / result[0,:]) # theta component
return result
def project2observer(vec, unitvec2obs, axis = 0):
"""transforms vector to observer's coordinate system
Parameter
---------
vec: `~numpy.ndarray`
Vector to be transformed, usually, this should be the momemtum vector
at the time of detection or momentum vector at origin.
unitvec2obs: `~numpy.ndarray`
Unit vector between point where event hit the sphere (observer's position)
and the point where event originated (the source).
In CRPropa notation, this would be (X - X0) / || X - X0 ||
{options}
axis: int
axis of vector components
default: 0
Returns
-------
`~numpy.ndarray` with vec projected onto new coordinate system
of observer, where z-axis points along (X - X0)
"""
r = np.linalg.norm(unitvec2obs, axis = axis)
if axis == 0:
rho = np.sqrt(unitvec2obs[0,:]*unitvec2obs[0,:] + \
unitvec2obs[1,:]*unitvec2obs[1,:])
cosphi = unitvec2obs[0,:] / rho
sinphi = unitvec2obs[1,:] / rho
costheta = unitvec2obs[2,:] / r
elif axis == 1:
rho = np.sqrt(unitvec2obs[:,0]*unitvec2obs[:,0] + \
unitvec2obs[:,1]*unitvec2obs[:,1])
cosphi = unitvec2obs[:,0] / rho
sinphi = unitvec2obs[:,1] / rho
costheta = unitvec2obs[:,2] / r
sintheta = rho / r
e1 = np.vstack([-sinphi, cosphi, np.zeros_like(sinphi)])
e2 = np.vstack([cosphi * costheta, sinphi * costheta, -sintheta])
e3 = -unitvec2obs
result = np.vstack([np.sum(vec * e1, axis = 0),
np.sum(vec * e2, axis = 0),
np.sum(vec * e3, axis = 0)])
return result
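# Hedged usage sketch (toy values): one event whose momentum points along +z in
# the simulation frame, observed along the +x direction.
vec_ex = np.array([[0.0], [0.0], [1.0]])
los_ex = np.array([[1.0], [0.0], [0.0]])    # (X - X0) / ||X - X0||
proj_ex = project2observer(vec_ex, los_ex)  # -> approx [[0.], [-1.], [0.]]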
def projectjetaxis(vec, jet_opening_angle = 5.,
jet_theta_angle = 5.,
jet_phi_angle = 90.):
"""
Project initial momentum vectors on jet axis
and select only momentum vectors that fall within cone
Parameters
----------
vec: `~numpy.ndarray`
Vector of initial momenta
jet_opening_angle: float
full jet opening angle (aperture) in degrees
jet_theta_angle: float
theta angle of jet axis, in degrees,
this is the angle to the l.o.s. to the observer
jet_phi_angle: float
phi angle of jet axis, in degrees
Returns
-------
array with mask for initial momentum vectors
"""
phi = np.radians(jet_phi_angle)
theta = np.radians(jet_theta_angle)
# jet vector in observers frame
vecjet = np.vstack([np.ones(vec.shape[1]) * np.cos(phi) *np.sin(theta),
np.ones(vec.shape[1]) * np.sin(phi) *np.sin(theta),
np.ones(vec.shape[1]) * np.cos(theta)])
# angle between jet axis and initial momentum
cosangle = np.sum(vecjet * -vec, axis = 0)
# restrict to those photons inside cone
# cos(alpha) >= cos(theta_jet / 2.) is equal to alpha <= theta_jet / 2.
return cosangle >= np.cos(np.radians(jet_opening_angle/2.))
# DEPRECATED FUNCTIONS:
def setRz(phi):
"""Rotation matrix around z axis"""
Rz = np.zeros((phi.size,3,3))
Rz[:,2,2] = 1.
Rz[:,0,0] = np.cos(phi)
Rz[:,1,1] = Rz[:,0,0]
Rz[:,0,1] = -np.sin(phi)
Rz[:,1,0] = -Rz[:,0,1]
return Rz
def setRy(phi):
"""Rotation matrix around y axis"""
Ry = np.zeros((phi.size,3,3))
Ry[:,1,1] = 1.
Ry[:,0,0] = np.cos(phi)
Ry[:,2,2] = Ry[:,0,0]
Ry[:,0,2] = -np.sin(phi)
#!/usr/bin/env python2
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Perform inference on a single image or all images with a certain extension
(e.g., .jpg) in a folder.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
import argparse
import cv2 # NOQA (Must import before importing caffe2 due to bug in cv2)
import glob
import logging
import os
import sys
import numpy as np
import base64
import csv
import timeit
import json
from detectron.utils.io import cache_url
import detectron.utils.c2 as c2_utils
c2_utils.import_detectron_ops()
# OpenCL may be enabled by default in OpenCV3; disable it because it's not
# thread safe and causes unwanted GPU memory allocations.
cv2.ocl.setUseOpenCL(False)
from caffe2.python import workspace
import caffe2
from detectron.core.config import assert_and_infer_cfg
from detectron.core.config import cfg
from detectron.core.config import merge_cfg_from_file
from detectron.utils.timer import Timer
import detectron.core.test_engine as model_engine
import detectron.core.test as infer_engine
import detectron.datasets.dummy_datasets as dummy_datasets
import detectron.utils.c2 as c2_utils
import detectron.utils.logging
import detectron.utils.vis as vis_utils
from detectron.utils.boxes import nms
c2_utils.import_detectron_ops()
# OpenCL may be enabled by default in OpenCV3; disable it because it's not
# thread safe and causes unwanted GPU memory allocations.
cv2.ocl.setUseOpenCL(False)
csv.field_size_limit(sys.maxsize)
BOTTOM_UP_FIELDNAMES = ['image_id', 'image_w', 'image_h',
'num_boxes', 'boxes', 'features']
FIELDNAMES = ['image_id', 'image_w', 'image_h', 'num_boxes',
'boxes', 'features', 'object']
def parse_args():
parser = argparse.ArgumentParser(description='End-to-end inference')
parser.add_argument(
'--cfg',
dest='cfg',
help='cfg model file (/path/to/model_config.yaml)',
default=None,
type=str
)
parser.add_argument(
'--wts',
dest='weights',
help='weights model file (/path/to/model_weights.pkl)',
default=None,
type=str
)
parser.add_argument(
'--output_dir',
dest='output_dir',
help='output dir name',
required=True,
type=str
)
parser.add_argument(
'--image-ext',
dest='image_ext',
help='image file name extension (default: jpg)',
default='jpg',
type=str
)
parser.add_argument(
'--bbox_file',
help="csv file from bottom-up attention model",
default=None
)
parser.add_argument(
'--total_group',
help="the number of group for exracting",
type=int,
default=1
)
parser.add_argument(
'--group_id',
help=" group id for current analysis, used to shard",
type=int,
default=0
)
parser.add_argument(
'--min_bboxes',
help=" min number of bboxes",
type=int,
default=10
)
parser.add_argument(
'--max_bboxes',
help=" min number of bboxes",
type=int,
default=100
)
parser.add_argument(
'--feat_name',
help=" the name of the feature to extract, default: gpu_0/fc7",
type=str,
default="gpu_0/fc7"
)
parser.add_argument(
'im_or_folder', help='image or folder of images', default=None
)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def get_detections_from_im(cfg, model, im, image_id, feat_blob_name,
MIN_BOXES, MAX_BOXES, conf_thresh=0.2, bboxes=None):
with c2_utils.NamedCudaScope(0):
scores, cls_boxes, im_scale = infer_engine.im_detect_bbox(model,
im,
cfg.TEST.SCALE,
cfg.TEST.MAX_SIZE,
boxes=bboxes)
box_features = workspace.FetchBlob(feat_blob_name)
cls_prob = workspace.FetchBlob("gpu_0/cls_prob")
rois = workspace.FetchBlob("gpu_0/rois")
max_conf = np.zeros((rois.shape[0]))
# unscale back to raw image space
cls_boxes = rois[:, 1:5] / im_scale
for cls_ind in range(1, cls_prob.shape[1]):
cls_scores = scores[:, cls_ind]
dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
keep = np.array(nms(dets, cfg.TEST.NMS))
max_conf[keep] = np.where(cls_scores[keep] > max_conf[keep], cls_scores[keep], max_conf[keep])
keep_boxes = np.where(max_conf >= conf_thresh)[0]
if len(keep_boxes) < MIN_BOXES:
keep_boxes = np.argsort(max_conf)[::-1][:MIN_BOXES]
elif len(keep_boxes) > MAX_BOXES:
keep_boxes = np.argsort(max_conf)[::-1][:MAX_BOXES]
objects = np.argmax(cls_prob[keep_boxes], axis=1)
return box_features[keep_boxes]
#return {
# "image_id": image_id,
# "image_h": np.size(im, 0),
# "image_w": np.size(im, 1),
# 'num_boxes': len(keep_boxes),
# 'boxes': base64.b64encode(cls_boxes[keep_boxes]),
# 'features': base64.b64encode(box_features[keep_boxes]),
# 'object': base64.b64encode(objects)
#}
def extract_bboxes(bottom_up_csv_file):
image_bboxes = {}
with open(bottom_up_csv_file, "r") as tsv_in_file:
reader = csv.DictReader(tsv_in_file, delimiter='\t',
fieldnames=BOTTOM_UP_FIELDNAMES)
for item in reader:
item['num_boxes'] = int(item['num_boxes'])
image_id = int(item['image_id'])
image_w = float(item['image_w'])
image_h = float(item['image_h'])
bbox = np.frombuffer(
base64.b64decode(item['boxes']),
dtype=np.float32).reshape((item['num_boxes'], -1))
image_bboxes[image_id] = bbox
return image_bboxes
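# Hedged sketch (illustrative only): the tsv rows decoded above store boxes and
# features as base64-encoded float32 buffers; the round trip mirrors the
# np.frombuffer call in extract_bboxes.
_toy_boxes = np.arange(8, dtype=np.float32).reshape(2, 4)
_encoded = base64.b64encode(_toy_boxes.tobytes())
_decoded = np.frombuffer(base64.b64decode(_encoded), dtype=np.float32).reshape(2, -1)
# np.allclose(_toy_boxes, _decoded) -> True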
def main(args):
logger = logging.getLogger(__name__)
merge_cfg_from_file(args.cfg)
cfg.NUM_GPUS = 1
args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
assert_and_infer_cfg(cache_urls=False)
model = model_engine.initialize_model_from_cfg(args.weights)
start = timeit.default_timer()
if os.path.isdir(args.im_or_folder):
im_list = glob.iglob(args.im_or_folder + '/*.' + args.image_ext)
else:
im_list = [args.im_or_folder]
# extract bboxes from bottom-up attention model
image_bboxes={}
if args.bbox_file is not None:
image_bboxes = extract_bboxes(args.bbox_file)
count = 0
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
for i, im_name in enumerate(im_list):
im_base_name = os.path.basename(im_name)
image_id = int(im_base_name.split(".")[0].split("_")[-1]) # for COCO
if image_id % args.total_group == args.group_id:
bbox = image_bboxes[image_id] if image_id in image_bboxes else None
im = cv2.imread(im_name)
if im is not None:
outfile = os.path.join(args.output_dir,
im_base_name.replace('jpg', 'npy'))
lock_folder = outfile.replace('npy', 'lock')
if not os.path.exists(lock_folder) and os.path.exists(outfile):
continue
if not os.path.exists(lock_folder):
os.makedirs(lock_folder)
result = get_detections_from_im(cfg, model, im,
image_id,args.feat_name,
args.min_bboxes,
args.max_bboxes,
bboxes=bbox)
np.save(outfile, result)
# Functions for converting ADCP velocities in beam coordinates to instrument- or Earth-coordinates.
# Direct translation of functions in the 'ADCPtools' MATLAB
# package (https://github.com/apaloczy/ADCPtools).
import numpy as np
from scipy.interpolate import interp1d
from .utils import sind, cosd, near, nearfl
######################
#### 4-beam Janus ####
######################
def janus2xyz(b1, b2, b3, b4, theta, r=None, ptch=None, roll=None, binmaptype=None, use3beamsol=True, verbose=True):
"""
USAGE
-----
vx, vy, vz = janus2xyz(b1, b2, b3, b4, theta, r=None, ptch=None, roll=None, binmaptype=None, use3beamsol=True, verbose=True)
theta, ptch, roll must be in RADIANS.
"""
Nz, Nt = b1.shape
if binmaptype is not None:
assert r is not None, "Must provide r if using bin-mapping."
assert ptch is not None, "Must provide pitch if using bin-mapping."
assert roll is not None, "Must provide roll if using bin-mapping."
if verbose:
print('Mapping bins to horizontal planes using *%s* interpolation.'%binmaptype)
b1, b2, b3, b4 = binmap(b1, b2, b3, b4, r, theta, ptch, roll, how=binmaptype)
else:
if verbose:
print('Bin-mapping NOT applied.')
if use3beamsol:
b1, b2, b3, b4 = janus3beamsol(b1, b2, b3, b4)
b1, b2 = b1[..., np.newaxis], b2[..., np.newaxis]
b3, b4 = b3[..., np.newaxis], b4[..., np.newaxis]
B = np.dstack((b1, b2, b3, b4))
uvfac = 1/(2*np.sin(theta))
wfac = 1/(4*np.cos(theta)) # For w derived from beams 1-4.
# 3rd row: w from the average of the 4 Janus beams.
# b1 b2 b3 b4
A = np.array([[-1, 1, 0, 0],
[ 0, 0, -1, 1],
[-1, -1, -1, -1]])
vxyz = np.empty((Nz, Nt, 3))*np.nan
for nz in range(Nz):
if verbose:
print('Calculating Vx, Vy, Vz at bin ', nz+1, '/', Nz)
for nt in range(Nt):
vxyz[nz, nt, :] = np.matmul(A, B[nz, nt, :].T)
Vx = vxyz[:, :, 0]*uvfac
Vy = vxyz[:, :, 1]*uvfac
Vz = vxyz[:, :, 2]*wfac
return Vx, Vy, Vz
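# Hedged usage sketch (synthetic numbers, not part of the original module): if
# all four along-beam velocities equal b0, the horizontal components vanish and
# Vz = -b0/cos(theta).
theta_ex = np.deg2rad(25.0)
b_ex = -0.05*np.ones((4, 3))   # 4 bins x 3 records of along-beam velocity (m/s)
Vx_ex, Vy_ex, Vz_ex = janus2xyz(b_ex, b_ex, b_ex, b_ex, theta_ex, use3beamsol=False, verbose=False)
# Vx_ex, Vy_ex ~ 0 everywhere; Vz_ex ~ 0.05/np.cos(theta_ex)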
def janus2earth(head, ptch, roll, theta, b1, b2, b3, b4, r=None, gimbaled=True, binmaptype=None, use3beamsol=True, verbose=True):
"""
USAGE
-----
[u, v, w] = janus2earth(head, ptch, roll, theta, b1, b2, b3, b4, r=None, gimbaled=True, binmaptype=None, use3beamsol=True, verbose=True)
Calculates Earth velocities (u,v,w) = (east,north,up) from beam-referenced velocity time series
from a 4-beam Janus ADCP, (e.g., Appendix A of Dewey & Stringer (2007), Equations A3-A11).
nz, nt, nb = number of vertical bins, data records, beams.
============================================================================
For TRDI instruments, call function like this:
u, v, w = janus2earth(head, ptch, roll, theta, b1, b2, b3, b4)
For Nortek instruments, call function like this:
u, v, w = janus2earth(head-90, roll, -ptch, theta, -b1, -b3, -b4, -b2)
============================================================================
TRDI CONVENTION:
================
* Velocity toward transducers' faces: POSITIVE
* Clockwise PITCH (tilt about x-AXIS): POSITIVE (beam 3 higher than beam 4)
* Clockwise ROLL (tilt about y-AXIS): POSITIVE (beam 2 higher than beam 1)
* Heading increases CLOCKWISE from the *Y-AXIS*.
^ positive y axis, psi = 0
|
3
|
|
|
2 --- O --- 1 ---> positive x axis, psi = +90
|
|
|
4
NORTEK CONVENTION:
==================
* Velocity toward transducers' faces: NEGATIVE
* Counter-clockwise PITCH (tilt about y-AXIS, equivalent to -ROLL in the TRDI convention): POSITIVE (beam 1 higher than beam 3)
* Clockwise ROLL (tilt about x-AXIS, equivalent to PITCH in the TRDI convention): POSITIVE (beam 4 higher than beam 2)
Heading increases CLOCKWISE from the *X-AXIS*.
^ positive y axis, psi = -90
|
4
|
|
|
3 --- O --- 1 ---> positive x axis, psi = 0
|
|
|
2
INPUTS
------
b1, b2, b3, b4 [nz -by- nt] matrices of along-beam velocity components.
head, ptch, roll [nt] vectors with (time-dependent) heading, pitch
and roll angles, following D&S2007's notation.
theta Beam angle measured from the vertical.
*For RDI Sentinel V and Nortek Signature: 25.
gimbaled [True or False] Whether the ADCP was deployed with a gimbaled roll sensor
(default true). Applies the correction to the raw pitch angle
if the pitch/roll sensors were mounted rigidly to the
instrument ('Gimbaled'==false), or the correction to the raw
heading angle if the ADCP was mounted on a gimbal (Dewey &
Stringer, 2007; Lohrmann et al., 1990).
binmaptype [None or 'linear' or 'nn']
Whether to map the beam velocities to fixed horizontal
planes with linear interpolation ('linear') or nearest-neighbor
interpolation ('nearest') prior to converting
to instrument coordinates (Ott, 2002; Dewey & Stringer, 2007).
*The default is to NOT perform any bin mapping.
use3beamsol [True or False] Whether to use three-beam solutions when exactly one beam has
no data in one cell.
OUTPUTS
-------
[u, v, w] [east, north, up] components of Earth-referenced velocity vector.
"""
nz, nt = b1.shape # Number of vertical bins and records in the time series.
d2r = np.pi/180
head = head*d2r
ptch = ptch*d2r
roll = roll*d2r
theta = theta*d2r
# Time-dependent angles (heading, pitch and roll).
Sph1 = np.sin(head)
Sph2 = np.sin(ptch)
Sph3 = np.sin(roll)
"""
Create a Clamps structure (for the acq4/ephysanalysis routines) from modeling data generated
in VCN_Model (the pickled file)
"""
from pathlib import Path
import numpy as np
import pickle
import matplotlib
import matplotlib.pyplot as mpl
import pylibrary.plotting.plothelpers as PH
# from pylibrary.utility.params import Params
import ephysanalysis.metaarray as EM
class MakeClamps():
def __init__(self):
self.holding = 0. # set a default value for models
self.WCComp = 0.
self.CCComp = 0.
pass
def set_clamps(self, dmode='CC', time=None, data=None, cmddata=None, tstart_tdur=[0.01, 0.100]):
self.data = data
self.time = time
self.rate = np.diff(self.time)*1e6
self.cmddata = cmddata
self.tstart_tdur = tstart_tdur
self.tstart = tstart_tdur[0]
self.tend = np.sum(tstart_tdur)
self.dmode = dmode
def read_pfile(self, filename, plot=False):
fh = open(filename, 'rb')
df = pickle.load(fh)
r = df['Results'][0]
if plot:
P = PH.Plotter((1, 1), figsize=(6, 4))
cell_ax = list(P.axdict.keys())[0]
for trial in range(len(df['Results'])):
ds = df['Results'][trial]
k0 = list(df['Results'][trial].keys())[0]
dx = ds[k0]['monitor']
P.axdict[cell_ax].plot(dx['time'], dx['postsynapticV'], linewidth=1.0)
P.axdict[cell_ax].set_xlim(0., 150.)
P.axdict[cell_ax].set_ylim(-200., 50.)
PH.calbar(P.axdict[cell_ax], calbar=[120., -95., 25., 20.], axesoff=True, orient='left',
unitNames={'x': 'ms', 'y': 'mV'}, font='Arial', fontsize=8)
# mpl.savefig(outfile)
mpl.show()
# print(list(df.keys()))
# print('\nbasename: ', df['basename'])
# print('\nruninfo: ', df['runInfo'])
"""
The runInfo dictionary holds somethign like this:
runinfo: {'folder': PosixPath('VCN_Cells/VCN_c08/Simulations/IV'), 'fileName': 'Normal', 'runName': 'Run',
'manipulation': 'Canonical', 'preMode': 'cc', 'postMode': 'cc', 'TargetCellType': 'Bushy',
'electrodeSection': 'soma', 'dendriticElectrodeSection': 'dendrite',
'dendriticSectionDistance': 100.0, 'celsius': 37, 'nStim': 1,
'stimFreq': 200.0, 'stimInj': {'pulse': [-1.0, 2.01, 0.2]},
'stimDur': 100.0, 'stimDelay': 5.0, 'stimPost': 3.0,
'vnStim': 1, 'vstimFreq': 200.0, 'vstimInj': 50,
'vstimDur': 50.0, 'vstimDelay': 2.0, 'vstimPost': 3.0, 'vstimHolding': -60,
'gif_i0': 0.0, 'gif_sigma': 0.5, 'gif_fmod': 0.2, 'gif_tau': 3.0,
'gif_dur': 10.0, 'gif_skew': 0.0,
'runTime': 'Wed Oct 9 13:05:54 2019',
'inFile': None, 'inFileRep': 1, 'spikeTimeList': {},
'v_init': -61.0, 'useSaveState': True, 'tstop': 8.0, 'filename': 'VCN_c08_pulse_'}
"""
# print('\nmodelPars: ', df['modelPars'])
"""
The modelPars dict holds the following:
modelPars: {'species': 'mouse', 'cellClass': 'bushy', 'modelType': 'II',
'modelName': 'mGBC', 'soma': True, 'axon': False,
'dendrites': False, 'pumps': False, 'hillock': False,
'initialsegment': False, 'myelinatedaxon': False,
'unmyelinatedaxon': False, 'na': 'nav11', 'ttx': False,
'name': 'bushy', 'morphology': 'VCN_Cells/VCN_c08/Morphology/VCN_c08.hoc',
'temperature': 34.0}
Note 10/28/2019 changed structure so that runInfo and modelPars are both
subdictionaries of Params
"""
if 'runInfo' not in list(df.keys()): # handle data structure change 10/28/2019
dinfo = df['Params']['runInfo']
else:
dinfo = df['runInfo']
if isinstance(dinfo, Params):
dinfo = dinfo.todict()
print(dinfo)
dur = dinfo['stimDur']
delay = dinfo['stimDelay']
mode = dinfo['postMode'].upper()
ntr = len(df['Results'])
V = [[]]*ntr
I = [[]]*ntr
for i in range(len(df['Results'])):
fk = list(df['Results'][i].keys())[0]
dfx = df['Results'][i][fk]['monitor']
timebase = dfx['time']
V[i] = dfx['postsynapticV']
I[i] = dfx['i_stim0']
V = np.array(V)
# Copyright 2022 <NAME>, MIT license
"""
Module with all the definitions (routines) of general use
of the multitaper routines.
Contains:
* set_xint - setup Ierly's quadrature
* xint - Quadrature by Ierley's method of Chebychev sampling.
* dpss_ev - Recalculate the DPSS eigenvalues using Quadrature
* dpss - calculate the DPSS for given NW, NPTS
* eigenspec - calculate eigenspectra using DPSS sequences.
* adaptspec - calculate adaptively weighted power spectrum
* jackspec - calculate adaptively weighted jackknifed 95% confidence limits
* qiinv - calculate the Stationary Inverse Theory Spectrum.
* ftest - performs the F-test for a line component
* yk_reshape - reshape eigenft's around significant spectral lines
* wt2dof - calculate the d.o.f. of the multitaper
* df_spec - Dual frequency spectrum, using two MTSPEC classes to compute.
* sft - the slow Fourier transform
* squick - for sine multitaper, constructs average multitaper
* squick2 - for sine multitaper, constructs average multitaper, 2 signals
* sadapt - for sine multitaper, adaptive estimation of # of tapers
* sadapt2 - for sine multitaper, same but for 2 signals
* north - for sine multitaper, derivatives of spectrum
* curb - for sine multitaper, clips # of tapers
* get_data - download data and load into numpy array
|
"""
#-----------------------------------------------------
# Import main libraries and modules
#-----------------------------------------------------
import numpy as np
import scipy
from scipy import signal
import scipy.linalg as linalg
import scipy.interpolate as interp
import scipy.optimize as optim
import os
#-------------------------------------------------------------------------
# SET_XINT - Set up weights and sample points for Ierly quadrature
#-------------------------------------------------------------------------
def set_xint(ising):
"""
Sets up weights and sample points for Ierley quadrature,
Slightly changed from original code, to avoid using common
blocks. Also avoided using some go to statements, not needed.
*Parameters*
ising : integer
ising=1
integrand is analytic in closed interval
ising=2
integrand may have bounded singularities
at end points
*Returns*
w : ndarray (nomx,lomx+1)
weights
x : sample points (lomx+1)
sample points
lomx=number of samples = 2**nomx
*Modified*
November 2004 (<NAME>)
|
"""
nomx = 8
lomx = 256
w = np.zeros((nomx,lomx+1),dtype=float)
x = np.zeros(lomx+1,dtype=float)
pi = np.pi
n = 2
for index in range(1,nomx+1):
n = 2*n
nx = n-2
if (index == 1):
nx=4
pin = pi/float(n)
nhalf = int(n/2)
for i in range(nhalf+1):
t = float(i)*pin
si = 0.0
for k in range(0,nx+1,2):
ck=4.0
if (k == 0):
ck=2.0
rk=float(k)
si=si+ck*np.cos(rk*t)/(1.0-rk*rk)
if (i==0 or i==nhalf):
si=0.5*si
t = np.cos(t)
if (ising == 2):
t=0.5*pi*(1.0 +t)
si=si*0.5 * np.sin(t)*pi
t=np.cos(t)
x[i] = 0.5 *(1.0 +t)
w[index-1, i] = 0.5 *si/float(n)
elif (ising == 1):
x[i] = 0.5 *(1.0 +t)
w[index-1,i] = 0.5 *si/float(n)
# end i loop
# end index loop
return w, x
#-------------------------------------------------------------------------
# XINT - Numerical integration in the Fourier Domain using Ierly's method
#-------------------------------------------------------------------------
def xint(a,b,tol,vn,npts):
"""
Quadrature by Ierley's method of Chebychev sampling.
*Parameters*
a : float
upper limit of integration
b : float
upper limit of integration
tol : float
tolerance for integration
vn : ndarray
taper or Slepian sequence to convert-integrate
npts : int
number of points of tapers
*Notes*
This is a slight variation of Glenn Ierley's code. What was
mainly done, was to avoid use of common blocks, defining all
variables and performing the numerical integration inside
(previously done by function pssevf).
Exponential convergence rate for analytic functions! Much faster
than Romberg; competitive with Gauss integration, without awkward
weights.
Integrates the function dpsw on (a, b) to absolute
accuracy tol > 0.
the function in time is given by rpar with ipar points
I removed the optional printing routine part of the code,
to make it easier to read. I also moved both nval, etol
as normal variables inside the routine.
nval = number of function calls made by routine
etol = approximate magnitude of the error of the result
NB: function set_xint is called once before xint to
provide quadrature samples and weights.
I also altered the subroutine call, to get the weights
and not save them in a common block, but get them
directly back.
lomx=number of samples = 2**nomx
*Modified*
November 2004 (<NAME>)
*Calls*
utils.set_xint
|
"""
pi = np.pi
tpi = 2.0 * pi
nomx = 8
lomx = 256
ising = 1
w, x = set_xint(ising)
#---------------------------
# Check tol
#---------------------------
if (tol <= 0.0):
raise ValueError("In xint tol must be > 0 ", tol)
est = np.zeros(nomx,dtype=float)
fv = np.zeros(lomx+1,dtype=float)
n = 1
im = 2**(nomx+1)
for index in range(1,nomx+1):
n = 2*n
im = int(im/2)
im2 = int(im/2)
if (index <= 1):
for i in range(n+1):
# Bottom
y = a+(b-a)*x[im2*i]
om = tpi*y
ct, st = sft(vn,om)
f1 = ct*ct+st*st
# Top
y = b-(b-a)*x[im2*i]
om = tpi*y
ct, st = sft(vn,om)
f2 = ct*ct+st*st
fv[im2*i] = f1 + f2
# end i loop, index 1,
else:
for i in range(1,n,2):
# Bottom
y = a+(b-a)*x[im2*i]
om = tpi*y
ct,st = sft(vn,om)
f1 = ct*ct+st*st
# Top
y = b-(b-a)*x[im2*i]
om = tpi*y
ct, st = sft(vn,om)
f2 = ct*ct+st*st
fv[im2*i]= f1 + f2
# end i loop, index > 1
# end index 1, or more
x_int = 0.00
for i in range(n+1):
x_int = x_int + w[index-1, i]*fv[im2*i]
x_int = x_int*(b-a)
est[index-1] = x_int
etol = 0.0
#
# Check for convergence.
#
nval = 2*n
if (index == 2):
if ( est[index-1] == est[index-2] ):
return x_int
elif (index > 2):
sq = (est[index-1]-est[index-2])**2
bot = (0.01*sq + np.abs(est[index-1]-est[index-2]) )
if (sq == 0.0):
etol = 0.0
else:
etol = sq/bot
if (etol <= tol):
return x_int
# end check convergence
# end index loop
print('******** WARNING *********')
print(' xint unable to provide requested accuracy')
return x_int
#-------------------------------------------------------------------------
# end XINT
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# DPSS_EV - Eigenvalues of the DPSS sequences
#-------------------------------------------------------------------------
def dpss_ev(vn,w,atol=1e-14):
"""
Recalculate the DPSS eigenvalues, performing the
integration in the -W:W range, using Quadrature.
computes eigenvalues for the discrete prolate spheroidal sequences
in efn by integration of the corresponding squared discrete prolate
spheroidal wavefunctions over the inner domain. Due to symmetry, we
perform integration from zero to w.
We use Chebychev quadrature for the numerical integration.
*Parameters*
vn : ndarray [npts,kspec]
DPSS to calculate eigenvalues
w : float
the bandwidth (= time-bandwidth product/ndata)
atol : float, optional
absolute error tolerance for the integration. this should
be set to 10**-n, where n is the number of significant figures
that can be be represented on the machine.
default = 1e-14
*Returns*
lamb : ndarray [kspec]
vector of length vn.shape[1], contains the eigenvalues
*Modified*
November 2004 (<NAME>)
*Calls*
xint
|
"""
npts = np.shape(vn)[0]
kspec = np.shape(vn)[1]
lamb = np.zeros(kspec)
for k in range(kspec):
result = xint(0.0,w,atol,vn[:,k],npts)
lamb[k] = 2.0*result
return lamb
#-------------------------------------------------------------------------
# end DPSS_EV
#-------------------------------------------------------------------------
def dpss(npts,nw,kspec=None):
"""
Calculation of the Discrete Prolate Spheroidal Sequences, and
the correspondent eigenvalues.
- <NAME>. 1978 Bell Sys Tech J v57 n5 1371-1430
- <NAME>. 1982 Proc IEEE v70 n9 1055-1096
**Parameters**
npts : int
the number of points in the series
nw : float
the time-bandwidth product (number of Rayleigh bins)
kspec : int
Optional, the desired number of tapers default = 2*nw-1
**Returns**
v : ndarray (npts,kspec)
the eigenvectors (tapers) are returned in v[npts,nev]
lamb : ndarray (kspec)
the eigenvalues of the v's
**Notes**
In SCIPY the codes are already available to calculate the DPSS.
Eigenvalues are calculated using Chebeshev Quadrature.
Code also performs interpolation if NPTS>1e5
Also, define DPSS to be positive-standard, meaning vn's always
start positive, whether symmetric or not.
**Modified**
December 2020
February 2022 - Changed a for loop for a direct np.sum().
**Calls**
scipy.signal.windows.dpss
dpss_ev
|
"""
#-----------------------------------------------------
# Check number of tapers
#-----------------------------------------------------
W = nw/float(npts)
if (kspec is None):
kspec = int(np.round(2*nw-1))
#-----------------------------------------------------
# Get the DPSS, using SCIPY
# Interpolate if necesary
#-----------------------------------------------------
if (npts < 1e5):
v,lamb2 = signal.windows.dpss(npts, nw, Kmax=kspec,
sym=True,norm=2,
return_ratios=True)
v = v.transpose()
else:
lsize = np.floor(np.log10(npts))
nint = int((10**lsize))
print('DPSS using interpolation', npts, nint)
v2int = signal.windows.dpss(nint, nw, Kmax=kspec,
sym=True,norm=2)
v2int = v2int.transpose()
v = np.zeros((npts,kspec),dtype=float)
x = np.arange(nint)
y = np.linspace(0,nint-1,npts,endpoint=True)
for k in range(kspec):
I = interp.interp1d(x, v2int[:,k], kind='quadratic')
#'quadratic')
v[:,k] = I(y)
v[:,k] = v[:,k]*np.sqrt(float(nint)/float(npts))
#-----------------------------------------------------
# Normalize functions
#-----------------------------------------------------
vnorm = np.sqrt(np.sum(v**2,axis=0))
v = v/vnorm[None,:]
# Replaced for loop
#for i in range(kspec):
# vnorm = np.sqrt(np.sum(v[:,i]**2))
# v[:,i] = v[:,i]/vnorm
#-----------------------------------------------------
# Get positive standard
#-----------------------------------------------------
nx = npts%2
if (nx==1):
lh = int((npts+1)/2)
else:
lh = int(npts/2)
for i in range(kspec):
if (v[lh,i] < 0.0):
v[:,i] = -v[:,i]
lamb = dpss_ev(v,W)
return v, lamb
#-------------------------------------------------------------------------
# end DPSS
#-------------------------------------------------------------------------
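# Hedged usage sketch (illustrative only): tapers for a 256-point series with
# NW = 4 and 7 tapers.
vn_ex, lamb_ex = dpss(256, 4.0, kspec=7)
# vn_ex.shape == (256, 7); columns are orthonormal and positive-standard
# lamb_ex holds the in-band energy fractions, all close to 1 for K <= 2*NW - 1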
def dpss2(npts,nw,nev=None):
"""
This is a try to compute the DPSS using the original Thomson
approach. It reduces the problem to half the size and inverts
independently for the even and odd functions.
This is work in progress and not used.
Modified from F90 library:
<NAME>
December 2020
The tapers are the eigenvectors of the tridiagonal matrix sigma(i,j)
[see Slepian(1978) eq 14 and 25.] They are also the eigenvectors of
the Toeplitz matrix eq. 18. We solve the tridiagonal system in
scipy.linalg.eigh_tridiagonal
(real symmetric tridiagonal solver) for the tapers and use
them in the integral equation in the frequency domain
(dpss_ev subroutine) to get the eigenvalues more accurately,
by performing Chebychev Gaussian Quadrature following Thomson's codes.
First, we create the main and off-diagonal vectors of the
tridiagonal matrix. We compute separetely the even and odd tapers,
by calling eigh_tridiagonal from SCIPY.
We, refine the eigenvalues, by computing the inner bandwidth
energy in the frequency domain (eq. 2.6 Thomson). Also the "leakage"
(1 - eigenvalue) is estimated, independenly if necesary.
In SCIPY the codea are already available to calculate the DPSS.
Eigenvalues are calculated using Chebeshev Quadrature.
Code also performs interpolation if NPTS>1e5
Also, define DPSS to be positive-standard, meaning vn's always
start positive, whether symmetric or not.
**Calls**
To do
|
"""
#-----------------------------------------------------
# Check number of tapers
#-----------------------------------------------------
bw = nw/float(npts)
if (nev is None):
nev = int(np.round(2*nw-1))
#-----------------------------------------------------
# Check size of vectors and half lengths
#-----------------------------------------------------
nx = npts%2
if (nx==1):
lh = int((npts+1)/2)
else:
lh = int(npts/2)
nodd = int ((nev-(nev%2))/2)
neven = nev - nodd
com = np.cos(2.0*np.pi*bw)
hn = float(npts-1.0)/2.0
r2 = np.sqrt(2.0)
# Initiate eigenvalues and eigenvectors
v = np.zeros((npts,nev),dtype=float)
theta = np.zeros(nev,dtype=float)
#---------------------------------------------
# Do even tapers
#---------------------------------------------
fv1 = np.zeros(lh,dtype=float)
fv2 = np.zeros(lh,dtype=float)
for i in range(lh):
n = i
fv1[i] = com*(hn - float(n))**2.0
fv2[i] = float(n*(npts-n))/2.0
if (nx == 0):
fv1[lh-1] = com*(hn-float(lh-1))**2.0 + float(lh*(npts-lh))/2.0
else:
fv2[lh-1] = r2*fv2[lh-1]
fv3 = fv2[1:lh]
eigval,v2 = linalg.eigh_tridiagonal(fv1, fv2[1:lh],
select='i',select_range=(lh-neven,lh-1))
if (nx==1):
for k in range(neven):
v[lh,k] = v[lh,k]*r2
for k in range(neven):
kr = k
k2 = 2*k
theta[k2] = eigval[kr]
nr = npts-1
for i in range(lh):
v[i,k2] = v2[i,kr]
v[nr,k2] = v2[i,kr]
nr=nr-1
#---------------------------------------------
# Do odd tapers
#---------------------------------------------
fv1 = np.zeros(lh,dtype=float)
fv2 = np.zeros(lh,dtype=float)
if (nodd > 0):
for i in range(lh):
n = i
fv1[i] = com*(hn - float(n))**2
fv2[i] = float(n*(npts-n))/2.0
if (nx == 0):
fv1[lh-1] = com*(hn-float(lh-1))**2 - float(lh*(npts-lh))/2.0
eigval,v2 = linalg.eigh_tridiagonal(fv1, fv2[1:lh],
select='i',select_range=(lh-nodd,lh-1))
for k in range(nodd):
kr = k
k2 = 2*k+1
theta[k2] = eigval[kr]
nr = npts-1
for i in range(lh):
v[i,k2] = v2[i,kr]
v[nr,k2] = -v2[i,kr]
nr=nr-1
#---------------------------------------
# Normalize the eigenfunction
# and positive standard
#---------------------------------------
for i in range(nev):
vnorm = np.sqrt(np.sum(v[:,i]**2))
v[:,i] = v[:,i]/vnorm
if (v[lh,i]<0.0):
v[:,i] = -v[:,i]
v = np.flip(v,axis=1)
lamb = dpss_ev(v,bw)
return v, lamb
#-------------------------------------------------------------------------
# end DPSS - my version
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# Eigenspec
#-------------------------------------------------------------------------
def eigenspec(x,vn,lamb,nfft):
"""
Calculate eigenspectra using DPSS sequences.
Gets yk's from Thomson (1982).
**Parameters**
x : ndarray [npts,0]
real vector with the time series
vn : ndarray [npts,kspec]
the different tapers computed in dpss
lambda : ndarray [kspec]
the eigenvalues of the tapers vn
nfft : int
number of frequency points (inc. positive
and negative frequencies)
**Returns**
yk : complex ndarray [kspec,nfft]
complex array with kspec fft's of tapered
data. Regardless of real/complex input data
all frequencies are stored. Good for coherence,
deconvolution, etc.
sk : ndarray [kspec,nfft]
real array with kspec eigenspectra
**Modified**
February 2022. Changed a for loop for xtap
<NAME>, November 2004
**Notes**
Computes eigen-ft's by windowing real data with dpss and taking ffts
Note that fft is unnormalized and window is such that its sum of
squares is one, so that psd=yk**2.
    The fft's are computed using SCIPY FFT codes, and parallel FFT can
    potentially speed up the calculation. Up to KSPEC workers are used.
The yk's are saved to get phase information. Note that tapers are
applied to the original data (npts long) and the FFT is zero padded
up to NFFT points.
**Calls**
scipy.fft.fft
|
"""
kspec = np.shape(vn)[1]
npts = np.shape(x)[0]
if (nfft < npts):
raise ValueError("NFFT must be larger than NPTS ", npts, nfft)
k2 = vn.shape[1]
if (kspec > k2):
raise ValueError("DPSS dimensions don't agree ", kspec, k2, ' tapers')
#-----------------------------------------------------------------
# Define matrices to be used
#-----------------------------------------------------------------
x2 = np.tile(x,(1,kspec))
xtap = vn*x2
# xtap = np.zeros((npts,kspec), dtype=float)
# for i in range(kspec):
# xtap[:,i] = vn[:,i]*x[:,0]
# Get eigenspec Yk's and Sk's
yk = scipy.fft.fft(xtap,axis=0,n=nfft,workers=kspec)
sk = np.abs(yk)**2
return yk, sk
#-------------------------------------------------------------------------
# end Eigenspec
#-------------------------------------------------------------------------
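# Usage sketch (continues the dpss example above, illustrative only): taper a
# white-noise column vector and take zero-padded FFTs. The factor-of-two
# padding is an arbitrary choice.
x = np.random.randn(npts, 1)         # eigenspec expects a [npts,1] column vector
nfft = 2*npts
yk, sk = eigenspec(x, vn, lamb, nfft)   # yk: complex [nfft,kspec], sk = |yk|**2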
#-------------------------------------------------------------------------
# Adaptspec
#-------------------------------------------------------------------------
def adaptspec(yk,sk,lamb,iadapt=0):
"""
Calculate adaptively weighted power spectrum
    Options for non-adaptive estimates are possible, with optional parameter
iadapt, using average of sk's or weighted by eigenvalue.
**Parameters**
yk : complex ndarray [nfft,kspec]
complex array of kspec eigencoefficients
sk : ndarray [nfft,kspec]
array containing kspe power spectra
lamb : ndarray [kspec]
eigenvalues of tapers
iadapt : int
        defines the method to use, default = 0
0 - adaptive multitaper
1 - unweighted, wt =1 for all tapers
2 - wt by the eigenvalue of DPSS
**Returns**
spec : ndarray [nfft]
real vector containing adaptively weighted spectrum
se : ndarray [nfft]
real vector containing the number of degrees of freedom
for the spectral estimate at each frequency.
wt : ndarray [nfft,kspec]
real array containing the ne weights for kspec
eigenspectra normalized so that if there is no bias, the
weights are unity.
**Modified**
<NAME>, Aug 2006
Corrected the estimation of the dofs se (sum of squares of wt is 1.0)
maximum wt = 1
<NAME>, October 2007
    Added an additional subroutine, noadaptspec, to calculate a simple
    non-adaptive multitaper spectrum. This can be used in transfer functions
    and deconvolution, where adaptive methods might not be necessary.
February 2022. Now calculating adapt weights without for loop.
**Calls**
nothing
|
"""
mloop = 1000
nfft = np.shape(yk)[0]
kspec = np.shape(yk)[1]
lamb1 = 1.0-lamb
#----------------------------------------------------
# Simple average, not adaptive. Weight=1
# iadapt=1
#----------------------------------------------------
if (iadapt==1):
wt = np.ones((nfft,kspec), dtype=float)
se = np.zeros((nfft,1), dtype=float)
sbar = np.zeros((nfft,1), dtype=float)
sbar[:,0] = np.sum(sk,axis=1)/ float(kspec)
se = se + 2.0 * float(kspec)
spec = sbar
return spec, se, wt
#----------------------------------------------------
# Weight by eigenvalue of Slepian functions
# iadapt=2
#----------------------------------------------------
if (iadapt==2):
        wt  = np.zeros((nfft,kspec), dtype=float)
        skw = np.zeros((nfft,kspec), dtype=float)
for k in range(kspec):
wt[:,k] = lamb[k]
skw[:,k] = wt[:,k]**2 * sk[:,k]
wtsum = np.sum(wt**2,axis=1)
skwsum = np.sum(skw,axis=1)
sbar = skwsum / wtsum
spec = sbar[:,None]
#------------------------------------------------------------
# Number of Degrees of freedom
#------------------------------------------------------------
se = wt2dof(wt)
return spec, se, wt
# skw = np.zeros((nfft,kspec), dtype=float)
# wt = np.zeros((nfft,kspec), dtype=float)
#----------------------------------------
# Freq sampling (assume unit sampling)
#----------------------------------------
df = 1.0/float(nfft-1)
#----------------------------------------
# Variance of Sk's and avg variance
#----------------------------------------
varsk = np.sum(sk,axis=0)*df
dvar = np.mean(varsk)
bk = dvar * lamb1 # Eq 5.1b Thomson
sqlamb = np.sqrt(lamb)
#-------------------------------------------------
# Iterate to find optimal spectrum
#-------------------------------------------------
rerr = 9.5e-7 # Value used in F90 codes check
sbar = (sk[:,0] + sk[:,1])/2.0
spec = sbar[:,None]
for i in range(mloop):
slast = np.copy(sbar)
# for k in range(kspec):
# wt[:,k] = sqlamb[k]*sbar /(lamb[k]*sbar + bk[k])
# wt[:,k] = np.minimum(wt[:,k],1.0)
# skw[:,k] = wt[:,k]**2 * sk[:,k]
#
# wtsum = np.sum(wt**2,axis=1)
# skwsum = np.sum(skw,axis=1)
# sbar = skwsum / wtsum
wt1 = sqlamb[None,:]*sbar[:,None]
wt2 = (lamb[None,:]*sbar[:,None]+bk[None,:])
wt = np.minimum(wt1/wt2,1.0)
skw = wt**2 * sk
wtsum = np.sum(wt**2,axis=1)
skwsum = np.sum(skw,axis=1)
sbar = skwsum / wtsum
oerr = np.max(np.abs((sbar-slast)/(sbar+slast)))
        if (i==mloop-1):
spec = sbar[:,None]
print('adaptspec did not converge, rerr = ',oerr, rerr)
break
if (oerr > rerr):
continue
spec = sbar[:,None]
break
spec = sbar[:,None]
#---------
#------------------------------------------------------------
# Number of Degrees of freedom
#------------------------------------------------------------
se = wt2dof(wt)
return spec, se, wt
#-------------------------------------------------------------------------
# end adaptspec
#-------------------------------------------------------------------------
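# Usage sketch (continues the example above, illustrative only): adaptively
# weight the eigenspectra; iadapt=0 gives the Thomson adaptive estimate.
spec, se, wt = adaptspec(yk, sk, lamb, iadapt=0)
freq = scipy.fft.fftfreq(nfft)       # unit sampling interval, as assumed in the code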
#-------------------------------------------------------------------------
# jackspec
#-------------------------------------------------------------------------
def jackspec(spec,sk,wt,se):
"""
code to calculate adaptively weighted jackknifed 95% confidence limits
**Parameters**
spec : ndarray [nfft]
real vector containing adaptively weighted spectrum
sk : ndarray [nfft,kspec]
array with kth power spectra
wt : ndarray [nfft,kspec]
real array containing the ne weights for kspec
eigenspectra normalized so that if there is no bias, the
weights are unity.
se : ndarray [nfft]
real vector containing the number of degrees of freedom
for the spectral estimate at each frequency.
**Returns**
spec_ci : ndarray [nfft,2]
real array of jackknife error estimates, with 5 and 95%
confidence intervals of the spectrum.
**Calls**
scipy.stats.t.ppf
**Modified**
<NAME>, Aug 2006
<NAME>, March 2007
Changed the Jackknife to be more efficient.
|
"""
#------------------------------------------------------
# Get sizes and define matrices
#------------------------------------------------------
nfft = np.shape(sk)[0]
kspec = np.shape(sk)[1]
wjk = np.zeros((nfft,kspec-1))
sj = np.zeros((nfft,kspec-1))
sjk = np.zeros((nfft,kspec))
varjk = np.zeros((nfft,kspec))
var = np.zeros((nfft,1))
#------------------------------------------------------
# Do simple jackknife
#------------------------------------------------------
for i in range(kspec):
ks = -1
for k in range(kspec):
if (k == i):
continue
ks = ks + 1
wjk[:,ks] = wt[:,k]
sj[:,ks] = wjk[:,ks]**2 * sk[:,k]
sjk[:,i] = np.sum(sj,axis=1)/ np.sum(wjk**2,axis=1)
#------------------------------------------------------
# Jackknife mean (Log S)
#------------------------------------------------------
lspec = np.log(spec)
lsjk = np.log(sjk)
lsjk_mean = np.sum(lsjk, axis=1)/float(kspec)
#------------------------------------------------------
# Jackknife Bias estimate (Log S)
#------------------------------------------------------
bjk = float(kspec-1) * (lspec - lsjk_mean)
#------------------------------------------------------
# Jackknife Variance estimate (Log S)
#------------------------------------------------------
for i in range(kspec):
varjk[:,i] = (lsjk[:,i] - lsjk_mean)**2
var[:,0] = np.sum(varjk, axis=1) * float(kspec-1)/float(kspec)
#------------------------------------------------------
# Use the degrees of freedom
#------------------------------------------------------
for i in range(nfft):
if (se[i]<1.0):
print('DOF < 1 ', i,'th frequency ', se[i])
raise ValueError("Jackknife - DOF are wrong")
qt = scipy.stats.t(df=se[i]).ppf((0.95))
        var[i,0] = np.exp(qt*np.sqrt(var[i,0]))
#-----------------------------------------------------------------
# Clear variables
#-----------------------------------------------------------------
del wjk, sj, sjk, varjk
#-----------------------------------------------------------------
# Return confidence intervals
#-----------------------------------------------------------------
spec_ci = np.zeros((nfft,2))
ci_dw = spec/var
ci_up = spec*var
spec_ci[:,0] = ci_dw[:,0]
spec_ci[:,1] = ci_up[:,0]
return spec_ci
#-------------------------------------------------------------------------
# end jackspec
#-------------------------------------------------------------------------
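# Usage sketch (continues the example above, illustrative only): jackknifed
# 95% confidence band around the adaptive spectrum.
spec_ci = jackspec(spec, sk, wt, se)
ci_low, ci_high = spec_ci[:, 0], spec_ci[:, 1]   # lower/upper confidence limits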
#-------------------------------------------------------------------------
# qiinv
#-------------------------------------------------------------------------
def qiinv(spec,yk,wt,vn,lamb,nw):
"""
Function to calculate the Quadratic Spectrum using the method
developed by Prieto et al. (2007).
The first 2 derivatives of the spectrum are estimated and the
bias associated with curvature (2nd derivative) is reduced.
Calculate the Stationary Inverse Theory Spectrum.
Basically, compute the spectrum inside the innerband.
This approach is very similar to D.J. Thomson (1990).
**Parameters**
spec : ndarray [nfft,0]
the adaptive multitaper spectrum (so far)
yk : ndarrau, complex [npts,kspec]
multitaper eigencoefficients, complex
wt : ndarray [nf,kspec]
the weights of the different coefficients.
input is the original multitaper weights,
from the Thomson adaptive weighting.
vn : ndarray [npts,kspec]
the Slepian sequences
lambda : ndarray [kspec]
the eigenvalues of the Slepian sequences
nw : float
The time-bandwisth product
**Returns**
qispec : ndarray [nfft,0]
the QI spectrum estimate
ds : ndarray [nfft,0]
the estimate of the first derivative
dds : ndarray [nfft,0]
the estimate of the second derivative
**References**
<NAME>, <NAME>, <NAME>, <NAME>,
and <NAME> (2007), Reducing the bias of multitaper
spectrum estimates, Geophys. J. Int., 171, 1269-1281.
doi: 10.1111/j.1365-246X.2007.03592.x.
**Notes**
    Here I have made the Chebyshev polynomials unitless,
meaning that the associated parameters ALL have units
of the PSD and need to be normalized by 1/W for \alpha_1,
1/W**2 for \alpha_2, etc.
**Modified**
Nov 2021 (<NAME>)
Major adjustment in the inverse problem steps.
Now, the constant term is first inverted for,
and then the 1st and 2nd derivative so that we
obtain an independent 2nd derivative.
June 5, 2009 (<NAME>)
Major change, saving some important
values so that if the subroutine is called
more than once, with similar values, many of
the variables are not calculated again, making
the code run much faster.
**Calls**
scipy.optimize.nnls, scipy.linalg.qr,
scipy.linalg.lstsq
|
"""
npts = np.shape(vn)[0]
kspec = np.shape(vn)[1]
nfft = np.shape(yk)[0]
nfft2 = 11*nfft
nxi = 79;
L = kspec*kspec;
if (np.min(lamb) < 0.9):
print('Careful, Poor leakage of eigenvalue ', np.min(lamb));
print('Value of kspec is too large, revise? *****')
#---------------------------------------------
# Assign matrices to memory
#---------------------------------------------
xk = np.zeros((nfft,kspec), dtype=complex)
Vj = np.zeros((nxi,kspec), dtype=complex)
#---------------------------------------
# New inner bandwidth frequency
#---------------------------------------
bp = nw/npts # W bandwidth
xi = np.linspace(-bp,bp,num=nxi)
dxi = xi[2]-xi[1]
f_qi = scipy.fft.fftfreq(nfft2)
for k in range(kspec):
xk[:,k] = wt[:,k]*yk[:,k];
for i in range(nxi):
om = 2.0*np.pi*xi[i]
ct,st = sft(vn[:,k],om)
Vj[i,k] = 1.0/np.sqrt(lamb[k])*complex(ct,st)
#----------------------------------------------------------------
# Create the vectorized Cjk matrix and Pjk matrix { Vj Vk* }
#----------------------------------------------------------------
C = np.zeros((L,nfft),dtype=complex)
Pk = np.zeros((L,nxi), dtype=complex)
m = -1;
for i in range(kspec):
for k in range(kspec):
m = m + 1;
C[m,:] = ( np.conjugate(xk[:,i]) * (xk[:,k]) );
Pk[m,:] = np.conjugate(Vj[:,i]) * (Vj[:,k]);
Pk[:,0] = 0.5 * Pk[:,0];
Pk[:,nxi-1] = 0.5 * Pk[:,nxi-1];
#-----------------------------------------------------------
# I use the Chebyshev Polynomial as the expansion basis.
#-----------------------------------------------------------
hk = np.zeros((L,3), dtype=complex)
hcte = np.ones((nxi,1), dtype=float)
hslope = np.zeros((nxi,1), dtype=float)
hquad = np.zeros((nxi,1), dtype=float)
Cjk = np.zeros((L,1), dtype=complex)
cte = np.zeros(nfft)
cte2 = np.zeros(nfft)
slope = np.zeros(nfft)
quad = np.zeros(nfft)
sigma2 = np.zeros(nfft)
cte_var = np.zeros(nfft)
slope_var = np.zeros(nfft)
quad_var = np.zeros(nfft)
h1 = np.matmul(Pk,hcte) * dxi
hk[:,0] = h1[:,0]
hslope[:,0] = xi/bp
h2 = np.matmul(Pk,hslope) * dxi
hk[:,1] = h2[:,0]
hquad[:,0] = (2.0*((xi/bp)**2) - 1.0)
h3 = np.matmul(Pk,hquad) * dxi
hk[:,2] = h3[:,0]
nh = np.shape(hk)[1]
#----------------------------------------------------
# Begin Least squares solution (QR factorization)
#----------------------------------------------------
Q,R = scipy.linalg.qr(hk);
Qt = np.transpose(Q)
Leye = np.eye(L)
Ri,res,rnk,s = scipy.linalg.lstsq(R,Leye)
covb = np.real(np.matmul(Ri,np.transpose(Ri)))
for i in range(nfft):
Cjk[:,0] = C[:,i]
# hmodel,res,rnk,s = scipy.linalg.lstsq(hk,Cjk)
btilde = np.matmul(Qt,Cjk)
hmodel,res,rnk,s = scipy.linalg.lstsq(R,btilde)
#---------------------------------------------
# Estimate positive spectrumm
#---------------------------------------------
cte_out = optim.nnls(np.real(h1),
np.real(Cjk[:,0]))[0]
cte2[i] = np.real(cte_out)
pred = h1*cte2[i]
Cjk2 = Cjk-pred
#---------------------------------------------
# Now, solve the derivatives
#---------------------------------------------
btilde = np.matmul(Qt,Cjk2)
hmodel,res,rnk,s = scipy.linalg.lstsq(R,btilde)
cte[i] = np.real(hmodel[0])
slope[i] = -np.real(hmodel[1])
quad[i] = np.real(hmodel[2])
pred = np.matmul(hk,np.real(hmodel))
sigma2[i] = np.sum(np.abs(Cjk-pred)**2)/(L-nh)
cte_var[i] = sigma2[i]*covb[0,0]
slope_var[i] = sigma2[i]*covb[1,1]
quad_var[i] = sigma2[i]*covb[2,2]
slope = slope / (bp)
quad = quad / (bp**2)
slope_var = slope_var / (bp**2)
quad_var = quad_var / (bp**4)
qispec = np.zeros((nfft,1), dtype=float)
for i in range(nfft):
qicorr = (quad[i]**2)/((quad[i]**2) + quad_var[i] )
qicorr = qicorr * (1/6)*(bp**2)*quad[i]
qispec[i] = cte2[i] - qicorr
#qispec[i] = spec[i] - qicorr
ds = slope;
dds = quad;
ds = ds[:,np.newaxis]
dds = dds[:,np.newaxis]
return qispec, ds, dds
#-------------------------------------------------------------------------
# end qiinv
#-------------------------------------------------------------------------
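# Usage sketch (continues the example above, illustrative only): quadratic-
# inverse refinement of the spectrum. nw must match the value used to build
# the tapers, and the helper sft() called inside qiinv is assumed to be
# defined elsewhere in this module.
qispec, ds, dds = qiinv(spec, yk, wt, vn, lamb, nw)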
#-------------------------------------------------------------------------
# ftest
#-------------------------------------------------------------------------
def ftest(vn,yk):
"""
Performs the F test for a line component
Compute F-test for single spectral line components
at the frequency bins given by the mtspec routines.
**Parameters**
vn : ndarray [npts,kspec]
Slepian sequences real
yk : ndarray, complex [nfft,kspec]
multitaper eigencoefficients, complex
kspec fft's of tapered data series
**Returns**
F : ndarray [nfft]
vector of f-test values, real
p : ndarray [nfft]
vector with probability of line component
**Calls**
    scipy.stats.f.ppf, scipy.stats.f.cdf
|
"""
npts = np.shape(vn)[0]
kspec = np.shape(vn)[1]
nfft = np.shape(yk)[0]
mu = np.zeros(nfft,dtype=complex)
F = np.zeros(nfft)
p = np.zeros(nfft)
dof1 = 2
dof2 = 2*(kspec-1)
#------------------------------------------------------
# The Vk(0), summing the time domain tapers
# Also normalize by sum(vn0)**2
#------------------------------------------------------
vn0 = np.sum(vn,axis=0)
vn0_sqsum = np.sum(np.abs(vn0)**2)
#------------------------------------------------------
# Calculate the mean amplitude of line components at
# each frequency
#------------------------------------------------------
for i in range(nfft):
vn_yk = vn0[:]*yk[i,:]
vn_yk_sum = np.sum(vn_yk)
mu[i] = vn_yk_sum/vn0_sqsum
#------------------------------------------------------
# Calculate F Test
# Top (kspec-1) mu**2 sum(vn0**2) Model variance
# Bottom sum(yk - mu*vn0)**2 Misfit
# Fcrit - IS the threshhold for 95% test.
#------------------------------------------------------
Fcrit = scipy.stats.f.ppf(0.95,dof1,dof2)
for i in range(nfft):
Fup = float(kspec-1) * np.abs(mu[i])**2 * np.sum(vn0**2)
Fdw = np.sum( np.abs(yk[i,:] - mu[i]*vn0[:])**2 )
F[i] = Fup/Fdw
p[i] = scipy.stats.f.cdf(F[i],dof1,dof2)
F = F[:,np.newaxis]
p = p[:,np.newaxis]
return F, p
#-------------------------------------------------------------------------
# end ftest
#-------------------------------------------------------------------------
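# Usage sketch (continues the example above, illustrative only): harmonic
# F-test; frequencies where p approaches 1 are candidate line components.
F, p = ftest(vn, yk)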
#-------------------------------------------------------------------------
# reshape spectrum
#-------------------------------------------------------------------------
def yk_reshape(yk_in,vn,p=None,fcrit=0.95):
"""
    Reshape the yk's based on the F-test of line components.
Reshape eigenft's around significant spectral lines
The "significant" means above fcritical probability (def=0.95)
If probability is large at neighbouring frequencies, code will
only remove the largest probability energy.
**Parameters**
yk : ndarray complex [nfft,kspec]
eigenft's
vn : ndarray [npts,kspec]
DPSS sequences
p : ndarray optional [nfft]
F-test probabilities to find fcritical
        If None, it will be calculated
fcrit : float optional
Probability value over which to reshape, default = 0.95
**Returns**
yk : ndarray, complex [nfft,kspec]
Reshaped eigenft's
sline : ndarray [nfft]
        Power spectrum of line components only
**Modified**
April 2006 (<NAME>)
**Calls**
ftest - if P is not present
scipy.fft.fft
|
"""
if (p is None):
print('Doing F test')
p = utils.ftest(vn,yk)[1]
yk = np.copy(yk_in)
npts = np.shape(vn)[0]
kspec = np.shape(vn)[1]
nfft = np.shape(yk)[0]
sline = np.zeros((nfft,1),dtype=float)
Vk = np.zeros((nfft,kspec),dtype=complex)
#------------------------------------------------------
# Count and isolate, peaks that pass
# the fcrit criteria.
# Also, remove values which are not local peaks
#------------------------------------------------------
nl = 0
for i in range(nfft):
if (p[i] < fcrit):
p[i] = 0
continue
if (i==0):
if (p[i]>p[i+1]):
nl = nl + 1
else:
p[i] = 0.0
elif (i==nfft-1):
if (p[i]>p[i-1]):
nl = nl + 1
else:
p[i] = 0
else:
if (p[i]>p[i-1] and p[i]>p[i+1]):
nl = nl + 1
else:
p[i] = 0
#------------------------------------------------------
# If no lines are found, return back arrays
#------------------------------------------------------
if (nl == 0):
return yk,sline
#------------------------------------------------------
# Prepare vn's Vk's for line removal
# Compute the Vk's to reshape
    #   The Vk's are normalized so that int_{-1/2}^{1/2} |Vk|**2 df = 1.
    #   This already follows from the FFT, since sum(vn**2) = 1.
#------------------------------------------------------
vn0 = np.sum(vn,axis=0)
for k in range(kspec):
Vk[:,k] = scipy.fft.fft(vn[:,k],nfft)
#------------------------------------------------------
# Remove mean value for each spectral line
#------------------------------------------------------
for i in range(nfft):
if (p[i]<fcrit):
continue
mu = np.sum(vn0*yk[i,:]) / np.sum(vn0**2)
for j in range(nfft):
jj = j - i
if (jj < 0):
jj = jj + nfft
yk_pred = mu*Vk[jj,:]
yk[j,:] = yk[j,:] - yk_pred
#yk[j,:] = yk[j,:] - mu*Vk[jj,:]
for k in range(kspec):
kfloat = 1.0/float(kspec)
sline[i] = sline[i] + kfloat*np.abs(mu*Vk[jj,k])**2
return yk, sline
#-------------------------------------------------------------------------
# end reshape
#-------------------------------------------------------------------------
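# Usage sketch (continues the example above, illustrative only): remove
# significant spectral lines from the eigencoefficients, reusing the F-test
# probabilities computed above.
yk_clean, sline = yk_reshape(yk, vn, p=p, fcrit=0.95)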
#-------------------------------------------------------------------------
# Calculate degrees of freedom
#-------------------------------------------------------------------------
def wt2dof(wt):
"""
Calculate the degrees of freedom of the multitaper based on the
weights of the different tapers.
**Parameters**
wt : ndarray [nfft,kspec]
weights of the tapers at each frequency
**Returns**
se : ndarray [nfft]
degrees of freedom at each frequency
**Modified**
February 2022, changed a for loop for direct numpy sum.
|
"""
    nfft = np.shape(wt)[0]
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for WENO reconstruction."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest # pylint: disable=g-bad-import-order
from absl.testing import parameterized
import numpy as np
from pde_superresolution import weno
class WENOTest(parameterized.TestCase):
def test_calculate_omega_smooth(self):
    u = np.zeros(5)
import numpy as np
import types
from copy import deepcopy
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
class Dataset(object):
def __init__(self, n_candidate, n_safety, n_test, seed=None, meta_information={}, **contents):
# Record dataset split sizes
self._n_safety = n_safety
self._n_candidate = n_candidate
self._n_test = n_test
self._n_train = n_candidate + n_safety
self._n_samples = self._n_train + n_test
self._seed = seed
self._meta_information = meta_information
self._contents = deepcopy(contents)
self._unique_values = {}
for k, v in contents.items():
setattr(self, '_%s' % k, v)
if v.dtype == int:
self._unique_values[k] = np.unique(v)
# Compute indices for the splits
self._inds = {
'all' : np.arange(0, self._n_samples),
'train' : np.arange(0, self._n_train),
'test' : np.arange(self._n_train, self._n_samples),
            'opt'   : np.arange(0, self._n_candidate),
# -*- coding: utf-8 -*-
'''
This modules contains functions necessary for applying OWL or group OWL
to the parameters
1. reg_params_init
2. apply_growl
3. apply_owl_prox
4. update_mask
5. measure_compression
6. adjust_learning_rate
7. preprocess_hparams
8. set_param_share
'''
from __future__ import division, print_function, absolute_import
import sys
sys.path.append('./owl_projection')
import tensorflow as tf
import numpy as np
from projectedOWL import proxOWL
from numpy.linalg import norm
from math import sqrt
from utils_nn import get_weight_placeholders, get_mask_placeholders
from flags import FLAGS, HParams
import re
import os
def reg_params_init(sess, hps):
'''
This function initializes the regularization paramters.
Args:
sess: the predefined computation graph.
hps: hyperparameters collection
Returns:
layer_owl_params: a list, each element is an array containing the weights
of the corresponding layer.
'''
weight_placeholder = get_weight_placeholders()
reg_applied_layers = hps.reg_applied_layers
layer_owl_params = []
for idx, triple in enumerate(weight_placeholder):
print('layer {}'.format(idx))
# if the layer is not regularized, then append []
if not reg_applied_layers[idx]:
layer_owl_params.append([])
continue
#Regularization parameters
reg_params = hps.reg_params
lambda_1 = np.float32(reg_params[idx][0])
lambda_2 = np.float32(reg_params[idx][1])
if (lambda_1 < 0) | (lambda_2 < 0):
raise Exception('regularization parameters must be non-negative')
#GrOWL weights should be applied to the rows of the (reshaped) weight matrix
param_i, placeholder_i, assign_op_i = triple
param_shape = sess.run(tf.shape(param_i))
if np.size(param_i.get_shape().as_list()) == 2:
row_num = param_shape[0]
elif np.size(param_i.get_shape().as_list()) == 4:
row_num = param_shape[2]
transition_ind = np.floor(row_num*FLAGS.PLD_transition)
param_index = np.linspace(start=transition_ind-1, stop=0, num=transition_ind)
print(' row num: {}, transition_ind: {}, largest reg: {}'.format(row_num, transition_ind, lambda_1 + lambda_2 * transition_ind))
if row_num > transition_ind:
param_index = np.append(param_index, np.zeros([1, int(row_num-transition_ind)]))
layer_owl_params.append(lambda_1 + lambda_2 * param_index)
print("length of weight_placeholder:{0}".format(len(weight_placeholder)))
assert len(layer_owl_params) == len(weight_placeholder)
assert len(layer_owl_params) == len(hps.reg_applied_layers)
return layer_owl_params, hps
def apply_group_lasso(W, weights):
#Prox op
W_norm = norm(W, axis=1)
new_W_norm = np.maximum(W_norm - weights[0], 0)
new_W = np.zeros_like(W)
for i in range(W.shape[0]):
if W_norm[i] < np.finfo(np.float32).eps:
new_W[i,:] = 0 * W[i,:]
else:
new_W[i,:] = new_W_norm[i] * W[i,:] / W_norm[i]
return new_W
def apply_growl(W, weights):
# Prox op
W_norm = norm(W, axis=1)
new_W_norm=proxOWL(W_norm, weights)
new_W = np.zeros_like(W)
for i in range(W.shape[0]):
if W_norm[i] < np.finfo(np.float32).eps:
new_W[i,:] = 0 * W[i,:]
else:
new_W[i,:] = new_W_norm[i] * W[i,:] / W_norm[i]
return new_W
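# Quick sanity sketch (illustrative only, not used by the training code):
# apply the group-lasso prox to a random weight matrix. Rows are shrunk toward
# zero, and any row whose l2 norm falls below the threshold is zeroed exactly.
# The 8x16 shape and the 0.5 threshold are arbitrary choices.
W_demo = np.random.randn(8, 16).astype(np.float32)
thresh = 0.5*np.ones(8, dtype=np.float32)       # only thresh[0] is used by this prox
W_demo_shrunk = apply_group_lasso(W_demo, thresh)
n_zero_rows = int(np.sum(norm(W_demo_shrunk, axis=1) == 0))   # rows pruned outright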
def apply_reg_prox(sess, learning_rate_val, layer_reg_params, hps):
'''
Updates the weights parameter of each layer
Args:
sess: the comptutaion graph
learning_rate: the predefined learning rate
layer_reg_params: owl parameters, initially created by reg_params_init
hps:
Returns:
None
'''
# get weights of the network
weight_placeholders = get_weight_placeholders()
# prox_lr_val = min(learning_rate_val, 0.001)
prox_lr_val = learning_rate_val
for idx, triple in enumerate(weight_placeholders):
#Don't apply owl/growl if told not to
if not hps.reg_applied_layers[idx]:
continue
param_i, placeholder_i, assign_op_i = triple
param_val = sess.run(param_i)
dim_i = np.size(param_val.shape)
if dim_i == 2:
if FLAGS.use_growl:
prox_param_val = apply_growl(param_val, prox_lr_val * layer_reg_params[idx])
else:
prox_param_val = apply_group_lasso(param_val, prox_lr_val * layer_reg_params[idx])
elif dim_i == 4:
# For convolutional layer, we need to first reshape 4D tensor to 2D matrix
reduced_param_val = reshape_2D_4D(param_val, target_shape=None,
reshape_type=2, reshape_order='F')
if FLAGS.use_growl:
reduced_prox_param_val = apply_growl(reduced_param_val, prox_lr_val * layer_reg_params[idx])
else:
reduced_prox_param_val = apply_group_lasso(reduced_param_val, prox_lr_val * layer_reg_params[idx])
#Now reshape the 2D matrix back to 4D tensor
prox_param_val = reshape_2D_4D(reduced_prox_param_val, target_shape=param_val.shape,
reshape_type=1, reshape_order='F')
# assign the new weights to param_i using the assign_op_i
sess.run(assign_op_i, feed_dict={placeholder_i:prox_param_val})
def update_mask(sess, threshold, hps, res_dict, step):
'''
update the mask during the training process to prevent drifting from zero
Args:
sess: the computation graph
learning_rate: the predefined learning rate
        threshold: the pruning threshold; this helps avoid floating-point
            errors that can occur during the masking process
model: the resnet class
hps: hyperparameters
res_dict: results dictionary
step: current step
Returns:
num_zero_layers: number of zero valued layers
'''
mask_palceholders = get_mask_placeholders()
weight_placeholders = get_weight_placeholders()
#count the zero valued layers in order to avoiding the nonsense results
num_zero_layers = 0
layer_ID = []
assert len(mask_palceholders) == len(weight_placeholders)
for idx, mask_triple in enumerate(mask_palceholders):
#Don't apply owl/growl if told not to
if not hps.reg_applied_layers[idx]:
continue
mask_i, mask_palceholders_i, mask_assign_op_i = mask_triple
param_i, param_placeholder_i, param_assign_op_i = weight_placeholders[idx]
dim_i = param_i.get_shape().as_list()
#Recover the masked weights to zeros if they drifted
param_val = sess.run(param_i)
mask = sess.run(mask_i)
param_val_masked = param_val * mask
#If apply to convolutional layer, compute the reshaped matrix
if np.size(dim_i) == 4:
param_val_masked_reshaped = reshape_2D_4D(param_val_masked, target_shape=None,
reshape_type=2, reshape_order='F')
mask_reshaped = reshape_2D_4D(mask, target_shape=None,
reshape_type=2, reshape_order='F')
#prune params and update the mask
row_norm = norm(param_val_masked_reshaped, axis=1)
row_size = param_val_masked_reshaped.shape[1]
print('layer:{}, largest row norm: {:6f}, median row norm: {:.6f}, min row norm: {:.6f}'.format(idx, np.max(row_norm), np.median(row_norm), np.min(row_norm)))
zero_row_idx = np.where(row_norm <=threshold)
print(' masked neurons: {}; total neurons: {}'.format(np.size(zero_row_idx), np.size(row_norm)))
param_val_masked_reshaped[zero_row_idx[0], :] = 0
mask_reshaped[zero_row_idx[0], :] = 0
#back to 4D
param_val_masked = reshape_2D_4D(param_val_masked_reshaped, target_shape=tuple(dim_i),
reshape_type=1, reshape_order='F')
mask = reshape_2D_4D(mask_reshaped, target_shape=tuple(dim_i),
reshape_type=1, reshape_order='F')
elif np.size(dim_i) == 2:
row_norm = norm(param_val_masked, axis=1)
row_size = param_val_masked.shape[1]
print('layer:{}, largest row norm: {:6f}, median row norm: {:.6f}, min row norm: {:.6f}'.format(idx, np.max(row_norm), np.median(row_norm), np.min(row_norm)))
zero_row_idx = np.where(row_norm <=threshold)
print(' masked rows: {}; total rows: {}'.format(np.size(zero_row_idx), np.size(row_norm)))
param_val_masked[zero_row_idx[0], :] = 0
mask[zero_row_idx[0], :] = 0
#Update the mask and weight matrix
sess.run(mask_assign_op_i, feed_dict={mask_palceholders_i:mask})
sess.run(param_assign_op_i, feed_dict={param_placeholder_i:param_val_masked})
nonzero_rows = np.size(row_norm) - np.size(zero_row_idx[0])
layer_nonzero_params = nonzero_rows * row_size
print(" total:{0}, nonzeros:{1}".format(np.size(param_val_masked),
layer_nonzero_params))
################################
#Record the zero valued layers
if np.size(row_norm) - np.size(zero_row_idx[0]) <= 3:
num_zero_layers += 1
layer_ID += [idx]
return num_zero_layers, layer_ID
def measure_compression(sess, res_dict, step, training, hps, num_cluster_arr=[]):
'''
Monitor the compression ratio
'''
mask_palceholders = get_mask_placeholders()
weight_placeholders = get_weight_placeholders()
num_nonzero_row_arr = []
num_total_row_arr = []
num_row_size_arr = []
num_nonzero_params = 0
num_unique_params = 0
num_total_params = 0
for idx, mask_triple in enumerate(mask_palceholders):
mask_i, mask_palceholders_i, mask_assign_op_i = mask_triple
param_i, param_placeholder_i, param_assign_op_i = weight_placeholders[idx]
dim_i = param_i.get_shape().as_list()
param_val = sess.run(param_i)
mask = sess.run(mask_i)
param_val_masked = param_val * mask
if np.size(dim_i) == 4:
param_val_masked_reshaped = reshape_2D_4D(param_val_masked, target_shape=None, reshape_type=2, reshape_order='F')
row_norm = norm(param_val_masked_reshaped, axis=1)
num_nonzero_params += np.count_nonzero(row_norm) * np.shape(param_val_masked_reshaped)[1]
num_unique_params += np.size(np.unique(param_val_masked_reshaped))
num_total_params += np.prod(dim_i)
            num_nonzero_row_arr.append(np.count_nonzero(row_norm))
#!/usr/bin/env python
# Generic code for a classifier
#
# Subscribes to a feature vector (custom_msgs/Float32MultiArray) and a label (custom_msgs/String)
# Uses incoming feature data to fit a classifier to predict the label
# Interface with topic command (Start/Stop learning)
import rospy
import numpy as np
import signal
import sys
import threading
import os
from EpicToolbox import FileManager,mkdirfile
from std_msgs.msg import String as StdString
from std_msgs.msg import Header
from custom_msgs.msg import String, Float32MultiArray
from datetime import date
# MODEL DEPENDENT CODE ? WRAP TO CLASS?
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import KFold
from joblib import dump, load
from copy import deepcopy
from scipy import io
##################### ROS MESSAGES AND PUBLISHERS ##############################
stringmsg=String()
std_stringmsg=StdString()
labelpub = rospy.Publisher('prediction',String,queue_size=1)
logpub = rospy.Publisher('log', StdString, queue_size=50)
################################################################################
labels=[]
label=None
active_model=None
lock=threading.Lock()
learning = False
MAX_SAMPLES=100 #Number of samples per class to hold in memory
size=None #Size of the feature vector
memory=dict() #Sample data
numSamples=dict() #Number of samples
VERBOSE=2
# Setup a Rosbag
path=os.path.join(os.environ['HOME'],date.today().strftime('%m_%d_%y'))
mkdirfile(path)
f=FileManager(path,PathStructure=['Type','File'])
#rosparam=Rosparam('/')
############################ ROS CALLBACKS #####################################
def learning_callback(msg):
'''Enable or disable learning'''
global learning
if msg.data=='START':
printAndLog('Learning enabled')
learning=True
elif msg.data=='STOP':
printAndLog('Learning disabled')
learning=False
def label_callback(msg):
global labels,label,size,memory,numSamples,active_model
print('Label:{}'.format(msg.data))
lock.acquire()
label=msg.data
if label in labels:
pass
else:
print('\t New label to the classifier')
if size==None:
lock.release()
return
labels.append(label)
memory[label]=np.zeros((MAX_SAMPLES,size))
numSamples[label]=0
active_model=None #Reset the model since the number of labels changed
lock.release()
def labelstd_callback(msg):
stringmsg.header.stamp=rospy.Time.now()
stringmsg.data=msg.data
label_callback(stringmsg)
def features_callback(msg):
''' Get a new feature sample and incorporate the sample in memory'''
global active_model,labels,label,memory,numSamples,size,learning
if learning == False:
size=msg.layout.dim[0].size
if learning == True:
lock.acquire()
if label==None:
size=msg.layout.dim[0].size
lock.release()
return
# Add the sample to the buffers for the corresponding label
x=memory[label]
idx=numSamples[label]
if idx<MAX_SAMPLES:
x[idx,:]=msg.data
numSamples[label]=numSamples[label]+1
else:
x=np.roll(x,1,axis=0)
x[0,:]=msg.data
memory[label]=x
numSamples[label]=numSamples[label]+1
lock.release()
# Compute the prediction from the active model
if active_model==None:
return
lock.acquire()
out=active_model.predict(np.array([msg.data]))
lock.release()
stringmsg.header.stamp=rospy.Time.now()
stringmsg.data=out[0]
labelpub.publish(stringmsg)
#publish output
######################## HELPER FUNCTIONS ######################################
def signal_handler(sig,frame):
''' Terminate the connection to eris and close the node'''
print('Ctrl+c')
rosbag.stop()
sys.exit(0)
signal.signal(signal.SIGINT,signal_handler)
def printAndLog(strdata):
''' Print and publish string data to the log '''
print(strdata)
std_stringmsg.data=strdata
logpub.publish(std_stringmsg)
def memory2xy(memory):
'''Convert the data from memory to a x,y tables for fitting a model'''
labels=memory.keys()
x=[]
y=[]
for l in labels:
x.append(memory[l])
y.append([l]*memory[l].shape[0])
    x = np.concatenate(x)
import glob
import numpy as np
import os
def calc_blur_gradient_loss(net_type, target_blur_folder, faig_folder, save_faig_maskdeblurfilter_blur_loss_txt,
save_faig_maskdenoisefilter_blur_loss_txt, ig_folder,
save_ig_maskdeblurfilter_blur_loss_txt, save_ig_maskdenoisefilter_blur_loss_txt,
abs_filter_change_folder, save_abs_filter_change_maskdeblurfilter_blur_loss_txt,
save_abs_filter_change_maskdenoisefilter_blur_loss_txt, random_folder,
save_random_maskdeblurfilter_blur_loss_txt, save_random_maskdenoisefilter_blur_loss_txt,
sub_func_folder_names, sub_input_folder_name):
""" Quantity the discovered filters' contribution to the deblur function by measuring
output difference of the target model and the substituted model. The output difference
is calculated on image gradients of their gray counterpart.
Args:
net_type (str): network type. Default: srcnn_style or srresnet
target_blur_folder (str): folder path that contains the gradient map of target model's output
towards blurry input.
faig_folder (str): folder path that contains the gradient map of substituted-faig-discovered model's
output towards blurry input.
save_faig_maskdeblurfilter_blur_loss_txt (str): txt path that records the output different of
target model and substituted-faig-discovered (blur) model.
save_faig_maskdenoisefilter_blur_loss_txt (str): txt path that records the output different of
target model and substituted-faig-discovered (noise) model.
ig_folder (str): folder path that contains the gradient map of substituted-ig-discovered model's
output towards blurry input.
save_ig_maskdeblurfilter_blur_loss_txt (str): txt path that records the output different of
target model and substituted-ig-discovered (blur) model.
save_ig_maskdenoisefilter_blur_loss_txt (str): txt path that records the output different of
target model and substituted-ig-discovered (noise) model.
abs_filter_change_folder (str): folder path that contains the gradient map of
substituted-abs_filter_change-discovered model's output towards blurry input.
save_abs_filter_change_maskdeblurfilter_blur_loss_txt (str): txt path that records the output different of
target model and substituted-abs_filter_change-discovered (blur) model.
save_abs_filter_change_maskdenoisefilter_blur_loss_txt (str): txt path that records the output different of
target model and substituted-abs_filter_change-discovered (noise) model.
random_folder (str): folder path that contains the gradient map of substituted-random-discovered model's
output towards blurry input.
save_random_maskdeblurfilter_blur_loss_txt (str): txt path that records the output different of
target model and substituted-random-discovered (blur) model.
save_random_maskdenoisefilter_blur_loss_txt (str): txt path that records the output different of
target model and substituted-random-discovered (noise) model.
sub_func_folder_names (list): Default: ['maskdeblurfilter', 'maskdenoisefilter']
sub_input_folder_name (str): Default: 'Blur2_LRbicx2'
"""
deblur_func_imglist = list(sorted(glob.glob(os.path.join(target_blur_folder, '*.npy'))))
faig_maskdeblurfilter_blur_loss = []
faig_maskdenoisefilter_blur_loss = []
ig_maskdeblurfilter_blur_loss = []
ig_maskdenoisefilter_blur_loss = []
abs_filter_change_maskdeblurfilter_blur_loss = []
abs_filter_change_maskdenoisefilter_blur_loss = []
random_maskdeblurfilter_blur_loss = []
random_maskdenoisefilter_blur_loss = []
if net_type == 'srcnn_style':
total_neuron_nums = 156224
elif net_type == 'srresnet':
total_neuron_nums = 151936
for proportion in [1, 3, 5, 10]:
# for proportion in range(0, 101):
selected_num_neurons = int(total_neuron_nums * proportion / 100)
neuron_folder = f'{selected_num_neurons}kernels'
faig_neuron_folder_path = os.path.join(faig_folder, neuron_folder)
ig_neuron_folder_path = os.path.join(ig_folder, neuron_folder)
abs_filter_change_neuron_folder_path = os.path.join(abs_filter_change_folder, neuron_folder)
random_neuron_folder_path = os.path.join(random_folder, neuron_folder)
for idx, sub_folder in enumerate(sub_func_folder_names):
faig_neuron_sub_folder_path = os.path.join(faig_neuron_folder_path, sub_folder)
ig_neuron_sub_folder_path = os.path.join(ig_neuron_folder_path, sub_folder)
abs_filter_change_neuron_sub_folder_path = os.path.join(abs_filter_change_neuron_folder_path, sub_folder)
random_neuron_sub_folder_path = os.path.join(random_neuron_folder_path, sub_folder)
faig_imglist = list(
sorted(glob.glob(os.path.join(faig_neuron_sub_folder_path, sub_input_folder_name, '*.npy'))))
ig_imglist = list(
sorted(glob.glob(os.path.join(ig_neuron_sub_folder_path, sub_input_folder_name, '*.npy'))))
abs_filter_change_imglist = list(
sorted(
glob.glob(os.path.join(abs_filter_change_neuron_sub_folder_path, sub_input_folder_name, '*.npy'))))
random_imglist = list(
sorted(glob.glob(os.path.join(random_neuron_sub_folder_path, sub_input_folder_name, '*.npy'))))
faig_gradient_loss = 0.0
ig_gradient_loss = 0.0
abs_filter_change_gradient_loss = 0.0
random_gradient_loss = 0.0
for img_idx, img_path in enumerate(deblur_func_imglist):
refer_img_path = img_path
faig_img_path = faig_imglist[img_idx]
ig_img_path = ig_imglist[img_idx]
abs_filter_change_img_path = abs_filter_change_imglist[img_idx]
random_img_path = random_imglist[img_idx]
                refer_gradient = np.load(refer_img_path)
from sklearn import datasets
import numpy as np
from scipy.special import jv
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import validation_curve
from mlxtend.plotting import plot_decision_regions
import graphviz
FONTSIZE = 20
def plot_2D_iris(fontsize=None):
if fontsize is None:
fontsize = FONTSIZE
X, y = datasets.load_iris(return_X_y=True)
X = X[:, :2]
labels = ['setosa', 'versicolor', 'virginica']
fig, ax = plt.subplots(figsize=(10, 8))
for target, label in zip(range(3), labels):
ax.scatter(X[y == target, 0], X[y == target, 1],
color=f'C{target}', s=100,
label=label, lw=0)
ax.set_xlabel('Sepal length (cm)', fontsize=fontsize)
ax.set_ylabel('Sepal width (cm)', fontsize=fontsize)
plt.xticks(fontsize=18)
plt.yticks(fontsize=18)
leg = plt.legend(title='Iris species', fontsize=18)
plt.setp(leg.get_title(), fontsize=fontsize)
plt.show()
def plot_tree_decision_regions(model: DecisionTreeClassifier, fontsize=None):
if fontsize is None:
fontsize = FONTSIZE
X, y = datasets.load_iris(return_X_y=True)
X = X[:, :2]
labels = ['setosa', 'versicolor', 'virginica']
fig, ax = plt.subplots(figsize=(10, 8))
with plt.style.context({'lines.markersize': 10}):
model.fit(X, y)
plot_decision_regions(X, y, model, colors='C0,C1,C2', markers='ooo',
hide_spines=False, ax=ax)
ax.set_xlabel('Sepal length (cm)', fontsize=fontsize)
ax.set_ylabel('Sepal width (cm)', fontsize=fontsize)
plt.xticks(fontsize=18)
plt.yticks(fontsize=18)
leg = plt.legend(title='Iris species', fontsize=18)
for idx, label in enumerate(labels):
leg.get_texts()[idx].set_text(label)
plt.setp(leg.get_title(), fontsize=fontsize)
plt.show()
def plot_decision_tree(model: DecisionTreeClassifier):
iris = datasets.load_iris()
dot_data = export_graphviz(model, out_file=None,
feature_names=iris.feature_names[:2],
class_names=iris.target_names,
impurity=False,
filled=True,
rounded=True,
special_characters=True)
graph = graphviz.Source(dot_data)
return graph
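# Illustrative helper (not part of the original module): shows how the two
# plotting functions above are typically combined. The tree depth and
# random_state are arbitrary choices.
def demo_tree_plots():
    X, y = datasets.load_iris(return_X_y=True)
    model = DecisionTreeClassifier(max_depth=3, random_state=0).fit(X[:, :2], y)
    plot_tree_decision_regions(model)     # refits internally on the 2-feature data
    return plot_decision_tree(model)      # graphviz.Source; call .render() to save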
def plot_classification_vs_regression(fontsize=None):
if fontsize is None:
fontsize = FONTSIZE
np.random.seed(2)
markersize = 50
X_r = np.linspace(0, 1, 100)
y_r_true = jv(X_r, 2)
y_r = np.random.normal(y_r_true, 0.02)
fig, axarr = plt.subplots(nrows=1, ncols=2, figsize=(18, 8))
ax_reg = axarr[1]
ax_reg.scatter(X_r, y_r, s=markersize, alpha=0.7, lw=0, color='C2')
ax_reg.plot(X_r, y_r_true, color='C2')
ax_reg.set_xlabel('x', fontsize=fontsize)
ax_reg.set_ylabel('y', fontsize=fontsize)
ax_reg.set_title('Regression', fontsize=fontsize)
ax_reg.xaxis.set_ticklabels([])
ax_reg.yaxis.set_ticklabels([])
X_c, y_c = datasets.make_blobs(n_samples=500, n_features=2,
centers=2, random_state=2)
ax_c = axarr[0]
for label in range(2):
label_mask = y_c == label
ax_c.scatter(X_c[label_mask, 0], X_c[label_mask, 1], s=markersize,
c=f'C{label}', alpha=0.7, label=label)
ax_c.set_xlabel('$X_1$', fontsize=fontsize)
ax_c.set_ylabel('$X_2$', fontsize=fontsize)
ax_c.set_title('Classification', fontsize=fontsize)
ax_c.xaxis.set_ticklabels([])
ax_c.yaxis.set_ticklabels([])
clf = LogisticRegression(random_state=2).fit(X_c, y_c)
xmin, xmax = X_c[:, 0].min(), X_c[:, 0].max()
coef = clf.coef_
intercept = clf.intercept_
def line(x0):
return (-(x0 * coef[0, 0]) - intercept[0]) / coef[0, 1]
ax_c.plot([xmin, xmax], [line(xmin), line(xmax)], ls='--', color='k')
leg = ax_c.legend(title='Class labels', fontsize=18)
plt.setp(leg.get_title(), fontsize=fontsize)
plt.show()
def plot_data_representation():
fig = plt.figure(figsize=(9, 6))
ax = fig.add_axes([0, 0, 1, 1])
ax.axis('off')
ax.axis('equal')
# Draw features matrix
ax.vlines(range(6), ymin=0, ymax=9, lw=1)
ax.hlines(range(10), xmin=0, xmax=5, lw=1)
font_prop = dict(size=18, family='monospace')
ax.text(-1, -1, "Feature Matrix ($X$)", size=20)
ax.text(0.1, -0.3, r'n_features $\longrightarrow$', **font_prop)
ax.text(-0.1, 0.1, r'$\longleftarrow$ n_samples', rotation=90,
va='top', ha='right', **font_prop)
# Draw labels vector
ax.vlines(range(8, 10), ymin=0, ymax=9, lw=1)
ax.hlines(range(10), xmin=8, xmax=9, lw=1)
ax.text(7, -1, "Target Vector ($y$)", size=20)
ax.text(7.9, 0.1, r'$\longleftarrow$ n_samples', rotation=90,
va='top', ha='right', **font_prop)
ax.set_ylim(10, -2)
def plot_validation_curve():
x = np.linspace(0, 1, 1000)
y1 = -(x - 0.5) ** 2
    y2 = y1 - 0.33 + np.exp(x - 1)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
import copy
import numpy as np
import pandas as pd
from skimage import filters
from functools import reduce
from SIMPAD.RCMP import rcmstomp
def _generateGraph(var_len_valleys):
vertexList = []
for l in var_len_valleys:
vertexList = vertexList + var_len_valleys[l]
vertexList = np.array(vertexList)
edgeList = []
for i in np.arange(len(vertexList)):
overlap = \
np.where((~(vertexList[i, 0] > vertexList[:, 1]) & ~(vertexList[i, 1] < vertexList[:, 0])) == True)[0]
overlap = np.delete(overlap, np.where(overlap == i)[0])
for j in overlap:
edgeList.append((i, j))
edgeList = np.array(edgeList)
adjacencyList = [[] for vartex in vertexList]
for edge in edgeList:
adjacencyList[edge[0]].append(edge[1])
adjacencyList = np.array(adjacencyList)
return vertexList, edgeList, adjacencyList
def _generateSubgraphs(vertextList, adjacencyList):
subgraphs = []
freeVertices = list(np.arange(len(vertextList)))
while freeVertices:
freeVertex = freeVertices.pop()
subgraph = _constructSubgraph(freeVertex, adjacencyList, [freeVertex])
freeVertices = [vertex for vertex in freeVertices if vertex not in subgraph]
subgraphs.append(subgraph)
return subgraphs
def _constructSubgraph(vertex, adjacencyList, subgraph):
neighbors = [vertex for vertex in adjacencyList[vertex] if vertex not in subgraph]
if (len(neighbors) == 0):
return subgraph
else:
subgraph = subgraph + neighbors
for vertex in neighbors:
subgraph = _constructSubgraph(vertex, adjacencyList, subgraph)
return subgraph
def _incumb(vertexWeight, adjacencyList):
N = len(vertexWeight)
X = np.zeros(N, dtype=bool)
for i in range(N):
if (len(adjacencyList[i]) == 0):
X[i] = True
Z = np.zeros(N)
for i in range(N):
Z[i] = vertexWeight[i] - np.sum(vertexWeight[list(adjacencyList[i])])
freeVertices = np.where(X == 0)[0]
while True:
if len(freeVertices) == 0:
break;
imin = freeVertices[np.argmax(Z[freeVertices])]
X[imin] = True
freeVertices = freeVertices[freeVertices != imin]
X[adjacencyList[imin]] = False
freeVertices = freeVertices[~np.isin(freeVertices, adjacencyList[imin])]
for i in freeVertices:
            Z[i] = vertexWeight[i] - np.sum(vertexWeight[np.intersect1d(freeVertices, adjacencyList[i])])
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.ops import composite as C
import mindspore.nn as nn
import mindspore.context as context
class NetSoftmax(nn.Cell):
def __init__(self):
super(NetSoftmax, self).__init__()
axis = -2
self.softmax1 = P.Softmax()
self.softmax2 = P.Softmax(axis)
def construct(self, x):
return self.softmax1(x), self.softmax2(x)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_softmax():
x = Tensor(np.array([[0.1, 0.3, 0.6, -0.3],
[0.2, -0.6, 0.8, 0.6],
[0.6, -1.2, 0.4, 0.6]]).astype(np.float32))
expect1 = np.ones(3)
expect2 = np.ones(4)
error1 = expect1 * 1.0e-6
error2 = expect2 * 1.0e-6
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
Softmax = NetSoftmax()
output = Softmax(x)
outputSum1 = output[0].asnumpy().sum(axis=1)
outputSum2 = output[1].asnumpy().sum(axis=0)
diff1 = np.abs(outputSum1 - expect1)
diff2 = np.abs(outputSum2 - expect2)
assert np.all(diff1 < error1)
    assert np.all(diff2 < error2)
"""
fitting.py
Created by <NAME> on 2017-05-19.
"""
import os
import glob
import inspect
from collections import OrderedDict
import numpy as np
import astropy.io.fits as pyfits
import astropy.units as u
from astropy.cosmology import Planck15
import astropy.constants as const
from . import utils
#from .model import BeamCutout
from .utils import GRISM_COLORS
# Minimum redshift where IGM is applied
IGM_MINZ = 3.4 # blue edge of G800L
# Default parameters for drizzled line map
PLINE = {'kernel': 'point', 'pixfrac': 0.2, 'pixscale': 0.1, 'size': 8, 'wcs': None}
# IGM from eazy-py
try:
import eazy.igm
IGM = eazy.igm.Inoue14()
except:
IGM = None
def run_all_parallel(id, get_output_data=False, **kwargs):
import numpy as np
from grizli.fitting import run_all
from grizli import multifit
import time
import traceback
t0 = time.time()
print('Run {0}'.format(id))
args = np.load('fit_args.npy')[0]
args['verbose'] = False
for k in kwargs:
args[k] = kwargs[k]
fp = open('{0}_{1:05d}.log_par'.format(args['group_name'], id),'w')
fp.write('{0}_{1:05d}: {2}\n'.format(args['group_name'], id, time.ctime()))
fp.close()
try:
#args['zr'] = [0.7, 1.0]
#mb = multifit.MultiBeam('j100025+021651_{0:05d}.beams.fits'.format(id))
out = run_all(id, **args)
if get_output_data:
return out
status=1
except:
status=-1
trace = traceback.format_exc(limit=2)#, file=fp)
if args['verbose']:
print(trace)
t1 = time.time()
return id, status, t1-t0
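# Usage sketch (illustrative only): run_all_parallel is typically mapped over a
# list of object IDs, e.g. with multiprocessing. It expects a 'fit_args.npy'
# file in the working directory, written beforehand by the grizli pipeline; the
# IDs and pool size below are placeholders.
if __name__ == '__main__':
    import multiprocessing as mp
    demo_ids = [42, 137, 2048]
    with mp.Pool(processes=4) as pool:
        results = pool.map(run_all_parallel, demo_ids)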
def run_all(id, t0=None, t1=None, fwhm=1200, zr=[0.65, 1.6], dz=[0.004, 0.0002], fitter='nnls', group_name='grism', fit_stacks=True, only_stacks=False, prior=None, fcontam=0.2, pline=PLINE, mask_sn_limit=3, fit_only_beams=False, fit_beams=True, root='*', fit_trace_shift=False, phot=None, phot_obj=None, verbose=True, scale_photometry=False, show_beams=True, scale_on_stacked_1d=True, overlap_threshold=5, MW_EBV=0., sys_err=0.03, get_dict=False, bad_pa_threshold=1.6, units1d='flam', redshift_only=False, line_size=1.6, use_psf=False, get_line_width=False, sed_args={'bin':1, 'xlim':[0.3, 9]}, get_ir_psfs=True, min_mask=0.01, min_sens=0.08, **kwargs):
"""Run the full procedure
1) Load MultiBeam and stack files
2) ... tbd
fwhm=1200; zr=[0.65, 1.6]; dz=[0.004, 0.0002]; group_name='grism'; fit_stacks=True; prior=None; fcontam=0.2; mask_sn_limit=3; fit_beams=True; root=''
"""
import glob
import grizli.multifit
from grizli.stack import StackFitter
from grizli.multifit import MultiBeam
if get_dict:
frame = inspect.currentframe()
args = inspect.getargvalues(frame).locals
for k in ['id', 'get_dict', 'frame', 'glob', 'grizli', 'StackFitter', 'MultiBeam']:
if k in args:
args.pop(k)
return args
mb_files = glob.glob('{0}_{1:05d}.beams.fits'.format(root, id))
st_files = glob.glob('{0}_{1:05d}.stack.fits'.format(root, id))
if not only_stacks:
mb = MultiBeam(mb_files, fcontam=fcontam, group_name=group_name, MW_EBV=MW_EBV, sys_err=sys_err, verbose=verbose, psf=use_psf, min_mask=min_mask, min_sens=min_sens)
# Check for PAs with unflagged contamination or otherwise discrepant
# fit
out = mb.check_for_bad_PAs(chi2_threshold=bad_pa_threshold,
poly_order=1, reinit=True,
fit_background=True)
fit_log, keep_dict, has_bad = out
if has_bad:
if verbose:
print('\nHas bad PA! Final list: {0}\n{1}'.format(keep_dict,
fit_log))
hdu, fig = mb.drizzle_grisms_and_PAs(fcontam=0.5, flambda=False, kernel='point', size=32)
fig.savefig('{0}_{1:05d}.fix.stack.png'.format(group_name, id))
good_PAs = []
for k in keep_dict:
good_PAs.extend(keep_dict[k])
else:
good_PAs = None # All good
else:
good_PAs = None # All good
redshift_only=True # can't drizzle line maps from stacks
if fit_only_beams:
st = None
else:
st = StackFitter(st_files, fit_stacks=fit_stacks, group_name=group_name, fcontam=fcontam, overlap_threshold=overlap_threshold, MW_EBV=MW_EBV, verbose=verbose, sys_err=sys_err, PAs=good_PAs, chi2_threshold=bad_pa_threshold)
st.initialize_masked_arrays()
if only_stacks:
mb = st
if not only_stacks:
if fit_trace_shift:
b = mb.beams[0]
b.compute_model()
sn_lim = fit_trace_shift*1
if (np.max((b.model/b.grism['ERR'])[b.fit_mask.reshape(b.sh)]) > sn_lim) | (sn_lim > 100):
shift, _ = mb.fit_trace_shift(tol=1.e-3, verbose=verbose,
split_groups=True)
mb.initialize_masked_arrays()
## Get photometry from phot_obj
if (phot is None) & (phot_obj is not None):
phot_i, ii, dd = phot_obj.get_phot_dict(mb.ra, mb.dec)
if dd < 0.5*u.arcsec:
phot = phot_i
if phot is not None:
if phot == 'vizier':
### Get photometry from Vizier catalogs
vizier_catalog = list(utils.VIZIER_BANDS.keys())
phot = utils.get_Vizier_photometry(mb.ra, mb.dec, verbose=verbose,
vizier_catalog=vizier_catalog)
if phot is not None:
zgrid = utils.log_zgrid(zr=zr, dz=0.005)
phot['tempfilt'] = utils.generate_tempfilt(t0,
phot['filters'],
zgrid=zgrid,
MW_EBV=MW_EBV)
if phot is not None:
if st is not None:
st.set_photometry(**phot, min_err=sys_err)
mb.set_photometry(**phot, min_err=sys_err)
if t0 is None:
t0 = utils.load_templates(line_complexes=True, fsps_templates=True, fwhm=fwhm)
if t1 is None:
t1 = utils.load_templates(line_complexes=False, fsps_templates=True, fwhm=fwhm)
# Fit on stacked spectra or individual beams
if fit_only_beams:
fit_obj = mb
else:
fit_obj = st
### Do scaling now with direct spectrum function
if (scale_photometry > 0) & (phot is not None):
try:
scl = mb.scale_to_photometry(z=0, method='lm', templates=t0, order=scale_photometry*1-1)
except:
scl = [10.]
if hasattr(scl,'status'):
if scl.status > 0:
print('scale_to_photometry: [{0}]'.format(', '.join(['{0:.2f}'.format(x_i) for x_i in scl.x])))
mb.pscale = scl.x
if st is not None:
st.pscale = scl.x
# First pass
fit = fit_obj.xfit_redshift(templates=t0, zr=zr, dz=dz, prior=prior, fitter=fitter, verbose=verbose)
fit_hdu = pyfits.table_to_hdu(fit)
fit_hdu.header['EXTNAME'] = 'ZFIT_STACK'
if hasattr(fit_obj, 'pscale'):
fit_hdu.header['PSCALEN'] = (len(fit_obj.pscale)-1, 'PSCALE order')
for i, p in enumerate(fit_obj.pscale):
fit_hdu.header['PSCALE{0}'.format(i)] = (p, 'PSCALE parameter {0}'.format(i))
# Add photometry information
if (fit_obj.Nphot > 0) & hasattr(fit_obj, 'photom_filters'):
h = fit_hdu.header
h['NPHOT'] = fit_obj.Nphot, 'Number of photometry filters'
h['PHOTSRC'] = fit_obj.photom_source, 'Source of the photometry'
for i in range(len(fit_obj.photom_filters)):
h['PHOTN{0:03d}'.format(i)] = fit_obj.photom_filters[i].name.split()[0], 'Filter {0} name'.format(i)
h['PHOTL{0:03d}'.format(i)] = fit_obj.photom_pivot[i], 'Filter {0} pivot wavelength'.format(i)
h['PHOTF{0:03d}'.format(i)] = fit_obj.photom_flam[i], 'Filter {0} flux flam'.format(i)
h['PHOTE{0:03d}'.format(i)] = fit_obj.photom_eflam[i], 'Filter {0} err flam'.format(i)
# # Second pass if rescaling spectrum to photometry
# if scale_photometry:
# scl = mb.scale_to_photometry(z=fit.meta['z_map'][0], method='lm', templates=t0, order=scale_photometry*1-1)
# if scl.status > 0:
# mb.pscale = scl.x
# if st is not None:
# st.pscale = scl.x
#
# fit = fit_obj.xfit_redshift(templates=t0, zr=zr, dz=dz, prior=prior, fitter=fitter, verbose=verbose)
# fit_hdu = pyfits.table_to_hdu(fit)
# fit_hdu.header['EXTNAME'] = 'ZFIT_STACK'
# Zoom-in fit with individual beams
if fit_beams:
#z0 = fit.meta['Z50'][0]
z0 = fit.meta['z_map'][0]
#width = np.maximum(3*fit.meta['ZWIDTH1'][0], 3*0.001*(1+z0))
width = 20*0.001*(1+z0)
mb_zr = z0 + width*np.array([-1,1])
mb_fit = mb.xfit_redshift(templates=t0, zr=mb_zr, dz=[0.001, 0.0002], prior=prior, fitter=fitter, verbose=verbose)
mb_fit_hdu = pyfits.table_to_hdu(mb_fit)
mb_fit_hdu.header['EXTNAME'] = 'ZFIT_BEAM'
else:
mb_fit = fit
#### Get best-fit template
tfit = mb.template_at_z(z=mb_fit.meta['z_map'][0], templates=t1, fit_background=True, fitter=fitter)
# Redrizzle? ... testing
if False:
hdu, fig = mb.drizzle_grisms_and_PAs(fcontam=fcontam,
flambda=False,
size=48, scale=1.,
kernel='point', pixfrac=0.1,
zfit=tfit)
# Fit covariance
cov_hdu = pyfits.ImageHDU(data=tfit['covar'], name='COVAR')
Next = mb_fit.meta['N']
cov_hdu.header['N'] = Next
# Line EWs & fluxes
coeffs_clip = tfit['coeffs'][mb.N:]
covar_clip = tfit['covar'][mb.N:,mb.N:]
lineEW = utils.compute_equivalent_widths(t1, coeffs_clip, covar_clip, max_R=5000, Ndraw=1000, z=tfit['z'])
for ik, key in enumerate(lineEW):
for j in range(3):
if not np.isfinite(lineEW[key][j]):
lineEW[key][j] = -1.e30
cov_hdu.header['FLUX_{0:03d}'.format(ik)] = tfit['cfit'][key][0], '{0} line flux; erg / (s cm2)'.format(key.strip('line '))
cov_hdu.header['ERR_{0:03d}'.format(ik)] = tfit['cfit'][key][1], '{0} line uncertainty; erg / (s cm2)'.format(key.strip('line '))
cov_hdu.header['EW16_{0:03d}'.format(ik)] = lineEW[key][0], 'Rest-frame {0} EW, 16th percentile; Angstrom'.format(key.strip('line '))
cov_hdu.header['EW50_{0:03d}'.format(ik)] = lineEW[key][1], 'Rest-frame {0} EW, 50th percentile; Angstrom'.format(key.strip('line '))
cov_hdu.header['EW84_{0:03d}'.format(ik)] = lineEW[key][2], 'Rest-frame {0} EW, 84th percentile; Angstrom'.format(key.strip('line '))
cov_hdu.header['EWHW_{0:03d}'.format(ik)] = (lineEW[key][2]-lineEW[key][0])/2, 'Rest-frame {0} EW, 1-sigma half-width; Angstrom'.format(key.strip('line '))
# Velocity width
if get_line_width:
if phot is not None:
mb.unset_photometry()
vel_width_res = mb.fit_line_width(z0=tfit['z'], bl=1.2, nl=1.2)
if verbose:
print('Velocity width: BL/NL = {0:.0f}/{1:.0f}, z={2:.4f}'.format(vel_width_res[0]*1000, vel_width_res[1]*1000, vel_width_res[2]))
fit_hdu.header['VEL_BL'] = vel_width_res[0]*1000, 'Broad line FWHM'
fit_hdu.header['VEL_NL'] = vel_width_res[1]*1000, 'Narrow line FWHM'
fit_hdu.header['VEL_Z'] = vel_width_res[2], 'Line width, best redshift'
        fit_hdu.header['VEL_NFEV'] = vel_width_res[3], 'Line width, number of function evaluations'
        fit_hdu.header['VEL_FLAG'] = vel_width_res[4], 'Line width, fit flag'
if phot is not None:
mb.set_photometry(**phot)
# Best-fit template itself
tfit_sp = utils.GTable()
for ik, key in enumerate(tfit['cfit']):
for save in [tfit_sp.meta]:
save['CVAL{0:03d}'.format(ik)] = tfit['cfit'][key][0], 'Coefficient for {0}'.format(key)
save['CERR{0:03d}'.format(ik)] = tfit['cfit'][key][1], 'Uncertainty for {0}'.format(key)
save['CNAME{0:03d}'.format(ik)] = key, 'Template name'
tfit_sp['wave'] = tfit['cont1d'].wave
tfit_sp['continuum'] = tfit['cont1d'].flux
tfit_sp['full'] = tfit['line1d'].flux
tfit_sp['wave'].unit = tfit['cont1d'].waveunits
tfit_sp['continuum'].unit = tfit['cont1d'].fluxunits
tfit_sp['full'].unit = tfit['line1d'].fluxunits
tfit_hdu = pyfits.table_to_hdu(tfit_sp)
tfit_hdu.header['EXTNAME'] = 'TEMPL'
# Make the plot
fig = mb.xmake_fit_plot(mb_fit, tfit, show_beams=show_beams, scale_on_stacked_1d=scale_on_stacked_1d)
# Add prior
if prior is not None:
fig.axes[0].plot(prior[0], np.log10(prior[1]), color='#1f77b4', alpha=0.5)
# Add stack fit to the existing plot
fig.axes[0].plot(fit['zgrid'], np.log10(fit['pdf']), color='0.5', alpha=0.5)
fig.axes[0].set_xlim(fit['zgrid'].min(), fit['zgrid'].max())
if phot is not None:
fig.axes[1].errorbar(mb.photom_pivot/1.e4, mb.photom_flam/1.e-19, mb.photom_eflam/1.e-19, marker='s', alpha=0.5, color='k', linestyle='None')
#fig.axes[1].plot(tfit['line1d'].wave/1.e4, tfit['line1d'].flux/1.e-19, color='k', alpha=0.2, zorder=100)
# Save the figure
fig.savefig('{0}_{1:05d}.full.png'.format(group_name, id))
if redshift_only:
return mb, st, fit, tfit, None
# Make the line maps
if pline is None:
pzfit, pspec2, pline = grizli.multifit.get_redshift_fit_defaults()
line_hdu = mb.drizzle_fit_lines(tfit, pline, force_line=utils.DEFAULT_LINE_LIST, save_fits=False, mask_lines=True, mask_sn_limit=mask_sn_limit, verbose=verbose, get_ir_psfs=get_ir_psfs)
# Add beam exposure times
exptime = mb.compute_exptime()
for k in exptime:
line_hdu[0].header['T_{0}'.format(k)] = (exptime[k], 'Total exposure time [s]')
line_hdu.insert(1, fit_hdu)
line_hdu.insert(2, cov_hdu)
if fit_beams:
line_hdu.insert(2, mb_fit_hdu)
line_hdu.insert(3, tfit_hdu)
line_hdu.writeto('{0}_{1:05d}.full.fits'.format(group_name, id), clobber=True, output_verify='fix')
# 1D spectrum
oned_hdul = mb.oned_spectrum_to_hdu(tfit=tfit, bin=1, outputfile='{0}_{1:05d}.1D.fits'.format(group_name, id))#, units=units1d)
######
# Show the drizzled lines and direct image cutout, which are
# extensions `DSCI`, `LINE`, etc.
s, si = 1, line_size
s = 4.e-19/np.max([beam.beam.total_flux for beam in mb.beams])
s = np.clip(s, 0.25, 4)
full_line_list = ['Lya', 'OII', 'Hb', 'OIII', 'Ha', 'SII', 'SIII']
fig = show_drizzled_lines(line_hdu, size_arcsec=si, cmap='plasma_r', scale=s, dscale=s, full_line_list=full_line_list)
fig.savefig('{0}_{1:05d}.line.png'.format(group_name, id))
if phot is not None:
out = mb, st, fit, tfit, line_hdu
if 'pz' in phot:
full_sed_plot(mb, tfit, zfit=fit, photometry_pz=phot['pz'], **sed_args)
else:
full_sed_plot(mb, tfit, zfit=fit, **sed_args)
return mb, st, fit, tfit, line_hdu
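# Usage sketch for `run_all` (hypothetical field root and object ID; assumes
# the '{root}_{id:05d}.beams.fits' / '.stack.fits' products already exist in
# the working directory):
#
#   >>> from grizli import fitting
#   >>> out = fitting.run_all(1234, root='j123456+621500',
#   ...                       group_name='j123456+621500',
#   ...                       zr=[0.1, 3.0], fit_beams=True, verbose=True)
#   >>> mb, st, fit, tfit, line_hdu = out
#   >>> fit.meta['z_map'][0]   # redshift at the maximum of p(z)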
###################################
def full_sed_plot(mb, tfit, zfit=None, bin=1, minor=0.1, save='png', sed_resolution=180, photometry_pz=None, zspec=None, spectrum_steps=False, xlim=[0.3, 9], **kwargs):
"""
Make a separate plot showing photometry and the spectrum
"""
#import seaborn as sns
import prospect.utils.smoothing
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
import matplotlib.gridspec as gridspec
#mpl_colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
mpl_colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
# sns_colors = colors = sns.color_palette("cubehelix", 8)
### seaborn cubehelix colors
sns_colors = colors = [(0.1036, 0.094, 0.206),
(0.0825, 0.272, 0.307),
(0.1700, 0.436, 0.223),
(0.4587, 0.480, 0.199),
(0.7576, 0.476, 0.437),
(0.8299, 0.563, 0.776),
(0.7638, 0.757, 0.949),
(0.8106, 0.921, 0.937)]
# Best-fit
#mb = out[0]
#zfit = out[2]
#tfit = out[3]
t1 = tfit['templates']
best_model = mb.get_flat_model([tfit['line1d'].wave, tfit['line1d'].flux])
flat_model = mb.get_flat_model([tfit['line1d'].wave, tfit['line1d'].flux*0+1])
bg = mb.get_flat_background(tfit['coeffs'])
sp = mb.optimal_extract(mb.scif[mb.fit_mask][:-mb.Nphot] - bg, bin=bin)#['G141']
spm = mb.optimal_extract(best_model, bin=bin)#['G141']
spf = mb.optimal_extract(flat_model, bin=bin)#['G141']
# Photometry
A_phot = mb._interpolate_photometry(z=tfit['z'], templates=t1)
A_model = A_phot.T.dot(tfit['coeffs'])
photom_mask = mb.photom_eflam > -98
##########
# Figure
if True:
if zfit is not None:
fig = plt.figure(figsize=[11, 9./3])
gs = gridspec.GridSpec(1,3, width_ratios=[1,1.5,1])
ax1 = fig.add_subplot(gs[0])
ax2 = fig.add_subplot(gs[1])
ax3 = fig.add_subplot(gs[2])
else:
fig = plt.figure(figsize=[9, 9./3])
gs = gridspec.GridSpec(1,2, width_ratios=[1,1.5])
ax1 = fig.add_subplot(gs[0])
ax2 = fig.add_subplot(gs[1])
else:
gs = None
fig = plt.figure(figsize=[9, 9./3])
ax1 = fig.add_subplot(131)
ax2 = fig.add_subplot(132)
ax3 = fig.add_subplot(133)
# Photometry SED
ax1.errorbar(np.log10(mb.photom_pivot[photom_mask]/1.e4), mb.photom_flam[photom_mask]/1.e-19, mb.photom_eflam[photom_mask]/1.e-19, color='k', alpha=0.6, marker='s', linestyle='None', zorder=30)
sm = prospect.utils.smoothing.smoothspec(tfit['line1d'].wave, tfit['line1d'].flux, resolution=sed_resolution, smoothtype='R') #nsigma=10, inres=10)
ax1.scatter(np.log10(mb.photom_pivot[photom_mask]/1.e4), A_model/1.e-19, color='w', marker='s', s=80, zorder=10)
ax1.scatter(np.log10(mb.photom_pivot[photom_mask]/1.e4), A_model/1.e-19, color=sns_colors[4], marker='s', s=20, zorder=11)
yl1 = ax1.get_ylim()
ax1.plot(np.log10(tfit['line1d'].wave/1.e4), sm/1.e-19, color=sns_colors[4], linewidth=1, zorder=0)
#ax1.grid()
ax1.set_xlabel(r'$\lambda$ / $\mu$m')
ax2.set_xlabel(r'$\lambda$ / $\mu$m')
# Spectrum
ymax, ymin = -1e30, 1e30
for g in sp:
sn = sp[g]['flux']/sp[g]['err']
clip = sn > 3
clip = spf[g]['flux'] > 0.2*spf[g]['flux'].max()
try:
scale = mb.compute_scale_array(mb.pscale, sp[g]['wave'])
except:
scale = 1
ax2.errorbar(sp[g]['wave'][clip]/1.e4, (sp[g]['flux']/spf[g]['flux']/scale)[clip]/1.e-19, (sp[g]['err']/spf[g]['flux']/scale)[clip]/1.e-19, marker='.', color='k', alpha=0.5, linestyle='None', elinewidth=0.5, zorder=11)
if spectrum_steps:
ax2.plot(sp[g]['wave']/1.e4, spm[g]['flux']/spf[g]['flux']/1.e-19, color=sns_colors[4], linewidth=2, alpha=0.8, zorder=10, linestyle='steps-mid')
else:
ax2.plot(sp[g]['wave']/1.e4, spm[g]['flux']/spf[g]['flux']/1.e-19, color=sns_colors[4], linewidth=2, alpha=0.8, zorder=10, marker='.')
ymax = np.maximum(ymax, (spm[g]['flux']/spf[g]['flux']/1.e-19)[clip].max())
ymin = np.minimum(ymin, (spm[g]['flux']/spf[g]['flux']/1.e-19)[clip].min())
ax1.errorbar(np.log10(sp[g]['wave'][clip]/1.e4), (sp[g]['flux']/spf[g]['flux']/scale)[clip]/1.e-19, (sp[g]['err']/spf[g]['flux']/scale)[clip]/1.e-19, marker='.', color='k', alpha=0.2, linestyle='None', elinewidth=0.5, zorder=-100)
xl, yl = ax2.get_xlim(), ax2.get_ylim()
yl = (ymin-0.3*ymax, 1.3*ymax)
# SED x range
if xlim is None:
okphot = (mb.photom_eflam > 0)
xlim = [np.minimum(xl[0]*0.7, 0.7*mb.photom_pivot[okphot].min()/1.e4), np.maximum(xl[1]/0.7, mb.photom_pivot[okphot].max()/1.e4/0.7)]
ax1.set_xlim(np.log10(xlim[0]), np.log10(xlim[1]))
ticks = np.array([0.5, 1, 2, 4, 8])
ticks = ticks[(ticks >= xlim[0]) & (ticks <= xlim[1])]
ax1.set_xticks(np.log10(ticks))
ax1.set_xticklabels(ticks)
# Back to spectrum
ax2.scatter((mb.photom_pivot[photom_mask]/1.e4), A_model/1.e-19, color='w', marker='s', s=80, zorder=11)
ax2.scatter((mb.photom_pivot[photom_mask]/1.e4), A_model/1.e-19, color=sns_colors[4], marker='s', s=20, zorder=12)
ax2.errorbar(mb.photom_pivot[photom_mask]/1.e4, mb.photom_flam[photom_mask]/1.e-19, mb.photom_eflam[photom_mask]/1.e-19, color='k', alpha=0.6, marker='s', linestyle='None', zorder=20)
ax2.set_xlim(xl); ax2.set_ylim(yl)
ax2.set_yticklabels([])
#ax2.set_xticks(np.arange(1.1, 1.8, 0.1))
#ax2.set_xticklabels([1.1, '', 1.3, '', 1.5, '', 1.7])
ax2.xaxis.set_minor_locator(MultipleLocator(minor))
ax2.xaxis.set_major_locator(MultipleLocator(minor*2))
# Show spectrum range on SED panel
xb, yb = np.array([0, 1, 1, 0, 0]), np.array([0, 0, 1, 1, 0])
ax1.plot(np.log10(xl[0]+xb*(xl[1]-xl[0])), yl[0]+yb*(yl[1]-yl[0]), linestyle=':', color='k', alpha=0.4)
ymax = np.maximum(yl1[1], yl[1]+0.02*(yl[1]-yl[0]))
ax1.set_ylim(-0.1*ymax, ymax)
tick_diff = np.diff(ax1.get_yticks())[0]
ax2.yaxis.set_major_locator(MultipleLocator(tick_diff))
#ax2.set_yticklabels([])
##########
# P(z)
if zfit is not None:
if photometry_pz is not None:
ax3.plot(photometry_pz[0], np.log10(photometry_pz[1]), color=mpl_colors[0])
ax3.plot(zfit['zgrid'], np.log10(zfit['pdf']), color=sns_colors[0])
ax3.fill_between(zfit['zgrid'], np.log10(zfit['pdf']), np.log10(zfit['pdf'])*0-100, color=sns_colors[0], alpha=0.3)
ax3.set_xlim(zfit['zgrid'].min(), zfit['zgrid'].max())
ax3.set_ylim(-3, 2.9) #np.log10(zfit['pdf']).max())
ax3.set_ylabel(r'log $p(z)$')
ax3.set_xlabel(r'$z$')
ax1.set_ylabel(r'$f_\lambda$ / $10^{-19}$')
axt = ax2
axt.text(0.95, 0.95, r'$z_\mathrm{grism}$='+'{0:.3f}'.format(tfit['z']), ha='right', va='top', transform=axt.transAxes, color=sns_colors[0], size=10)#, backgroundcolor='w')
if zspec is not None:
axt.text(0.95, 0.89, r'$z_\mathrm{spec}$='+'{0:.3f}'.format(zspec), ha='right', va='top', transform=axt.transAxes, color='r', size=10)
if zfit is not None:
ax3.scatter(zspec, 2.7, color='r', marker='v', zorder=100)
axt.text(0.05, 0.95, '{0}: {1:>6d}'.format(mb.group_name, mb.id), ha='left', va='top', transform=axt.transAxes, color='k', size=10)#, backgroundcolor='w')
#axt.text(0.05, 0.89, '{0:>6d}'.format(mb.id), ha='left', va='top', transform=axt.transAxes, color='k', size=10)#, backgroundcolor='w')
    if gs is None:
        fig.tight_layout(pad=0.1)
else:
if zfit is not None:
fig.tight_layout(pad=0.1)
else:
fig.tight_layout(pad=0.5)
if save:
fig.savefig('{0}_{1:05d}.sed.{2}'.format(mb.group_name, mb.id, save))
return fig
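# Usage sketch for `full_sed_plot`, reusing the outputs of `run_all` above
# (object names are illustrative):
#
#   >>> mb, st, fit, tfit, line_hdu = out
#   >>> fig = full_sed_plot(mb, tfit, zfit=fit, bin=1, save='png')
#   >>> # writes '{group_name}_{id:05d}.sed.png' alongside the other products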
def make_summary_catalog(target='pg0117+213', sextractor='pg0117+213-f140w.cat', verbose=True, filter_bandpasses=[]):
import glob
import os
from collections import OrderedDict
import matplotlib.pyplot as plt
import astropy.units as u
import astropy.io.fits as pyfits
import numpy as np
import grizli
from grizli import utils
keys = OrderedDict()
keys['PRIMARY'] = ['ID','RA','DEC','NINPUT','REDSHIFT','T_G102', 'T_G141', 'T_G800L', 'NUMLINES','HASLINES']
keys['ZFIT_STACK'] = ['CHI2POLY','CHI2SPL','SPLF01','SPLE01','SPLF02','SPLE02','SPLF03','SPLE03','SPLF04','SPLE04', 'DOF','CHIMIN','CHIMAX','BIC_POLY','BIC_SPL','BIC_TEMP','Z02', 'Z16', 'Z50', 'Z84', 'Z97', 'ZWIDTH1', 'ZWIDTH2', 'Z_MAP', 'Z_RISK', 'MIN_RISK', 'VEL_BL','VEL_NL','VEL_Z','VEL_NFEV','VEL_FLAG']
keys['ZFIT_BEAM'] = ['CHI2POLY','CHI2SPL','SPLF01','SPLE01','SPLF02','SPLE02','SPLF03','SPLE03','SPLF04','SPLE04', 'DOF','CHIMIN','CHIMAX','BIC_POLY','BIC_SPL','BIC_TEMP','Z02', 'Z16', 'Z50', 'Z84', 'Z97', 'ZWIDTH1', 'ZWIDTH2', 'Z_MAP', 'Z_RISK', 'MIN_RISK', 'VEL_BL','VEL_NL','VEL_Z','VEL_NFEV','VEL_FLAG']
keys['COVAR'] = ' '.join(['FLUX_{0:03d} ERR_{0:03d} EW50_{0:03d} EWHW_{0:03d}'.format(i) for i in range(24)]).split()
lines = []
pdf_max = []
files=glob.glob('{0}*full.fits'.format(target))
files.sort()
template_mags = []
sps_params = []
for file in files:
print(utils.NO_NEWLINE+file)
line = []
full = pyfits.open(file)
if 'DSCI' not in full:
continue
tab = utils.GTable.read(full['ZFIT_STACK'])
pdf_max.append(tab['pdf'].max())
for ext in keys:
if ext not in full:
for k in keys[ext]:
line.append(np.nan)
continue
h = full[ext].header
for k in keys[ext]:
if k in h:
line.append(h[k])
else:
line.append(np.nan)
# SPS
try:
sps = compute_sps_params(full)
except:
sps = {'Lv':-1*u.solLum, 'MLv':-1*u.solMass/u.solLum, 'MLv_rms':-1*u.solMass/u.solLum, 'SFRv':-1*u.solMass/u.year, 'SFRv_rms':-1*u.solMass/u.year, 'templ':-1}
sps_params.append(sps)
lines.append(line)
# Integrate best-fit template through filter bandpasses
if filter_bandpasses:
tfit = utils.GTable.gread(full['TEMPL'])
sp = utils.SpectrumTemplate(wave=tfit['wave'], flux=tfit['full'])
mags = [sp.integrate_filter(bp, abmag=True)
for bp in filter_bandpasses]
template_mags.append(mags)
columns = []
for ext in keys:
if ext == 'ZFIT_BEAM':
columns.extend(['beam_{0}'.format(k) for k in keys[ext]])
else:
columns.extend(keys[ext])
info = utils.GTable(rows=lines, names=columns)
info['PDF_MAX'] = pdf_max
root_col = utils.GTable.Column(name='root', data=[target]*len(info))
info.add_column(root_col, index=0)
for k in ['Lv','MLv','MLv_rms','SFRv','SFRv_rms']:
datak = [sps[k].value for sps in sps_params]
info[k] = datak
info[k].unit = sps[k].unit
info['sSFR'] = info['SFRv']/info['MLv']
info['stellar_mass'] = info['Lv']*info['MLv']
info['Lv'].format = '.1e'
info['MLv'].format = '.2f'
info['MLv_rms'].format = '.2f'
info['SFRv'].format = '.1f'
info['SFRv_rms'].format = '.1f'
info['sSFR'].format = '.1e'
info['stellar_mass'].format = '.1e'
if filter_bandpasses:
arr = np.array(template_mags)
for i, bp in enumerate(filter_bandpasses):
info['mag_{0}'.format(bp.name)] = arr[:,i]
info['mag_{0}'.format(bp.name)].format = '.3f'
for c in info.colnames:
info.rename_column(c, c.lower())
# Emission line names
files=glob.glob('{0}*full.fits'.format(target))
im = pyfits.open(files[0])
h = im['COVAR'].header
for i in range(24):
key = 'FLUX_{0:03d}'.format(i)
if key not in h:
continue
line = h.comments[key].split()[0]
for root in ['flux','err','ew50','ewhw']:
col = '{0}_{1}'.format(root, line)
info.rename_column('{0}_{1:03d}'.format(root, i), col)
            info[col].format = '.1f'
info['sn_{0}'.format(line)] = info['flux_'+line]/info['err_'+line]
info['sn_{0}'.format(line)][info['err_'+line] == 0] = -99
#info['sn_{0}'.format(line)].format = '.1f'
info['chinu'] = info['chimin']/info['dof']
info['chinu'].format = '.2f'
info['bic_diff'] = info['bic_poly'] - info['bic_temp']
info['bic_diff'].format = '.1f'
info['log_risk'] = np.log10(info['min_risk'])
info['log_risk'].format = '.2f'
info['log_pdf_max'] = np.log10(info['pdf_max'])
info['log_pdf_max'].format = '.2f'
info['zq'] = info['log_risk'] - info['log_pdf_max']
info['zq'].format = '.2f'
info['beam_chinu'] = info['beam_chimin']/info['beam_dof']
info['beam_chinu'].format = '.2f'
info['beam_bic_diff'] = info['beam_bic_poly'] - info['beam_bic_temp']
info['beam_bic_diff'].format = '.1f'
info['beam_log_risk'] = np.log10(info['beam_min_risk'])
info['beam_log_risk'].format = '.2f'
# ID with link to CDS
idx = ['<a href="http://vizier.u-strasbg.fr/viz-bin/VizieR?-c={0:.6f}+{1:.6f}&-c.rs=2">{2}</a>'.format(info['ra'][i], info['dec'][i], info['id'][i]) for i in range(len(info))]
info['idx'] = idx
### PNG columns
for ext in ['stack','full','line']:
png = ['{0}_{1:05d}.{2}.png'.format(target, id, ext) for id in info['id']]
info['png_{0}'.format(ext)] = ['<a href={0}><img src={0} height=200></a>'.format(p) for p in png]
### Column formats
for col in info.colnames:
if col.strip('beam_').startswith('z'):
info[col].format = '.4f'
if col in ['ra','dec']:
info[col].format = '.6f'
### Sextractor catalog
if sextractor is None:
info.write('{0}.info.fits'.format(target), overwrite=True)
return info
#sextractor = glob.glob('{0}-f*cat'.format(target))[0]
try:
hcat = grizli.utils.GTable.gread(sextractor) #, format='ascii.sextractor')
except:
hcat = grizli.utils.GTable.gread(sextractor, sextractor=True)
for c in hcat.colnames:
hcat.rename_column(c, c.lower())
idx, dr = hcat.match_to_catalog_sky(info, self_radec=('x_world', 'y_world'), other_radec=None)
for c in hcat.colnames:
info.add_column(hcat[c][idx])
info.write('{0}.info.fits'.format(target), overwrite=True)
return info
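# Usage sketch for `make_summary_catalog` (hypothetical target root; expects
# '{target}*full.fits' files in the working directory and writes
# '{target}.info.fits'):
#
#   >>> info = make_summary_catalog(target='j123456+621500', sextractor=None)
#   >>> info['id', 'z_map', 'bic_diff', 'chinu'][:5]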
def compute_sps_params(full='j021820-051015_01276.full.fits', cosmology=Planck15):
import numpy as np
from astropy.io import fits as pyfits
from astropy.table import Table
import astropy.units as u
from grizli import utils
import pysynphot as S
if isinstance(full, str):
im = pyfits.open(full)
else:
im = full
h = im['TEMPL'].header
templ = Table(im['TEMPL'].data)
z = im['ZFIT_STACK'].header['Z_MAP']
    # Get coefficients
coeffs, keys, ix = [], [], []
count=0
for k in h:
if k.startswith('CNAME'):
if h[k].startswith('fsps'):
ix.append(count)
keys.append(h[k])
coeffs.append(h[k.replace('CNAME','CVAL')])
count += 1
cov = im['COVAR'].data[np.array(ix),:][:,np.array(ix)]
covd = cov.diagonal()
# Normalize to V band, fsps_QSF_12_v3
normV = np.array([3.75473763e-15, 2.73797790e-15, 1.89469588e-15,
1.32683449e-15, 9.16760812e-16, 2.43922395e-16, 4.76835746e-15,
3.55616962e-15, 2.43745972e-15, 1.61394625e-15, 1.05358710e-15,
5.23733297e-16])
coeffsV = np.array(coeffs)*normV
rmsV = np.sqrt(covd)*normV
rms_norm = rmsV/coeffsV.sum()
coeffs_norm = coeffsV/coeffsV.sum()
param_file = os.path.join(os.path.dirname(__file__), 'data/templates/fsps/fsps_QSF_12_v3.param.fits')
tab_temp = Table.read(param_file)
temp_MLv = tab_temp['mass']/tab_temp['Lv']
temp_SFRv = tab_temp['sfr']
mass_norm = (coeffs_norm*tab_temp['mass']).sum()*u.solMass
Lv_norm = (coeffs_norm*tab_temp['Lv']).sum()*u.solLum
MLv = mass_norm / Lv_norm
SFR_norm = (coeffs_norm*tab_temp['sfr']).sum()*u.solMass/u.yr
SFRv = SFR_norm / Lv_norm
mass_var = ((rms_norm*tab_temp['mass'])**2).sum()
Lv_var = ((rms_norm*tab_temp['Lv'])**2).sum()
SFR_var = ((rms_norm*tab_temp['sfr'])**2).sum()
MLv_var = MLv**2 * (mass_var/mass_norm.value**2 + Lv_var/Lv_norm.value**2)
MLv_rms = np.sqrt(MLv_var)
SFRv_var = SFRv**2 * (SFR_var/SFR_norm.value**2 + Lv_var/Lv_norm.value**2)
SFRv_rms = np.sqrt(SFRv_var)
vband = S.ObsBandpass('v')
vbandz = S.ArrayBandpass(vband.wave*(1+z), vband.throughput)
best_templ = utils.SpectrumTemplate(templ['wave'], templ['full'])
fnu = best_templ.integrate_filter(vbandz)*(u.erg/u.s/u.cm**2/u.Hz)
dL = cosmology.luminosity_distance(z).to(u.cm)
Lnu = fnu*4*np.pi*dL**2
pivotV = vbandz.pivot()*u.Angstrom
nuV = (const.c/pivotV).to(u.Hz)
Lv = (nuV*Lnu).to(u.L_sun)
mass = MLv*Lv
SFR = SFRv*Lv
sps = {'Lv':Lv, 'MLv':MLv, 'MLv_rms':MLv_rms, 'SFRv':SFRv, 'SFRv_rms':SFRv_rms, 'templ':best_templ}
return sps
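# Usage sketch for `compute_sps_params` (hypothetical filename):
#
#   >>> sps = compute_sps_params(full='j123456+621500_01234.full.fits')
#   >>> sps['MLv'], sps['SFRv']   # M/Lv [Msun/Lsun] and SFR/Lv [Msun/yr/Lsun]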
def _loss(dz, gamma=0.15):
"""Risk / Loss function, Tanaka et al. (https://arxiv.org/abs/1704.05988)
Parameters
----------
gamma : float
Returns
-------
loss : float
"""
return 1-1/(1+(dz/gamma)**2)
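# Worked example for `_loss`: the loss is ~0 for |dz| << gamma and saturates
# at 1 for |dz| >> gamma:
#
#   >>> import numpy as np
#   >>> _loss(np.array([0.0, 0.15, 1.5]), gamma=0.15)   # ~[0.0, 0.5, 0.99]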
def refit_beams(root='j012017+213343', append='x', id=708, keep_dict={'G141':[201, 291]}, poly_order=3, make_products=True, run_fit=True, **kwargs):
"""
Regenerate a MultiBeam object selecting only certiain PAs
Parameters
----------
root : str
Root of the "beams.fits" file to load.
append : str
String to append to the rootname of the updated products.
id : int
Object ID. The input filename is built like
>>> beams_file = '{0}_{1:05d}.beams.fits'.format(root, id)
    keep_dict : dict
Dictionary of the PAs/grisms to keep. (See the
`~grizli.multifit.MultiBeam.PA` attribute.)
poly_order : int
Order of the polynomial to fit.
make_products : bool
Make stacked spectra and diagnostic figures.
run_fit : bool
Run the redshift fit on the new products
kwargs : dict
Optional keywords passed to `~grizli.fitting.run_all_parallel`.
Returns
-------
mb : `~grizli.multifit.MultiBeam`
New beam object.
"""
import numpy as np
try:
from grizli import utils, fitting
except:
from . import utils, fitting
mb = MultiBeam('{0}_{1:05d}.beams.fits'.format(root, id), group_name=root)
keep_beams = []
for g in keep_dict:
if g not in mb.PA:
continue
for pa in keep_dict[g]:
if float(pa) in mb.PA[g]:
keep_beams.extend([mb.beams[i] for i in mb.PA[g][float(pa)]])
mb = MultiBeam(keep_beams, group_name=root+append)
mb.write_master_fits()
if not make_products:
return mb
wave = np.linspace(2000,2.5e4,100)
poly_templates = utils.polynomial_templates(wave, order=poly_order)
pfit = mb.template_at_z(z=0, templates=poly_templates, fit_background=True, fitter='lstsq', get_uncertainties=2)
try:
fig1 = mb.oned_figure(figsize=[5,3], tfit=pfit)
fig1.savefig('{0}_{1:05d}.1D.png'.format(root+append, id))
except:
pass
hdu, fig = mb.drizzle_grisms_and_PAs(fcontam=0.5, flambda=False, kernel='point', size=32, zfit=pfit)
fig.savefig('{0}_{1:05d}.stack.png'.format(root+append, id))
if run_fit:
fitting.run_all_parallel(id, group_name=root+append, root=root+'x', verbose=True, **kwargs)
return mb
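# Usage sketch for `refit_beams` (hypothetical root/ID/PAs): keep only the
# G141 exposures at PA = 201 and 291 deg and regenerate the products:
#
#   >>> mb_new = refit_beams(root='j123456+621500', append='x', id=1234,
#   ...                      keep_dict={'G141': [201, 291]}, run_fit=False)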
class GroupFitter(object):
"""Combine stack.StackFitter and MultiBeam fitting into a single object
Will have to match the attributes between the different objects, which
is already close.
"""
def _test(self):
print(self.Ngrism)
def _get_slices(self, masked=False):
"""Precompute array slices for how the individual components map into the single combined arrays.
Parameters
----------
masked : bool
Return indices of masked arrays rather than simple slices of the
full beams.
Returns
-------
slices : list
List of slices.
"""
x = 0
slices = []
# use masked index arrays rather than slices
if masked:
for i in range(self.N):
beam = self.beams[i]
if beam.fit_mask.sum() == 0:
slices.append(None)
continue
idx = np.arange(beam.fit_mask.sum())+x
slices.append(idx) #[slice(x+0, x+beam.size)][beam.fit_mask])
x = idx[-1]+1
else:
for i in range(self.N):
slices.append(slice(x+0, x+self.beams[i].size))
x += self.beams[i].size
return slices
def _update_beam_mask(self):
"""
Compute versions of the masked arrays
"""
for ib, b in enumerate(self.beams):
b.fit_mask &= self.fit_mask[self.slices[ib]]
self.mslices = self._get_slices(masked=True)
self.Nmask = self.fit_mask.sum()
if hasattr(self, 'Nphot'):
self.Nspec = self.Nmask - self.Nphot
else:
self.Nspec = self.Nmask
def _init_background(self, masked=True):
"""Initialize the (flat) background model components
Parameters
----------
None :
Returns
-------
A_bg : `~np.ndarray`
"""
if masked:
A_bg = np.zeros((self.N, self.Nmask))
for i in range(self.N):
A_bg[i, self.mslices[i]] = 1.
else:
A_bg = np.zeros((self.N, self.Ntot))
for i in range(self.N):
A_bg[i, self.slices[i]] = 1.
return A_bg
def get_SDSS_photometry(self, bands='ugriz', templ=None, radius=2, SDSS_CATALOG='V/147/sdss12', get_panstarrs=False):
#from astroquery.sdss import SDSS
#from astropy import coordinates as coords
import astropy.units as u
from astroquery.vizier import Vizier
import astropy.coordinates as coord
import pysynphot as S
from eazy.templates import Template
from eazy.filters import FilterFile
from eazy.photoz import TemplateGrid
from eazy.filters import FilterDefinition
if get_panstarrs:
SDSS_CATALOG = 'II/349'
bands = 'grizy'
# pos = coords.SkyCoord(self.ra*u.deg, self.dec*u.deg, frame='icrs')
# fields = ['ra','dec','modelMag_r', 'modelMagErr_r']
# for b in bands:
# fields.extend(['modelFlux_'+b, 'modelFluxIvar_'+b])
#
# xid = SDSS.query_region(pos, photoobj_fields=fields, spectro=False, radius=radius*u.arcsec)
from astroquery.vizier import Vizier
import astropy.units as u
import astropy.coordinates as coord
coo = coord.SkyCoord(ra=self.ra, dec=self.dec, unit=(u.deg, u.deg),
frame='icrs')
v = Vizier(catalog=SDSS_CATALOG, columns=['+_r','*'])
try:
tab = v.query_region(coo, radius="{0}s".format(radius),
catalog=SDSS_CATALOG)[0]
ix = np.argmin(tab['rmag'])
tab = tab[ix]
except:
return None
filters = [FilterDefinition(bp=S.ObsBandpass('sdss,{0}'.format(b))) for b in bands]
pivot = {}
for ib, b in enumerate(bands):
pivot[b] = filters[ib].pivot()
#to_flam = 10**(-0.4*(22.5+48.6))*3.e18 # / pivot(Ang)**2
#flam = np.array([xid['modelFlux_{0}'.format(b)][0]*to_flam/pivot[b]**2 for b in bands])
#eflam = np.array([np.sqrt(1/xid['modelFluxIvar_{0}'.format(b)][0])*to_flam/pivot[b]**2 for b in bands])
to_flam = 10**(-0.4*(48.6))*3.e18 # / pivot(Ang)**2
flam = np.array([10**(-0.4*(tab[b+'mag']))*to_flam/pivot[b]**2 for ib, b in enumerate(bands)])
eflam = np.array([tab['e_{0}mag'.format(b)]*np.log(10)/2.5*flam[ib] for ib, b in enumerate(bands)])
phot = {'flam':flam, 'eflam':eflam, 'filters':filters, 'tempfilt':None}
if templ is None:
return phot
# Make fast SDSS template grid
templates = [Template(arrays=[templ[t].wave, templ[t].flux], name=t) for t in templ]
zgrid = utils.log_zgrid(zr=[0.01, 3.4], dz=0.005)
tempfilt = TemplateGrid(zgrid, templates, filters=filters, add_igm=True, galactic_ebv=0, Eb=0, n_proc=0)
#filters = [all_filters.filters[f-1] for f in [156,157,158,159,160]]
phot = {'flam':flam, 'eflam':eflam, 'filters':filters, 'tempfilt':tempfilt}
return phot
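    # Usage sketch for `get_SDSS_photometry` (requires network access to
    # Vizier; `templ` as returned by `utils.load_templates` is optional):
    #
    #   >>> phot = mb.get_SDSS_photometry(bands='ugriz', radius=2)
    #   >>> if phot is not None:
    #   ...     mb.set_photometry(**phot, min_err=0.02)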
### Vizier
def set_photometry(self, flam=[], eflam=[], filters=[], lc=None, force=False, tempfilt=None, min_err=0.02, TEF=None, pz=None, source='unknown'):
"""
Add photometry
"""
if (self.Nphot > 0) & (not force):
print('Photometry already set (Nphot={0})'.format(self.Nphot))
return True
okphot = (eflam > 0) & np.isfinite(eflam) & np.isfinite(flam)
self.Nphot = okphot.sum() #len(flam)
self.Nphotbands = len(eflam)
if self.Nphot == 0:
return True
if (len(flam) != len(eflam)) | (len(flam) != len(filters)):
print('flam/eflam/filters dimensions don\'t match')
return False
self.photom_flam = flam*1
self.photom_eflam = np.sqrt(eflam**2+(min_err*flam)**2)
self.photom_flam[~okphot] = -99
self.photom_eflam[~okphot] = -99
self.photom_filters = filters
self.photom_source = source
self.sivarf = np.hstack([self.sivarf, 1/self.photom_eflam])
self.weightf = np.hstack([self.weightf, np.ones_like(self.photom_eflam)])
self.fit_mask = np.hstack([self.fit_mask, okphot])
self.fit_mask &= self.weightf > 0
#self.flat_flam = np.hstack((self.flat_flam, self.photom_eflam*0.))
# Mask for just spectra
self.fit_mask_spec = self.fit_mask & True
self.fit_mask_spec[-self.Nphotbands:] = False
self.Nmask = self.fit_mask.sum()
self.Nspec = self.Nmask - self.Nphot
self.scif = np.hstack((self.scif, flam))
self.DoF = int((self.weightf*self.fit_mask).sum())
self.is_spec = np.isfinite(self.scif)
self.is_spec[-len(flam):] = False
self.photom_pivot = np.array([filter.pivot() for filter in filters])
self.wavef = np.hstack((self.wavef, self.photom_pivot))
# eazypy tempfilt for faster interpolation
self.tempfilt = tempfilt
self.TEF = TEF
def unset_photometry(self):
if self.Nphot == 0:
return True
Nbands = self.Nphotbands
self.sivarf = self.sivarf[:-Nbands]
self.weightf = self.weightf[:-Nbands]
#self.flat_flam = self.flat_flam[:-Nbands]
self.fit_mask = self.fit_mask[:-Nbands]
self.fit_mask &= self.weightf > 0
self.fit_mask_spec = self.fit_mask & True
self.scif = self.scif[:-Nbands]
self.wavef = self.wavef[:-Nbands]
self.DoF = int((self.weightf*self.fit_mask).sum())
self.is_spec = 1
self.Nphot = 0
self.Nphotbands = 0
self.Nmask = self.fit_mask.sum()
self.Nspec = self.Nmask - self.Nphot
self.tempfilt = None
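    # Usage sketch for the photometry helpers: `set_photometry` appends the
    # photometric points to the fitting arrays and `unset_photometry` removes
    # them again (`flam`/`eflam` are f-lambda cgs arrays, `filters` a list of
    # filter objects with a `pivot()` method; names are illustrative):
    #
    #   >>> mb.set_photometry(flam=flam, eflam=eflam, filters=filters,
    #   ...                   min_err=0.02, source='my_catalog')
    #   >>> mb.Nphot, mb.Nphotbands
    #   >>> mb.unset_photometry()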
def _interpolate_photometry(self, z=0., templates=[]):
"""
Interpolate templates through photometric filters
xx: TBD better handling of emission line templates and use eazpy tempfilt
object for huge speedup
"""
NTEMP = len(templates)
A_phot = np.zeros((NTEMP+self.N, len(self.photom_flam))) #self.Nphot))
mask = self.photom_eflam > 0
if (self.tempfilt is not None):
if (self.tempfilt.NTEMP == NTEMP):
#A_spl = self.tempfilt(z)
A_phot[self.N:,:] = self.tempfilt(z)
A_phot *= 3.e18/self.photom_pivot**2*(1+z)
A_phot[~np.isfinite(A_phot)] = 0
return A_phot[:,mask]
for it, key in enumerate(templates):
#print(key)
tz = templates[key].zscale(z, scalar=1)
for ifilt, filt in enumerate(self.photom_filters):
A_phot[self.N+it, ifilt] = tz.integrate_filter(filt)*3.e18/self.photom_pivot[ifilt]**2#*(1+z)
# pl = plt.plot(tz.wave, tz.flux)
# plt.scatter(self.photom_pivot, A_phot[self.N+it,:], color=pl[0].get_color())
return A_phot[:,mask]
def xfit_at_z(self, z=0, templates=[], fitter='nnls', fit_background=True, get_uncertainties=False, get_design_matrix=False, pscale=None, COEFF_SCALE=1.e-19, get_components=False, huber_delta=4, get_residuals=False, include_photometry=True):
"""Fit the 2D spectra with a set of templates at a specified redshift.
Parameters
----------
z : float
Redshift.
templates : list
List of templates to fit.
        fitter : str
            Minimization algorithm to compute template coefficients.
            The default 'nnls' uses non-negative least squares, 'lstsq' uses
            standard least squares, and any other value falls back to bounded
            least squares (`scipy.optimize.lsq_linear` with method='bvls').
fit_background : bool
Fit additive pedestal background offset.
get_uncertainties : bool
Compute coefficient uncertainties from the covariance matrix
get_design_matrix : bool
Return design matrix and data, rather than nominal outputs.
huber_delta : float
Use the Huber loss function (`~scipy.special.huber`) rather than
direct chi-squared. If `huber_delta` < 0, then fall back to chi2.
Returns
-------
chi2 : float
Chi-squared of the fit
coeffs, coeffs_err : `~np.ndarray`
Template coefficients and uncertainties.
covariance : `~np.ndarray`
Full covariance
"""
import scipy.optimize
#import scipy.sparse
from scipy.special import huber
NTEMP = len(templates)
if (self.Nphot > 0) & include_photometry:
A = np.zeros((self.N+NTEMP, self.Nmask))
else:
A = np.zeros((self.N+NTEMP, self.Nspec))
if fit_background:
A[:self.N,:self.Nspec] = self.A_bgm
lower_bound = np.zeros(self.N+NTEMP)
lower_bound[:self.N] = -0.05
upper_bound = np.ones(self.N+NTEMP)*np.inf
upper_bound[:self.N] = 0.05
# A = scipy.sparse.csr_matrix((self.N+NTEMP, self.Ntot))
# bg_sp = scipy.sparse.csc_matrix(self.A_bg)
for i, t in enumerate(templates):
if t.startswith('line'):
lower_bound[self.N+i] = -np.inf
ti = templates[t]
if z > IGM_MINZ:
if IGM is None:
igmz = 1.
else:
lylim = ti.wave < 1250
igmz = np.ones_like(ti.wave)
igmz[lylim] = IGM.full_IGM(z, ti.wave[lylim]*(1+z))
else:
igmz = 1.
# Don't redshift spline templates
if ti.name.startswith('bspl'):
s = [ti.wave, ti.flux*igmz]
else:
s = [ti.wave*(1+z), ti.flux/(1+z)*igmz]
for j, beam in enumerate(self.beams):
mask_i = beam.fit_mask.reshape(beam.sh)
clip = mask_i.sum(axis=0) > 0
if clip.sum() == 0:
continue
lam_beam = beam.wave[clip]
if ((s[0].min() > lam_beam.max()) |
(s[0].max() < lam_beam.min())):
continue
sl = self.mslices[j]
if t in beam.thumbs:
#print('Use thumbnail!', t)
A[self.N+i, sl] = beam.compute_model(thumb=beam.thumbs[t], spectrum_1d=s, in_place=False, is_cgs=True)[beam.fit_mask]*COEFF_SCALE
else:
A[self.N+i, sl] = beam.compute_model(spectrum_1d=s, in_place=False, is_cgs=True)[beam.fit_mask]*COEFF_SCALE
# if j == 0:
# m = beam.compute_model(spectrum_1d=s, in_place=False, is_cgs=True)
# ds9.frame(i)
# ds9.view(m.reshape(beam.sh))
if fit_background:
if fitter in ['nnls', 'lstsq']:
pedestal = 0.04
else:
pedestal = 0.
else:
pedestal = 0
#oktemp = (A*self.fit_mask).sum(axis=1) != 0
oktemp = A.sum(axis=1) != 0
# Photometry
if (self.Nphot > 0):
if include_photometry:
A_phot = self._interpolate_photometry(z=z,
templates=templates)
A[:,-self.Nphot:] = A_phot*COEFF_SCALE #np.hstack((A, A_phot))
full_fit_mask = self.fit_mask
else:
full_fit_mask = self.fit_mask_spec
else:
full_fit_mask = self.fit_mask
# Weight design matrix and data by 1/sigma
#Ax = A[oktemp,:]*self.sivarf[full_fit_mask]
# Include `weight` variable to account for contamination
sivarf = self.sivarf*np.sqrt(self.weightf)
Ax = A[oktemp,:]*sivarf[full_fit_mask]
#AxT = Ax[:,full_fit_mask].T
# Scale photometry
if hasattr(self, 'pscale'):
if (self.pscale is not None):
scale = self.compute_scale_array(self.pscale, self.wavef[full_fit_mask])
if self.Nphot > 0:
scale[-self.Nphot:] = 1.
Ax *= scale
if fit_background:
for i in range(self.N):
Ax[i,:] /= scale
# Need transpose
AxT = Ax.T
# Masked data array, including background pedestal
data = ((self.scif+pedestal*self.is_spec)*sivarf)[full_fit_mask]
if get_design_matrix:
return AxT, data
# Run the minimization
if fitter == 'nnls':
coeffs_i, rnorm = scipy.optimize.nnls(AxT, data)
elif fitter == 'lstsq':
coeffs_i, residuals, rank, s = np.linalg.lstsq(AxT, data, rcond=None)
else:
# Bounded Least Squares
lsq_out = scipy.optimize.lsq_linear(AxT, data, bounds=(lower_bound[oktemp], upper_bound[oktemp]), method='bvls', tol=1.e-8)
coeffs_i = lsq_out.x
if False:
r = AxT.dot(coeffs_i) - data
# Compute background array
if fit_background:
background = np.dot(coeffs_i[:self.N], A[:self.N,:]) - pedestal
if self.Nphot > 0:
background[-self.Nphot:] = 0.
coeffs_i[:self.N] -= pedestal
else:
background = self.scif[full_fit_mask]*0.
# Full model
if fit_background:
model = np.dot(coeffs_i[self.N:], Ax[self.N:,:]/sivarf[full_fit_mask])
else:
model = np.dot(coeffs_i, Ax/sivarf[full_fit_mask])
# Model photometry
if self.Nphot > 0:
self.photom_model = model[-self.Nphot:]*1
# Residuals and Chi-squared
resid = self.scif[full_fit_mask] - model - background
if get_components:
return model, background
#chi2 = np.sum(resid[full_fit_mask]**2*self.sivarf[full_fit_mask]**2)
norm_resid = resid*(sivarf)[full_fit_mask]
# Use Huber loss function rather than direct chi2
if get_residuals:
chi2 = norm_resid
else:
if huber_delta > 0:
chi2 = huber(huber_delta, norm_resid)*2.
else:
chi2 = norm_resid**2
chi2 = np.sum(chi2)
# Uncertainties from covariance matrix
if get_uncertainties:
try:
# Covariance is inverse of AT.A
covar_i = np.matrix(np.dot(AxT.T, AxT)).I.A
covar = utils.fill_masked_covar(covar_i, oktemp)
covard = np.sqrt(covar.diagonal())
# Compute covariances after masking templates with coeffs = 0
if get_uncertainties == 2:
nonzero = coeffs_i != 0
if nonzero.sum() > 0:
AxTm = AxT[:,nonzero]
#mcoeffs_i, rnorm = scipy.optimize.nnls(AxTm, data)
#mcoeffs_i[:self.N] -= pedestal
mcovar_i = np.matrix(np.dot(AxTm.T, AxTm)).I.A
mcovar = utils.fill_masked_covar(mcovar_i, nonzero)
mcovar = utils.fill_masked_covar(mcovar, oktemp)
mcovard = np.sqrt(mcovar.diagonal())
covar = mcovar
covard = mcovard
except:
print('Except: covar!')
covar = np.zeros((self.N+NTEMP, self.N+NTEMP))
covard = np.zeros(self.N+NTEMP)#-1.
mcovard = covard
else:
covar = np.zeros((self.N+NTEMP, self.N+NTEMP))
covard = np.zeros(self.N+NTEMP)#-1.
coeffs = np.zeros(self.N+NTEMP)
coeffs[oktemp] = coeffs_i #[self.N:]] = coeffs[self.N:]
coeffs_err = covard #np.zeros(NTEMP)
#full_coeffs_err[oktemp[self.N:]] = covard[self.N:]
del(A); del(Ax); del(AxT)
#if fit_background:
coeffs[self.N:] *= COEFF_SCALE
coeffs_err[self.N:] *= COEFF_SCALE
#covar[self.N:,self.N:] *= COEFF_SCALE**2
covar[self.N:,:] *= COEFF_SCALE
covar[:,self.N:] *= COEFF_SCALE
return chi2, coeffs, coeffs_err, covar
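    # Usage sketch for `xfit_at_z` (fit at a single trial redshift; a hedged
    # example, `mb` being a MultiBeam/GroupFitter instance):
    #
    #   >>> templates = utils.load_templates(fwhm=1200, line_complexes=True)
    #   >>> chi2, coeffs, coeffs_err, covar = mb.xfit_at_z(z=1.25,
    #   ...                                       templates=templates,
    #   ...                                       fitter='nnls',
    #   ...                                       fit_background=True)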
def xfit_redshift(self, prior=None, fwhm=1200,
make_figure=True, zr=[0.65, 1.6], dz=[0.005, 0.0004],
verbose=True, fit_background=True, fitter='nnls',
delta_chi2_threshold=0.004, poly_order=3, zoom=True,
line_complexes=True, templates={}, figsize=[8,5],
fsps_templates=False, get_uncertainties=True,
Rspline=30, huber_delta=4, get_student_logpdf=False):
"""TBD
"""
        from numpy import polyfit, polyval
from scipy.stats import t as student_t
from scipy.special import huber
        if zr == 0:
stars = True
zr = [0, 0.01]
fitter='nnls'
else:
stars = False
zgrid = utils.log_zgrid(zr, dz=dz[0])
NZ = len(zgrid)
#### Polynomial SED fit
wpoly = np.linspace(1000,5.e4,1000)
# tpoly = utils.polynomial_templates(wpoly, line=True)
# out = self.xfit_at_z(z=0., templates=tpoly, fitter='nnls',
# fit_background=True, get_uncertainties=False)
tpoly = utils.polynomial_templates(wpoly, order=poly_order,
line=False)
out = self.xfit_at_z(z=0., templates=tpoly, fitter='lstsq',
fit_background=True, get_uncertainties=False,
include_photometry=False, huber_delta=huber_delta)
chi2_poly, coeffs_poly, err_poly, cov = out
#### Spline SED fit
wspline = np.arange(4200, 2.5e4)
#Rspline = 30
df_spl = len(utils.log_zgrid(zr=[wspline[0], wspline[-1]], dz=1./Rspline))
tspline = utils.bspline_templates(wspline, df=df_spl+2, log=True, clip=0.0001)
out = self.xfit_at_z(z=0., templates=tspline, fitter='lstsq',
fit_background=True, get_uncertainties=True,
include_photometry=False, get_residuals=True)
spline_resid, coeffs_spline, err_spline, cov = out
if huber_delta > 0:
chi2_spline = (huber(huber_delta, spline_resid)*2.).sum()
else:
chi2_spline = (spline_resid**2).sum()
student_t_pars = student_t.fit(spline_resid)
#poly1d, xxx = utils.dot_templates(coeffs_poly[self.N:], tpoly, z=0)
# tpoly = utils.polynomial_templates(wpoly, order=3)
# out = self.xfit_at_z(z=0., templates=tpoly, fitter='lstsq',
# fit_background=True)
# chi2_poly, coeffs_poly, c, cov = out
# if True:
# cp, lp = utils.dot_templates(coeffs_poly[self.N:], tpoly)
### Set up for template fit
if templates == {}:
templates = utils.load_templates(fwhm=fwhm, stars=stars, line_complexes=line_complexes, fsps_templates=fsps_templates)
else:
if verbose:
print('User templates! N={0} \n'.format(len(templates)))
NTEMP = len(templates)
out = self.xfit_at_z(z=0., templates=templates, fitter=fitter,
fit_background=fit_background,
get_uncertainties=False)
chi2, coeffs, coeffs_err, covar = out
chi2 = np.zeros(NZ)
logpdf = np.zeros(NZ)
coeffs = np.zeros((NZ, coeffs.shape[0]))
covar = np.zeros((NZ, covar.shape[0], covar.shape[1]))
chi2min = 1e30
iz = 0
for i in range(NZ):
out = self.xfit_at_z(z=zgrid[i], templates=templates,
fitter=fitter, fit_background=fit_background,
get_uncertainties=get_uncertainties,
get_residuals=True)
fit_resid, coeffs[i,:], coeffs_err, covar[i,:,:] = out
if huber_delta > 0:
chi2[i] = (huber(huber_delta, fit_resid)*2.).sum()
else:
chi2[i] = (fit_resid**2).sum()
if get_student_logpdf:
logpdf[i] = student_t.logpdf(fit_resid, *student_t_pars).sum()
if chi2[i] < chi2min:
iz = i
chi2min = chi2[i]
if verbose:
print(utils.NO_NEWLINE + ' {0:.4f} {1:9.1f} ({2:.4f}) {3:d}/{4:d}'.format(zgrid[i], chi2[i], zgrid[iz], i+1, NZ))
if verbose:
print('First iteration: z_best={0:.4f}\n'.format(zgrid[iz]))
## Find peaks
import peakutils
# Make "negative" chi2 for peak-finding
#chi2_test = chi2_poly
chi2_test = chi2_spline
if chi2_test > (chi2.min()+100):
chi2_rev = (chi2.min() + 100 - chi2)/self.DoF
elif chi2_test < (chi2.min() + 9):
chi2_rev = (chi2.min() + 16 - chi2)/self.DoF
else:
chi2_rev = (chi2_test - chi2)/self.DoF
chi2_rev[chi2_rev < 0] = 0
indexes = peakutils.indexes(chi2_rev, thres=0.4, min_dist=8)
num_peaks = len(indexes)
if False:
plt.plot(zgrid, (chi2-chi2.min())/ self.DoF)
plt.scatter(zgrid[indexes], (chi2-chi2.min())[indexes]/ self.DoF, color='r')
# delta_chi2 = (chi2.max()-chi2.min())/self.DoF
# if delta_chi2 > delta_chi2_threshold:
if (num_peaks > 0) & (not stars) & zoom:
zgrid_zoom = []
for ix in indexes:
if (ix > 0) & (ix < len(chi2)-1):
c = polyfit(zgrid[ix-1:ix+2], chi2[ix-1:ix+2], 2)
zi = -c[1]/(2*c[0])
chi_i = polyval(c, zi)
zgrid_zoom.extend(np.arange(zi-2*dz[0],
zi+2*dz[0]+dz[1]/10., dz[1]))
# zgrid_zoom = utils.zoom_zgrid(zgrid, chi2/self.DoF,
# threshold=delta_chi2_threshold,
# factor=dz[0]/dz[1])
NZOOM = len(zgrid_zoom)
chi2_zoom = np.zeros(NZOOM)
logpdf_zoom = np.zeros(NZOOM)
coeffs_zoom = np.zeros((NZOOM, coeffs.shape[1]))
covar_zoom = np.zeros((NZOOM, coeffs.shape[1], covar.shape[2]))
iz = 0
chi2min = 1.e30
for i in range(NZOOM):
out = self.xfit_at_z(z=zgrid_zoom[i], templates=templates,
fitter=fitter,
fit_background=fit_background,
get_uncertainties=get_uncertainties,
get_residuals=True)
fit_resid, coeffs_zoom[i,:], e, covar_zoom[i,:,:] = out
if huber_delta > 0:
chi2_zoom[i] = (huber(huber_delta, fit_resid)*2.).sum()
else:
chi2_zoom[i] = (fit_resid**2).sum()
if get_student_logpdf:
logpdf_zoom[i] = student_t.logpdf(fit_resid,
*student_t_pars).sum()
#A, coeffs_zoom[i,:], chi2_zoom[i], model_2d = out
if chi2_zoom[i] < chi2min:
chi2min = chi2_zoom[i]
iz = i
if verbose:
print(utils.NO_NEWLINE+'- {0:.4f} {1:9.1f} ({2:.4f}) {3:d}/{4:d}'.format(zgrid_zoom[i], chi2_zoom[i], zgrid_zoom[iz], i+1, NZOOM))
zgrid = np.append(zgrid, zgrid_zoom)
chi2 = np.append(chi2, chi2_zoom)
logpdf = np.append(logpdf, logpdf_zoom)
coeffs = np.append(coeffs, coeffs_zoom, axis=0)
covar = np.vstack((covar, covar_zoom))
so = np.argsort(zgrid)
zgrid = zgrid[so]
chi2 = chi2[so]
logpdf = logpdf[so]
coeffs = coeffs[so,:]
covar = covar[so,:,:]
fit = utils.GTable()
fit.meta['N'] = (self.N, 'Number of spectrum extensions')
fit.meta['polyord'] = (poly_order, 'Order polynomial fit')
fit.meta['chi2poly'] = (chi2_poly, 'Chi^2 of polynomial fit')
kspl = (coeffs_spline != 0).sum()
fit.meta['chi2spl'] = (chi2_spline, 'Chi^2 of spline fit')
fit.meta['kspl'] = (kspl, 'Parameters, k, of spline fit')
# Evaluate spline at wavelengths for stars
xspline = np.array([8100, 9000, 1.27e4, 1.4e4])
flux_spline = utils.eval_bspline_templates(xspline, tspline, coeffs_spline[self.N:])
fluxerr_spline = utils.eval_bspline_templates(xspline, tspline, err_spline[self.N:])
for i in range(len(xspline)):
fit.meta['splf{0:02d}'.format(i+1)] = flux_spline[i], 'Spline flux at {0:.2f} um'.format(xspline[i]/1.e4)
fit.meta['sple{0:02d}'.format(i+1)] = fluxerr_spline[i], 'Spline flux err at {0:.2f} um'.format(xspline[i]/1.e4)
izbest = np.argmin(chi2)
clip = coeffs[izbest,:] != 0
ktempl = clip.sum()
fit.meta['NTEMP'] = (len(templates), 'Number of fitting templates')
fit.meta['DoF'] = (self.DoF, 'Degrees of freedom (number of pixels)')
fit.meta['ktempl'] = (ktempl, 'Parameters, k, of template fit')
fit.meta['chimin'] = (chi2.min(), 'Minimum chi2 of template fit')
fit.meta['chimax'] = (chi2.max(), 'Maximum chi2 of template fit')
fit.meta['fitter'] = (fitter, 'Minimization algorithm')
# Bayesian information criteria, normalized to template min_chi2
# BIC = log(number of data points)*(number of params) + min(chi2) + C
# https://en.wikipedia.org/wiki/Bayesian_information_criterion
scale_chinu = self.DoF/chi2.min()
scale_chinu = 1 # Don't rescale
fit.meta['bic_poly'] = np.log(self.DoF)*(poly_order+1+self.N) + (chi2_poly-chi2.min())*scale_chinu, 'BIC of polynomial fit'
fit.meta['bic_spl'] = np.log(self.DoF)*kspl + (chi2_spline-chi2.min())*scale_chinu, 'BIC of spline fit'
fit.meta['bic_temp'] = np.log(self.DoF)*ktempl, 'BIC of template fit'
for i, tname in enumerate(templates):
fit.meta['T{0:03d}NAME'.format(i+1)] = (templates[tname].name, 'Template name')
if tname.startswith('line '):
fit.meta['T{0:03d}FWHM'.format(i+1)] = (templates[tname].fwhm, 'FWHM, if emission line')
dtype = np.float64
fit['zgrid'] = np.cast[dtype](zgrid)
fit['chi2'] = np.cast[dtype](chi2)
if get_student_logpdf:
fit['student_logpdf'] = np.cast[dtype](logpdf)
fit.meta['t_df'] = student_t_pars[0], 'Student-t df'
fit.meta['t_loc'] = student_t_pars[1], 'Student-t loc'
fit.meta['t_scale'] = student_t_pars[2], 'Student-t scale'
#fit['chi2poly'] = chi2_poly
fit['coeffs'] = np.cast[dtype](coeffs)
fit['covar'] = np.cast[dtype](covar)
fit = self._parse_zfit_output(fit, prior=prior)
return fit
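    # Usage sketch for `xfit_redshift` (coarse grid scan followed by the
    # zoomed refinement; parameter values are illustrative):
    #
    #   >>> fit = mb.xfit_redshift(zr=[0.3, 2.5], dz=[0.004, 0.0004],
    #   ...                        fitter='nnls', verbose=True)
    #   >>> fit.meta['z_map'][0], fit.meta['ZWIDTH1'][0]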
def _parse_zfit_output(self, fit, prior=None):
"""Parse best-fit redshift, etc.
TBD
"""
import scipy.interpolate
# Normalize to min(chi2)/DoF = 1.
scl_nu = fit['chi2'].min()/self.DoF
# PDF
pdf = np.exp(-0.5*(fit['chi2']-fit['chi2'].min())/scl_nu)
if prior is not None:
interp_prior = np.interp(fit['zgrid'], prior[0], prior[1])
pdf *= interp_prior
fit.meta['hasprior'] = True, 'Prior applied to PDF'
fit['prior'] = interp_prior
else:
interp_prior = None
fit.meta['hasprior'] = False, 'Prior applied to PDF'
# Normalize PDF
pdf /= np.trapz(pdf, fit['zgrid'])
# Interpolate pdf for more continuous measurement
spl = scipy.interpolate.Akima1DInterpolator(fit['zgrid'], np.log(pdf), axis=1)
zfine = utils.log_zgrid(zr=[fit['zgrid'].min(), fit['zgrid'].max()], dz=0.0001)
ok = np.isfinite(spl(zfine))
norm = np.trapz(np.exp(spl(zfine[ok])), zfine[ok])
# Compute CDF and probability intervals
dz = np.gradient(zfine[ok])
cdf = np.cumsum(np.exp(spl(zfine[ok]))*dz/norm)
pz_percentiles = np.interp(np.array([2.5, 16, 50, 84, 97.5])/100., cdf, zfine[ok])
# Random draws, testing
#rnd = np.interp(np.random.rand(1000), cdf, fit['zgrid']+dz/2.)
dz = np.gradient(fit['zgrid'])
gamma = 0.15
zsq = np.dot(fit['zgrid'][:,None], np.ones_like(fit['zgrid'])[None,:])
L = _loss((zsq-fit['zgrid'])/(1+fit['zgrid']), gamma=gamma)
risk = np.dot(pdf*L, dz)
zi = np.argmin(risk)
#print('xxx', zi, len(risk))
if (zi < len(risk)-1) & (zi > 0):
c = np.polyfit(fit['zgrid'][zi-1:zi+2], risk[zi-1:zi+2], 2)
z_risk = -c[1]/(2*c[0])
else:
z_risk = fit['zgrid'][zi]
min_risk = np.trapz(pdf*_loss((z_risk-fit['zgrid'])/(1+fit['zgrid']), gamma=gamma), fit['zgrid'])
# MAP, maximum p(z)
zi = np.argmax(pdf)
if (zi < len(pdf)-1) & (zi > 0):
c = np.polyfit(fit['zgrid'][zi-1:zi+2], pdf[zi-1:zi+2], 2)
z_map = -c[1]/(2*c[0])
else:
z_map = fit['zgrid'][zi]
# Store data in the fit table
fit['pdf'] = pdf
fit['risk'] = risk
fit.meta['Z02'] = pz_percentiles[0], 'Integrated p(z) = 0.025'
fit.meta['Z16'] = pz_percentiles[1], 'Integrated p(z) = 0.16'
fit.meta['Z50'] = pz_percentiles[2], 'Integrated p(z) = 0.5'
fit.meta['Z84'] = pz_percentiles[3], 'Integrated p(z) = 0.84'
fit.meta['Z97'] = pz_percentiles[4], 'Integrated p(z) = 0.975'
fit.meta['ZWIDTH1'] = pz_percentiles[3]-pz_percentiles[1], 'Width between the 16th and 84th p(z) percentiles'
fit.meta['ZWIDTH2'] = pz_percentiles[4]-pz_percentiles[0], 'Width between the 2.5th and 97.5th p(z) percentiles'
fit.meta['z_map'] = z_map, 'Redshift at MAX(PDF)'
fit.meta['z_risk'] = z_risk, 'Redshift at minimum risk'
fit.meta['min_risk'] = min_risk, 'Minimum risk'
fit.meta['gam_loss'] = gamma, 'Gamma factor of the risk/loss function'
return fit
def template_at_z(self, z=0, templates=None, fit_background=True, fitter='nnls', fwhm=1400, get_uncertainties=2, get_residuals=False, include_photometry=True, draws=0):
"""TBD
"""
if templates is None:
templates = utils.load_templates(line_complexes=False, fsps_templates=True, fwhm=fwhm)
out = self.xfit_at_z(z=z, templates=templates, fitter=fitter,
fit_background=fit_background,
get_uncertainties=get_uncertainties,
get_residuals=get_residuals,
include_photometry=include_photometry)
chi2, coeffs, coeffs_err, covar = out
cont1d, line1d = utils.dot_templates(coeffs[self.N:], templates, z=z,
apply_igm=(z > IGM_MINZ))
# Parse template coeffs
cfit = OrderedDict()
for i in range(self.N):
cfit['bg {0:03d}'.format(i)] = coeffs[i], coeffs_err[i]
for j, key in enumerate(templates):
i = j+self.N
cfit[key] = coeffs[i], coeffs_err[i]
if False:
# Compare drizzled and beam fits (very close)
for j, key in enumerate(templates):
print('{key:<16s} {0:.2e} {1:.2e} {2:.2e} {3:.2e}'.format(mb_cfit[key][0], mb_cfit[key][1], st_cfit[key][0], st_cfit[key][1], key=key))
tfit = OrderedDict()
tfit['cont1d'] = cont1d
tfit['line1d'] = line1d
tfit['cfit'] = cfit
tfit['coeffs'] = coeffs
tfit['chi2'] = chi2
tfit['covar'] = covar
tfit['z'] = z
tfit['templates'] = templates
if draws > 0:
xte, yte, lte = utils.array_templates(templates, max_R=5000, z=z)
err = np.sqrt(covar.diagonal())
nonzero = err > 0
cov_norm = ((covar/err).T/err)[nonzero,:][:,nonzero]
draw_coeff = np.zeros((draws, len(err)))
draw_coeff[:,nonzero] = np.random.multivariate_normal((coeffs/err)[nonzero], cov_norm, draws)*err[nonzero]
draw_spec = draw_coeff[:,self.N:].dot(yte)
            err_spec = np.diff(np.percentile(draw_spec, [16, 84], axis=0), axis=0).flatten()/2.
            tfit['line1d_err'] = err_spec
        return tfit
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 20 18:02:22 2018
@author: maximoskaliakatsos-papakostas
"""
from music21 import *
import numpy as np
import os
import glob
import copy
# the user should give the file name (or the folder containing the files)
def get_parts_np_from_file(fileName, parts_for_surface, time_res):
# INPUTS:
# fileName - string: the name of the file - full path
# parts_for_surface - int array or 'all': parts to include to score
# time_res - int: e.g. 16 or 32 etc.
# OUTPUTS:
# m: the score matrix in np array
# m_length: the number of columns in the matrix
p = converter.parse(fileName)
# tmp list that will include the lists of all pitches
tmp_all_pitches = []
if parts_for_surface == 'all':
parts_for_surface = range(len(p.parts))
# find the length of the largest part for concatenating midi
# this is used if measures are empty and we have to go with parts
largest_part_length = 0
for ii in parts_for_surface:
tmp_part = p.parts[ii]
notes = tmp_part.flat.notes
if largest_part_length < eval(str(notes[-1].offset)):
largest_part_length = eval(str(notes[-1].offset))
# make surface matrix
for i in parts_for_surface:
# get part
tmp_part = p.parts[i]
# tmp list that will include the lists of all pitches in the part
tmp_part_pitches = []
# get array of measures
measures = [m for m in tmp_part.getElementsByClass('Measure')]
if measures:
# for all measures
for m in measures:
# get time signature of measure
ts = m.flat.timeSignature
if ts != None:
# tmp_ts = ts[0]
if ts.numerator == 4 and ts.denominator == 4:
measureLength = time_res
elif ts.numerator == 3 and ts.denominator == 4:
measureLength = int(3.0*time_res/4.0)
elif ts.numerator == 3 and ts.denominator == 8:
measureLength = int(3.0*time_res/8.0)
elif ts.numerator == 2 and ts.denominator == 4:
measureLength = int(time_res/2.0)
elif ts.numerator == 12 and ts.denominator == 8:
measureLength = int(3.0*time_res/2.0)
elif ts.numerator == 3 and ts.denominator == 2:
measureLength = int(3.0*time_res/2.0)
else:
print("unknown time signature: ", ts.numerator, ts.denominator)
notes = m.flat.notes
# tmp list that stores the pitches for the measure
tmp_measure_pitches = np.zeros((128, measureLength))
for n in notes:
offset_value = int( eval(str(n.offset))*time_res/4.0 )
duration_value = int(n.duration.quarterLength*time_res/4.0)
if n.isChord:
for nn in n.pitches:
midi_number = nn.midi
# print(midi_number, " - ", offset_value, " - ", duration_value)
tmp_measure_pitches[midi_number, offset_value] = duration_value
else:
midi_number = n.pitch.midi
tmp_measure_pitches[midi_number, offset_value] = duration_value
# tmp_all_pitches.append(tmp_measure_pitches)
if len(tmp_part_pitches) == 0:
tmp_part_pitches = np.array(tmp_measure_pitches)
else:
tmp_part_pitches = np.hstack((tmp_part_pitches, tmp_measure_pitches))
else:
# do the same for part
notes = tmp_part.flat.notes
# get part length
part_length = largest_part_length
# tmp list that stores the pitches for the measure
tmp_part_pitches = np.zeros((128, int( part_length*time_res/4.0 )+1 ))
for n in notes:
offset_value = int( eval(str(n.offset))*time_res/4.0 )
duration_value = int(n.duration.quarterLength*time_res/4.0)
if n.isChord:
for nn in n.pitches:
midi_number = nn.midi
# print(midi_number, " - ", offset_value, " - ", duration_value)
tmp_part_pitches[midi_number, offset_value] = duration_value
else:
midi_number = n.pitch.midi
tmp_part_pitches[midi_number, offset_value] = duration_value
# end if measures
tmp_all_pitches.append(tmp_part_pitches)
# end for parts
all_pitches = np.array(tmp_all_pitches[0])
for a in tmp_all_pitches:
# print('tmp_all_pitches: ', tmp_all_pitches)
# print('all_pitches: ', all_pitches)
all_pitches[ all_pitches == 0 ] = a[ all_pitches == 0 ]
return all_pitches, np.size(all_pitches, axis=1)
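# Usage sketch for get_parts_np_from_file (hypothetical path): build a
# 128 x T piano-roll at 16th-note resolution from the first two parts.
#
#   >>> m, m_length = get_parts_np_from_file('scores/example.xml', [0, 1], 16)
#   >>> m.shape   # (128, m_length)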
def get_rel_pcp_np_from_file(fileName, parts_for_surface, time_res):
# INPUTS:
# fileName - string: the name of the file - full path
# parts_for_surface - int array or 'all': parts to include to score
# time_res - int: e.g. 16 or 32 etc.
# OUTPUTS:
# m: the score matrix in np array
# m_length: the number of columns in the matrix
p = converter.parse(fileName)
# tmp list that will include the lists of all pitches
tmp_all_pitches = []
if parts_for_surface == 'all':
parts_for_surface = range(len(p.parts))
# make surface matrix
for i in parts_for_surface:
# get part
tmp_part = p.parts[i]
# tmp list that will include the lists of all pitches in the part
tmp_part_pitches = []
# get array of measures
measures = [m for m in tmp_part.getElementsByClass('Measure')]
# for all measures
for m in measures:
# get time signature of measure
ts = m.flat.timeSignature
if ts != None:
# tmp_ts = ts[0]
if ts.numerator == 4 and ts.denominator == 4:
measureLength = time_res
elif ts.numerator == 3 and ts.denominator == 4:
measureLength = int(3.0*time_res/4.0)
elif ts.numerator == 3 and ts.denominator == 8:
measureLength = int(3.0*time_res/8.0)
else:
print("unknown time signature: ", ts.numerator, ts.denominator)
notes = m.flat.notes
# tmp list that stores the pitches for the measure
tmp_measure_pitches = np.zeros((128, measureLength))
for n in notes:
offset_value = int( eval(str(n.offset))*time_res/4.0 )
duration_value = int(n.duration.quarterLength*time_res/4.0)
if n.isChord:
for nn in n.pitches:
midi_number = nn.midi
# print(midi_number, " - ", offset_value, " - ", duration_value)
tmp_measure_pitches[midi_number, offset_value] = duration_value
else:
midi_number = n.pitch.midi
tmp_measure_pitches[midi_number, offset_value] = duration_value
# tmp_all_pitches.append(tmp_measure_pitches)
if len(tmp_part_pitches) == 0:
tmp_part_pitches = np.array(tmp_measure_pitches)
else:
tmp_part_pitches = np.hstack((tmp_part_pitches, tmp_measure_pitches))
tmp_all_pitches.append(tmp_part_pitches)
all_pitches = np.array(tmp_all_pitches[0])
for a in tmp_all_pitches:
all_pitches[ all_pitches == 0 ] = a[ all_pitches == 0 ]
return all_pitches, np.size(all_pitches, axis=1)
# end
# running for all files in folder
def get_parts_3Dnp_from_folder(folderName, parts_for_surface, time_res):
# INPUTS:
# folderName - string: the name of the folder - full path
# parts_for_surface - int array or 'all': parts to include to score
# time_res - int: e.g. 16 or 32 etc.
# OUTPUTS:
# m: the score matrices in np array (n_pieces, 128, max_len)
# m_length: the number of columns in each matrix (n_pieces, )
allDocs = glob.glob(folderName + os.sep + "*.xml")
# keep all matrices for each file
all_matrices = []
# keep the respective lengths to define -1 - padding
all_lengths = []
for fileName in allDocs:
m, l = get_parts_np_from_file(fileName, parts_for_surface, time_res)
all_matrices.append(m)
all_lengths.append(l)
# find max length for padding
all_lengths = np.array(all_lengths)
max_length = np.max(all_lengths)
# pad-em-all
for i in range(len(all_matrices)):
m = all_matrices[i]
# check if padding needed
if m.shape[1] < max_length:
# make a padding matrix
padder = -1.0*np.ones((128, max_length-m.shape[1]))
all_matrices[i] = np.hstack( (m, padder) )
all_matrices = | np.array(all_matrices) | numpy.array |
"""
Create stimuli to probe the networks.
"""
import numpy as np
def expanding_disk(pos,speed,width,exp_rate,maxwidth,amplitude,gridsize,appears,duration,order=10):
# Creates artificial stimuli of expanding disks. Params:
# pos: 2 dim starting position in grid coordinates
# speed: 2 dim speed vector, in pixels per time point
# width: the initial width of the disk
# exp_rate: the rate with which the disk expands, in pixels per time point
# maxwidth: the maximum attainable width of the disk
# amplitude: peak amplitude of the disk
# gridsize: the size of the grid, in pixels
# appears: the time point the disk first appears
# duration: the temporal extent of the stimulus, in units of time
# order: controls how sharp the transition on the margins of the disk is
disk_dur = duration - appears
xc = pos[0] + speed[0]*np.arange(disk_dur)
yc = pos[1] + speed[1]*np.arange(disk_dur)
w = width + exp_rate*np.arange(disk_dur)
w[w>maxwidth] = maxwidth
# correction for negative expansion rates
if exp_rate<0:
w[w<1] = 1
# do a meshgrid over 3 coordinates (x,y,w)
x = np.arange(gridsize); y = np.arange(gridsize)
X, Y, W = np.meshgrid(x,y,w)
norm_dist = ((X-xc)**2+(Y-yc)**2)/W**2
stim1 = amplitude*np.exp(-1/2*norm_dist**int(order/2))
stim = np.zeros((gridsize,gridsize,duration))
stim[:,:,appears:duration] = stim1
return stim
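# Minimal usage sketch (illustrative values): a disk that appears at t = 5 on a
# 64 x 64 grid, drifts to the right and expands up to a width of 15 pixels.
def _example_expanding_disk():
    stim = expanding_disk(pos=[20, 32], speed=[0.5, 0.0], width=3, exp_rate=0.4,
                          maxwidth=15, amplitude=1.0, gridsize=64, appears=5,
                          duration=50)
    return stim  # shape (gridsize, gridsize, duration)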
def expanding_annuli(pos,speed,width,init,exp_rate,maxsize,amplitude,gridsize,appears,duration,order=10):
# Creates artificial stimuli of expanding annuli. Params:
# pos: 2 dim starting position in grid coordinates
# speed: 2 dim speed vector, in pixels per time point
# width: the width of the annulus
# init: the initial size of the annulus
# exp_rate: the rate with which the annulus expands, in pixels per time point
# maxsize: the maximum attainable width of the annulus
# amplitude: peak amplitude of the annulus
# gridsize: the size of the grid, in pixels
# appears: the time point the annulus first appears
# duration: the temporal extent of the stimulus, in units of time
# order: controls how sharp the transition on the margins of the annulus is
base = expanding_disk(pos,speed,init,exp_rate,maxsize,amplitude,gridsize,appears,duration,order)
extract = expanding_disk(pos,speed,init-width,exp_rate,maxsize-width,amplitude,gridsize,appears,duration,order)
stim = base - extract
return stim
def moving_bars(k,speed,theta,phase,contrast,gridsize,duration):
# Creates artificial stimuli of moving bars. Params:
# k: spatial frequency of the bars, in inverse pixel values
    # speed: drift rate in cycles per time step; the sign sets the drift direction
# theta: orientation of the bars in space in rads, 0 rads being horizontal
    # contrast: amplitude of the positive part (the negative part gets -contrast)
# gridsize: the size of the grid, in pixels
# duration: the temporal extent of the stimulus, in units of time
x = np.arange(gridsize); y = np.arange(gridsize); t = np.arange(duration)
X, Y, T = np.meshgrid(x,y,t)
stim = np.cos(2*np.pi*k*X*np.cos(theta)+2*np.pi*k*Y*np.sin(theta)+phase-2*np.pi*speed*T)
return contrast*np.sign(stim)
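# Minimal usage sketch (illustrative values): horizontal bars with a spatial
# period of 16 pixels drifting at 0.05 cycles per time step on a 64 x 64 grid.
def _example_moving_bars():
    stim = moving_bars(k=1.0 / 16, speed=0.05, theta=0.0, phase=0.0,
                       contrast=1.0, gridsize=64, duration=40)
    return stim  # values are +/- contrast (sign of the underlying grating)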
def counterphase_grating(k,f,theta,phase,contrast,gridsize,duration):
    # Creates artificial counterphase grating stimuli. Equation 2.18 from Dayan & Abbott. Params:
# k: spatial frequency of the bars, in inverse pixel values
# f: temporal frequency of the bars, in inverse temporal unit values
# theta: orientation of the bars in space in rads, 0 rads being horizontal
    # contrast: peak amplitude of the grating (positive and negative parts)
# gridsize: the size of the grid, in pixels
# duration: the temporal extent of the stimulus, in units of time
x = np.arange(gridsize); y = | np.arange(gridsize) | numpy.arange |
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 12 12:34:53 2021
@author: 14488
"""
import gym
from gym import spaces
from ur5_kinematic import UR5_kinematic
import numpy as np
from math import pi,sqrt
from copy import copy
from gym.utils import seeding
# TODO: this environment doesn't include the render part; the CoppeliaSim delay still needs to be fixed.
# If you want to render your work, please use render.py.
# TODO: add a config.json file to store all configuration of the environment.
# TODO: merge v0 and v1 so the environment is compatible with HER, PPO, and other algorithms
# at the same time.
DELTA = pi/180.0
REACH_THRESHOLD = 0.05
mu, sigma = 0, 1
MUL_RATE = 2
# You need to modify the following area yourself you specify your robot's condition:
######################Modify following unit yourself##########################
UR5_global_position = [0.4,-0.425,0.65] # UR5 base position in your coordinate system
ur5_safebox_high = [0.72,0.275,1.8] # upper limit of UR5's safe working range
ur5_safebox_low = [-0.72,-1.025,0.66] # lower limit of UR5's safe working range
target_range_low = [-0.48,-0.795,0.663] # Target position's low limit
target_range_high = [-0.02,-0.055,0.663] # Target position's high limit
MOVE_UNIT = 1 * DELTA # Joints' moving unit
# Robot joints' moving limits:
# TODO: these moving limits still need to be re-calibrated
joint1 = [-pi , pi]
joint2 = [-pi/2, pi/2]
joint3 = [-pi, pi]
joint4 = [-pi, pi]
# joint5 = [-pi, pi] # We are only using 4 joints now.
##############################################################################
action_low = [-10,-10,-10,-10]
action_high = [10,10,10,10]
# TODO: a separate body-collision check is still needed so that the robot's
# body neither touches the safety boundary nor collides with itself. The current check
# can only guarantee that the tip won't touch the boundary.
# random_action : Gaussian, Uniform
class ur5_symbol_v0(gym.Env):
def __init__(self,
reset_to_pre_if_not_reach = True,
                 random_action = 'Gaussian'):
self.kin = UR5_kinematic(UR5_global_position)
self.reset_mode = reset_to_pre_if_not_reach
self.random_action = random_action
self.seed()
self.low_action = np.array(
action_low, dtype=np.float32
)
self.high_action = np.array(
action_high, dtype=np.float32
)
self.low_state = np.array(
[joint1[0], joint2[0],joint3[0],joint4[0],ur5_safebox_low[0],
ur5_safebox_low[1],ur5_safebox_low[2],target_range_low[0],
target_range_low[1],target_range_low[2]], dtype=np.float32
)
self.high_state = np.array(
[joint1[1], joint2[1],joint3[1],joint4[1],ur5_safebox_high[0],
ur5_safebox_high[1],ur5_safebox_high[2],target_range_high[0],
target_range_high[1],target_range_high[2]], dtype=np.float32
)
self.observation_space = spaces.Box(
low=self.low_state,
high=self.high_state,
dtype=np.float32
)
self.action_space = spaces.Box(
low=self.low_action,
high=self.high_action,
dtype=np.float32
)
self.init_joints_pos = [0.0,0.0,0.0,0.0,pi/2,0.0]
#Reset
self.current_joint_pos = copy(self.init_joints_pos)
self.target_pos = self.random_gen_one_target_pos()
tip_pos = self.get_tip_pos()
ax, ay, az = tip_pos[0], tip_pos[1], tip_pos[2]
tx, ty, tz = self.target_pos[0], self.target_pos[1], self.target_pos[2]
self.current_dis = sqrt((ax - tx) ** 2 + (ay - ty) ** 2 + (az - tz) ** 2)
self.last_dis = copy(self.current_dis)
def reset(self):
if self.reset_mode:
if self.current_dis <= REACH_THRESHOLD:
self.target_pos = self.random_gen_one_target_pos()
self.current_joint_pos = copy(self.init_joints_pos)
tip_pos = self.get_tip_pos()
ax, ay, az = tip_pos[0], tip_pos[1], tip_pos[2]
tx, ty, tz = self.target_pos[0], self.target_pos[1], self.target_pos[2]
self.current_dis = sqrt((ax - tx) ** 2 + (ay - ty) ** 2 + (az - tz) ** 2)
self.last_dis = copy(self.current_dis)
return self.get_state()
def get_tip_pos(self):
coor = self.kin.Forward_ur5(self.current_joint_pos)
return [coor[0],coor[1],coor[2]]
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def get_state(self):
tip_pos = self.get_tip_pos()
state = np.array(
[self.current_joint_pos[0], self.current_joint_pos[1],
self.current_joint_pos[2], self.current_joint_pos[3],
tip_pos[0],tip_pos[1],tip_pos[2],self.target_pos[0],
self.target_pos[1],self.target_pos[2]], dtype=np.float32
)
return state
def random_gen_one_target_pos(self):
pos = np.random.uniform(target_range_low,target_range_high)
self.target_pos = pos.tolist()
return self.target_pos
def step(self,action):
self.apply_action(action)
tip_pos = self.get_tip_pos()
ax,ay,az = tip_pos[0],tip_pos[1],tip_pos[2]
tx,ty,tz = self.target_pos[0], self.target_pos[1], self.target_pos[2]
self.current_dis = sqrt((ax - tx) ** 2 + (ay - ty) ** 2 + (az - tz) ** 2)
##########################Reward 1####################################
# reward = (self.current_dis - sqrt((ax - tx) ** 2 + (ay - ty) ** 2 + (az - tz) ** 2)) / self.current_dis
######################################################################
##########################Reward 2####################################
reward = (self.last_dis - self.current_dis) * 100
######################################################################
###########################Reward 3###################################
# if self.current_dis < self.last_dis:
# reward = 0
# elif self.current_dis > self.last_dis:
# reward = -1
######################################################################
self.last_dis = copy(self.current_dis)
next_state = self.get_state()
done, step_end_status = self.check_done()
##########################Reward 4####################################
# if done == 1:
# reward = 200.0
# elif done == 2:
# reward = -30.0
# elif done == 0:
# reward = -1.0
##########################Reward5 ####################################
# if done == 1:
# reward = 0
# elif done == 2:
# reward = -50
# elif done ==0:
# reward = -1
#####################################################################
##########################Reward2 additional ########################
if step_end_status == 1:
reward += 200.0
elif step_end_status == 2:
reward += -30.0
return next_state, reward, done, {'step_end_status':step_end_status}
def apply_action(self, action):
_joint1_move = action[0] * MOVE_UNIT
_joint2_move = action[1] * MOVE_UNIT
_joint3_move = action[2] * MOVE_UNIT
_joint4_move = action[3] * MOVE_UNIT
# _joint5_move = action[0][4] * MOVE_UNIT
self.current_joint_pos[0] += _joint1_move
self.current_joint_pos[1] += _joint2_move
self.current_joint_pos[2] += _joint3_move
self.current_joint_pos[3] += _joint4_move
# self.current_joint_pos[4] + _joint5_move,
def check_done(self):
tip_pos = self.get_tip_pos()
ax,ay,az = tip_pos[0],tip_pos[1],tip_pos[2]
if self.current_dis <= REACH_THRESHOLD:
print("I am reaching the target!!!!!!!!!!!!!!")
return True, 1
elif ax <= ur5_safebox_low[0]:
print("Touching b3")
return True, 2
#In theory, b3 will not be touched anyway
elif ax >= ur5_safebox_high[0]:
print("Touching b4")
return True, 2
elif ay <= ur5_safebox_low[1]:
print("Touching b2")
return True, 2
elif ay >= ur5_safebox_high[1]:
print("Touching b1")
return True, 2
elif az <= ur5_safebox_low[2]:
print("Touching table surface")
return True, 2
elif az >= ur5_safebox_high[2]:
print("Touching sky")
return True, 2
# In theory, sky will never be touched..... :), it is too high
#TODO: Is there any nicer way to do it?
elif self.current_joint_pos[0] <= joint1[0]:
print("Joint 1 is reaching the low joint limit")
return True, 2
elif self.current_joint_pos[0] >= joint1[1]:
print("Joint 1 is reaching the high joint limit")
return True, 2
elif self.current_joint_pos[1] <= joint2[0]:
print("Joint 2 is reaching the low joint limit")
return True, 2
elif self.current_joint_pos[1] >= joint2[1]:
print("Joint 2 is reaching the high joint limit")
return True, 2
        # For the real robot, the joint limit for the second joint is [-pi, pi], but our
        # application has a table, so joint 2 cannot reach more than pi/2
elif self.current_joint_pos[2] <= joint3[0]:
print("Joint 3 is reaching the low joint limit")
return True, 2
elif self.current_joint_pos[2] >= joint3[1]:
print("Joint 3 is reaching the high joint limit")
return True, 2
elif self.current_joint_pos[3] <= joint4[0]:
print("Joint 4 is reaching the low joint limit")
return True, 2
elif self.current_joint_pos[3] >= joint4[1]:
print("Joint 4 is reaching the high joint limit")
return True, 2
# elif self.current_joint_pos[4] <= joint5[0]:
# print("Joint 5 is reaching the low joint limit")
# return 2
# elif self.current_joint_pos[4] >= joint5[1]:
# print("Joint 5 is reaching the high joint limit")
# return 2
else:
return False, 0
def random_action(self):
        if self.random_action == "Gaussian":
action = [MUL_RATE * np.random.normal(mu, sigma),
MUL_RATE * np.random.normal(mu, sigma),
MUL_RATE * | np.random.normal(mu, sigma) | numpy.random.normal |
"""
Utility function for modeling.
.. include:: ../include/links.rst
"""
import numpy as np
from scipy import linalg, stats
def cov_err(jac):
"""
Provided the Jacobian matrix from a least-squares minimization
routine, construct the parameter covariance matrix. See e.g.
Press et al. 2007, Numerical Recipes, 3rd ed., Section 15.4.2
This is directly pulled from ppxf.capfit.cov_err, but only
returns the covariance matrix:
https://pypi.org/project/ppxf/
Args:
jac (`numpy.ndarray`_):
Jacobian matrix
Returns:
`numpy.ndarray`_: Parameter covariance matrix.
"""
U, s, Vh = linalg.svd(jac, full_matrices=False)
w = s > np.spacing(s[0])*max(jac.shape)
return (Vh[w].T/s[w]**2) @ Vh[w]
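# Minimal usage sketch (hypothetical toy fit): take the Jacobian returned by
# scipy.optimize.least_squares for a straight-line fit and convert it into the
# 2 x 2 covariance matrix of the fitted parameters.
def _example_cov_err():
    from scipy import optimize
    rng = np.random.default_rng(0)
    x = np.linspace(0.0, 1.0, 50)
    y = 2.0 * x + 1.0 + 0.01 * rng.standard_normal(x.size)
    res = optimize.least_squares(lambda p: p[0] * x + p[1] - y, x0=[1.0, 0.0])
    return cov_err(res.jac)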
def lin_interp(x, x1, y1, x2, y2):
"""
Linearly interpolate a new value at position x given two points
that define the line.
Nominally, the abscissa values for the two reference points
should be to either side of the new points.
.. warning::
Will raise runtime warnings if ``np.any(x1 == x2)`` due to a
        division by 0, but this is not checked.
Args:
x (:obj:`float`, `numpy.ndarray`_):
Coordinate(s) at which to sample the new value.
x1 (:obj:`float`, `numpy.ndarray`_):
Abscissa value of the first reference point.
y1 (:obj:`float`, `numpy.ndarray`_):
Ordinate value of the first reference point.
x2 (:obj:`float`, `numpy.ndarray`_):
Abscissa value of the second reference point.
y2 (:obj:`float`, `numpy.ndarray`_):
Ordinate value of the second reference point.
Returns:
:obj:`float`, `numpy.ndarray`_: Interpolated y value(s) at
the provided x value(s).
"""
return y1 + (y2-y1) * (x-x1) / (x2-x1)
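# Quick worked example (illustrative): interpolating halfway between the points
# (0, 0) and (2, 4) gives 2, and array-valued x is handled element-wise:
#   lin_interp(1.0, 0.0, 0.0, 2.0, 4.0)                  -> 2.0
#   lin_interp(np.array([0.5, 1.5]), 0.0, 0.0, 2.0, 4.0) -> array([1., 3.])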
def deriv_lin_interp(x, x1, y1, x2, y2):
"""
Linearly interpolate a new value at position x given two points that define
the line.
This function also calculates the derivatives of the result with respect to
``y1`` and ``y2``. I.e., assuming ``y1`` and ``y2`` are parameters of a
model (and ``x1`` and ``x2`` are *not*), this returns the derivative of the
computation w.r.t. the model parameters.
Nominally, the abscissa values for the two reference points should be to
either side of the new points.
.. warning::
Will raise runtime warnings if ``np.any(x1 == x2)`` due to a division by
        0, but this is not checked.
Args:
x (:obj:`float`, `numpy.ndarray`_):
Coordinate(s) at which to sample the new value.
x1 (:obj:`float`, `numpy.ndarray`_):
Abscissa value of the first reference point.
y1 (:obj:`float`, `numpy.ndarray`_):
Ordinate value of the first reference point.
x2 (:obj:`float`, `numpy.ndarray`_):
Abscissa value of the second reference point.
y2 (:obj:`float`, `numpy.ndarray`_):
Ordinate value of the second reference point.
Returns:
:obj:`float`, `numpy.ndarray`_: Interpolated y value(s) at
the provided x value(s).
"""
dx = (x-x1) / (x2-x1)
return y1 + (y2-y1) * dx, np.stack((1 - dx, dx), axis=-1)
def sech2(x):
r"""
Calculate the squared hyperbolic secant function using `numpy.cosh`_, while
controlling for overflow errors.
Overflow is assumed to occur whenever :math:`|x| \geq 100`.
Args:
x (array-like):
Values at which to calculate :math:`\sech(x)`.
Returns:
        `numpy.ndarray`_: Result of :math:`\sech^2(x)`, where elements at which
        :math:`x` would cause overflow errors are set to 0.
"""
_x = np.atleast_1d(x)
indx = np.absolute(_x) < 100
if np.all(indx):
return 1/np.cosh(_x)**2
s = np.zeros_like(_x)
s[indx] = 1/ | np.cosh(_x[indx]) | numpy.cosh |
#!/usr/bin/env python3
import numpy as np
import scipy.signal as sig
import numpy.ma as ma
from longslit.pipeline import *
import scipy.optimize as opt
from matplotlib import pyplot as plt
from itertools import *
def all_together_now(neon, ref_neon):
    '''Output: an "interpolation map" onto a scale that is linear in wavelength,
    plus the value of its first element and the step.
    '''
wl_map = get_correction_map(neon)
lam = geometry.fit_neon(np.sum(neon, axis=0), ref_neon[0], ref_neon[1])
lam2 = np.linspace(lam[0], lam[-1], len(lam))
    new_wl = np.array(list(map(lambda x: np.interp(lam2, lam, x), wl_map)))
return lam2, new_wl
def get_correction_map(neon, verbose=False, ref='mean'):
    '''Compute the interpolation map.
    Each row contains the coordinates onto which the original image has to be
    interpolated in order to correct the geometric distortions
    along the X axis.
    (The reference frames are emission-line spectra of gases.)
    ref = 'mean' - map onto the mean values
    ref = 'center' - map onto the values at the centre of the frame
    '''
y, x = np.shape(neon)
y = np.arange(y)
x = np.arange(x)
    # Take as fwhm the FWHM (in pixels) of the middle (along Y) row
fwhm = calc_fwhm(neon[int(len(neon) / 2)])
print(('fwhm = ', fwhm, 'pix\n') if verbose else '', end='')
    # Peaks in each row (a list of ndarrays of varying length)
peaks = list(map(lambda row: find_peaks(row, fwhm, 20), neon))
print('***all peaks are found***' if verbose else '')
    # Peaks sorted into spectral lines (2D masked array)
peaks = find_lines(peaks, fwhm, y, verbose)
print('***lines are found***' if verbose else '')
    # k - 2D polynomial coefficients
    # mask - which rows (along Y) to use
k, mask = my_polyfit(y, peaks, 2, 2)
if verbose:
plt.imshow(neon)
plt.plot(peaks, y, '.')
plt.plot(my_poly(k, y), y)
plt.show()
if ref == 'mean':
mean_peaks = ma.mean(peaks[mask], axis=0)
elif ref == 'center':
center = int(np.median(y))
mean_peaks = peaks[center]
i = 1
while (np.sum(mean_peaks.mask) != 0):
mean_peaks = np.median(peaks[center-i:center+i], axis=0)
i+=1
corr = np.polyfit(mean_peaks, k.T, 3)
corr_map = my_poly(my_poly(corr, x).T, y)
good_columns = (np.min(corr_map, axis=0) > 0)
    # For booleans, multiplication acts as a logical AND!
good_columns *= (np.max(corr_map, axis=0) < x[-1])
new_x = x[good_columns].astype('int')
corr_map = corr_map[:, new_x]
return corr_map
def calc_fwhm(spec, wl=None, n=3, guess=10):
if wl is None:
wl = np.arange(len(spec))
peaks = sig.find_peaks(spec)[0]
amps = spec[peaks]
peaks = peaks[np.argsort(amps)][-n:]
amps = amps[np.argsort(amps)][-n:]
fwhm = np.average(list(map(lambda x, A: one_peak_fwhm(x, A, wl, spec, guess),
wl[peaks], amps)))
return fwhm
def one_peak_fwhm(x, A, wl, spec, guess=1):
rng = (wl > x - guess) & (wl < x + guess)
return 2.355 * np.abs(opt.curve_fit(gauss, wl[rng], spec[rng],
p0=[guess, x, A])[0][0])
def find_peaks(spec, fwhm=0, h=1, acc=True):
    '''Find peaks above the given level h relative to the median.
    Then drop peaks that have neighbours closer than fwhm.'''
#spec = spec-np.min(spec)
spec = spec / np.median(spec)
# plt.plot(spec)
# plt.plot(np.ones(len(spec))*np.median(spec))
# plt.show()
pks = sig.find_peaks(spec, height=h)[0]
if acc:
pks = np.array(list(map(lambda x: xmax(spec, x, fwhm=fwhm), pks)))
pks = pks[pks > 0]
mask = np.append(np.diff(pks) < fwhm, False)
mask = mask + np.append([False], np.diff(pks) < fwhm)
try:
pks = pks[np.logical_not(mask)]
return(pks[::])
except IndexError:
return []
def find_lines(peaks, fwhm, y=None, verbose=False):
if y is None:
y = np.arange(len(peaks))
    # Make all rows the same length (padded to the longest one)
peaks = np.array(list(zip_longest(*peaks)), dtype='float')
# if verbose:
# plt.plot(peaks.T, y, 'o')
# plt.show()
msk = np.isnan(peaks)
peaks = ma.array(peaks, mask=msk)
col = ['C' + str(j) for j in range(9)]
# print(len(peaks))
# print()
for i in range(len(peaks)):
fuck = peaks[i:]
line = fuck[0]
# msk = np.logical_not(np.isnan(line))
# k = ma.polyfit(y, line, 2)
# print(k)
est = np.ones(len(y)) * ma.median(line)
# est = np.polyval(k, y)
err = est - line
move_right = ma.filled((err > 5 * ma.median(ma.abs(err))), False)
move_left = ma.filled((err < -5 * ma.median(ma.abs(err))), False)
not_move = np.logical_not(move_right + move_left)
# plt.plot(y[not_move], fuck[0][not_move], '.' + col[i % 9])
# plt.plot(y, est, col[i % 9], ls='--')
# plt.plot(y[move_right], fuck[0][move_right], 'x' + col[i % 9])
# plt.plot(y[move_left], fuck[0][move_left], '+' + col[i % 9])
# plt.show()
# print(i)
# print(ma.mean(ma.abs(err)))
# print(ma.median(line))
# print()
        if np.sum(move_right) > 0:  # those that are below the median (to the left)
nonearray = ma.array([[None] * np.sum(move_right.astype('int'))], mask=[[True] * np.sum(move_right.astype('int'))])
fuck[:, move_right] = ma.append(fuck[:, move_right][1:, :], nonearray, axis=0)
if np.sum(move_left) > 0:
nonearray = ma.array([[None] * np.sum(move_left.astype('int'))], mask=[[True] * np.sum(move_left.astype('int'))])
fuck[:, move_left] = ma.append(nonearray, fuck[:, move_left][:-1, :], axis=0)
# plt.plot(fuck[0], col[i%9])
peaks[i:] = fuck
plt.show()
peaks = peaks.T
msk = np.isnan(peaks)
peaks = ma.array(peaks, mask=msk)
good_lines = (np.sum(np.logical_not(msk), axis=0) > len(y) / 4.)
peaks = peaks[:, good_lines]
return peaks
def fit_neon(data, p, a, mode='wl'):
    '''mode = wl - return the wavelengths
    mode = k - return the coefficients
    '''
    data = data / np.median(data)  # Normalization
# plt.plot(data)
# plt.show()
fwhm = calc_fwhm(data)
    data_peaks = find_peaks(data, fwhm, h=20)  # Find the main peaks
# print(len(data_peaks))
    data_amp = data[data_peaks.astype('int')]  # Approximate peak heights
    n = 4  # Number of peaks used for the first approximation
    # Peaks for the first approximation (pixel coordinates)
ref_pix = np.sort(data_peaks[np.argsort(data_amp)[-n:]])
    # Peaks for the first approximation (wavelengths)
ref_lam = np.sort(p[np.argsort(a)[-n:]])
# print(ref_lam)
# print(ref_pix)
    # First approximation of the polynomial transform
k = np.polyfit(ref_pix, ref_lam, n-1)
    # Pixel coordinates after the first approximation
ref_pix = np.polyval(k, data_peaks)
    # Peaks for the second approximation (wavelengths)
ref_lam = p[a * data.max() > data_amp.min()]
    # First approximation (wavelengths)
lam = np.polyval(k, np.arange(len(data)))
# plt.plot(lam - k[-1], data, '--')
    # Remove peaks that stand too close to each other
fwhm_l = calc_fwhm(data, wl=lam)
mask = np.append(np.diff(ref_lam) < fwhm_l * 2, False)
mask = mask + np.append([False], np.diff(ref_lam) < fwhm_l * 2)
ref_lam = ref_lam[np.logical_not(mask)]
    # Match the peaks to one another
shape = (len(ref_pix), 1)
mask = np.argmin(np.abs( | np.tile(ref_lam, shape) | numpy.tile |
import csv
import math
from matplotlib import pyplot as plt
import numpy as np
from scipy import interpolate # strait up linear interpolation, nothing fancy
r2d = 180.0 / math.pi
# hz: resampling hz prior to correlation
# cam_mount: set approximate camera orienation (forward, down, and
# rear supported)
def sync_clocks(data, interp, movie_log, hz=60, cam_mount='forward',
force_time_shift=None, plot=True):
x = interp.imu_time
flight_min = x.min()
flight_max = x.max()
print("flight range = %.3f - %.3f (%.3f)" % (flight_min, flight_max, flight_max-flight_min))
# load movie log
movie = []
with open(movie_log, 'r') as f:
reader = csv.DictReader(f)
for row in reader:
record = [ float(row['frame']), float(row['time']),
float(row['rotation (deg)']),
float(row['translation x (px)']),
float(row['translation y (px)']) ]
movie.append( record )
# resample movie data
movie = np.array(movie, dtype=float)
movie_interp = []
x = movie[:,1]
movie_spl_roll = interpolate.interp1d(x, movie[:,2], bounds_error=False, fill_value=0.0)
movie_spl_pitch = interpolate.interp1d(x, movie[:,3], bounds_error=False, fill_value=0.0)
movie_spl_yaw = interpolate.interp1d(x, movie[:,4], bounds_error=False, fill_value=0.0)
xmin = x.min()
xmax = x.max()
print("movie range = %.3f - %.3f (%.3f)" % (xmin, xmax, xmax-xmin))
movie_len = xmax - xmin
for x in np.linspace(xmin, xmax, movie_len*hz):
if cam_mount == 'forward' or cam_mount == 'down':
movie_interp.append( [x, movie_spl_roll(x)] )
#movie_interp.append( [x, -movie_spl_yaw(x)] ) # test, fixme
else:
movie_interp.append( [x, -movie_spl_roll(x)] )
print("movie len:", len(movie_interp))
# resample flight data
flight_interp = []
if cam_mount == 'forward' or cam_mount == 'rear':
y_spline = interp.imu_p # forward/rear facing camera
else:
y_spline = interp.imu_r # down facing camera
time = flight_max - flight_min
for x in np.linspace(flight_min, flight_max, time*hz):
flight_interp.append( [x, y_spline(x)] )
#print "flight len:", len(flight_interp)
# compute best correlation between movie and flight data logs
movie_interp = np.array(movie_interp, dtype=float)
flight_interp = np.array(flight_interp, dtype=float)
do_butter_smooth = True
if do_butter_smooth:
# maybe filtering video estimate helps something?
import scipy.signal as signal
b, a = signal.butter(2, 10.0/(200.0/2))
flight_butter = signal.filtfilt(b, a, flight_interp[:,1])
movie_butter = signal.filtfilt(b, a, movie_interp[:,1])
ycorr = | np.correlate(flight_butter, movie_butter, mode='full') | numpy.correlate |
from builtins import zip
import numpy as np
from rubin_sim.maf.metrics.baseMetric import BaseMetric
from scipy.optimize import curve_fit
from rubin_sim.maf.utils import m52snr
import warnings
__all__ = ['periodicStar', 'PeriodicStarMetric']
class periodicStar(object):
def __init__(self, filternames):
self.filternames = filternames
def __call__(self, t,x0,x1,x2,x3,x4,x5,x6,x7,x8):
""" Approximate a periodic star as a simple sin wave.
t: array with "time" in days, and "filter" dtype names.
x0: Period (days)
x1: Phase (days)
x2: Amplitude (mag)
x3: mean u mag
x4: mean g mag
x5: mean r mag
x6: mean i mag
x7: mean z mag
x8: mean y mag
"""
filter2index = {'u':3, 'g':4, 'r':5, 'i':6,
'z':7,'y':8}
filterNames = np.unique(self.filternames)
mags = np.zeros(t.size,dtype=float)
mags = x2*np.sin((t+x1)/x0*2.*np.pi)
x=[x0,x1,x2,x3,x4,x5,x6,x7,x8]
for f in filterNames:
good = np.where(self.filternames == f)
mags[good] += x[filter2index[f]]
return mags
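# Minimal usage sketch (illustrative values): evaluate the model light curve for
# ten hypothetical r-band epochs with period 10 d, phase 2 d, amplitude 0.5 mag
# and a mean magnitude of 20 in every band.
def _example_periodic_star():
    times = np.linspace(0.0, 30.0, 10)
    filters = np.array(['r'] * times.size)
    model = periodicStar(filters)
    return model(times, 10.0, 2.0, 0.5, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0)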
class PeriodicStarMetric(BaseMetric):
""" At each slicePoint, run a Monte Carlo simulation to see how well a periodic source can be fit.
    Assumes a simple sin-wave light-curve, and generates Gaussian noise based on the 5-sigma limiting depth
of each observation.
"""
def __init__(self, metricName='PeriodicStarMetric', mjdCol='observationStartMJD',
m5Col='fiveSigmaDepth', filterCol='filter', period=10., amplitude=0.5,
phase=2.,
nMonte=1000, periodTol=0.05, ampTol=0.10, means=[20.,20.,20.,20.,20.,20.],
magTol=0.10, nBands=3, seed=42, **kwargs):
"""
period: days (default 10)
        amplitude: mags (default 0.5)
nMonte: number of noise realizations to make in the Monte Carlo
periodTol: fractional tolerance on the period to demand for a star to be considered well-fit
ampTol: fractional tolerance on the amplitude to demand
means: mean magnitudes for ugrizy
magTol: Mean magnitude tolerance (mags)
nBands: Number of bands that must be within magTol
seed: random number seed
"""
self.mjdCol = mjdCol
self.m5Col = m5Col
self.filterCol = filterCol
super(PeriodicStarMetric, self).__init__(col=[self.mjdCol, self.m5Col,self.filterCol],
units='Fraction Detected',
metricName=metricName,**kwargs)
self.period = period
self.amplitude = amplitude
self.phase = phase
self.nMonte = nMonte
self.periodTol = periodTol
self.ampTol = ampTol
self.means = np.array(means)
self.magTol = magTol
self.nBands = nBands
np.random.seed(seed)
self.filter2index = {'u':3, 'g':4, 'r':5, 'i':6, 'z':7,'y':8}
def run(self, dataSlice, slicePoint=None):
# Bail if we don't have enough points
if dataSlice.size < self.means.size+3:
return self.badval
# Generate input for true light curve
t = np.empty(dataSlice.size, dtype=list(zip(['time','filter'],[float,'|U1'])))
t['time'] = dataSlice[self.mjdCol]-dataSlice[self.mjdCol].min()
t['filter'] = dataSlice[self.filterCol]
# If we are adding a distance modulus to the magnitudes
if 'distMod' in list(slicePoint.keys()):
mags = self.means + slicePoint['distMod']
else:
mags = self.means
trueParams = np.append( | np.array([self.period, self.phase, self.amplitude]) | numpy.array |
import os
import sys
import numpy as np
import time
import matplotlib.pyplot as plt
import pandas as pd
from utils import *
def sliding_dot_product(q, t):
n = t.size
m = q.size
# Append t with n zeros
ta = np.append(t, np.zeros(n))
# Reverse Q
qr = np.flip(q, 0)
# Append qra
qra = np.append(qr, np.zeros(2 * n - m))
# Compute FFTs
qraf = np.fft.fft(qra)
taf = np.fft.fft(ta)
# Compute the inverse FFT to the element-wise multiplication of qraf and taf
qt = np.fft.ifft(np.multiply(qraf, taf))
return qt[m:n]
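# Minimal sanity check (illustrative): compare the FFT-based result with a naive
# sliding dot product. With the slice qt[m:n] used above, element k corresponds
# to the window t[k + 1 : k + 1 + m]; the FFT result is complex, so compare its
# real part.
def _example_sliding_dot_product():
    rng = np.random.default_rng(1)
    t = rng.standard_normal(128)
    q = rng.standard_normal(8)
    fft_based = sliding_dot_product(q, t).real
    naive = np.array([np.dot(q, t[k + 1:k + 1 + q.size])
                      for k in range(fft_based.size)])
    return np.allclose(fft_based, naive)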
def sliding_dot_product_stomp(q, t):
n = t.size
m = q.size
# Append t with n zeros
ta = np.append(t, | np.zeros(n) | numpy.zeros |
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import unittest
import numpy as np
from mo.front.common.partial_infer.elemental import copy_shape_infer
from mo.front.common.partial_infer.eltwise import eltwise_infer
from mo.middle.passes.fusing.resnet_optimization import stride_optimization
from mo.ops.convolution import Convolution
from mo.ops.pooling import Pooling
from mo.utils.ir_engine.compare_graphs import compare_graphs
from mo.utils.unittest.graph import build_graph
max_elt_lambda = lambda node: eltwise_infer(node, lambda a, b: np.maximum(a, b))
nodes_attributes = {
# Placeholders
'placeholder_1': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
    # Eltwise (Maximum) operation
'eltwise_1': {'type': 'Maximum', 'kind': 'op', 'op': 'Maximum', 'infer': max_elt_lambda},
'eltwise_1_data': {'name': 'eltwise_1_data', 'value': None, 'shape': None, 'kind': 'data'},
# Convolutions
'conv_1': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2D', 'layout': 'NCHW',
'output_spatial_shape': None, 'output_shape': None, 'bias_term': True, 'group': 1,
'spatial_dims': np.array([2, 3]),
'channel_dims': np.array([1]), 'pad_spatial_shape': | np.array([[0, 0], [0, 0]]) | numpy.array |
from . import util
from .image_class import ImageClass
from astropy.io import fits
import numpy as np
import logging
# clobber keyword is deprecated in astropy 1.3
from astropy import __version__
if __version__ < '1.3':
overwrite = {'clobber': True}
else:
overwrite = {'overwrite': True}
def calculate_difference_image(science, reference, gain_ratio=np.inf, gain_mask=None, use_pixels=False, show=False, percent=99, use_mask_for_gain=True):
"""
Calculate the difference image using the Zackay algorithm.
    This is the main function that calculates the difference image using the
    algorithm of Zackay, Ofek, Gal-Yam 2016. It operates on ImageClass objects defined in
    image_class.py. The function will fit the gain ratio if not provided.
    Ultimately this calculates equation 13 in Zackay, Ofek, Gal-Yam 2016.
Parameters
----------
science : PyZOGY.ImageClass
ImageClass instance created from the science image.
reference : PyZOGY.ImageClass
ImageClass instance created from the reference image.
gain_ratio : float, optional
        Ratio of the gains or flux based zero points of the two images.
gain_mask : str or numpy.ndarray, optional
Array or FITS file holding an array of pixels to use when fitting
the gain ratio.
use_pixels : bool, optional
Fit the gain ratio using pixels (True) or stars (False) in image.
show : bool, optional
Display debuggin plots during fitting.
percent : float, optional
Percentile cutoff to use for fitting the gain ratio.
Returns
-------
difference_image : numpy.ndarray
The difference between science and reference images.
"""
# match the gains
if gain_ratio == np.inf:
if gain_mask is not None:
if type(gain_mask) == str:
gain_mask_data = fits.getdata(gain_mask)
else:
gain_mask_data = gain_mask
science.mask[gain_mask_data == 1] = 1
reference.mask[gain_mask_data == 1] = 1
science.zero_point = util.solve_iteratively(science, reference, use_pixels=use_pixels, show=show,
percent=percent, use_mask=use_mask_for_gain)
else:
science.zero_point = gain_ratio
# create required arrays
science_image = science
reference_image = reference
science_psf = science.psf
reference_psf = reference.psf
# do fourier transforms (fft)
science_image_fft = np.fft.fft2(science_image)
reference_image_fft = np.fft.fft2(reference_image)
science_psf_fft = np.fft.fft2(science_psf)
reference_psf_fft = np.fft.fft2(reference_psf)
# calculate difference image
denominator = science.background_std ** 2 * reference.zero_point ** 2 * abs(reference_psf_fft) ** 2
denominator += reference.background_std ** 2 * science.zero_point ** 2 * abs(science_psf_fft) ** 2
difference_image_fft = science_image_fft * reference_psf_fft * reference.zero_point
difference_image_fft -= reference_image_fft * science_psf_fft * science.zero_point
difference_image_fft /= np.sqrt(denominator)
difference_image = np.fft.ifft2(difference_image_fft)
return difference_image
def calculate_difference_image_zero_point(science, reference):
"""
Calculate the flux based zero point of the difference image.
Calculate the difference image flux based zero point using equation 15 of
<NAME> 2016.
Parameters
----------
science : PyZOGY.ImageClass
ImageClass instance created from the science image.
reference : PyZOGY.ImageClass
ImageClass instance created from the reference image.
Returns
-------
difference_image_zero_point : float
Flux based zero point of the difference image.
"""
denominator = science.background_std ** 2 * reference.zero_point ** 2
denominator += reference.background_std ** 2 * science.zero_point ** 2
difference_image_zero_point = science.zero_point * reference.zero_point / np.sqrt(denominator)
logging.info('Global difference image zero point is {}'.format(np.mean(difference_image_zero_point)))
return difference_image_zero_point
def calculate_difference_psf(science, reference, difference_image_zero_point):
"""
Calculate the PSF of the difference image.
    Calculates the PSF of the difference image using equation 17 of Zackay,
<NAME> 2016.
Parameters
----------
science : PyZOGY.ImageClass
ImageClass instance created from the science image.
reference : PyZOGY.ImageClass
ImageClass instance created from the reference image.
difference_image_zero_point : float
Flux based zero point of the difference image.
Returns
-------
difference_psf : numpy.ndarray
PSF of the difference image.
"""
science_psf_fft = np.fft.fft2(science.psf)
reference_psf_fft = np.fft.fft2(reference.psf)
denominator = science.background_std ** 2 * reference.zero_point ** 2 * abs(reference_psf_fft) ** 2
denominator += reference.background_std ** 2 * science.zero_point ** 2 * abs(science_psf_fft) ** 2
difference_psf_fft = science.zero_point * science_psf_fft * reference_psf_fft
difference_psf_fft /= difference_image_zero_point * np.sqrt(denominator)
difference_psf = np.fft.ifft2(difference_psf_fft)
return difference_psf
def calculate_matched_filter_image(difference_image, difference_psf, difference_zero_point):
"""
Calculate the matched filter difference image.
    Calculates the matched filter difference image described in Zackay, Ofek,
    Gal-Yam 2016, equation 16.
Parameters
----------
difference_image : numpy.ndarray
A difference image as calculated using calculate_difference_image.
difference_psf : numpy.ndarray
PSF for the difference image above.
    difference_zero_point : float
Flux based zero point for the image above.
Returns
-------
matched_filter : numpy.ndarray
Matched filter image.
"""
matched_filter_fft = difference_zero_point * np.fft.fft2(difference_image) * np.conj(np.fft.fft2(difference_psf))
matched_filter = np.fft.ifft2(matched_filter_fft)
return matched_filter
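# Minimal end-to-end sketch (hypothetical inputs): `science` and `reference` are
# assumed to be PyZOGY ImageClass instances with matching shapes and with psf,
# background_std and zero_point already populated. This only illustrates the
# intended order of the calls defined above.
def _example_zogy_pipeline(science, reference):
    d = calculate_difference_image(science, reference)
    f_d = calculate_difference_image_zero_point(science, reference)
    p_d = calculate_difference_psf(science, reference, f_d)
    s = calculate_matched_filter_image(d, p_d, f_d)
    return d.real, s.real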
def source_noise(image, kernel):
"""
Calculate source noise correction for matched filter image
    Calculate the noise due to the sources in an image. The output is used by
    the noise-corrected matched filter image. This is equation 26 in Zackay, Ofek,
    Gal-Yam 2016.
Parameters
----------
image : PyZOGY.ImageClass
ImageClass instance with read_noise attribute defined.
kernel : numpy.ndarray
Convolution kernel for the noise image. This comes from the function
called noise_kernels.
Returns
-------
image_variance_corr : numpy.ndarray
Variance of the image due to source noise.
"""
if image.variance is None:
image.variance = np.copy(image.raw_image) + image.read_noise
image_variance_corr = np.fft.ifft2(np.fft.fft2(image.variance) * | np.fft.fft2(kernel ** 2) | numpy.fft.fft2 |
import numpy as np
from groupy.garray.matrix_garray import MatrixGArray
from groupy.garray.Z2_array import Z2Array
# A transformation in p4m can be coded using four integers:
# m in {0, 1}, mirror reflection in the second translation axis or not
# r in {0, 1, 2, 3}, the rotation index
# u, translation along the first spatial axis
# v, translation along the second spatial axis
# We will always store these in the order (m, r, u, v).
# This is called the 'int' parameterization of p4m.
# A matrix representation of this group is given by
# T(u, v) M(m) R(r)
# where
# T = [[ 1, 0, u],
# [ 0, 1, v],
# [ 0, 0, 1]]
# M = [[ (-1) ** m, 0, 0],
# [ 0, 1, 0],
# [ 0, 0, 1]]
# R = [[ cos(r pi / 2), -sin(r pi /2), 0],
# [ sin(r pi / 2), cos(r pi / 2), 0],
# [ 0, 0, 1]]
# This is called the 'hmat' (homogeneous matrix) parameterization of p4m.
# The matrix representation is easier to work with when multiplying and inverting group elements,
# while the integer parameterization is required when indexing gfunc on p4m.
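# Worked example (illustrative): the 'hmat' for (m, r, u, v) = (1, 1, 2, 3) is
# T(2, 3) M(1) R(1) =
# [[ 0, 1, 2],
#  [ 1, 0, 3],
#  [ 0, 0, 1]]
# since M(1) flips the sign of the first coordinate and R(1) rotates by 90 degrees.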
class P4MArray(MatrixGArray):
parameterizations = ['int', 'hmat']
_g_shapes = {'int': (4,), 'hmat': (3, 3)}
_left_actions = {}
_reparameterizations = {}
_group_name = 'p4m'
def __init__(self, data, p='int'):
data = np.asarray(data)
assert data.dtype == np.int
assert (p == 'int' and data.shape[-1] == 4) or (p == 'hmat' and data.shape[-2:] == (3, 3))
self._left_actions[P4MArray] = self.__class__.left_action_hmat
self._left_actions[Z2Array] = self.__class__.left_action_hvec
super(P4MArray, self).__init__(data, p)
def int2hmat(self, int_data):
m = int_data[..., 0]
r = int_data[..., 1]
u = int_data[..., 2]
v = int_data[..., 3]
out = | np.zeros(int_data.shape[:-1] + (3, 3), dtype=np.int) | numpy.zeros |
"""Tools for RFI flagging
This module contains tools for finding and removing Radio Frequency Interference
(RFI).
Note that this generates masks where the elements containing RFI are marked as
:obj:`True`, and the remaining elements are marked :obj:`False`. This is in
contrast to the routines in :mod:`ch_pipeline.rfi` which generates a inverse
noise weighting, where RFI containing elements are effectively :obj:`False`, and
the remainder are :obj:`True`.
There are general purpose routines for flagging RFI in `andata` like datasets:
- :py:meth:`flag_dataset`
- :py:meth:`number_deviations`
For more control there are specific routines that can be called:
- :py:meth:`mad_cut_2d`
- :py:meth:`mad_cut_1d`
- :py:meth:`mad_cut_rolling`
- :py:meth:`spectral_cut`
- :py:meth:`frequency_mask`
- :py:meth:`sir1d`
- :py:meth:`sir`
"""
import warnings
import logging
import numpy as np
import scipy.signal as sig
from . import tools
# Set up logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
# Ranges of bad frequencies given by their start and end frequencies (in MHz)
bad_frequencies = np.array(
[
[449.41, 450.98],
[454.88, 456.05],
[457.62, 459.18],
[483.01, 485.35],
[487.70, 494.34],
[497.85, 506.05],
[529.10, 536.52],
[541.60, 554.49],
[564.65, 585.35],
[693.16, 693.55],
[694.34, 696.68],
[729.88, 745.12],
[746.29, 756.45],
]
)
def flag_dataset(
data, freq_width=10.0, time_width=420.0, threshold=5.0, flag1d=False, rolling=False
):
"""RFI flag the dataset. This function wraps `number_deviations`,
    and remains largely for backwards compatibility. The pipeline code
now calls `number_deviations` directly.
Parameters
----------
data : `andata.CorrData`
Must contain vis and weight attribute that are both
`np.ndarray[nfreq, nprod, ntime]`. Note that this
function does not work with CorrData that has
been stacked over redundant baselines.
freq_width : float
Frequency interval in *MHz* to compare across.
time_width : float
Time interval in *seconds* to compare.
threshold : float
Threshold in MAD over which to cut out RFI.
rolling : bool
Use a rolling window instead of distinct blocks.
flag1d : bool, optional
Only apply the MAD cut in the time direction. This is useful if the
frequency coverage is sparse.
Returns
-------
mask : np.ndarray
RFI mask, output shape is the same as input visibilities.
"""
auto_ii, auto_vis, auto_ndev = number_deviations(
data,
freq_width=freq_width,
time_width=time_width,
flag1d=flag1d,
rolling=rolling,
stack=False,
)
auto_mask = np.abs(auto_ndev) > threshold
# Apply the frequency cut to the data (add here because we are distributed
    # over products and it's easy)
freq_mask = frequency_mask(data.freq)
    auto_mask = np.logical_or(auto_mask, freq_mask[:, np.newaxis, np.newaxis])
# Create an empty mask for the full dataset
mask = np.zeros(data.vis[:].shape, dtype=np.bool)
# Loop over all products and flag if either inputs auto correlation was flagged
for pi in range(data.nprod):
ii, ij = data.index_map["prod"][pi]
if ii in auto_ii:
ai = auto_ii.index(ii)
mask[:, pi] = np.logical_or(mask[:, pi], auto_mask[:, ai])
if ij in auto_ii:
aj = auto_ii.index(ij)
mask[:, pi] = np.logical_or(mask[:, pi], auto_mask[:, aj])
return mask
def number_deviations(
data,
freq_width=10.0,
time_width=420.0,
flag1d=False,
apply_static_mask=False,
rolling=False,
stack=False,
normalize=False,
fill_value=None,
):
"""Calculate the number of median absolute deviations (MAD)
of the autocorrelations from the local median.
Parameters
----------
data : `andata.CorrData`
Must contain vis and weight attributes that are both
`np.ndarray[nfreq, nprod, ntime]`.
freq_width : float
Frequency interval in *MHz* to compare across.
time_width : float
Time interval in *seconds* to compare across.
flag1d : bool
Only apply the MAD cut in the time direction. This is useful if the
frequency coverage is sparse.
apply_static_mask : bool
Apply static mask obtained from `frequency_mask` before computing
the median absolute deviation.
rolling : bool
Use a rolling window instead of distinct blocks.
stack: bool
Average over all autocorrelations.
normalize : bool
Normalize by the median value over time prior to averaging over
autocorrelations. Only relevant if `stack` is True.
fill_value: float
Data that was already flagged as bad will be set to this value in
the output array. Should be a large positive value that is greater
than the threshold that will be placed. Default is float('Inf').
Returns
-------
auto_ii: np.ndarray[ninput,]
Index of the inputs that have been processed.
If stack is True, then [0] will be returned.
auto_vis: np.ndarray[nfreq, ninput, ntime]
The autocorrelations that were used to calculate
the number of deviations.
ndev : np.ndarray[nfreq, ninput, ntime]
Number of median absolute deviations of the autocorrelations
from the local median.
"""
from caput import memh5, mpiarray
if fill_value is None:
fill_value = float("Inf")
# Check if dataset is parallel
parallel = isinstance(data.vis, memh5.MemDatasetDistributed)
data.redistribute("freq")
# Extract the auto correlations
prod = data.index_map["prod"][data.index_map["stack"]["prod"]]
auto_ii, auto_pi = np.array(
list(zip(*[(pp[0], ind) for ind, pp in enumerate(prod) if pp[0] == pp[1]]))
)
auto_vis = data.vis[:, auto_pi, :].view(np.ndarray).copy().real
# If requested, average over all inputs to construct the stacked autocorrelations
# for the instrument (also known as the incoherent beam)
if stack:
weight = (data.weight[:, auto_pi, :].view(np.ndarray) > 0.0).astype(np.float32)
# Do not include bad inputs in the average
partial_stack = data.index_map["stack"].size < data.index_map["prod"].size
if not partial_stack and hasattr(data, "input_flags"):
input_flags = data.input_flags[:]
logger.info(
"There are on average %d good inputs."
% np.mean(np.sum(input_flags, axis=0), axis=-1)
)
if np.any(input_flags) and not np.all(input_flags):
logger.info("Applying input_flags to weight.")
weight *= input_flags[np.newaxis, auto_ii, :].astype(weight.dtype)
if normalize:
logger.info("Normalizing autocorrelations prior to stacking.")
med_auto = nanmedian(
np.where(weight, auto_vis, np.nan), axis=-1, keepdims=True
)
med_auto = np.where(np.isfinite(med_auto), med_auto, 0.0)
auto_vis *= tools.invert_no_zero(med_auto)
norm = np.sum(weight, axis=1, keepdims=True)
auto_vis = np.sum(
weight * auto_vis, axis=1, keepdims=True
) * tools.invert_no_zero(norm)
auto_flag = norm > 0.0
auto_ii = np.zeros(1, dtype=np.int)
else:
auto_flag = data.weight[:, auto_pi, :].view(np.ndarray) > 0.0
# Convert back to an MPIArray distributed over the freq axis
if parallel:
auto_flag = mpiarray.MPIArray.wrap(auto_flag, axis=0, comm=data.vis.comm)
auto_vis = mpiarray.MPIArray.wrap(auto_vis, axis=0, comm=data.vis.comm)
# Now redistribute the array over inputs
if parallel:
auto_vis = auto_vis.redistribute(1)
auto_flag = auto_flag.redistribute(1)
# Create static flag of frequencies that are known to be bad
if apply_static_mask:
static_flag = ~frequency_mask(data.freq)
else:
static_flag = np.ones(data.nfreq, dtype=np.bool)
static_flag = static_flag[:, np.newaxis]
# Create an empty array for number of median absolute deviations
ndev = np.zeros(auto_vis.shape, dtype=np.float32)
# Calculate frequency interval in bins
fwidth = (
int(freq_width / np.median(np.abs(np.diff(data.freq)))) + 1 if not flag1d else 1
)
# Calculate time interval in samples
twidth = int(time_width / np.median(np.abs(np.diff(data.time)))) + 1
# Loop over extracted autos and create a mask for each
for ind in range(auto_vis.shape[1]):
# Create a quick copy
flg = static_flag & auto_flag[:, ind].view(np.ndarray)
arr = auto_vis[:, ind].view(np.ndarray).copy()
# Use NaNs to ignore previously flagged data when computing the MAD
arr = np.where(flg, arr.real, np.nan)
# Apply RFI flagger
if rolling:
ndev_i = mad_cut_rolling(arr, twidth=twidth, fwidth=fwidth, mask=False)
elif flag1d:
ndev_i = mad_cut_1d(arr, twidth=twidth, mask=False)
else:
ndev_i = mad_cut_2d(arr, twidth=twidth, fwidth=fwidth, mask=False)
ndev[:, ind, :] = ndev_i
# Fill any values equal to NaN with the user specified fill value
ndev = np.where(np.isfinite(ndev), ndev, fill_value)
# Convert back to an MPIArray and redistribute over freq axis
if parallel:
ndev = mpiarray.MPIArray.wrap(ndev, axis=1, comm=data.vis.comm)
ndev = ndev.redistribute(0)
auto_vis = auto_vis.redistribute(0)
return auto_ii, auto_vis, ndev
def spectral_cut(data, fil_window=15, only_autos=False):
"""Flag out the TV bands, or other constant spectral RFI.
Parameters
----------
data : `andata.obj`
If `only_autos` shape is (freq, n_feeds, time), else (freq, n_prod,
time).
fil_window : integer
Window of median filter for baseline of chime spectrum. Default is 15.
only_autos : boolean
Whether data contains only autos or not.
Returns
-------
mask: np.ndarray[freq,time]
RFI mask (no product axis).
"""
if only_autos:
data_vis = data.vis[:].real
else:
nfeed = int((2 * data.vis.shape[1]) ** 0.5)
auto_ind = [tools.cmap(i, i, nfeed) for i in range(nfeed)]
data_vis = data.vis[:, auto_ind].real
stack_autos = np.mean(data_vis, axis=1)
stack_autos_time_ave = np.mean(stack_autos, axis=-1)
# Locations of the generally decent frequency bands
drawn_bool_mask = frequency_mask(data.freq)
good_data = np.logical_not(drawn_bool_mask)
    # Calculate the standard deviation of the average channel
std_arr = np.std(stack_autos, axis=-1)
sigma = np.median(std_arr) / np.sqrt(
stack_autos.shape[1]
) # standard deviation of the mean
# Smooth with a median filter, and then interpolate to estimate the
# baseline of the spectrum
fa = np.arange(data_vis.shape[0])
medfilt = sig.medfilt(stack_autos_time_ave[good_data], fil_window)
interpolat_arr_baseline = np.interp(fa, fa[good_data], medfilt)
rel_pow = stack_autos_time_ave - interpolat_arr_baseline
# Mask out frequencies with too much power
mask_1d = rel_pow > 10 * sigma
# Generate mask
mask = np.zeros((data_vis.shape[0], data_vis.shape[2]), dtype=np.bool)
mask[:] = mask_1d[:, None]
return mask
def frequency_mask(freq_centre, freq_width=None):
"""Flag known bad frequencies.
Parameters
----------
freq_centre : np.ndarray[nfreq]
Centre of each frequency channel.
freq_width : np.ndarray[nfreq] or float, optional
Width of each frequency channel. If `None` (default), calculate the
width from the frequency centre separation.
Returns
-------
mask : np.ndarray[nfreq]
An array marking the bad frequency channels.
"""
if freq_width is None:
freq_width = np.abs(np.median(np.diff(freq_centre)))
mask = np.zeros_like(freq_centre, dtype=np.bool)
freq_start = freq_centre - freq_width / 2
freq_end = freq_centre + freq_width / 2
for fs, fe in bad_frequencies:
tm = np.logical_and(freq_end > fs, freq_start < fe)
mask = np.logical_or(mask, tm)
return mask
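# Minimal usage sketch (illustrative): flag the static bad bands on a uniform
# 400-800 MHz grid and count how many of the 1024 channels get masked.
def _example_frequency_mask():
    freq = np.linspace(400.0, 800.0, 1024)
    mask = frequency_mask(freq)
    return int(mask.sum()), mask.size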
def mad_cut_2d(data, fwidth=64, twidth=42, threshold=5.0, freq_flat=True, mask=True):
"""Mask out RFI using a median absolute deviation cut in time-frequency blocks.
Parameters
----------
data : np.ndarray[freq, time]
Array of data to mask.
fwidth : integer, optional
Number of frequency samples to average median over.
twidth : integer, optional
Number of time samples to average median over.
threshold : scalar, optional
Number of median deviations above which we cut the data.
freq_flat : boolean, optional
Flatten in the frequency direction by dividing through by the median.
mask : boolean, optional
If True return the mask, if False return the number of
median absolute deviations.
Returns
-------
mask : np.ndarray[freq, time]
Mask or number of median absolute deviations for each sample.
"""
median = nanmedian if np.any(~np.isfinite(data)) else np.median
flen = int(np.ceil(data.shape[0] * 1.0 / fwidth))
tlen = int(np.ceil(data.shape[1] * 1.0 / twidth))
if mask:
madmask = np.ones(data.shape, dtype="bool")
else:
madmask = np.ones(data.shape, dtype=np.float64)
if freq_flat:
# Flatten
mfd = tools.invert_no_zero(median(data, axis=1))
data *= mfd[:, np.newaxis]
## Iterate over all frequency and time blocks
#
# This can be done more quickly by reshaping the arrays into blocks, but
# only works when there are an integer number of blocks. Probably best to
# rewrite in cython.
for fi in range(flen):
fs = fi * fwidth
fe = min((fi + 1) * fwidth, data.shape[0])
for ti in range(tlen):
ts = ti * twidth
te = min((ti + 1) * twidth, data.shape[1])
dsec = data[fs:fe, ts:te]
msec = madmask[fs:fe, ts:te]
mval = median(dsec.flatten())
dev = dsec - mval
med_abs_dev = median(np.abs(dev).flatten())
med_inv = tools.invert_no_zero(med_abs_dev)
if mask:
msec[:] = (np.abs(dev) * med_inv) > threshold
else:
msec[:] = dev * med_inv
return madmask
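# Minimal usage sketch (illustrative): noise-only data with one injected outlier;
# the outlier should be flagged (a handful of random samples may also exceed the
# 5-MAD threshold). Note that mad_cut_2d scales `data` in place when freq_flat
# is True, hence the copy.
def _example_mad_cut_2d():
    rng = np.random.default_rng(2)
    data = 100.0 + rng.standard_normal((256, 512))
    data[10, 20] += 1000.0
    mask = mad_cut_2d(data.copy(), fwidth=64, twidth=64, threshold=5.0)
    return bool(mask[10, 20]), int(mask.sum())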
def mad_cut_1d(data, twidth=42, threshold=5.0, mask=True):
"""Mask out RFI using a median absolute deviation cut in the time direction.
This is useful for datasets with sparse frequency coverage. Functionally
this routine is equivalent to :func:`mad_cut_2d` with `fwidth = 1`, but will
be much faster.
Parameters
----------
data : np.ndarray[freq, time]
Array of data to mask.
twidth : integer, optional
Number of time samples to average median over.
threshold : scalar, optional
Number of median deviations above which we cut the data.
mask : boolean, optional
If True return the mask, if False return the number of
median absolute deviations.
Returns
-------
mask : np.ndarray[freq, time]
Mask or number of median absolute deviations for each sample.
"""
median = nanmedian if np.any(~np.isfinite(data)) else np.median
tlen = int(np.ceil(data.shape[1] * 1.0 / twidth))
if mask:
madmask = | np.ones(data.shape, dtype="bool") | numpy.ones |
import os
import numpy as np
import oct2py
from pilco.controller.linear_controller import LinearController
from pilco.controller.rbf_controller import RBFController
from pilco.util.util import squash_action_dist
octave = oct2py.Oct2Py()
dir_path = "pilco/test/matlab_code"
print(dir_path)
octave.addpath(dir_path)
def test_rbf():
np.random.seed(0)
state_dim = 5
n_actions = 2
n_features = 10
# Training Dataset
X0 = np.random.rand(n_features, state_dim)
A = np.random.rand(state_dim, n_actions)
Y0 = np.sin(X0).dot(A) + 1e-3 * (np.random.rand(n_features, n_actions) - 0.5)
length_scales = np.random.rand(n_actions, state_dim)
rbf = RBFController(X0, Y0, n_actions=n_actions, length_scales=length_scales)
# Generate input
mu = np.random.rand(1, state_dim)
sigma = np.random.rand(state_dim, state_dim)
sigma = sigma.dot(sigma.T) # Make sigma positive semidefinite
M, S, V = rbf.choose_action(mu, sigma, None)
# V is already multiplied with S, have to revert that to run positive test
V = np.linalg.solve(sigma, np.eye(sigma.shape[0])) @ V
# convert data to the struct expected by the MATLAB implementation
length_scales = length_scales.reshape(n_actions, state_dim)
sigma_f = rbf.sigma_f()
sigma_eps = rbf.sigma_eps()
hyp = np.hstack(
(length_scales,
sigma_f,
sigma_eps)
).T
gpmodel = oct2py.io.Struct()
gpmodel.hyp = hyp
gpmodel.inputs = X0
gpmodel.targets = Y0
M_mat, S_mat, V_mat = octave.gp2(gpmodel, mu.T, sigma, nout=3)
M_mat = np.asarray(M_mat)[:, 0]
S_mat = np.atleast_2d(S_mat)
V_mat = np.asarray(V_mat)
assert M.shape == M_mat.T.shape
assert S.shape == S_mat.shape
assert V.shape == V_mat.shape
np.testing.assert_allclose(M, M_mat.T, rtol=1e-5)
np.testing.assert_allclose(S, S_mat, rtol=1e-5)
np.testing.assert_allclose(V, V_mat, rtol=1e-5)
def test_set_params_rbf():
np.random.seed(0)
state_dim = 5
n_actions = 2
n_features = 10
X0 = | np.random.rand(n_features, state_dim) | numpy.random.rand |
from functools import partial
import numpy as np
import pytest
import nengo
import nengo.utils.numpy as npext
from nengo.connection import ConnectionSolverParam
from nengo.dists import Choice, UniformHypersphere
from nengo.exceptions import BuildError, ValidationError
from nengo.solvers import LstsqL2
from nengo.processes import Piecewise
from nengo.transforms import Dense, NoTransform
from nengo.utils.testing import signals_allclose
def test_args(AnyNeuronType, seed, rng):
N = 10
d1, d2 = 3, 2
with nengo.Network(seed=seed) as model:
model.config[nengo.Ensemble].neuron_type = AnyNeuronType()
A = nengo.Ensemble(N, dimensions=d1)
B = nengo.Ensemble(N, dimensions=d2)
nengo.Connection(
A,
B,
eval_points=rng.normal(size=(500, d1)),
synapse=0.01,
function=np.sin,
transform=rng.normal(size=(d2, d1)),
)
def test_node_to_neurons(Simulator, PositiveNeuronType, plt, seed, allclose):
N = 50
m = nengo.Network(seed=seed)
with m:
m.config[nengo.Ensemble].neuron_type = PositiveNeuronType()
a = nengo.Ensemble(N, dimensions=1)
inn = nengo.Node(output=np.sin)
inh = nengo.Node(Piecewise({0: 0, 0.5: 1}))
nengo.Connection(inn, a)
nengo.Connection(inh, a.neurons, transform=[[-5]] * N)
inn_p = nengo.Probe(inn, "output")
a_p = nengo.Probe(a, "decoded_output", synapse=0.1)
inh_p = nengo.Probe(inh, "output")
with Simulator(m) as sim:
sim.run(1.0)
t = sim.trange()
ideal = np.sin(t)
ideal[t >= 0.5] = 0
plt.plot(t, sim.data[inn_p], label="Input")
plt.plot(t, sim.data[a_p], label="Neuron approx, synapse=0.1")
plt.plot(t, sim.data[inh_p], label="Inhib signal")
plt.plot(t, ideal, label="Ideal output")
plt.legend(loc="best", fontsize="small")
assert allclose(sim.data[a_p][-10:], 0, atol=0.1, rtol=0.01)
def test_ensemble_to_neurons(Simulator, PositiveNeuronType, plt, seed, allclose):
with nengo.Network(seed=seed) as net:
net.config[nengo.Ensemble].neuron_type = PositiveNeuronType()
ens = nengo.Ensemble(40, dimensions=1)
inhibitor = nengo.Ensemble(40, dimensions=1)
stim = nengo.Node(output=np.sin)
inhibition = nengo.Node(Piecewise({0: 0, 0.5: 1}))
nengo.Connection(stim, ens)
nengo.Connection(inhibition, inhibitor)
nengo.Connection(
inhibitor, ens.neurons, transform=-10 * np.ones((ens.n_neurons, 1))
)
stim_p = nengo.Probe(stim, "output")
ens_p = nengo.Probe(ens, "decoded_output", synapse=0.05)
inhibitor_p = nengo.Probe(inhibitor, "decoded_output", synapse=0.05)
inhibition_p = nengo.Probe(inhibition, "output")
with Simulator(net) as sim:
sim.run(1.0)
t = sim.trange()
ideal = np.sin(t)
ideal[t >= 0.5] = 0
plt.plot(t, sim.data[stim_p], label="Input")
plt.plot(t, sim.data[ens_p], label="`ens` value, pstc=0.05")
plt.plot(t, sim.data[inhibitor_p], label="`inhibitor` value, pstc=0.05")
plt.plot(t, sim.data[inhibition_p], label="Inhibition signal")
plt.plot(t, ideal, label="Ideal output")
plt.legend(loc=0, prop={"size": 10})
assert allclose(sim.data[ens_p][-10:], 0, atol=0.1, rtol=0.01)
assert allclose(sim.data[inhibitor_p][-10:], 1, atol=0.1, rtol=0.01)
def test_node_to_ensemble(Simulator, NonDirectNeuronType, plt, seed, allclose):
N = 50
m = nengo.Network(seed=seed)
with m:
m.config[nengo.Ensemble].neuron_type = NonDirectNeuronType()
input_node = nengo.Node(output=lambda t: [np.sin(t * 3), np.cos(t * 3)])
a = nengo.Ensemble(N * 1, dimensions=1)
b = nengo.Ensemble(N * 1, dimensions=1)
c = nengo.Ensemble(N * 2, dimensions=2)
d = nengo.Ensemble(N, neuron_type=nengo.Direct(), dimensions=3)
nengo.Connection(input_node, a, function=lambda x: -x[0])
nengo.Connection(input_node[:1], b, function=lambda x: -x)
nengo.Connection(input_node, c, function=lambda x: -(x ** 2))
nengo.Connection(
input_node, d, function=lambda x: [-x[0], -(x[0] ** 2), -(x[1] ** 2)]
)
a_p = nengo.Probe(a, "decoded_output", synapse=0.01)
b_p = nengo.Probe(b, "decoded_output", synapse=0.01)
c_p = nengo.Probe(c, "decoded_output", synapse=0.01)
d_p = nengo.Probe(d, "decoded_output", synapse=0.01)
with Simulator(m) as sim:
sim.run(2.0)
t = sim.trange()
plt.plot(t, sim.data[a_p])
plt.plot(t, sim.data[b_p])
plt.plot(t, sim.data[c_p])
plt.plot(t, sim.data[d_p])
plt.legend(
[
"-sin",
"-sin",
"-(sin ** 2)",
"-(cos ** 2)",
"-sin",
"-(sin ** 2)",
"-(cos ** 2)",
],
loc="best",
fontsize="small",
)
assert allclose(sim.data[a_p][-10:], sim.data[d_p][-10:][:, 0], atol=0.1, rtol=0.01)
assert allclose(sim.data[b_p][-10:], sim.data[d_p][-10:][:, 0], atol=0.1, rtol=0.01)
assert allclose(
sim.data[c_p][-10:], sim.data[d_p][-10:][:, 1:3], atol=0.1, rtol=0.01
)
def test_neurons_to_ensemble(Simulator, PositiveNeuronType, plt, seed):
N = 20
m = nengo.Network(seed=seed)
with m:
m.config[nengo.Ensemble].neuron_type = PositiveNeuronType()
a = nengo.Ensemble(N * 2, dimensions=2)
b = nengo.Ensemble(N, dimensions=1)
c = nengo.Ensemble(N, dimensions=N * 2)
nengo.Connection(a.neurons, b, transform=-5 * np.ones((1, N * 2)))
nengo.Connection(a.neurons, c)
b_p = nengo.Probe(b, "decoded_output", synapse=0.01)
c_p = nengo.Probe(c, "decoded_output", synapse=0.01)
with Simulator(m) as sim:
sim.run(0.1)
t = sim.trange()
plt.plot(t, sim.data[b_p], c="b")
plt.plot(t, sim.data[c_p], c="k")
plt.legend(
["Negative weights", "Neurons -> Ensemble dimensions"],
loc="best",
fontsize="small",
)
plt.xlim(right=t[-1])
assert np.all(sim.data[b_p][-10:] < 0)
def test_neurons_to_node(Simulator, NonDirectNeuronType, plt, seed, allclose):
N = 5
m = nengo.Network(seed=seed)
with m:
m.config[nengo.Ensemble].neuron_type = NonDirectNeuronType()
a = nengo.Ensemble(N, dimensions=1, encoders=np.ones((N, 1)))
out = nengo.Node(lambda t, x: x, size_in=N)
nengo.Connection(nengo.Node(1), a)
nengo.Connection(a.neurons, out, synapse=None)
a_spikes = nengo.Probe(a.neurons, synapse=0.005)
out_p = nengo.Probe(out, synapse=0.005)
with Simulator(m) as sim:
sim.run(0.1)
t = sim.trange()
plt.subplot(2, 1, 1)
plt.title("Activity filtered with $\\tau$ = 0.005")
plt.ylabel("Neural activity")
plt.plot(t, sim.data[a_spikes])
plt.xlim(right=t[-1])
plt.subplot(2, 1, 2)
plt.ylabel("Node activity")
plt.plot(t, sim.data[out_p])
plt.xlim(right=t[-1])
assert allclose(sim.data[a_spikes], sim.data[out_p])
def test_neurons_to_neurons(Simulator, PositiveNeuronType, plt, seed, allclose):
N1, N2 = 50, 80
m = nengo.Network(seed=seed)
with m:
m.config[nengo.Ensemble].neuron_type = PositiveNeuronType()
a = nengo.Ensemble(N1, dimensions=1)
b = nengo.Ensemble(N2, dimensions=1)
inp = nengo.Node(output=1)
nengo.Connection(inp, a)
nengo.Connection(a.neurons, b.neurons, transform=-1 * np.ones((N2, N1)))
inp_p = nengo.Probe(inp, "output")
a_p = nengo.Probe(a, "decoded_output", synapse=0.1)
b_p = nengo.Probe(b, "decoded_output", synapse=0.1)
with Simulator(m) as sim:
sim.run(0.6)
t = sim.trange()
plt.plot(t, sim.data[inp_p], label="Input")
plt.plot(t, sim.data[a_p], label="A, represents input")
plt.plot(t, sim.data[b_p], label="B, should be 0")
plt.ylim(top=1.1)
plt.xlim(right=t[-1])
plt.legend(loc="best")
assert allclose(sim.data[a_p][-10:], 1, atol=0.1, rtol=0.01)
assert allclose(sim.data[b_p][-10:], 0, atol=0.1, rtol=0.01)
def test_function_and_transform(Simulator, plt, seed, allclose):
"""Test using both a function and a transform"""
model = nengo.Network(seed=seed)
with model:
u = nengo.Node(output=lambda t: np.sin(6 * t))
a = nengo.Ensemble(100, 1)
b = nengo.Ensemble(200, 2, radius=1.5)
nengo.Connection(u, a)
nengo.Connection(a, b, function=np.square, transform=[[1.0], [-1.0]])
ap = nengo.Probe(a, synapse=0.03)
bp = nengo.Probe(b, synapse=0.03)
with Simulator(model) as sim:
sim.run(0.8)
x = np.dot(sim.data[ap] ** 2, [[1.0, -1]]).T
y = sim.data[bp].T
t = sim.trange()
plt.plot(t, x[0], "b:", label="a**2")
plt.plot(t, x[1], "g:", label="-a**2")
plt.plot(t, y[0], "b", label="b[0]")
plt.plot(t, y[1], "g", label="b[1]")
plt.legend(loc=0, prop={"size": 10})
plt.xlim(right=t[-1])
assert allclose(x[0], y[0], atol=0.1, rtol=0.01)
assert allclose(x[1], y[1], atol=0.1, rtol=0.01)
def test_dist_transform(Simulator, seed, allclose):
"""Using a distribution to initialize transform."""
with nengo.Network(seed=seed) as net:
net.config[nengo.Connection].transform = nengo.dists.Gaussian(0.5, 1)
n = 300
a = nengo.Node(output=[0] * n)
b = nengo.Node(size_in=n + 1)
c = nengo.Ensemble(n + 2, 10)
d = nengo.Ensemble(n + 3, 11)
# make a couple different types of connections to make sure that a
# correctly sized transform is being generated
conn1 = nengo.Connection(a, b)
conn2 = nengo.Connection(b, c)
conn3 = nengo.Connection(b, c.neurons)
conn4 = nengo.Connection(b[:2], c[2])
conn5 = nengo.Connection(c, d, solver=nengo.solvers.LstsqL2(weights=True))
assert isinstance(conn1.transform.init, nengo.dists.Gaussian)
with Simulator(net) as sim:
pass
w = sim.data[conn1].weights
assert allclose(np.mean(w), 0.5, atol=0.01)
assert allclose(np.std(w), 1, atol=0.01)
assert w.shape == (n + 1, n)
assert sim.data[conn2].weights.shape == (10, n + 1)
assert sim.data[conn3].weights.shape == (n + 2, n + 1)
assert sim.data[conn4].weights.shape == (1, 2)
assert sim.data[conn5].weights.shape == (n + 3, n + 2)
# make sure the seed works (gives us the same transform)
with nengo.Network(seed=seed) as net:
net.config[nengo.Connection].transform = nengo.dists.Gaussian(0.5, 1)
a = nengo.Node(output=[0] * n)
b = nengo.Node(size_in=n + 1)
conn = nengo.Connection(a, b)
with Simulator(net) as sim:
pass
assert allclose(w, sim.data[conn].weights)
def test_weights(Simulator, AnyNeuronType, plt, seed, allclose):
n1, n2 = 100, 50
def func(t):
return [np.sin(4 * t), np.cos(12 * t)]
transform = np.array([[0.6, -0.4]])
m = nengo.Network(label="test_weights", seed=seed)
with m:
m.config[nengo.Ensemble].neuron_type = AnyNeuronType()
u = nengo.Node(output=func)
a = nengo.Ensemble(n1, dimensions=2, radius=1.4)
b = nengo.Ensemble(n2, dimensions=1)
bp = nengo.Probe(b)
nengo.Connection(u, a)
nengo.Connection(
a, b, synapse=0.01, transform=transform, solver=LstsqL2(weights=True)
)
with Simulator(m) as sim:
sim.run(1.0)
t = sim.trange()
x = np.array(func(t)).T
y = np.dot(x, transform.T)
z = nengo.Lowpass(0.01).filt(sim.data[bp], dt=sim.dt)
assert signals_allclose(
t, y, z, atol=0.15, buf=0.1, delay=0.025, plt=plt, allclose=allclose
)
@pytest.mark.filterwarnings(
"ignore:For connections from.*setting the solver has no effect",
"ignore:For connections to.*setting `weights=True` on a solver has no effect",
)
def test_configure_weight_solver(Simulator, seed, plt, allclose):
"""Ensures that connections that don't use the weight solver ignore it"""
n1, n2 = 100, 101
function = lambda x: x ** 2
with nengo.Network(seed=seed) as net:
net.config[nengo.Connection].solver = nengo.solvers.LstsqL2(weights=True)
u = nengo.Node(lambda t: np.sin(8 * t))
a = nengo.Ensemble(n1, 1)
b = nengo.Ensemble(n2, 1)
v = nengo.Node(size_in=1)
up = nengo.Probe(u, synapse=nengo.Alpha(0.01))
vp = nengo.Probe(v, synapse=nengo.Alpha(0.01))
nengo.Connection(u, a)
ens_conn = nengo.Connection(a, b, function=function)
nengo.Connection(b, v)
with nengo.Simulator(net) as sim:
sim.run(1.0)
t = sim.trange()
x = sim.data[up]
y = function(x)
z = sim.data[vp]
assert sim.data[ens_conn].weights.shape == (n2, n1)
assert signals_allclose(
t, y, z, buf=0.01, delay=0.015, atol=0.05, rtol=0.05, plt=plt, allclose=allclose
)
def test_vector(Simulator, AnyNeuronType, plt, seed, allclose):
N1, N2 = 50, 50
transform = [-1, 0.5]
m = nengo.Network(seed=seed)
with m:
m.config[nengo.Ensemble].neuron_type = AnyNeuronType()
u = nengo.Node(output=[0.5, 0.5])
a = nengo.Ensemble(N1, dimensions=2)
b = nengo.Ensemble(N2, dimensions=2)
nengo.Connection(u, a)
nengo.Connection(a, b, transform=transform)
up = nengo.Probe(u, "output")
bp = nengo.Probe(b, synapse=0.03)
with Simulator(m) as sim:
sim.run(0.2)
t = sim.trange()
x = sim.data[up]
y = x * transform
yhat = sim.data[bp]
plt.plot(t, y, "--")
plt.plot(t, yhat)
assert allclose(y[-10:], yhat[-10:], atol=0.1, rtol=0.01)
def test_dimensionality_errors(NonDirectNeuronType, seed, rng):
N = 10
with nengo.Network(seed=seed) as m:
m.config[nengo.Ensemble].neuron_type = NonDirectNeuronType()
n01 = nengo.Node(output=[1])
n02 = nengo.Node(output=[1, 1])
n21 = nengo.Node(output=lambda t, x: [1], size_in=2)
e1 = nengo.Ensemble(N, 1)
e2 = nengo.Ensemble(N, 2)
# these should work
nengo.Connection(n01, e1)
nengo.Connection(n02, e2)
nengo.Connection(e2, n21)
nengo.Connection(n21, e1)
nengo.Connection(e1.neurons, n21, transform=rng.randn(2, N))
nengo.Connection(e2, e1, function=lambda x: x[0])
nengo.Connection(e2, e2, transform=np.ones(2))
# these should not work
with pytest.raises(
ValidationError, match="not equal to connection output size"
):
nengo.Connection(n02, e1)
with pytest.raises(
ValidationError, match="not equal to connection output size"
):
nengo.Connection(e1, e2)
with pytest.raises(ValidationError, match="Transform input size"):
nengo.Connection(
e2.neurons, e1, transform=Dense((1, N + 1), init=Choice([1.0]))
)
with pytest.raises(ValidationError, match="Transform output size"):
nengo.Connection(
e2.neurons, e1, transform=Dense((2, N), init=Choice([1.0]))
)
with pytest.raises(ValidationError, match="Function output size"):
nengo.Connection(e2, e1, function=lambda x: x, transform=Dense((1, 1)))
with pytest.raises(ValidationError, match="function.*must accept a single"):
nengo.Connection(e2, e1, function=lambda: 0, transform=Dense((1, 1)))
with pytest.raises(ValidationError, match="Function output size"):
nengo.Connection(n21, e2, transform=Dense((2, 2)))
with pytest.raises(ValidationError, match="Shape of initial value"):
nengo.Connection(e2, e2, transform=np.ones((2, 2, 2)))
with pytest.raises(ValidationError, match="Function output size"):
nengo.Connection(e1, e2, transform=Dense((3, 3), init=np.ones(3)))
# these should not work because of indexing mismatches
with pytest.raises(ValidationError, match="Function output size"):
nengo.Connection(n02[0], e2, transform=Dense((2, 2)))
with pytest.raises(ValidationError, match="Transform output size"):
nengo.Connection(n02, e2[0], transform=Dense((2, 2)))
with pytest.raises(ValidationError, match="Function output size"):
nengo.Connection(n02[1], e2[0], transform=Dense((2, 2)))
with pytest.raises(ValidationError, match="Transform input size"):
nengo.Connection(n02, e2[0], transform=Dense((2, 1), init=Choice([1.0])))
with pytest.raises(ValidationError, match="Transform input size"):
nengo.Connection(e2[0], e2, transform=Dense((1, 2), init=Choice([1.0])))
# these should not work because of repeated indices
dense22 = Dense((2, 2), init=np.ones((2, 2)))
with pytest.raises(ValidationError, match="Input.*repeated indices"):
nengo.Connection(n02[[0, 0]], e2, transform=dense22)
with pytest.raises(ValidationError, match="Output.*repeated indices"):
nengo.Connection(e2, e2[[1, 1]], transform=dense22)
def test_slicing(Simulator, AnyNeuronType, plt, seed, allclose):
N = 300
x = np.array([-1, -0.25, 1])
s1a = slice(1, None, -1)
s1b = [2, 0]
T1 = [[-1, 0.5], [2, 0.25]]
y1 = np.zeros(3)
y1[s1b] = np.dot(T1, x[s1a])
s2a = [0, 2]
s2b = slice(0, 2)
T2 = [[-0.5, 0.25], [0.5, 0.75]]
y2 = np.zeros(3)
y2[s2b] = np.dot(T2, x[s2a])
s3a = [2, 0]
s3b = np.asarray([0, 2]) # test slicing with numpy array
T3 = [0.5, 0.75]
y3 = np.zeros(3)
y3[s3b] = np.dot(np.diag(T3), x[s3a])
sas = [s1a, s2a, s3a]
sbs = [s1b, s2b, s3b]
Ts = [T1, T2, T3]
ys = [y1, y2, y3]
weight_solver = nengo.solvers.LstsqL2(weights=True)
with nengo.Network(seed=seed) as m:
m.config[nengo.Ensemble].neuron_type = AnyNeuronType()
u = nengo.Node(output=x)
a = nengo.Ensemble(N, dimensions=3, radius=1.7)
nengo.Connection(u, a)
probes = []
weight_probes = []
for sa, sb, T in zip(sas, sbs, Ts):
b = nengo.Ensemble(N, dimensions=3, radius=1.7)
nengo.Connection(a[sa], b[sb], transform=T)
probes.append(nengo.Probe(b, synapse=0.03))
# also test on weight solver
b = nengo.Ensemble(N, dimensions=3, radius=1.7)
nengo.Connection(a[sa], b[sb], transform=T, solver=weight_solver)
weight_probes.append(nengo.Probe(b, synapse=0.03))
with Simulator(m) as sim:
sim.run(0.25)
t = sim.trange()
for i, [y, p] in enumerate(zip(ys, probes)):
plt.subplot(len(ys), 1, i + 1)
plt.plot(t, np.tile(y, (len(t), 1)), "--")
plt.plot(t, sim.data[p])
atol = 0.01 if AnyNeuronType is nengo.Direct else 0.1
for i, [y, p, wp] in enumerate(zip(ys, probes, weight_probes)):
assert allclose(y, sim.data[p][-20:], atol=atol), "Failed %d" % i
assert allclose(y, sim.data[wp][-20:], atol=atol), "Weights %d" % i
def test_neuron_slicing(Simulator, plt, seed, rng, allclose):
N = 6
sa = slice(None, None, 2)
sb = slice(None, None, -2)
x = np.array([-1, -0.25, 1])
with nengo.Network(seed=seed) as m:
m.config[nengo.Ensemble].neuron_type = nengo.LIFRate()
u = nengo.Node(output=x)
a = nengo.Ensemble(N, dimensions=3, radius=1.7)
b = nengo.Ensemble(N, dimensions=3, radius=1.7)
nengo.Connection(u, a)
c = nengo.Connection(a.neurons[sa], b.neurons[sb])
c.transform = rng.normal(scale=1e-3, size=(c.size_out, c.size_in))
ap = nengo.Probe(a.neurons, synapse=0.03)
bp = nengo.Probe(b.neurons, synapse=0.03)
with Simulator(m) as sim:
sim.run(0.2)
t = sim.trange()
x = sim.data[ap]
y = np.zeros((len(t), b.n_neurons))
y[:, sb] = np.dot(x[:, sa], c.transform.init.T)
y = b.neuron_type.rates(y, sim.data[b].gain, sim.data[b].bias)
plt.plot(t, y, "k--")
plt.plot(t, sim.data[bp])
assert allclose(y[-10:], sim.data[bp][-10:], atol=3.0, rtol=0.0)
def test_shortfilter(Simulator, AnyNeuronType):
# Testing the case where the connection filter is < dt
m = nengo.Network()
with m:
m.config[nengo.Ensemble].neuron_type = AnyNeuronType()
a = nengo.Ensemble(n_neurons=10, dimensions=1)
nengo.Connection(a, a, synapse=0)
b = nengo.Ensemble(n_neurons=10, dimensions=1)
nengo.Connection(a, b, synapse=0)
nengo.Connection(b, a, synapse=0)
with Simulator(m, dt=0.01):
# This test passes if there are no cycles in the op graph
pass
# We will still get a cycle if the user explicitly sets the
# filter to None
with m:
d = nengo.Ensemble(1, dimensions=1, neuron_type=nengo.Direct())
nengo.Connection(d, d, synapse=None)
with pytest.raises(ValueError):
Simulator(m, dt=0.01)
def test_zerofilter(Simulator, seed):
# Testing the case where the connection filter is zero
m = nengo.Network(seed=seed)
with m:
# Ensure no cycles in the op graph.
a = nengo.Ensemble(1, dimensions=1, neuron_type=nengo.Direct())
nengo.Connection(a, a, synapse=0)
# Ensure that spikes are not filtered
b = nengo.Ensemble(
3, dimensions=1, intercepts=[-0.9, -0.8, -0.7], neuron_type=nengo.LIF()
)
bp = nengo.Probe(b.neurons)
with Simulator(m) as sim:
sim.run(1.0)
# assert that we have spikes (binary)
assert np.unique(sim.data[bp]).size == 2
def test_function_output_size(Simulator, plt, seed, allclose):
"""Try a function that outputs both 0-d and 1-d arrays"""
def bad_function(x):
return x if x > 0 else 0
model = nengo.Network(seed=seed)
with model:
u = nengo.Node(output=lambda t: (t - 0.1) * 10)
a = nengo.Ensemble(n_neurons=100, dimensions=1)
b = nengo.Ensemble(n_neurons=100, dimensions=1)
nengo.Connection(u, a, synapse=None)
nengo.Connection(a, b, synapse=None, function=bad_function)
up = nengo.Probe(u)
bp = nengo.Probe(b, synapse=0.03)
with Simulator(model) as sim:
sim.run(0.2)
t = sim.trange()
x = nengo.Lowpass(0.03).filt(np.maximum(sim.data[up], 0), dt=sim.dt)
y = sim.data[bp]
plt.plot(t, x, "k")
plt.plot(t, y)
assert allclose(x, y, atol=0.1)
def test_slicing_function(Simulator, plt, seed, allclose):
"""Test using a pre-slice and a function"""
N = 300
f_in = lambda t: [np.cos(3 * t), np.sin(3 * t)]
f_x = lambda x: [x, -(x ** 2)]
with nengo.Network(seed=seed) as model:
u = nengo.Node(output=f_in)
a = nengo.Ensemble(N, 2, radius=np.sqrt(2))
b = nengo.Ensemble(N, 2, radius=np.sqrt(2))
nengo.Connection(u, a)
nengo.Connection(a[1], b, function=f_x)
up = nengo.Probe(u, synapse=0.05)
bp = nengo.Probe(b, synapse=0.05)
with Simulator(model) as sim:
sim.run(1.0)
t = sim.trange()
v = sim.data[up]
w = np.column_stack(f_x(v[:, 1]))
y = sim.data[bp]
plt.plot(t, y)
plt.plot(t, w, ":")
assert allclose(w, y, atol=0.1)
@pytest.mark.parametrize("negative_indices", (True, False))
def test_list_indexing(Simulator, plt, seed, negative_indices, allclose):
with nengo.Network(seed=seed) as model:
u = nengo.Node([-1, 1])
a = nengo.Ensemble(50, dimensions=1)
b = nengo.Ensemble(50, dimensions=1, radius=2.2)
c = nengo.Ensemble(100, dimensions=2, radius=1.3)
d = nengo.Ensemble(100, dimensions=2, radius=1.3)
if negative_indices:
nengo.Connection(u[[0, 1]], a[[0, -1]])
nengo.Connection(u[[1, -1]], b[[0, -1]])
nengo.Connection(u[[0, 1]], c[[0, 1]])
nengo.Connection(u[[1, -1]], d[[0, 1]])
else:
nengo.Connection(u[[0, 1]], a[[0, 0]])
nengo.Connection(u[[1, 1]], b[[0, 0]])
nengo.Connection(u[[0, 1]], c[[0, 1]])
nengo.Connection(u[[1, 1]], d[[0, 1]])
a_probe = nengo.Probe(a, synapse=0.03)
b_probe = nengo.Probe(b, synapse=0.03)
c_probe = nengo.Probe(c, synapse=0.03)
d_probe = nengo.Probe(d, synapse=0.03)
with Simulator(model) as sim:
sim.run(0.4)
t = sim.trange()
a_data = sim.data[a_probe]
b_data = sim.data[b_probe]
c_data = sim.data[c_probe]
d_data = sim.data[d_probe]
line = plt.plot(t, a_data)
plt.axhline(0, color=line[0].get_color())
line = plt.plot(t, b_data)
plt.axhline(2, color=line[0].get_color())
line = plt.plot(t, c_data)
plt.axhline(-1, color=line[0].get_color())
line = plt.plot(t, d_data)
plt.axhline(1, color=line[1].get_color())
assert allclose(a_data[t > 0.3], [0], atol=0.2)
assert allclose(b_data[t > 0.3], [2], atol=0.2)
assert allclose(c_data[t > 0.3], [-1, 1], atol=0.2)
assert allclose(d_data[t > 0.3], [1, 1], atol=0.2)
@pytest.mark.filterwarnings("ignore:boolean index did not match")
def test_boolean_indexing(Simulator, rng, plt, allclose):
D = 10
mu = np.arange(D) % 2 == 0
mv = np.arange(D) % 2 == 1
#coding: utf-8
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.models import Sequential, Model
from keras.layers import Dense, Embedding, Activation, merge, Input, Lambda, Reshape,Lambda
from keras import layers,regularizers
from keras.layers import Concatenate,Multiply,Average,Maximum
from keras.layers import Conv2D, Flatten, Dropout, Conv1D,UpSampling2D,Cropping2D,Cropping1D
from keras.layers.pooling import MaxPooling1D, GlobalAveragePooling1D, MaxPooling2D,GlobalAveragePooling2D,AveragePooling2D
from keras.layers import LSTM, GRU, TimeDistributed, Bidirectional
from keras.utils.np_utils import to_categorical
from keras import initializers
from keras import backend as K
from keras.engine.topology import Layer
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import ZeroPadding2D
from keras.utils import np_utils
from keras.layers.merge import *
import pandas as pd
import numpy as np
import math
import os
import re
import sys
import time
import re
import string
import nltk
from utils import *
import h5py
from PIL import Image
from keras.applications.vgg16 import VGG16
from keras.applications.resnet50 import ResNet50
from keras.optimizers import SGD,Adam
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing.image import load_img,img_to_array
from keras.utils import plot_model
import matplotlib.pyplot as plt
from keras.callbacks import EarlyStopping,Callback
#from parallel_model import ParallelModel
from keras.backend import tf as ktf
from math import ceil
need_256 = True
import tensorflow as tf
import tensorlayer as tl
import numpy as np
from keras.preprocessing.image import img_to_array, load_img
import random
import os
from keras.optimizers import SGD
import pickle
from PIL import Image
from keras.preprocessing.image import load_img,img_to_array
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model
import keras
from keras import optimizers
from keras.applications import vgg16, xception
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D
from keras.layers import Input, Embedding, Dropout, Flatten, Dense
from keras.models import Model, Sequential
from keras.preprocessing.image import img_to_array
from keras.utils import to_categorical
import pandas as pd
from skimage import io,transform
import chardet
import gc
num_labels=200
img_width, img_height = 224, 224
nb_train_samples = 540 #4000
nb_validation_samples = 60 #2000
epochs = 100
batch_size = 32
#input_shape = (img_width, img_height, 3)
early_stop=EarlyStopping(monitor='acc',patience=3,verbose=2,mode='max')
import gc
gc.collect()
###========================Design model ====================================###
img_width, img_height = 224, 224
input_shapes=(img_width, img_height,3)
i_shape=(224,224)
# build the VGG16 network
learning_rate = 1e-3 # Layer specific learning rate
# Weight decay not implemented
def BN(name=""):
return BatchNormalization(momentum=0.95, name=name, epsilon=1e-5)
class Interp(layers.Layer):
def __init__(self, new_size, **kwargs):
self.new_size = new_size
super(Interp, self).__init__(**kwargs)
def build(self, input_shape):
super(Interp, self).build(input_shape)
def call(self, inputs, **kwargs):
new_height, new_width = self.new_size
resized = ktf.image.resize_images(inputs, [new_height, new_width],
align_corners=True)
return resized
def compute_output_shape(self, input_shape):
return tuple([None, self.new_size[0], self.new_size[1], input_shape[3]])
def get_config(self):
config = super(Interp, self).get_config()
config['new_size'] = self.new_size
return config
# def Interp(x, shape):
# new_height, new_width = shape
# resized = ktf.image.resize_images(x, [new_height, new_width],
# align_corners=True)
# return resized
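# Illustrative usage sketch for the custom Interp layer (added; not used by
# the models below). It bilinearly resizes a feature map to a fixed spatial
# size via tf.image.resize_images with align_corners=True.
def _interp_usage_example():
    x_in = Input((28, 28, 512))
    x_out = Interp([56, 56])(x_in)             # upsample 28x28 -> 56x56
    return Model(inputs=x_in, outputs=x_out)   # output shape (None, 56, 56, 512)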
def residual_conv(prev, level, pad=1, lvl=1, sub_lvl=1, modify_stride=False):
lvl = str(lvl)
sub_lvl = str(sub_lvl)
names = ["conv" + lvl + "_" + sub_lvl + "_1x1_reduce",
"conv" + lvl + "_" + sub_lvl + "_1x1_reduce_bn",
"conv" + lvl + "_" + sub_lvl + "_3x3",
"conv" + lvl + "_" + sub_lvl + "_3x3_bn",
"conv" + lvl + "_" + sub_lvl + "_1x1_increase",
"conv" + lvl + "_" + sub_lvl + "_1x1_increase_bn"]
if modify_stride is False:
prev = Conv2D(64 * level, (1, 1), strides=(1, 1), name=names[0],
use_bias=False)(prev)
elif modify_stride is True:
prev = Conv2D(64 * level, (1, 1), strides=(2, 2), name=names[0],
use_bias=False)(prev)
prev = BN(name=names[1])(prev)
prev = Activation('relu')(prev)
prev = ZeroPadding2D(padding=(pad, pad))(prev)
prev = Conv2D(64 * level, (3, 3), strides=(1, 1), dilation_rate=pad,
name=names[2], use_bias=False)(prev)
prev = BN(name=names[3])(prev)
prev = Activation('relu')(prev)
prev = Conv2D(256 * level, (1, 1), strides=(1, 1), name=names[4],
use_bias=False)(prev)
prev = BN(name=names[5])(prev)
return prev
def short_convolution_branch(prev, level, lvl=1, sub_lvl=1, modify_stride=False):
lvl = str(lvl)
sub_lvl = str(sub_lvl)
names = ["conv" + lvl + "_" + sub_lvl + "_1x1_proj",
"conv" + lvl + "_" + sub_lvl + "_1x1_proj_bn"]
if modify_stride is False:
prev = Conv2D(256 * level, (1, 1), strides=(1, 1), name=names[0],
use_bias=False)(prev)
elif modify_stride is True:
prev = Conv2D(256 * level, (1, 1), strides=(2, 2), name=names[0],
use_bias=False)(prev)
prev = BN(name=names[1])(prev)
return prev
def empty_branch(prev):
return prev
def residual_short(prev_layer, level, pad=1, lvl=1, sub_lvl=1, modify_stride=False):
prev_layer = Activation('relu')(prev_layer)
block_1 = residual_conv(prev_layer, level,
pad=pad, lvl=lvl, sub_lvl=sub_lvl,
modify_stride=modify_stride)
block_2 = short_convolution_branch(prev_layer, level,
lvl=lvl, sub_lvl=sub_lvl,
modify_stride=modify_stride)
added = Add()([block_1, block_2])
return added
def residual_empty(prev_layer, level, pad=1, lvl=1, sub_lvl=1):
prev_layer = Activation('relu')(prev_layer)
block_1 = residual_conv(prev_layer, level, pad=pad,
lvl=lvl, sub_lvl=sub_lvl)
block_2 = empty_branch(prev_layer)
added = Add()([block_1, block_2])
return added
def ResNet(inp, layers):
# Names for the first couple layers of model
names = ["conv1_1_3x3_s2",
"conv1_1_3x3_s2_bn",
"conv1_2_3x3",
"conv1_2_3x3_bn",
"conv1_3_3x3",
"conv1_3_3x3_bn"]
# Short branch(only start of network)
cnv1 = Conv2D(64, (3, 3), strides=(2, 2), padding='same', name=names[0],
use_bias=False)(inp) # "conv1_1_3x3_s2"
bn1 = BN(name=names[1])(cnv1) # "conv1_1_3x3_s2/bn"
relu1 = Activation('relu')(bn1) # "conv1_1_3x3_s2/relu"
cnv1 = Conv2D(64, (3, 3), strides=(1, 1), padding='same', name=names[2],
use_bias=False)(relu1) # "conv1_2_3x3"
bn1 = BN(name=names[3])(cnv1) # "conv1_2_3x3/bn"
relu1 = Activation('relu')(bn1) # "conv1_2_3x3/relu"
cnv1 = Conv2D(128, (3, 3), strides=(1, 1), padding='same', name=names[4],
use_bias=False)(relu1) # "conv1_3_3x3"
bn1 = BN(name=names[5])(cnv1) # "conv1_3_3x3/bn"
relu1 = Activation('relu')(bn1) # "conv1_3_3x3/relu"
res = MaxPooling2D(pool_size=(3, 3), padding='same',
strides=(2, 2))(relu1) # "pool1_3x3_s2"
# ---Residual layers(body of network)
"""
Modify_stride --Used only once in first 3_1 convolutions block.
changes stride of first convolution from 1 -> 2
"""
# 2_1- 2_3
res = residual_short(res, 1, pad=1, lvl=2, sub_lvl=1)
for i in range(2):
res = residual_empty(res, 1, pad=1, lvl=2, sub_lvl=i + 2)
# 3_1 - 3_3
res = residual_short(res, 2, pad=1, lvl=3, sub_lvl=1, modify_stride=True)
for i in range(3):
res = residual_empty(res, 2, pad=1, lvl=3, sub_lvl=i + 2)
if layers == 50:
# 4_1 - 4_6
res = residual_short(res, 4, pad=2, lvl=4, sub_lvl=1)
for i in range(5):
res = residual_empty(res, 4, pad=2, lvl=4, sub_lvl=i + 2)
elif layers == 101:
# 4_1 - 4_23
res = residual_short(res, 4, pad=2, lvl=4, sub_lvl=1)
for i in range(22):
res = residual_empty(res, 4, pad=2, lvl=4, sub_lvl=i + 2)
else:
print("This ResNet is not implemented")
# 5_1 - 5_3
res = residual_short(res, 8, pad=4, lvl=5, sub_lvl=1)
for i in range(2):
res = residual_empty(res, 8, pad=4, lvl=5, sub_lvl=i + 2)
res = Activation('relu')(res)
return res
def interp_block(prev_layer, level, feature_map_shape, input_shape):
if input_shape == (224, 224):
kernel_strides_map = {1: 28,2: 21,3: 14,6: 7}
else:
print("Pooling parameters for input shape ",input_shape, " are not defined.")
exit(1)
names = [
"conv5_3_pool" + str(level) + "_conv",
"conv5_3_pool" + str(level) + "_conv_bn"
]
kernel = (kernel_strides_map[level], kernel_strides_map[level])
strides = (kernel_strides_map[level], kernel_strides_map[level])
print(kernel)
print(strides)
prev_layer = AveragePooling2D(kernel, strides=strides)(prev_layer)
prev_layer = Conv2D(512, (1, 1), strides=(1, 1), name=names[0],use_bias=False)(prev_layer)
prev_layer = BN(name=names[1])(prev_layer)
prev_layer = Activation('relu')(prev_layer)
# prev_layer = Lambda(Interp, arguments={
# 'shape': feature_map_shape})(prev_layer)
prev_layer = Interp(feature_map_shape)(prev_layer)
return prev_layer
def interp_block_t(prev_layer, level, feature_map_shape, input_shape):
if input_shape == (224, 224):
kernel_strides_map = {1: 28,2: 21,3: 14,6: 7}
else:
print("Pooling parameters for input shape ",input_shape, " are not defined.")
exit(1)
names = [
"conv5_3_pool" + str(level) + "_conv_1",
"conv5_3_pool" + str(level) + "_conv_bn_1"
]
kernel = (kernel_strides_map[level], kernel_strides_map[level])
strides = (kernel_strides_map[level], kernel_strides_map[level])
print(kernel)
print(strides)
prev_layer = AveragePooling2D(kernel, strides=strides)(prev_layer)
prev_layer = Conv2D(512, (1, 1), strides=(1, 1), name=names[0],use_bias=False)(prev_layer)
prev_layer = BN(name=names[1])(prev_layer)
prev_layer = Activation('relu')(prev_layer)
# prev_layer = Lambda(Interp, arguments={
# 'shape': feature_map_shape})(prev_layer)
prev_layer = Interp(feature_map_shape)(prev_layer)
return prev_layer
def interp_fake_block(num,prev_layer, level, feature_map_shape, input_shape):
if input_shape == (224, 224):
kernel_strides_map = {1: 28,2: 21,3: 14,6: 7}
else:
print("Pooling parameters for input shape ",input_shape, " are not defined.")
exit(1)
names = [
"conv5_3_pool" + str(level) + "_conv_"+str(num),
"conv5_3_pool" + str(level) + "_conv_bn_"+str(num)
]
kernel = (kernel_strides_map[level], kernel_strides_map[level])
strides = (kernel_strides_map[level], kernel_strides_map[level])
print(kernel)
print(strides)
prev_layer = AveragePooling2D(kernel, strides=strides)(prev_layer)
prev_layer = Conv2D(512, (1, 1), strides=(1, 1), name=names[0],use_bias=False)(prev_layer)
prev_layer = BN(name=names[1])(prev_layer)
prev_layer = Activation('relu')(prev_layer)
# prev_layer = Lambda(Interp, arguments={
# 'shape': feature_map_shape})(prev_layer)
prev_layer = Interp(feature_map_shape)(prev_layer)
return prev_layer
def build_pyramid_pooling_module(res, input_shape):
"""Build the Pyramid Pooling Module."""
# ---PSPNet concat layers with Interpolation
feature_map_size = tuple(int(ceil(input_dim / 8.0))
for input_dim in input_shape)
print("PSP module will interpolate to a final feature map size of %s" %
(feature_map_size, ))
interp_block1 = interp_block(res, 1, feature_map_size, input_shape)
interp_block2 = interp_block(res, 2, feature_map_size, input_shape)
interp_block3 = interp_block(res, 3, feature_map_size, input_shape)
interp_block6 = interp_block(res, 6, feature_map_size, input_shape)
# concat all these layers. resulted
# shape=(1,feature_map_size_x,feature_map_size_y,4096)
res = Concatenate()([res,
interp_block6,
interp_block3,
interp_block2,
interp_block1])
return res
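# Added note: with a 224x224 input the backbone feature map is 28x28 with 2048
# channels; each pooled branch is reduced to 512 channels and resized back to
# 28x28, so the concatenation above yields 2048 + 4*512 = 4096 channels.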
def build_fake_pyramid_pooling_module(num,res, input_shape):
"""Build the Pyramid Pooling Module."""
# ---PSPNet concat layers with Interpolation
feature_map_size = tuple(int(ceil(input_dim / 8.0))
for input_dim in input_shape)
print("PSP module will interpolate to a final feature map size of %s" %
(feature_map_size, ))
interp_block1 = interp_fake_block(num,res, 1, feature_map_size, input_shape)
interp_block2 = interp_fake_block(num,res, 2, feature_map_size, input_shape)
interp_block3 = interp_fake_block(num,res, 3, feature_map_size, input_shape)
interp_block6 = interp_fake_block(num,res, 6, feature_map_size, input_shape)
# concat all these layers. resulted
# shape=(1,feature_map_size_x,feature_map_size_y,4096)
res = Concatenate()([res,
interp_block6,
interp_block3,
interp_block2,
interp_block1])
return res
def build_pyramid_pooling_sub_module(res, input_shape):
"""Build the Pyramid Pooling Module."""
# ---PSPNet concat layers with Interpolation
feature_map_size = tuple(int(ceil(input_dim / 8.0))
for input_dim in input_shape)
print("PSP module will interpolate to a final feature map size of %s" %
(feature_map_size, ))
interp_block1 = interp_block(res, 1, feature_map_size, input_shape)
interp_block2 = interp_block(res, 2, feature_map_size, input_shape)
interp_block3 = interp_block(res, 3, feature_map_size, input_shape)
interp_block6 = interp_block(res, 6, feature_map_size, input_shape)
# concat all these layers. resulted
# shape=(1,feature_map_size_x,feature_map_size_y,4096)
res = Concatenate()([res,interp_block6,
interp_block3,
interp_block2,
interp_block1])
return res
def build_pyramid_pooling_mult_module(res,org, input_shape):
"""Build the Pyramid Pooling Module."""
# ---PSPNet concat layers with Interpolation
feature_map_size = tuple(int(ceil(input_dim / 8.0))
for input_dim in input_shape)
print("PSP module will interpolate to a final feature map size of %s" %(feature_map_size, ))
interp_rblock1 = interp_block(res, 1, feature_map_size, input_shape)
interp_rblock2 = interp_block(res, 2, feature_map_size, input_shape)
interp_rblock3 = interp_block(res, 3, feature_map_size, input_shape)
interp_rblock6 = interp_block(res, 6, feature_map_size, input_shape)
interp_oblock1 = interp_block_t(org, 1, feature_map_size, input_shape)
interp_oblock2 = interp_block_t(org, 2, feature_map_size, input_shape)
interp_oblock3 = interp_block_t(org, 3, feature_map_size, input_shape)
interp_oblock6 = interp_block_t(org, 6, feature_map_size, input_shape)
interp_block1 =Multiply()([interp_rblock1, interp_oblock1])
interp_block2 =Multiply()([interp_rblock2, interp_oblock2])
interp_block3 =Multiply()([interp_rblock3, interp_oblock3])
interp_block6 =Multiply()([interp_rblock6, interp_oblock6])
rr=Multiply()([res, org])
re1 = Concatenate()([rr,
interp_block6,
interp_block3,
interp_block2,
interp_block1])
return re1
def build_pyramid_pooling_aver_module(res,org, input_shape):
"""Build the Pyramid Pooling Module."""
# ---PSPNet concat layers with Interpolation
feature_map_size = tuple(int(ceil(input_dim / 8.0))
for input_dim in input_shape)
print("PSP module will interpolate to a final feature map size of %s" %(feature_map_size, ))
interp_rblock1 = interp_block(res, 1, feature_map_size, input_shape)
interp_rblock2 = interp_block(res, 2, feature_map_size, input_shape)
interp_rblock3 = interp_block(res, 3, feature_map_size, input_shape)
interp_rblock6 = interp_block(res, 6, feature_map_size, input_shape)
interp_oblock1 = interp_block_t(org, 1, feature_map_size, input_shape)
interp_oblock2 = interp_block_t(org, 2, feature_map_size, input_shape)
interp_oblock3 = interp_block_t(org, 3, feature_map_size, input_shape)
interp_oblock6 = interp_block_t(org, 6, feature_map_size, input_shape)
interp_block1 =Average()([interp_rblock1, interp_oblock1])
interp_block2 =Average()([interp_rblock2, interp_oblock2])
interp_block3 =Average()([interp_rblock3, interp_oblock3])
interp_block6 =Average()([interp_rblock6, interp_oblock6])
#rr=Multiply()([res, org])
re1 = Concatenate()([org,
interp_block6,
interp_block3,
interp_block2,
interp_block1])
return re1
def build_pspnet(nb_classes, resnet_layers, input_shape, activation='softmax'):
"""Build PSPNet."""
print("Building a PSPNet based on ResNet %i expecting inputs of shape %s predicting %i classes" % (resnet_layers, input_shape, nb_classes))
inp = Input((input_shape[0], input_shape[1], 3))
res = ResNet(inp, layers=resnet_layers)
print (res.shape)
psp = build_pyramid_pooling_module(res, input_shape)
print (psp.shape)
x = Conv2D(512, (3, 3), strides=(1, 1), padding="same", name="conv5_4",use_bias=False)(psp)
x = BN(name="conv5_4_bn")(x)
x = Activation('relu')(x)
x = Dropout(0.1)(x)
x = Conv2D(nb_classes, (1, 1), strides=(1, 1), name="conv6")(x)
# x = Lambda(Interp, arguments={'shape': (
# input_shape[0], input_shape[1])})(x)
x = Interp([input_shape[0], input_shape[1]])(x)
x = Activation('softmax')(x)
model = Model(inputs=inp, outputs=x)
model.summary()
# Solver
sgd = SGD(lr=learning_rate, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd,loss='categorical_crossentropy',metrics=['accuracy'])
return model
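# Minimal usage sketch for build_pspnet (added as an illustration; the
# 200-class setting mirrors num_labels above). Building the full ResNet
# backbone is slow, so this helper is only a guide and is never called here.
def _pspnet_usage_example():
    psp_model = build_pspnet(nb_classes=200, resnet_layers=50,
                             input_shape=(224, 224))
    return psp_model  # per-pixel softmax output of shape (None, 224, 224, 200)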
# 1: get weight, 2
def identity_block(X, f, filters, stage, block):
# defining name basis
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
# Retrieve Filters
F1, F2, F3 = filters
# Save the input value. You'll need this later to add back to the main path.
X_shortcut = X
# First component of main path
X = Conv2D(filters = F1, kernel_size = (1, 1), strides = (1,1), padding = 'valid', name = conv_name_base + '2a')(X)
X = BatchNormalization(axis = 3, name = bn_name_base + '2a')(X)
X = Activation('relu')(X)
# Second component of main path (3 lines)
X = Conv2D(filters= F2, kernel_size=(f,f),strides=(1,1),padding='same',name=conv_name_base + '2b')(X)
X = BatchNormalization(axis=3,name=bn_name_base+'2b')(X)
X = Activation('relu')(X)
# Third component of main path ( lines)
X = Conv2D(filters=F3,kernel_size=(1,1),strides=(1,1),padding='valid',name=conv_name_base+'2c')(X)
X = BatchNormalization(axis=3,name=bn_name_base+'2c')(X)
# Final step: Add shortcut value to main path, and pass it through a RELU activation (lines)
X = Add()([X, X_shortcut])
X = Activation('relu')(X)
return X
def convolutional_block(X, f, filters, stage, block, s = 2):
# defining name basis
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
# Retrieve Filters
F1, F2, F3 = filters
# Save the input value
X_shortcut = X
##### MAIN PATH #####
# First component of main path
X = Conv2D(F1, (1, 1), strides = (s,s),padding='valid',name = conv_name_base + '2a')(X)
X = BatchNormalization(axis = 3, name = bn_name_base + '2a')(X)
X = Activation('relu')(X)
# Second component of main path
X = Conv2D(F2,(f,f),strides=(1,1),padding='same',name=conv_name_base+'2b')(X)
X = BatchNormalization(axis=3,name=bn_name_base+'2b')(X)
X = Activation('relu')(X)
# Third component of main path
X = Conv2D(F3,(1,1),strides=(1,1),padding='valid',name=conv_name_base+'2c')(X)
X = BatchNormalization(axis=3,name=bn_name_base+'2c')(X)
##### SHORTCUT PATH ####
X_shortcut = Conv2D(F3,(1,1),strides=(s,s),padding='valid',name=conv_name_base+'1')(X_shortcut)
X_shortcut = BatchNormalization(axis=3,name =bn_name_base+'1')(X_shortcut)
# Final step: Add shortcut value to main path, and pass it through a RELU activation
X = Add()([X,X_shortcut])
X = Activation('relu')(X)
return X
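# Added note: convolutional_block differs from identity_block in that the
# first 1x1 convolution uses stride s (spatial downsampling) and the channel
# count changes, so the shortcut branch needs its own 1x1 stride-s projection
# plus batch norm before the element-wise Add().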
# GRADED FUNCTION: ResNet50
def RResNet50(input_shape = (64, 64, 3), classes=200):
# Define the input as a tensor with shape input_shape
X_input = Input(input_shape)
# Zero-Padding
X = ZeroPadding2D((3, 3))(X_input)
# Stage 1
X = Conv2D(64, (7, 7), strides = (2, 2), name = 'conv1')(X)
X = BatchNormalization(axis = 3, name = 'bn_conv1')(X)
X = Activation('relu')(X)
X = MaxPooling2D((3, 3), strides=(2, 2))(X)
# Stage 2
X = convolutional_block(X, f = 3, filters = [64, 64, 256], stage = 2, block='a', s = 1)
X = identity_block(X, 3, [64, 64, 256], stage=2, block='b')
X = identity_block(X, 3, [64, 64, 256], stage=2, block='c')
### START CODE HERE ###
# Stage 3
X = convolutional_block(X, f = 3,filters= [128,128,512],stage=3,block='a',s=2)
X = identity_block(X,3,[128,128,512],stage=3,block='b')
X = identity_block(X,3,[128,128,512],stage=3,block='c')
X = identity_block(X,3,[128,128,512],stage=3,block='d')
# Stage 4
X = convolutional_block(X,f=3,filters=[256,256,1024],stage=4,block='a',s=2)
X = identity_block(X,3,[256,256,1024],stage=4,block='b')
X = identity_block(X,3,[256,256,1024],stage=4,block='c')
X = identity_block(X,3,[256,256,1024],stage=4,block='d')
X = identity_block(X,3,[256,256,1024],stage=4,block='e')
X = identity_block(X,3,[256,256,1024],stage=4,block='f')
# Stage 5
X = convolutional_block(X, f = 3,filters= [512,512,2048],stage=5,block='a',s=2)
X = identity_block(X,3,[512,512,2048],stage=5,block='b')
X = identity_block(X,3,[512,512,2048],stage=5,block='c')
# AVGPOOL
X = AveragePooling2D((2,2),strides=(2,2))(X)
# output layer
X = Flatten()(X)
model = Model(inputs = X_input, outputs = X, name='ResNet50')
return model
from keras.applications.resnet50 import ResNet50
def create_resnet50(input_img):
net = ResNet50(weights='imagenet', include_top=False,
input_tensor=input_img)
for layer in net.layers[1:]:
layer.trainable = False
net = Reshape((-1,))(net.outputs[0])
return net
def true_ResNet50(classes):
base_model = RResNet50(input_shape=(224,224,3),classes=200)
base_model.load_weights('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5')
for layer in base_model.layers:
layer.trainable=False
res = base_model.get_layer('activation_49').output
# print res.shape
res = BatchNormalization()(res)
model = Model(inputs=base_model.input, outputs=res,name='true-ResNet50')
#model.summary()
return model
def fake_ResNet50_base(index,input_shape=(224,224,3),classes=200):
base_model = RResNet50(input_shape=(224,224,3),classes=200)
base_model.load_weights('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5')
for layer in base_model.layers:
layer.trainable=False
layer.name = layer.name + str("_")+str(index)
base_model.summary()
#Num=(index+2)*49+index*6
Num=(index+2)*49
res_layer='activation_'+str(Num)+ str("_")+str(index)
print(res_layer)
res = base_model.get_layer(res_layer).output
#print res.shape
res = BatchNormalization()(res)
model = Model(inputs=base_model.input, outputs=res)
return model
def fake_ResNet50_base_new(index,input_shape=(224,224,3),classes=200):
base_model = RResNet50(input_shape=(224,224,3),classes=200)
base_model.load_weights('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5')
for layer in base_model.layers:
layer.trainable=False
layer.name = layer.name + str("_")+str(index)
base_model.summary()
Num=(index+2)*49+index*6
res_layer='activation_'+str(Num)+ str("_")+str(index)
print(res_layer)
res = base_model.get_layer(res_layer).output
res = BatchNormalization()(res)
model = Model(inputs=base_model.input, outputs=res)
return model
def text_cnnmodel(classes=200):
main_input = Input(shape=(64,), dtype='float64')
embedder = Embedding(len(vocab) + 1, 256, input_length = 64)
#embedder = Embedding(9999, 256, input_length = 64)
embed = embedder(main_input)
conv1_1 = Conv1D(256, 3, padding='same')(embed)
bn1_1 = BatchNormalization()(conv1_1)
relu1_1 = Activation('relu')(bn1_1)
conv1_2 = Conv1D(128, 3, padding='same')(relu1_1)
bn1_2 = BatchNormalization()(conv1_2)
relu1_2 = Activation('relu')(bn1_2)
cnn1 = MaxPooling1D(pool_size=4)(relu1_2)
# kernel_size = 4
conv2_1 = Conv1D(256, 4, padding='same')(embed)
bn2_1 = BatchNormalization()(conv2_1)
relu2_1 = Activation('relu')(bn2_1)
conv2_2 = Conv1D(128, 4, padding='same')(relu2_1)
bn2_2 = BatchNormalization()(conv2_2)
relu2_2 = Activation('relu')(bn2_2)
cnn2 = MaxPooling1D(pool_size=4)(relu2_2)
# kernel_size = 5
conv3_1 = Conv1D(256, 5, padding='same')(embed)
bn3_1 = BatchNormalization()(conv3_1)
relu3_1 = Activation('relu')(bn3_1)
conv3_2 = Conv1D(128, 5, padding='same')(relu3_1)
bn3_2 = BatchNormalization()(conv3_2)
relu3_2 = Activation('relu')(bn3_2)
cnn3 = MaxPooling1D(pool_size=4)(relu3_2)
#
conc = Concatenate()([cnn1,cnn2,cnn3])
flat = Flatten()(conc)
drop = Dropout(0.5)(flat)
fc = Dense(2048)(drop)
bn = BatchNormalization(name='bn')(fc)
model = Model(inputs = main_input, outputs = bn)
return model
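# Illustrative sketch (assumption: `vocab` is provided by `from utils import *`):
# the caption branch takes integer-encoded captions padded to length 64, e.g.
# via pad_sequences(tokenizer.texts_to_sequences(captions), maxlen=64), and
# returns a 2048-d feature per caption.
def _text_cnn_usage_example(padded_captions):
    cap_model = text_cnnmodel(classes=200)
    return cap_model.predict(padded_captions)  # shape (batch, 2048)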
def text_cnnmodel_base(index,classes):
base_model = text_cnnmodel(classes)
for layer in base_model.layers:
layer.trainable=False
layer.name = layer.name + str("_")+str(index)
res = base_model.output
#print res.shape
model = Model(inputs=base_model.input, outputs=res)
return model
#es = EarlyStopping(monitor='val_loss', patience=1)
#model.fit(x=X_train,y=Y_train,epochs=20,batch_size=32,validation_data=(X_val, Y_val),callbacks=[es])
#tt=build_pspnet(102, 50, input_shape=(224,224), activation='softmax')
def mult_text_cnnmodel(classes):
capt1_model=text_cnnmodel_base(0,classes)
capt1_feature=capt1_model.output
capt1_in=capt1_model.input
capt2_model=text_cnnmodel_base(1,classes)
capt2_feature=capt2_model.output
capt2_in=capt2_model.input
capt3_model=text_cnnmodel_base(2,classes)
capt3_feature=capt3_model.output
capt3_in=capt3_model.input
capt4_model=text_cnnmodel_base(3,classes)
capt4_feature=capt4_model.output
capt4_in=capt4_model.input
capt5_model=text_cnnmodel_base(4,classes)
capt5_feature=capt5_model.output
capt5_in=capt5_model.input
capt6_model=text_cnnmodel_base(5,classes)
capt6_feature=capt6_model.output
capt6_in=capt6_model.input
capt7_model=text_cnnmodel_base(6,classes)
capt7_feature=capt7_model.output
capt7_in=capt7_model.input
capt8_model=text_cnnmodel_base(7,classes)
capt8_feature=capt8_model.output
capt8_in=capt8_model.input
capt9_model=text_cnnmodel_base(8,classes)
capt9_feature=capt9_model.output
capt9_in=capt9_model.input
capt10_model=text_cnnmodel_base(9,classes)
capt10_feature=capt10_model.output
capt10_in=capt10_model.input
outs =Average()([capt1_feature, capt2_feature,capt3_feature, capt4_feature,capt5_feature,capt6_feature,capt7_feature, capt8_feature,capt9_feature, capt10_feature])
model = Model(inputs= [capt1_in,capt2_in,capt3_in,capt4_in,capt5_in,capt6_in,capt7_in,capt8_in,capt9_in,capt10_in], outputs=outs,name='mult_text_cnnmodel')
#model.summary()
return model
def fake_ResNet50_new(classes):
fake_base_model1=fake_ResNet50_base(0,input_shape = (224, 224, 3),classes=200)
temp_feature1=fake_base_model1.output
in1=fake_base_model1.input
fake_base_model2=fake_ResNet50_base(1,input_shape = (224, 224, 3),classes=200)
temp_feature2=fake_base_model2.output
in2=fake_base_model2.input
fake_base_model3=fake_ResNet50_base(2,input_shape = (224, 224, 3),classes=200)
temp_feature3=fake_base_model3.output
in3=fake_base_model3.input
fake_base_model4=fake_ResNet50_base(3,input_shape = (224, 224, 3),classes=200)
temp_feature4=fake_base_model4.output
in4=fake_base_model4.input
fake_base_model5=fake_ResNet50_base(4,input_shape = (224, 224, 3),classes=200)
temp_feature5=fake_base_model5.output
in5=fake_base_model5.input
fake_base_model6=fake_ResNet50_base(5,input_shape = (224, 224, 3),classes=200)
temp_feature6=fake_base_model6.output
in6=fake_base_model6.input
fake_base_model7=fake_ResNet50_base(6,input_shape = (224, 224, 3),classes=200)
temp_feature7=fake_base_model7.output
in7=fake_base_model7.input
fake_base_model8=fake_ResNet50_base(7,input_shape = (224, 224, 3),classes=200)
temp_feature8=fake_base_model8.output
in8=fake_base_model8.input
fake_base_model9=fake_ResNet50_base(8,input_shape = (224, 224, 3),classes=200)
temp_feature9=fake_base_model9.output
in9=fake_base_model9.input
fake_base_model10=fake_ResNet50_base(9,input_shape = (224, 224, 3),classes=200)
temp_feature10=fake_base_model10.output
in10=fake_base_model10.input
outs =Average()([temp_feature1, temp_feature2,temp_feature3, temp_feature4,temp_feature5,temp_feature6,temp_feature7, temp_feature8,temp_feature9, temp_feature10])
model = Model(inputs=[in1,in2,in3,in4,in5,in6,in7,in8,in9,in10], outputs=outs,name='fake-ResNet50')
return model
def true_text_ResNet50_2(classes =200):
print('build true image model')
true_image_model = true_ResNet50( classes =200)
output1=true_image_model.output
input1=true_image_model.input
#output1=Conv2D(512, (1, 1), padding='same', activation='relu')(output1)
print('build caption model')
text_model=mult_text_cnnmodel(classes=200)
output3=text_model.output
input3=text_model.input
merged=Add()([output1,output3])
Flat= Flatten()(merged)
Dor=Dropout(0.5)(Flat)
fc = Dense(512)(Dor)
bnn = BatchNormalization(name='bn2')(fc)
Den=Dense(classes, activation='softmax')(bnn)
m_model=Model(inputs=[input1,input3[0],input3[1],input3[2],input3[3],input3[4],input3[5],input3[6],input3[7],input3[8],input3[9]], outputs=Den)
#plot_model(s_model, to_file='true-fake-restnet50-fine-20181104.png',show_shapes=True)
#m_model.summary()
return m_model
def true_fake_text_ResNet50_3(classes =200):
print('build true image model')
true_image_model = true_ResNet50( classes =200)
print('build fake image model')
fake_image_model = fake_ResNet50_new( classes =200)
output1=true_image_model.output
input1=true_image_model.input
output2=fake_image_model.output
input2=fake_image_model.input
# print(input1.shape)
# print(input2)
print('build caption model')
text_model=mult_text_cnnmodel_new(classes=200)
output3=text_model.output
input3=text_model.input
merged=Add()([output2,output3])
print(output2.shape)
print(output3.shape)
print(merged.shape)
Flat= Flatten()(merged)
Dor=Dropout(0.5)(Flat)
fc = Dense(2048)(Dor)
bnn = BatchNormalization(name='bn2')(fc)
merged1=Add()([output1,bnn])
Flat1= Flatten()(merged1)
Dor1=Dropout(0.5)(Flat1)
fc1 = Dense(512)(Dor1)
#fc2=Dropout(0.6)(fc1)
bnn1 = BatchNormalization(name='bn3')(fc1)
Den1=Dense(classes, activation='softmax')(bnn1)
m_model=Model(inputs=[input1,input2[0],input2[1],input2[2],input2[3],input2[4],input2[5],input2[6],input2[7],input2[8],input2[9],input3[0],input3[1],input3[2],input3[3],input3[4],input3[5],input3[6],input3[7],input3[8],input3[9]], outputs=Den1)
#plot_model(s_model, to_file='true-fake-restnet50-fine-20181104.png',show_shapes=True)
# m_model.summary()
return m_model
#from keras_attention_block import *
def true_fake_text_ResNet50_4(classes):
print('build true image model')
true_image_model = true_ResNet50( classes )
print('build fake image model')
fake_image_model = fake_ResNet50_new( classes)
output1=true_image_model.output
input1=true_image_model.input
output2=fake_image_model.output
input2=fake_image_model.input
print(input1.shape)
print(input2)
print('build caption model')
text_model=mult_text_cnnmodel_new(classes)
output3=text_model.output
input3=text_model.input
attentInput1=SelfAttention1DLayer(similarity="multiplicative",dropout_rate=0.2)(output1)
attentInput2=SelfAttention1DLayer(similarity="multiplicative",dropout_rate=0.2)(output2)
attentInput3=SelfAttention1DLayer(similarity="multiplicative",dropout_rate=0.2)(output3)
merged=Add()([attentInput2,attentInput3])
Flat= Flatten()(merged)
Dor=Dropout(0.1)(Flat)
fc = Dense(2048)(Dor)
bnn = BatchNormalization(name='bn2')(fc)
merged1=Add()([attentInput1,bnn])
Flat1= Flatten()(merged1)
Dor1=Dropout(0.1)(Flat1)
fc1 = Dense(512)(Dor1)
bnn1 = BatchNormalization(name='bn3')(fc1)
Den1=Dense(classes, activation='softmax')(bnn1)
m_model=Model(inputs=[input1,input2[0],input2[1],input2[2],input2[3],input2[4],input2[5],input2[6],input2[7],input2[8],input2[9],input3[0],input3[1],input3[2],input3[3],input3[4],input3[5],input3[6],input3[7],input3[8],input3[9]], outputs=Den1)
#plot_model(s_model, to_file='true-fake-restnet50-fine-20181104.png',show_shapes=True)
#m_model.summary()
return m_model
def true_fake_ResNet50(classes =200):
#print 'build true image model'
true_image_model = true_ResNet50( classes =200)
#print 'build fake image model'
fake_image_model = fake_ResNet50_new( classes =200)
output1=true_image_model.output
input1=true_image_model.input
output2=fake_image_model.output
input2=fake_image_model.input
#print input1.shape
#print input2
merged=Add()([output1,output2])
#print merged.shape
Flat= Flatten()(merged)
Dor=Dropout(0.5)(Flat)
fc = Dense(512)(Dor)
bnn = BatchNormalization()(fc)
Den1=Dense(classes, activation='softmax')(bnn)
s_model=Model(inputs=[input1,input2[0],input2[1],input2[2],input2[3],input2[4],input2[5],input2[6],input2[7],input2[8],input2[9]], outputs=Den1)
#plot_model(s_model, to_file='true-fake-restnet50-fine-20181104.png',show_shapes=True)
s_model.summary()
return s_model
def Our_ResNet50(classes):
#K.set_learning_phase(0)
base_model = RResNet50(input_shape=(224,224,3),classes=200)
base_model.load_weights('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5')
for layer in base_model.layers:
layer.trainable=False
res = base_model.get_layer('activation_49').output
# print res.shape
#K.set_learning_phase(1)
#x = GlobalAveragePooling2D()(res)
x = Flatten()(res)
#res1 = Activation('relu')(x)
predictions = Dense(classes, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=predictions)
model.summary()
return model
#import keras_resnet.models
def Our_ResNet50_1(classes=200):
#K.set_learning_phase(0)
shape=(224,224,3)
#x=keras.layers.Input(shape)
#base_model = keras_resnet.models.ResNet50(x, classes=102)
#predictions=base_model.output
#model = Model(inputs=base_model.input, outputs=predictions)
base_model = ResNet50(weights='imagenet', include_top=False, input_shape=shape)
for layer in base_model.layers:
layer.trainable = False
x = base_model.output
#K.set_learning_phase(1)
x = Flatten(name='flatten')(x)
predictions = Dense(classes, activation='softmax', name='predictions')(x)
model = Model(inputs=base_model.input, outputs=predictions)
model.summary()
for layer in model.layers[:141]:
layer.trainable=False
for layer in model.layers[141:]:
layer.trainable=True
return model
def Our_ResNet50_2(classes=200):
# K.set_learning_phase(0)
model = keras.applications.resnet50.ResNet50()
model.layers.pop()
for layer in model.layers:
layer.trainable=False
last = model.layers[-1].output
x = Dense(classes, activation="softmax")(last)
finetuned_model = Model(model.input, x)
finetuned_model.summary()
return finetuned_model
def Our_ResNet50_based_2(classes=200):
# K.set_learning_phase(0)
model = keras.applications.resnet50.ResNet50()
model.layers.pop()
#model.summary()
for layer in model.layers:
layer.trainable=False
res_layer='activation_49'
last = model.get_layer(res_layer).output
finetuned_model = Model(model.input, last)
finetuned_model.summary()
return finetuned_model
def Our_ResNet50_facke_based_2(index,classes=200):
# K.set_learning_phase(0)
base_model = keras.applications.resnet50.ResNet50()
base_model.layers.pop()
#base_model.summary()
for layer in base_model.layers:
layer.trainable=False
layer.name = layer.name + str("_")+str(index)
last = base_model.layers[-1].output
#print(base_model.layers[-1])
Num=(index+2)*49+index*6
res_layer='activation_'+str(Num)+ str("_")+str(index)
#res_layer='activation_'+str(Num)
print(res_layer)
res = base_model.get_layer(res_layer).output
finetuned_model = Model(base_model.input, last)
#finetuned_model.summary()
return finetuned_model
def true_text_ResNet50_2(classes):
print('build true image model')
true_image_model = true_ResNet50( classes )
output1=true_image_model.output
input1=true_image_model.input
#output1=Conv2D(512, (1, 1), padding='same', activation='relu')(output1)
#print input1.shape
print('build caption model')
text_model=mult_text_cnnmodel(classes)
output3=text_model.output
input3=text_model.input
merged=Add()([output1,output3])
Flat= Flatten()(merged)
Dor=Dropout(0.5)(Flat)
fc = Dense(512)(Dor)
bnn = BatchNormalization(name='bn2')(fc)
Den=Dense(classes, activation='softmax')(bnn)
m_model=Model(inputs=[input1,input3[0],input3[1],input3[2],input3[3],input3[4],input3[5],input3[6],input3[7],input3[8],input3[9]], outputs=Den)
#plot_model(s_model, to_file='true-fake-restnet50-fine-20181104.png',show_shapes=True)
m_model.summary()
return m_model
def caption_fake1_ResNet50_2(index,classes):
print('merging fake image %d with its caption' % index)
fake_base_model1=Our_ResNet50_facke_based_2(index,classes=200)
temp_feature1=fake_base_model1.output
in1=fake_base_model1.input
#print(temp_feature1.shape)
caption_model=text_cnnmodel_base(index,classes)
caption_feature=caption_model.output
in2=caption_model.input
merged=Add()([temp_feature1,caption_feature])
#Flat= Flatten()(merged)
#Dor=Dropout(0.5)(Flat)
#fc = Dense(2048)(Dor)
model=Model(inputs=[in1,in2],outputs=merged,name='caption_fake1_ResNet50')
return model
def Muit_fake1_Feature_model_2(classes):
print('build caption_fakeImage model')
fakeCaption_model1=caption_fake1_ResNet50_2(0,classes)
fakeCaption_featuer1=fakeCaption_model1.output
in1=fakeCaption_model1.input
fakeCaption_model2=caption_fake1_ResNet50_2(1,classes)
fakeCaption_featuer2=fakeCaption_model2.output
in2=fakeCaption_model2.input
fakeCaption_model3=caption_fake1_ResNet50_2(2,classes)
fakeCaption_featuer3=fakeCaption_model3.output
in3=fakeCaption_model3.input
fakeCaption_model4=caption_fake1_ResNet50_2(3,classes)
fakeCaption_featuer4=fakeCaption_model4.output
in4=fakeCaption_model4.input
fakeCaption_model5=caption_fake1_ResNet50_2(4,classes)
fakeCaption_featuer5=fakeCaption_model5.output
in5=fakeCaption_model5.input
fakeCaption_model6=caption_fake1_ResNet50_2(5,classes)
fakeCaption_featuer6=fakeCaption_model6.output
in6=fakeCaption_model6.input
fakeCaption_model7=caption_fake1_ResNet50_2(6,classes)
fakeCaption_featuer7=fakeCaption_model7.output
in7=fakeCaption_model7.input
fakeCaption_model8=caption_fake1_ResNet50_2(7,classes)
fakeCaption_featuer8=fakeCaption_model8.output
in8=fakeCaption_model8.input
fakeCaption_model9=caption_fake1_ResNet50_2(8,classes)
fakeCaption_featuer9=fakeCaption_model9.output
in9=fakeCaption_model9.input
fakeCaption_model10=caption_fake1_ResNet50_2(9,classes)
fakeCaption_featuer10=fakeCaption_model10.output
in10=fakeCaption_model10.input
print(fakeCaption_featuer1.shape)
outs =Average()([fakeCaption_featuer1, fakeCaption_featuer2,fakeCaption_featuer3,fakeCaption_featuer4, fakeCaption_featuer5,fakeCaption_featuer6,fakeCaption_featuer7, fakeCaption_featuer8,fakeCaption_featuer9, fakeCaption_featuer10])
#print(outs.shape)
model = Model(inputs= [in1[0],in1[1],in2[0],in2[1],in3[0],in3[1],in4[0],in4[1],in5[0],in5[1],in6[0],in6[1],in7[0],in7[1],in8[0],in8[1],in9[0],in9[1],in10[0],in10[1]], outputs=outs,name='Muit_fake1_Feature_model')
return model
def finnal_muilt1Feature_model_2(classes):
print('build true image model')
true_image_model = Our_ResNet50_based_2(classes)
true_image_feature=true_image_model.output
in0=true_image_model.input
print('build Muit_fake_Feature_model')
mult_fake1_caption_model=Muit_fake1_Feature_model_2(classes)
mult_fake1_caption_feature=mult_fake1_caption_model.output
in1=mult_fake1_caption_model.input
print(mult_fake1_caption_feature.shape)
merged=Add()([true_image_feature,mult_fake1_caption_feature])
print(merged.shape)
Flat= Flatten()(merged)
Dor=Dropout(0.5)(Flat)
fc = Dense(512)(Dor)
bnn = BatchNormalization(name='bn2')(fc)
Den=Dense(classes, activation='softmax')(bnn)
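# Note: in1 below is the flat input list collected from Muit_fake1_Feature_model_2;
# in1[:][k] is equivalent to in1[k] (the [:] slice only copies the list).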
m_model=Model(inputs=[in0,in1[:][0],in1[:][1],in1[:][2],in1[:][3],in1[:][4],in1[:][5],in1[:][6],in1[:][7],in1[:][8],in1[:][9],in1[:][10],in1[:][11],in1[:][12],in1[:][13],in1[:][14],in1[:][15],in1[:][16],in1[:][17],in1[:][18],in1[:][19]], outputs=Den)
# m_model=Model(inputs=[in0,in1[:][0],in1[:][1],in1[:][2],in1[:][3],in1[:][4],in1[:][5],in1[:][6],in1[:][7],in1[:][8],in1[:][9],in1[:][10],in1[:][11],in1[:][12],in1[:][13],in1[:][14],in1[:][15],in1[:][16],in1[:][17],in1[:][18],in1[:][19]], outputs=Den)
return m_model
def caption_fake1_ResNet50(index,classes):
print('merge the fake images %d' % index)
fake_base_model1=fake_ResNet50_base_new(index,input_shape = (224, 224, 3),classes=200)
temp_feature1=fake_base_model1.output
in1=fake_base_model1.input
#print(temp_feature1.shape)
caption_model=text_cnnmodel_base(index,classes)
caption_feature=caption_model.output
in2=caption_model.input
merged=Add()([temp_feature1,caption_feature])
#Flat= Flatten()(merged)
#Dor=Dropout(0.5)(Flat)
#fc = Dense(2048)(Dor)
model=Model(inputs=[in1,in2],outputs=merged,name='caption_fake1_ResNet50')
return model
def Muit_fake1_Feature_model(classes):
print('build caption_fakeImage model')
fakeCaption_model1=caption_fake1_ResNet50(0,classes)
fakeCaption_featuer1=fakeCaption_model1.output
in1=fakeCaption_model1.input
fakeCaption_model2=caption_fake1_ResNet50(1,classes)
fakeCaption_featuer2=fakeCaption_model2.output
in2=fakeCaption_model2.input
fakeCaption_model3=caption_fake1_ResNet50(2,classes)
fakeCaption_featuer3=fakeCaption_model3.output
in3=fakeCaption_model3.input
fakeCaption_model4=caption_fake1_ResNet50(3,classes)
fakeCaption_featuer4=fakeCaption_model4.output
in4=fakeCaption_model4.input
fakeCaption_model5=caption_fake1_ResNet50(4,classes)
fakeCaption_featuer5=fakeCaption_model5.output
in5=fakeCaption_model5.input
fakeCaption_model6=caption_fake1_ResNet50(5,classes)
fakeCaption_featuer6=fakeCaption_model6.output
in6=fakeCaption_model6.input
fakeCaption_model7=caption_fake1_ResNet50(6,classes)
fakeCaption_featuer7=fakeCaption_model7.output
in7=fakeCaption_model7.input
fakeCaption_model8=caption_fake1_ResNet50(7,classes)
fakeCaption_featuer8=fakeCaption_model8.output
in8=fakeCaption_model8.input
fakeCaption_model9=caption_fake1_ResNet50(8,classes)
fakeCaption_featuer9=fakeCaption_model9.output
in9=fakeCaption_model9.input
fakeCaption_model10=caption_fake1_ResNet50(9,classes)
fakeCaption_featuer10=fakeCaption_model10.output
in10=fakeCaption_model10.input
#print(fakeCaption_featuer1.shape)
outs =Average()([fakeCaption_featuer1, fakeCaption_featuer2,fakeCaption_featuer3,fakeCaption_featuer4, fakeCaption_featuer5,fakeCaption_featuer6,fakeCaption_featuer7, fakeCaption_featuer8,fakeCaption_featuer9, fakeCaption_featuer10])
#print(outs.shape)
model = Model(inputs= [in1[0],in1[1],in2[0],in2[1],in3[0],in3[1],in4[0],in4[1],in5[0],in5[1],in6[0],in6[1],in7[0],in7[1],in8[0],in8[1],in9[0],in9[1],in10[0],in10[1]], outputs=outs,name='Muit_fake1_Feature_model')
return model
def Muit_fake1_Feature_model_3(classes):
print('build caption_fakeImage model')
fakeCaption_model1=caption_fake1_ResNet50(0,classes)
fakeCaption_featuer1=fakeCaption_model1.output
in1=fakeCaption_model1.input
fakeCaption_model2=caption_fake1_ResNet50(1,classes)
fakeCaption_featuer2=fakeCaption_model2.output
in2=fakeCaption_model2.input
fakeCaption_model3=caption_fake1_ResNet50(2,classes)
fakeCaption_featuer3=fakeCaption_model3.output
in3=fakeCaption_model3.input
#print(fakeCaption_featuer1.shape)
outs =Average()([fakeCaption_featuer1, fakeCaption_featuer2,fakeCaption_featuer3])
#print(outs.shape)
model = Model(inputs= [in1[0],in1[1],in2[0],in2[1],in3[0],in3[1]], outputs=outs,name='Muit_fake1_Feature_model')
return model
def Muit_fake1_Feature_model_2(classes):
print('build caption_fakeImage model')
fakeCaption_model1=caption_fake1_ResNet50(0,classes)
fakeCaption_featuer1=fakeCaption_model1.output
in1=fakeCaption_model1.input
fakeCaption_model2=caption_fake1_ResNet50(1,classes)
fakeCaption_featuer2=fakeCaption_model2.output
in2=fakeCaption_model2.input
#print(fakeCaption_featuer1.shape)
outs =Average()([fakeCaption_featuer1, fakeCaption_featuer2])
#print(outs.shape)
model = Model(inputs= [in1[0],in1[1],in2[0],in2[1]], outputs=outs,name='Muit_fake1_Feature_model')
return model
def finnal_muilt1FeatureFake1_model(classes):
print('build true image model')
true_image_model = true_ResNet50(classes)
true_image_feature=true_image_model.output
in0=true_image_model.input
print('build Muit_fake_Feature_model')
fake_base_model1=caption_fake1_ResNet50(0,classes)
temp_feature1=fake_base_model1.output
in1=fake_base_model1.input
merged=Add()([true_image_feature,temp_feature1])
Flat= Flatten()(merged)
Dor=Dropout(0.5)(Flat)
fc = Dense(512)(Dor)
bnn = BatchNormalization(name='bn2')(fc)
Den=Dense(classes, activation='softmax')(bnn)
#in1_data=in1[0][0]
#print(in1_data.shape)
#in2_data=in1[1][0]
#print(in2_data.shape)
#m_model=Model(inputs=[in0,in1_0,in1_1,in2_0,in2_1,in3_0,in3_1,in4_0,in4_1,in5_0,in5_1,in6_0,in6_1,in7_0,in7_1,in8_0,in8_1,in9_0,in9_1,in10_0,in10_1],outputs=Den)
m_model=Model(inputs=[in0,in1[:][0],in1[:][1]], outputs=Den)
#m_model=Model(inputs=[in0,inall],outputs=Den)
#plot_model(s_model, to_file='true-fake-restnet50-fine-20181104.png',show_shapes=True)
m_model.summary()
return m_model
def finnal_muilt1FeatureFake2_model(classes):
print('build true image model')
true_image_model = true_ResNet50(classes)
true_image_feature=true_image_model.output
in0=true_image_model.input
print('build Muit_fake_Feature_model')
mult_fake1_caption_model=Muit_fake1_Feature_model_2(classes)
mult_fake1_caption_feature=mult_fake1_caption_model.output
in1=mult_fake1_caption_model.input
merged=Add()([true_image_feature,mult_fake1_caption_feature])
Flat= Flatten()(merged)
Dor=Dropout(0.5)(Flat)
fc = Dense(512)(Dor)
bnn = BatchNormalization(name='bn2')(fc)
Den=Dense(classes, activation='softmax')(bnn)
#in1_data=in1[0][0]
#print(in1_data.shape)
#in2_data=in1[1][0]
#print(in2_data.shape)
#m_model=Model(inputs=[in0,in1_0,in1_1,in2_0,in2_1,in3_0,in3_1,in4_0,in4_1,in5_0,in5_1,in6_0,in6_1,in7_0,in7_1,in8_0,in8_1,in9_0,in9_1,in10_0,in10_1],outputs=Den)
m_model=Model(inputs=[in0,in1[:][0],in1[:][1],in1[:][2],in1[:][3]], outputs=Den)
#m_model=Model(inputs=[in0,inall],outputs=Den)
#plot_model(s_model, to_file='true-fake-restnet50-fine-20181104.png',show_shapes=True)
m_model.summary()
return m_model
def finnal_muilt1Feature_model(classes):
print('build true image model')
true_image_model = true_ResNet50(classes)
true_image_feature=true_image_model.output
in0=true_image_model.input
print('build Muit_fake_Feature_model')
mult_fake1_caption_model=Muit_fake1_Feature_model(classes)
mult_fake1_caption_feature=mult_fake1_caption_model.output
in1=mult_fake1_caption_model.input
merged=Add()([true_image_feature,mult_fake1_caption_feature])
Flat= Flatten()(merged)
Dor=Dropout(0.5)(Flat)
fc = Dense(512)(Dor)
bnn = BatchNormalization(name='bn2')(fc)
Den=Dense(classes, activation='softmax')(bnn)
#in1_data=in1[0][0]
#print(in1_data.shape)
#in2_data=in1[1][0]
#print(in2_data.shape)
#m_model=Model(inputs=[in0,in1_0,in1_1,in2_0,in2_1,in3_0,in3_1,in4_0,in4_1,in5_0,in5_1,in6_0,in6_1,in7_0,in7_1,in8_0,in8_1,in9_0,in9_1,in10_0,in10_1],outputs=Den)
m_model=Model(inputs=[in0,in1[:][0],in1[:][1],in1[:][2],in1[:][3],in1[:][4],in1[:][5],in1[:][6],in1[:][7],in1[:][8],in1[:][9],in1[:][10],in1[:][11],in1[:][12],in1[:][13],in1[:][14],in1[:][15],in1[:][16],in1[:][17],in1[:][18],in1[:][19]], outputs=Den)
#m_model=Model(inputs=[in0,inall],outputs=Den)
#plot_model(s_model, to_file='true-fake-restnet50-fine-20181104.png',show_shapes=True)
m_model.summary()
return m_model
def finnal_muilt1FeatureFake3_model(classes):
print('build true image model')
true_image_model = true_ResNet50(classes)
true_image_feature=true_image_model.output
in0=true_image_model.input
print('build Muit_fake_Feature_model')
mult_fake1_caption_model=Muit_fake1_Feature_model_3(classes)
mult_fake1_caption_feature=mult_fake1_caption_model.output
in1=mult_fake1_caption_model.input
merged=Add()([true_image_feature,mult_fake1_caption_feature])
Flat= Flatten()(merged)
Dor=Dropout(0.5)(Flat)
fc = Dense(512)(Dor)
bnn = BatchNormalization(name='bn2')(fc)
Den=Dense(classes, activation='softmax')(bnn)
#in1_data=in1[0][0]
#print(in1_data.shape)
#in2_data=in1[1][0]
#print(in2_data.shape)
#m_model=Model(inputs=[in0,in1_0,in1_1,in2_0,in2_1,in3_0,in3_1,in4_0,in4_1,in5_0,in5_1,in6_0,in6_1,in7_0,in7_1,in8_0,in8_1,in9_0,in9_1,in10_0,in10_1],outputs=Den)
m_model=Model(inputs=[in0,in1[:][0],in1[:][1],in1[:][2],in1[:][3],in1[:][4],in1[:][5]], outputs=Den)
#m_model=Model(inputs=[in0,inall],outputs=Den)
#plot_model(s_model, to_file='true-fake-restnet50-fine-20181104.png',show_shapes=True)
m_model.summary()
return m_model
# def fake2_ResNet50(index,classes):
# t1=index+0
# fake_base_model1=fake_ResNet50_base(t1,input_shape = (224, 224, 3),classes=200)
# temp_feature1=fake_base_model1.output
# in1=fake_base_model1.input
# t2=index+1
# fake_base_model2=fake_ResNet50_base(t2,input_shape = (224, 224, 3),classes=200)
# temp_feature2=fake_base_model2.output
# in2=fake_base_model2.input
# outs =Average()([temp_feature1, temp_feature2])
# model = Model(inputs= [in1,in2], outputs=outs,name='fake-ResNet50')
# return model
# def caption_fake2_ResNet50(index,classes):
# print('merge the fake images')
# mult_fake_model=fake3_ResNet50(classes)
# mult_fakeimage_feature=mult_fake_model.output
# in1=mult_fake_model.input
# nIndex=index*2
# caption_model=text_cnnmodel_base(nIndex,classes)
# caption_feature=caption_model.output
# in2=caption_model.input
# merged=Add()([mult_fakeimage_feature,caption_feature])
# Flat= Flatten()(merged)
# Dor=Dropout(0.1)(Flat)
# fc = Dense(2048)(Dor)
# model=Model(inputs=[in1,in2],outputs=fc,name='caption_fake3_ResNet50')
# return model
def caption_fake2_ResNet50_k2(index,classes):
print('merge the fake images %d' % index)
fake_base_model1=fake_ResNet50_base_new(index,input_shape = (224, 224, 3),classes=200)
temp_feature1=fake_base_model1.output
in1=fake_base_model1.input
caption_model=text_cnnmodel_base(index,classes)
caption_feature=caption_model.output
in2=caption_model.input
Index=index+1
print('merge the fake images %d' % Index)
fake_base_model2=fake_ResNet50_base_new(Index,input_shape = (224, 224, 3),classes=200)
temp_feature2=fake_base_model2.output
in3=fake_base_model2.input
MultiFeature=Average()([temp_feature1,temp_feature2])
merged=Add()([caption_feature,MultiFeature])
#Flat= Flatten()(merged)
#Dor=Dropout(0.5)(Flat)
#fc = Dense(2048)(Dor)
model=Model(inputs=[in1,in3,in2],outputs=merged,name='caption_fake2_ResNet50')
return model
def Muit_fake5_Feature_model_k2(classes):
print('build caption_fakeImage model')
fakeCaption_model1=caption_fake2_ResNet50_k2(0,classes)
fakeCaption_featuer1=fakeCaption_model1.output
in1=fakeCaption_model1.input
fakeCaption_model2=caption_fake2_ResNet50_k2(2,classes)
fakeCaption_featuer2=fakeCaption_model2.output
in2=fakeCaption_model2.input
fakeCaption_model3=caption_fake2_ResNet50_k2(4,classes)
fakeCaption_featuer3=fakeCaption_model3.output
in3=fakeCaption_model3.input
fakeCaption_model4=caption_fake2_ResNet50_k2(6,classes)
fakeCaption_featuer4=fakeCaption_model4.output
in4=fakeCaption_model4.input
fakeCaption_model5=caption_fake2_ResNet50_k2(8,classes)
fakeCaption_featuer5=fakeCaption_model5.output
in5=fakeCaption_model5.input
outs =Average()([fakeCaption_featuer1, fakeCaption_featuer2,fakeCaption_featuer3,fakeCaption_featuer4, fakeCaption_featuer5])
model = Model(inputs= [in1,in2,in3,in4,in5], outputs=outs,name='Muit_fake5_Feature_model_k2')
return model
def finnal_muilt2Feature_model_k2(classes):
print('build true image model')
true_image_model = true_ResNet50(classes)
true_image_feature=true_image_model.output
in0=true_image_model.input
print('build Muit_fake5_Feature_model_k2')
mult_fake2_caption_model=Muit_fake5_Feature_model_k2(classes)
mult_fake2_caption_feature=mult_fake2_caption_model.output
in1=mult_fake2_caption_model.input
merged=Add()([true_image_feature,mult_fake2_caption_feature])
Flat= Flatten()(merged)
Dor=Dropout(0.5)(Flat)
fc = Dense(512)(Dor)
bnn = BatchNormalization(name='bn2')(fc)
Den=Dense(classes, activation='softmax')(bnn)
m_model=Model(inputs=[in0,in1], outputs=Den)
#plot_model(s_model, to_file='true-fake-restnet50-fine-20181104.png',show_shapes=True)
m_model.summary()
return m_model
def fake2_ResNet50(index,ki,classes):
base_model1 = RResNet50(input_shape=(224,224,3),classes=200)
base_model1.load_weights('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5')
for layer in base_model1.layers:
layer.trainable=False
layer.name = layer.name + str("_")+str(index)
#base_model1.summary()
Num1=(index+2)*49+ki*6
res_layer1='activation_'+str(Num1)+ str("_")+str(index)
#print(res_layer1)
res1 = base_model1.get_layer(res_layer1).output
res1 = BatchNormalization()(res1)
in1=base_model1.input
base_model2 = RResNet50(input_shape=(224,224,3),classes=200)
base_model2.load_weights('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5')
Index=index+1
for layer in base_model2.layers:
layer.trainable=False
layer.name = layer.name + str("_")+str(Index)
#base_model2.summary()
Num2=(Index+2)*49+ki*6
res_layer2='activation_'+str(Num2)+ str("_")+str(Index)
#print(res_layer2)
res2 = base_model2.get_layer(res_layer2).output
res2 = BatchNormalization()(res2)
in2=base_model2.input
mult_fakeimage_feature =Average()([res1, res2])
nIndex=index*2
caption_model=text_cnnmodel_base(nIndex,classes)
caption_feature=caption_model.output
in3=caption_model.input
merged=Add()([mult_fakeimage_feature,caption_feature])
Flat= Flatten()(merged)
Dor=Dropout(0.1)(Flat)
fc = Dense(2048)(Dor)
model = Model(inputs= [in1,in2,in3], outputs=fc,name='fake2-ResNet50')
return model
def caption_fake2_ResNet50(index,classes):
print('merge the fake images')
mult_fake_model=fake2_ResNet50(index,0,classes)  #fake2_ResNet50 expects (index, ki, classes); ki=0 is assumed here
mult_fakeimage_feature=mult_fake_model.output
in1=mult_fake_model.input
nIndex=index*2
caption_model=text_cnnmodel_base(nIndex,classes)
caption_feature=caption_model.output
in2=caption_model.input
merged=Add()([mult_fakeimage_feature,caption_feature])
Flat= Flatten()(merged)
Dor=Dropout(0.1)(Flat)
fc = Dense(2048)(Dor)
model=Model(inputs=[in1[0],in1[1],in1[2],in2],outputs=fc,name='caption_fake2_ResNet50')  #fake2_ResNet50 exposes three inputs (two fake images and one caption)
return model
def caption_fake2_k2_ResNet50(index,classes):
print('merge the fake images')
t1=index+0
fake_base_model1=fake_ResNet50_base(t1,input_shape = (224, 224, 3),classes=200)
temp_feature1=fake_base_model1.output
in1=fake_base_model1.input
t2=index+1
fake_base_model2=fake_ResNet50_base(t2,input_shape = (224, 224, 3),classes=200)
temp_feature2=fake_base_model2.output
in2=fake_base_model2.input
mult_fakeimage_feature =Average()([temp_feature1, temp_feature2])
nIndex=index*2
caption_model=text_cnnmodel_base(nIndex,classes)
caption_feature=caption_model.output
in3=caption_model.input
merged=Add()([mult_fakeimage_feature,caption_feature])
Flat= Flatten()(merged)
Dor=Dropout(0.1)(Flat)
fc = Dense(2048)(Dor)
model=Model(inputs=[in1,in2,in3],outputs=fc,name='caption_fake2_ResNet50')
return model
from keras_attention_block import *
def text_attent_cnnmodel(classes=200):
main_input = Input(shape=(64,), dtype='float64')
embedder = Embedding(len(vocab) + 1, 256, input_length = 64)
#embedder = Embedding(9999, 256, input_length = 64)
embed = embedder(main_input)
conv1_1 = Conv1D(256, 3, padding='same')(embed)
bn1_1 = BatchNormalization()(conv1_1)
relu1_1 = Activation('relu')(bn1_1)
conv1_2 = Conv1D(128, 3, padding='same')(relu1_1)
bn1_2 = BatchNormalization()(conv1_2)
relu1_2 = Activation('relu')(bn1_2)
cnn1 = MaxPooling1D(pool_size=4)(relu1_2)
# kernel_size = 4
conv2_1 = Conv1D(256, 4, padding='same')(embed)
bn2_1 = BatchNormalization()(conv2_1)
relu2_1 = Activation('relu')(bn2_1)
conv2_2 = Conv1D(128, 4, padding='same')(relu2_1)
bn2_2 = BatchNormalization()(conv2_2)
relu2_2 = Activation('relu')(bn2_2)
cnn2 = MaxPooling1D(pool_size=4)(relu2_2)
# kernel_size = 5
conv3_1 = Conv1D(256, 5, padding='same')(embed)
bn3_1 = BatchNormalization()(conv3_1)
relu3_1 = Activation('relu')(bn3_1)
conv3_2 = Conv1D(128, 5, padding='same')(relu3_1)
bn3_2 = BatchNormalization()(conv3_2)
relu3_2 = Activation('relu')(bn3_2)
cnn3 = MaxPooling1D(pool_size=4)(relu3_2)
#
conc = Concatenate()([cnn1,cnn2,cnn3])
#print(conc.shape)
#attention_mul = SelfAttention1DLayer(similarity="linear",dropout_rate=0.2)(conc)
#print(attention_mul.shape)
#flat = Flatten()(attention_mul)
#drop = Dropout(0.5)(flat)
#print(drop.shape)
#attention_mul = SelfAttention2DLayer(similarity="linear",dropout_rate=0.2)(drop)
#print(attention_mul.shape)
#fc = Dense(2048)(drop)
#bn = BatchNormalization(name='bn')(fc)
model = Model(inputs = main_input, outputs = conc)
#model.summary()
return model
def text_attent_cnnmodel_base(index,classes):
base_model = text_attent_cnnmodel(classes)
for layer in base_model.layers:
layer.trainable=False
layer.name = layer.name + str("_")+str(index)
res = base_model.output
#print res.shape
model = Model(inputs=base_model.input, outputs=res)
return model
#es = EarlyStopping(monitor='val_loss', patience=1)
#model.fit(x=X_train,y=Y_train,epochs=20,batch_size=32,validation_data=(X_val, Y_val),callbacks=[es])
#tt=build_pspnet(102, 50, input_shape=(224,224), activation='softmax')
def mult_text_attent_cnnmodel(classes):
capt1_model=text_attent_cnnmodel_base(0,classes)
capt1_feature=capt1_model.output
capt1_in=capt1_model.input
capt2_model=text_attent_cnnmodel_base(1,classes)
capt2_feature=capt2_model.output
capt2_in=capt2_model.input
capt3_model=text_attent_cnnmodel_base(2,classes)
capt3_feature=capt3_model.output
capt3_in=capt3_model.input
capt4_model=text_attent_cnnmodel_base(3,classes)
capt4_feature=capt4_model.output
capt4_in=capt4_model.input
capt5_model=text_attent_cnnmodel_base(4,classes)
capt5_feature=capt5_model.output
capt5_in=capt5_model.input
capt6_model=text_attent_cnnmodel_base(5,classes)
capt6_feature=capt6_model.output
capt6_in=capt6_model.input
capt7_model=text_attent_cnnmodel_base(6,classes)
capt7_feature=capt7_model.output
capt7_in=capt7_model.input
capt8_model=text_attent_cnnmodel_base(7,classes)
capt8_feature=capt8_model.output
capt8_in=capt8_model.input
capt9_model=text_attent_cnnmodel_base(8,classes)
capt9_feature=capt9_model.output
capt9_in=capt9_model.input
capt10_model=text_attent_cnnmodel_base(9,classes)
capt10_feature=capt10_model.output
capt10_in=capt10_model.input
outs = Concatenate()([capt1_feature, capt2_feature,capt3_feature, capt4_feature,capt5_feature,capt6_feature,capt7_feature, capt8_feature,capt9_feature, capt10_feature])
print(outs.shape)
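# The ten caption branches are concatenated along the feature axis; self-attention is then
# applied over the resulting 1D sequence before it is flattened and projected to 2048-d.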
attention_mul = SelfAttention1DLayer(similarity="multiplicative",dropout_rate=0.2)(outs)
print(attention_mul.shape)
flat = Flatten()(attention_mul)
drop = Dropout(0.5)(flat)
#print(drop.shape)
fc = Dense(2048)(drop)
bn = BatchNormalization(name='bn')(fc)
model = Model(inputs= [capt1_in,capt2_in,capt3_in,capt4_in,capt5_in,capt6_in,capt7_in,capt8_in,capt9_in,capt10_in], outputs=bn,name='mult_text_cnnmodel')
model.summary()
return model
def true_attent_ResNet50(classes):
base_model = RResNet50(input_shape=(224,224,3),classes=200)
base_model.load_weights('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5')
for layer in base_model.layers:
layer.trainable=False
res = base_model.get_layer('activation_49').output
#print(res.shape)
#attention_mul = SelfAttention2DLayer(similarity="dot_product",dropout_rate=None)(res)
#attention_mul = SelfAttention2DLayer(output_size=(7,7),similarity="additive",d_a=10,dropout_rate=None)(res)
attention_mul = Attention2DLayer(similarity="dot_product",dropout_rate=None)(res)
#print(attention_mul.shape)
res = BatchNormalization()(attention_mul)
model = Model(inputs=base_model.input, outputs=res,name='true-ResNet50')
#model.summary()
return model
def fake2_attent_ResNet50(index,ki,classes):
base_model1 = RResNet50(input_shape=(224,224,3),classes=200)
base_model1.load_weights('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5')
for layer in base_model1.layers:
layer.trainable=False
layer.name = layer.name + str("_")+str(index)
#base_model1.summary()
Num1=(index+2)*49+ki*6
res_layer1='activation_'+str(Num1)+ str("_")+str(index)
#print(res_layer1)
res1 = base_model1.get_layer(res_layer1).output
#res1 = SelfAttention2DLayer(similarity="dot_product",dropout_rate=None)(res1)
res1 = Attention2DLayer(similarity="dot_product",dropout_rate=None)(res1)
#res1 = SelfAttention2DLayer(output_size=(7,7),similarity="additive",d_a=10,dropout_rate=None)(res1)
res1 = BatchNormalization()(res1)
in1=base_model1.input
base_model2 = RResNet50(input_shape=(224,224,3),classes=200)
base_model2.load_weights('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5')
Index=index+1
for layer in base_model2.layers:
layer.trainable=False
layer.name = layer.name + str("_")+str(Index)
#base_model2.summary()
Num2=(Index+2)*49+ki*6
res_layer2='activation_'+str(Num2)+ str("_")+str(Index)
#print(res_layer2)
res2 = base_model2.get_layer(res_layer2).output
#res2 = SelfAttention2DLayer(similarity="dot_product",dropout_rate=None)(res2)
#res2 = SelfAttention2DLayer(output_size=(7,7),similarity="additive",d_a=10,dropout_rate=None)(res2)
res2 = Attention2DLayer(similarity="dot_product",dropout_rate=None)(res2)
res2 = BatchNormalization()(res2)
in2=base_model2.input
mult_fakeimage_feature =Average()([res1, res2])
#mult_fakeimage_feature =Concatenate()([res1, res2])
nIndex=index*2
caption_model=text_attent_cnnmodel_base(nIndex,classes)
caption_feature=caption_model.output
#caption_feature = SelfAttention1DLayer(similarity="dot_product",dropout_rate=None)(caption_feature)
caption_feature = Attention1DLayer(similarity="dot_product",dropout_rate=None)(caption_feature)
print(caption_feature.shape)
#caption_feature = SelfAttention1DLayer(kernel_size=(16,384),similarity="additive",dropout_rate=None)(caption_feature)
caption_feature = Flatten()(caption_feature)
caption_feature = Dropout(0.5)(caption_feature)
caption_feature = Dense(2048)(caption_feature)
caption_feature = BatchNormalization(name='bn')(caption_feature)
in3=caption_model.input
merged=Add()([mult_fakeimage_feature,caption_feature])
Flat= Flatten()(merged)
Dor=Dropout(0.1)(Flat)
fc = Dense(2048)(Dor)
model = Model(inputs= [in1,in2,in3], outputs=fc,name='fake2-ResNet50')
return model
def fake2_attent1_ResNet50(index,ki,classes):
base_model1 = RResNet50(input_shape=(224,224,3),classes=200)
base_model1.load_weights('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5')
for layer in base_model1.layers:
layer.trainable=False
layer.name = layer.name + str("_")+str(index)
#base_model1.summary()
Num1=(index+2)*49+ki*6
res_layer1='activation_'+str(Num1)+ str("_")+str(index)
#print(res_layer1)
res1 = base_model1.get_layer(res_layer1).output
res1 = BatchNormalization()(res1)
in1=base_model1.input
base_model2 = RResNet50(input_shape=(224,224,3),classes=200)
base_model2.load_weights('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5')
Index=index+1
for layer in base_model2.layers:
layer.trainable=False
layer.name = layer.name + str("_")+str(Index)
#base_model2.summary()
Num2=(Index+2)*49+ki*6
res_layer2='activation_'+str(Num2)+ str("_")+str(Index)
#print(res_layer2)
res2 = base_model2.get_layer(res_layer2).output
res2 = BatchNormalization()(res2)
in2=base_model2.input
mult_fakeimage_feature =Average()([res1, res2])
#mult_fakeimage_feature = Reshape((49,2048))(mult_fakeimage_feature)
#mult_fakeimage_feature =Concatenate()([res1, res2])
nIndex=index*2
caption_model=text_attent_cnnmodel_base(nIndex,classes)
caption_feature=caption_model.output
caption_feature= Flatten()(caption_feature)
caption_feature=Dropout(0.5)(caption_feature)
caption_feature = Dense(2048)(caption_feature)
#caption_feature = SelfAttention1DLayer(similarity="dot_product",dropout_rate=None)(caption_feature)
#caption_feature = Attention1DLayer(similarity="dot_product",dropout_rate=None)([mult_fakeimage_feature,caption_feature])
print(caption_feature.shape)
#caption_feature = SelfAttention1DLayer(kernel_size=(16,384),similarity="additive",dropout_rate=None)(caption_feature)
#caption_feature = Flatten()(caption_feature)
#caption_feature = Dropout(0.5)(caption_feature)
#caption_feature = Dense(2048)(caption_feature)
#caption_feature = BatchNormalization(name='bn')(caption_feature)
in3=caption_model.input
merged=Add()([mult_fakeimage_feature,caption_feature])
#Flat= Flatten()(merged)
#Dor=Dropout(0.1)(Flat)
#fc = Dense(2048)(Dor)
model = Model(inputs= [in1,in2,in3], outputs=merged,name='fake2-ResNet50')
return model
def fake1_attent_ResNet50(index,ki,classes):
base_model1 = RResNet50(input_shape=(224,224,3),classes=200)
base_model1.load_weights('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5')
for layer in base_model1.layers:
layer.trainable=False
layer.name = layer.name + str("_")+str(index)
base_model1.summary()
Num1=(index+2)*49+ki*6
res_layer1='activation_'+str(Num1)+ str("_")+str(index)
print(res_layer1)
res1 = base_model1.get_layer(res_layer1).output
res1 = SelfAttention2DLayer(similarity="additive",dropout_rate=0.5)(res1)
res1 = BatchNormalization()(res1)
in1=base_model1.input
nIndex=index*2
caption_model=text_attent_cnnmodel_base(nIndex,classes)
caption_feature=caption_model.output
caption_feature = SelfAttention1DLayer(similarity="additive",dropout_rate=0.5)(caption_feature)
caption_feature = Flatten()(caption_feature)
caption_feature = Dropout(0.5)(caption_feature)
caption_feature = Dense(2048)(caption_feature)
caption_feature = BatchNormalization(name='bn')(caption_feature)
in2=caption_model.input
merged=Add()([res1,caption_feature])
Flat= Flatten()(merged)
Dor=Dropout(0.1)(Flat)
fc = Dense(2048)(Dor)
model = Model(inputs= [in1,in2], outputs=fc,name='fake2-ResNet50')
return model
###similarity="multiplicative""additive""linear""dot_product"
def Muit_fake1_k1_attent_model(classes):
print('build true image model')
true_image_model = true_attent_ResNet50(classes)
true_image_feature=true_image_model.output
in0=true_image_model.input
print('build Muit_fake3_Feature_model')
print('build caption_fakeImage model')
fakeCaption_model1=fake1_attent_ResNet50(0,0,classes)
fakeCaption_featuer1=fakeCaption_model1.output
in1=fakeCaption_model1.input
#mult_fake3_caption_feature =Average()([fakeCaption_featuer1, fakeCaption_featuer2,fakeCaption_featuer3,fakeCaption_featuer4, fakeCaption_featuer5])
merged=Add()([true_image_feature,fakeCaption_featuer1])
Flat= Flatten()(merged)
Dor=Dropout(0.5)(Flat)
fc = Dense(512)(Dor)
bnn = BatchNormalization(name='bn2')(fc)
Den=Dense(classes, activation='softmax')(bnn)
model = Model(inputs= [in0,in1[0],in1[1]], outputs=Den,name='Muit_fake3__k1_Feature_model')
model.summary()
return model
def Muit_fake1_k2_attent_model(classes):
print('build true image model')
true_image_model = true_attent_ResNet50(classes)
true_image_feature=true_image_model.output
in0=true_image_model.input
print('build Muit_fake3_Feature_model')
print('build caption_fakeImage model')
fakeCaption_model1=fake2_attent_ResNet50(0,0,classes)
fakeCaption_featuer1=fakeCaption_model1.output
in1=fakeCaption_model1.input
#mult_fake3_caption_feature =Average()([fakeCaption_featuer1, fakeCaption_featuer2,fakeCaption_featuer3,fakeCaption_featuer4, fakeCaption_featuer5])
merged=Add()([true_image_feature,fakeCaption_featuer1])
Flat= Flatten()(merged)
Dor=Dropout(0.5)(Flat)
fc = Dense(512)(Dor)
bnn = BatchNormalization(name='bn2')(fc)
Den=Dense(classes, activation='softmax')(bnn)
model = Model(inputs= [in0,in1[0],in1[1],in1[2]], outputs=Den,name='Muit_fake3__k2_Feature_model')
model.summary()
return model
def Muit_fake1_k2_attent1_model(classes):
print('build true image model')
true_image_model = true_ResNet50(classes)
true_image_feature=true_image_model.output
in0=true_image_model.input
print('build Muit_fake3_Feature_model')
print('build caption_fakeImage model')
fakeCaption_model1=fake2_attent1_ResNet50(0,0,classes)
fakeCaption_featuer1=fakeCaption_model1.output
in1=fakeCaption_model1.input
#mult_fake3_caption_feature =Average()([fakeCaption_featuer1, fakeCaption_featuer2,fakeCaption_featuer3,fakeCaption_featuer4, fakeCaption_featuer5])
#merged=Add()([true_image_feature,fakeCaption_featuer1])
merged = Attention2DLayer(similarity="additive",dropout_rate=0.1)([true_image_feature,fakeCaption_featuer1])
#merged = Attention2DLayer(output_size=(7,7),similarity="additive",dropout_rate=0.1)([true_image_feature,fakeCaption_featuer1])
Flat= Flatten()(merged)
Dor=Dropout(0.5)(Flat)
fc = Dense(512)(Dor)
bnn = BatchNormalization(name='bn2')(fc)
Den=Dense(classes, activation='softmax')(bnn)
model = Model(inputs= [in0,in1[0],in1[1],in1[2]], outputs=Den,name='Muit_fake3__k2_Feature_model')
model.summary()
return model
def Muit_fake1_k2_attent1_dot_model(classes):
print('build true image model')
true_image_model = true_ResNet50(classes)
true_image_feature=true_image_model.output
in0=true_image_model.input
print('build Muit_fake3_Feature_model')
print('build caption_fakeImage model')
fakeCaption_model1=fake2_attent1_ResNet50(0,0,classes)
fakeCaption_featuer1=fakeCaption_model1.output
in1=fakeCaption_model1.input
#mult_fake3_caption_feature =Average()([fakeCaption_featuer1, fakeCaption_featuer2,fakeCaption_featuer3,fakeCaption_featuer4, fakeCaption_featuer5])
#merged=Add()([true_image_feature,fakeCaption_featuer1])
true_image_feature=Conv2D(49, kernel_size=(3,3), padding='same')(true_image_feature)
fakeCaption_featuer1=Conv2D(49, kernel_size=(3,3), padding='same')(fakeCaption_featuer1)
merged = Attention2DLayer(similarity="dot_product",dropout_rate=None)([true_image_feature,fakeCaption_featuer1])
#merged = Attention2DLayer(output_size=(7,7),similarity="additive",dropout_rate=0.1)([true_image_feature,fakeCaption_featuer1])
Flat= Flatten()(merged)
Dor=Dropout(0.5)(Flat)
fc = Dense(512)(Dor)
bnn = BatchNormalization(name='bn2')(fc)
Den=Dense(classes, activation='softmax')(bnn)
model = Model(inputs= [in0,in1[0],in1[1],in1[2]], outputs=Den,name='Muit_fake3__k2_Feature_model')
model.summary()
return model
def Muit_fake1_k2_Feature_model(classes):
print('build true image model')
true_image_model = true_attent_ResNet50(classes)
true_image_feature=true_image_model.output
in0=true_image_model.input
print('build Muit_fake3_Feature_model')
print('build caption_fakeImage model')
fakeCaption_model1=fake2_ResNet50(0,0,classes)
fakeCaption_featuer1=fakeCaption_model1.output
in1=fakeCaption_model1.input
#mult_fake3_caption_feature =Average()([fakeCaption_featuer1, fakeCaption_featuer2,fakeCaption_featuer3,fakeCaption_featuer4, fakeCaption_featuer5])
merged=Add()([true_image_feature,fakeCaption_featuer1])
Flat= Flatten()(merged)
Dor=Dropout(0.5)(Flat)
fc = Dense(512)(Dor)
bnn = BatchNormalization(name='bn2')(fc)
Den=Dense(classes, activation='softmax')(bnn)
model = Model(inputs= [in0,in1[0],in1[1],in1[2]], outputs=Den,name='Muit_fake3__k2_Feature_model')
model.summary()
return model
def Muit_fake5_k2_Feature_model(classes):
print('build true image model')
true_image_model = true_ResNet50(classes)
true_image_feature=true_image_model.output
in0=true_image_model.input
print('build Muit_fake3_Feature_model')
print('build caption_fakeImage model')
fakeCaption_model1=fake2_ResNet50(0,0,classes)
fakeCaption_featuer1=fakeCaption_model1.output
in1=fakeCaption_model1.input
fakeCaption_model2=fake2_ResNet50(2,1,classes)
fakeCaption_featuer2=fakeCaption_model2.output
in2=fakeCaption_model2.input
fakeCaption_model3=fake2_ResNet50(4,2,classes)
fakeCaption_featuer3=fakeCaption_model3.output
in3=fakeCaption_model3.input
fakeCaption_model4=fake2_ResNet50(6,3,classes)
fakeCaption_featuer4=fakeCaption_model4.output
in4=fakeCaption_model4.input
fakeCaption_model5=fake2_ResNet50(8,4,classes)
fakeCaption_featuer5=fakeCaption_model5.output
in5=fakeCaption_model5.input
mult_fake3_caption_feature =Average()([fakeCaption_featuer1, fakeCaption_featuer2,fakeCaption_featuer3,fakeCaption_featuer4, fakeCaption_featuer5])
merged=Add()([true_image_feature,mult_fake3_caption_feature])
Flat= Flatten()(merged)
Dor=Dropout(0.5)(Flat)
fc = Dense(512)(Dor)
bnn = BatchNormalization(name='bn2')(fc)
Den=Dense(classes, activation='softmax')(bnn)
model = Model(inputs= [in0,in1[0],in1[1],in1[2],in2[0],in2[1],in2[2],in3[0],in3[1],in3[2],in4[0],in4[1],in4[2],in5[0],in5[1],in5[2]], outputs=Den,name='Muit_fake3__k2_Feature_model')
model.summary()
return model
def finnal_muilt5Feature_k2_model(classes):
print('build true image model')
true_image_model = true_ResNet50(classes)
true_image_feature=true_image_model.output
in0=true_image_model.input
print('build Muit_fake3_Feature_model')
mult_fake2_caption_model=Muit_fake5_k2_Feature_model(classes)
mult_fake2_caption_feature=mult_fake2_caption_model.output
in1=mult_fake2_caption_model.input
merged=Add()([true_image_feature,mult_fake2_caption_feature])
Flat= Flatten()(merged)
Dor=Dropout(0.5)(Flat)
fc = Dense(512)(Dor)
bnn = BatchNormalization(name='bn2')(fc)
Den=Dense(classes, activation='softmax')(bnn)
m_model=Model(inputs=[in0,in1], outputs=Den)
#plot_model(s_model, to_file='true-fake-restnet50-fine-20181104.png',show_shapes=True)
m_model.summary()
return m_model
def fake3_ResNet50(index,classes):
t1=index+0
fake_base_model1=fake_ResNet50_base(t1,input_shape = (224, 224, 3),classes=200)
temp_feature1=fake_base_model1.output
in1=fake_base_model1.input
t2=index+1
fake_base_model2=fake_ResNet50_base(t2,input_shape = (224, 224, 3),classes=200)
temp_feature2=fake_base_model2.output
in2=fake_base_model2.input
t3=index+2
fake_base_model3=fake_ResNet50_base(t3,input_shape = (224, 224, 3),classes=200)
temp_feature3=fake_base_model3.output
in3=fake_base_model3.input
outs =Average()([temp_feature1, temp_feature2,temp_feature3])
model = Model(inputs= [in1,in2,in3], outputs=outs,name='fake-ResNet50')
return model
def caption_fake3_ResNet50(index,classes):
print('merge the fake images')
mult_fake_model=fake3_ResNet50(index*3,classes)  #index*3 assumed so the three fake images line up with the caption index below
mult_fakeimage_feature=mult_fake_model.output
in1=mult_fake_model.input
nIndex=index*3
caption_model=text_cnnmodel_base(nIndex,classes)
caption_feature=caption_model.output
in2=caption_model.input
merged=Add()([mult_fakeimage_feature,caption_feature])
Flat= Flatten()(merged)
Dor=Dropout(0.1)(Flat)
fc = Dense(2048)(Dor)
model=Model(inputs=[in1,in2],outputs=fc,name='caption_fake3_ResNet50')
return model
def Muit_fake3_Feature_model(classes):
print('build caption_fakeImage model')
fakeCaption_model1=caption_fake3_ResNet50(0,classes)
fakeCaption_featuer1=fakeCaption_model1.output
in1=fakeCaption_model1.input
fakeCaption_model2=caption_fake3_ResNet50(1,classes)
fakeCaption_featuer2=fakeCaption_model2.output
in2=fakeCaption_model2.input
fakeCaption_model3=caption_fake3_ResNet50(2,classes)
fakeCaption_featuer3=fakeCaption_model3.output
in3=fakeCaption_model3.input
fakeCaption_model4=caption_fake3_ResNet50(3,classes)
fakeCaption_featuer4=fakeCaption_model4.output
in4=fakeCaption_model4.input
fakeCaption_model5=caption_fake3_ResNet50(4,classes)
fakeCaption_featuer5=fakeCaption_model5.output
in5=fakeCaption_model5.input
fakeCaption_model6=caption_fake3_ResNet50(5,classes)
fakeCaption_featuer6=fakeCaption_model6.output
in6=fakeCaption_model6.input
fakeCaption_model7=caption_fake3_ResNet50(6,classes)
fakeCaption_featuer7=fakeCaption_model7.output
in7=fakeCaption_model7.input
fakeCaption_model8=caption_fake3_ResNet50(7,classes)
fakeCaption_featuer8=fakeCaption_model8.output
in8=fakeCaption_model8.input
fakeCaption_model9=caption_fake3_ResNet50(8,classes)
fakeCaption_featuer9=fakeCaption_model9.output
in9=fakeCaption_model9.input
fakeCaption_model10=caption_fake3_ResNet50(9,classes)
fakeCaption_featuer10=fakeCaption_model10.output
in10=fakeCaption_model10.input
outs =Average()([fakeCaption_featuer1, fakeCaption_featuer2,fakeCaption_featuer3,fakeCaption_featuer4, fakeCaption_featuer5,fakeCaption_featuer6,fakeCaption_featuer7, fakeCaption_featuer8,fakeCaption_featuer9, fakeCaption_featuer10])
model = Model(inputs= [in1,in2,in3,in4,in5,in6,in7,in8,in9,in10], outputs=outs,name='Muit_fake3_Feature_model')
return model
def finnal_muilt3Feature_model(classes):
print('build true image model')
true_image_model = true_ResNet50(classes)
true_image_feature=true_image_model.output
in0=true_image_model.input
print('build Muit_fake3_Feature_model')
mult_fake3_caption_model=Muit_fake3_Feature_model(classes)
mult_fake3_caption_feature=mult_fake3_caption_model.output
in1=mult_fake3_caption_model.input
merged=Add()([true_image_feature,mult_fake3_caption_feature])
Flat= Flatten()(merged)
Dor=Dropout(0.5)(Flat)
fc = Dense(512)(Dor)
bnn = BatchNormalization(name='bn2')(fc)
Den=Dense(classes, activation='softmax')(bnn)
m_model=Model(inputs=[in0,in1], outputs=Den)
#plot_model(s_model, to_file='true-fake-restnet50-fine-20181104.png',show_shapes=True)
m_model.summary()
return m_model
def fake5_ResNet50(classes):
fake_base_model1=fake_ResNet50_base55(0,input_shape = (224, 224, 3),classes=200)
temp_feature1=fake_base_model1.output
in1=fake_base_model1.input
fake_base_model2=fake_ResNet50_base55(1,input_shape = (224, 224, 3),classes=200)
temp_feature2=fake_base_model2.output
in2=fake_base_model2.input
fake_base_model3=fake_ResNet50_base55(2,input_shape = (224, 224, 3),classes=200)
temp_feature3=fake_base_model3.output
in3=fake_base_model3.input
fake_base_model4=fake_ResNet50_base55(3,input_shape = (224, 224, 3),classes=200)
temp_feature4=fake_base_model4.output
in4=fake_base_model4.input
fake_base_model5=fake_ResNet50_base55(4,input_shape = (224, 224, 3),classes=200)
temp_feature5=fake_base_model5.output
in5=fake_base_model5.input
#ins =Add()([inputall[0], inputall[1],inputall[2], inputall[3],inputall[4], inputall[5],inputall[6], inputall[7],inputall[8], inputall[9]])
outs =Average()([temp_feature1, temp_feature2,temp_feature3, temp_feature4,temp_feature5])
model = Model(inputs= [in1,in2,in3,in4,in5], outputs=outs,name='fake-ResNet50')
return model
def caption_fake5_ResNet50(index,classes):
print('merge the fake images')
mult_fake_model=fake5_ResNet50(classes)
mult_fakeimage_feature=mult_fake_model.output
in1=mult_fake_model.input
caption_model=text_cnnmodel_base(index,classes)
caption_feature=caption_model.output
in2=caption_model.input
merged=Add()([mult_fakeimage_feature,caption_feature])
Flat= Flatten()(merged)
Dor=Dropout(0.1)(Flat)
fc = Dense(2048)(Dor)
model=Model(inputs=[in1,in2],outputs=fc,name='caption_fake5_ResNet50')
return model
def Muit_fake5_Feature_model(classes):
print('build caption_fakeImage model')
fakeCaption_model1=caption_fake5_ResNet50(0,classes)
fakeCaption_featuer1=fakeCaption_model1.output
in1=fakeCaption_model1.input
fakeCaption_model2=caption_fake5_ResNet50(1,classes)
fakeCaption_featuer2=fakeCaption_model2.output
in2=fakeCaption_model2.input
fakeCaption_model3=caption_fake5_ResNet50(2,classes)
fakeCaption_featuer3=fakeCaption_model3.output
in3=fakeCaption_model3.input
fakeCaption_model4=caption_fake5_ResNet50(3,classes)
fakeCaption_featuer4=fakeCaption_model4.output
in4=fakeCaption_model4.input
fakeCaption_model5=caption_fake5_ResNet50(4,classes)
fakeCaption_featuer5=fakeCaption_model5.output
in5=fakeCaption_model5.input
fakeCaption_model6=caption_fake5_ResNet50(5,classes)
fakeCaption_featuer6=fakeCaption_model6.output
in6=fakeCaption_model6.input
fakeCaption_model7=caption_fake5_ResNet50(6,classes)
fakeCaption_featuer7=fakeCaption_model7.output
in7=fakeCaption_model7.input
fakeCaption_model8=caption_fake5_ResNet50(7,classes)
fakeCaption_featuer8=fakeCaption_model8.output
in8=fakeCaption_model8.input
fakeCaption_model9=caption_fake5_ResNet50(8,classes)
fakeCaption_featuer9=fakeCaption_model9.output
in9=fakeCaption_model9.input
fakeCaption_model10=caption_fake5_ResNet50(9,classes)
fakeCaption_featuer10=fakeCaption_model10.output
in10=fakeCaption_model10.input
outs =Average()([fakeCaption_featuer1, fakeCaption_featuer2,fakeCaption_featuer3,fakeCaption_featuer4, fakeCaption_featuer5,fakeCaption_featuer6,fakeCaption_featuer7, fakeCaption_featuer8,fakeCaption_featuer9, fakeCaption_featuer10])
model = Model(inputs= [in1,in2,in3,in4,in5,in6,in7,in8,in9,in10], outputs=outs,name='Muit_fake3_Feature_model')
return model
def finnal_muilt5Feature_model(classes):
print('build true image model')
true_image_model = true_ResNet50(classes)
true_image_feature=true_image_model.output
in0=true_image_model.input
print('build Muit_fake5_Feature_model')
mult_fake5_caption_model=Muit_fake5_Feature_model(classes)
mult_fake5_caption_feature=mult_fake5_caption_model.output
in1=mult_fake5_caption_model.input
merged=Add()([true_image_feature,mult_fake5_caption_feature])
Flat= Flatten()(merged)
Dor=Dropout(0.5)(Flat)
fc = Dense(512)(Dor)
bnn = BatchNormalization(name='bn2')(fc)
Den=Dense(classes, activation='softmax')(bnn)
m_model=Model(inputs=[in0,in1], outputs=Den)
#plot_model(s_model, to_file='true-fake-restnet50-fine-20181104.png',show_shapes=True)
m_model.summary()
return m_model
###======================== PREPARE DATA ====================================###
#build my own data generator
#imgInfo_file_path: pickle file with the image file names
#classInfo_file_path: pickle file with the class labels
#image_direction: true image path
#fackimage0_direction / fackimage1_direction: paths of the generated (fake) images
#txt_direction: caption text path
#image_size: input image size of the model
#num: number of classes (used for the one-hot labels)
tokenizer = Tokenizer(filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',lower=True,split=" ")
Alltxt=open('birds-dataset/birds/vacab.txt','r')
Alltext=Alltxt.read()
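# Note: fit_on_texts expects a list (or generator) of texts; passing the whole file as a
# single string makes the tokenizer iterate character by character. If a word-level
# vocabulary is intended, Alltxt.readlines() or Alltext.split('\n') may be what was meant.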
tokenizer.fit_on_texts(Alltext)
vocab = tokenizer.word_index
import cv2
def data_generator_5(imgInfo_file_path,classInfo_file_path,image_direction,txt_direction,fackimage0_direction,fackimage1_direction,image_size,BATCHSIZE,num):
testfilenames = open(imgInfo_file_path,'rb')
rmesf= pickle.load(testfilenames)
testfilenames = open(classInfo_file_path,'rb')
rmesc= pickle.load(testfilenames)
txt1=[]
txt2=[]
txt3=[]
txt4=[]
txt5=[]
fake01=[]
fake02=[]
fake03=[]
fake04=[]
fake05=[]
fake11=[]
fake12=[]
fake13=[]
fake14=[]
fake15=[]
images=[]
labels=[]
imagefile=[]
textfile=[]
iclass=[]
imagename=[]
num_of_examples=len(rmesf)
for i in range(len(rmesf)):
temp=rmesf[i]
tempimagename=image_direction+temp
#print(tempimagename)
if os.path.isfile(tempimagename)==False:
print('error! no such true image file: %s' %tempimagename)
continue
else:
#class_001/image_00000.txt
img=cv2.imread(tempimagename)
img=cv2.resize(img,(image_size[0], image_size[1]))
img=np.array(img)
ttemp=rmesc[i]
#print(ttemp)
templable=int(ttemp)
templable1=int(ttemp)-1
templable='%03d' % templable
#print(templable)
ftemp=temp[:-4]
txtPath=txt_direction+'class_'+templable+'/'+ftemp+'.txt'
#print(txtPath)
if os.path.isfile(txtPath)==False:
print('error! no such caption file: %s' %txtPath)
continue
else:
temptxt=[]
tempfake0=[]
tempfake1=[]
tmask0=False
tmask1=False
mm=0
for line in open(txtPath,'r'):
if mm<5:
fftemp=temp[:-4]
fakefname0=fackimage0_direction+fftemp+'_sentence'+str(mm)+'.png'
fakefname1=fackimage1_direction+fftemp+'_sentence'+str(mm)+'.png'
mm=mm+1
#print(fakefname)
if os.path.isfile(fakefname0)==False:
print('error! no such fake0 image file: %s' %fakefname0)
tmask0=False
continue
else:
if os.path.isfile(fakefname1)==False:
print('error! no such fake1 image file: %s' %fakefname1)
tmask1=False
continue
else:
ftimg0=cv2.imread(fakefname0)
ftimg0=cv2.resize(ftimg0,(image_size[0], image_size[1]))
ftimg0=np.array(ftimg0)
tempfake0.append(ftimg0)
ftimg1=cv2.imread(fakefname1)
ftimg1=cv2.resize(ftimg1,(image_size[0], image_size[1]))
ftimg1=np.array(ftimg1)
tempfake1.append(ftimg1)
temptxt.append(line)
tmask0=True
tmask1=True
#only keep samples where all five captions and both sets of fake images were found
if tmask0==True and tmask1==True and len(temptxt)==5:
txt1.append(temptxt[0])
txt2.append(temptxt[1])
txt3.append(temptxt[2])
txt4.append(temptxt[3])
txt5.append(temptxt[4])
fake01.append(tempfake0[0])
fake02.append(tempfake0[1])
fake03.append(tempfake0[2])
fake04.append(tempfake0[3])
fake05.append(tempfake0[4])
fake11.append(tempfake1[0])
fake12.append(tempfake1[1])
fake13.append(tempfake1[2])
fake14.append(tempfake1[3])
fake15.append(tempfake1[4])
labels.append(int(templable1))
images.append(img)
capt_train_word_ids1 = tokenizer.texts_to_sequences(txt1)
txt1 = pad_sequences(capt_train_word_ids1, maxlen=64)
capt_train_word_ids2 = tokenizer.texts_to_sequences(txt2)
txt2 = pad_sequences(capt_train_word_ids2, maxlen=64)
capt_train_word_ids3 = tokenizer.texts_to_sequences(txt3)
txt3 = pad_sequences(capt_train_word_ids3, maxlen=64)
capt_train_word_ids4 = tokenizer.texts_to_sequences(txt4)
txt4 = pad_sequences(capt_train_word_ids4, maxlen=64)
capt_train_word_ids5 = tokenizer.texts_to_sequences(txt5)
txt5 = pad_sequences(capt_train_word_ids5, maxlen=64)
images=np.array(images)
fake01=np.array(fake01)
fake02=np.array(fake02)
fake03=np.array(fake03)
fake04=np.array(fake04)
fake05=np.array(fake05)
fake11=np.array(fake11)
fake12=np.array(fake12)
fake13=np.array(fake13)
fake14=np.array(fake14)
fake15=np.array(fake15)
labels = to_categorical(labels, num)
labels=np.array(labels)
# gc.collect()
return images,fake01,fake02,fake03,fake04,fake05,fake11,fake12,fake13,fake14,fake15,txt1,txt2,txt3,txt4,txt5,labels
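# Illustrative call (the paths, image size and class count mirror the ones used later in
# this script and only document the expected argument order):
# images, fk01, fk02, fk03, fk04, fk05, fk11, fk12, fk13, fk14, fk15, \
#     t1, t2, t3, t4, t5, labels = data_generator_5(
#         'birds-dataset/birds/Train/filenames.pickle',
#         'birds-dataset/birds/Train/class_info.pickle',
#         'birds-dataset/birds/images/', 'birds-dataset/birds/text/',
#         '/home/ubuntu/data/birds-code/fake1/', '/home/ubuntu/data/birds-code/fake2/',
#         (224, 224, 3), 64, 200)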
class data_generator_5_k2:
def __init__(self,trueImg,fake01,fake11,txt1,labels,Image_size,BATCHSIZE,num):
self.index=0
self.batch_size=BATCHSIZE
self.image_size=Image_size
self.classes=num
self.load_data(img=trueImg,fakeImg01=fake01,fakeImg11=fake11,capts=txt1,Labels=labels)
def load_data(self,img,fakeImg01,fakeImg11,capts,Labels):
self.img=img
self.fake01=fakeImg01
self.fake11=fakeImg11
self.capts=capts
self.labels=Labels
self.num_of_data=len(self.img)
def get_mini_batch(self):
while True:
batch_images=[]
batch_fake01=[]
batch_fake11=[]
batch_txt1=[]
batch_labels=[]
for i in range(self.batch_size):
if(self.index==len(self.img)):
self.index=0
batch_images.append(self.img[self.index])
batch_fake01.append(self.fake01[self.index])
batch_fake11.append(self.fake11[self.index])
batch_txt1.append(self.capts[self.index])
batch_labels.append(self.labels[self.index])
self.index=self.index+1  #advance to the next sample (otherwise every batch repeats the same item)
batch_images=np.array(batch_images)
batch_fake01=np.array(batch_fake01)
batch_fake11=np.array(batch_fake11)
batch_labels=np.array(batch_labels)
batch_txt1=np.array(batch_txt1)
yield {'input_1':batch_images,'input_2_0':batch_fake01,'input_3_1':batch_fake11,'input_4_0':batch_txt1},{'dense_4':batch_labels}
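# Illustrative usage (steps/epochs are assumptions): the generator yields dicts keyed by
# layer names, so the trained model must expose inputs/outputs named 'input_1',
# 'input_2_0', 'input_3_1', 'input_4_0' and 'dense_4'.
# gen = data_generator_5_k2(train_images, train_fake01, train_fake11, train_txt1,
#                           train_labels, image_size, BATCHSIZE, num)
# fp_model.fit_generator(gen.get_mini_batch(),
#                        steps_per_epoch=gen.num_of_data // BATCHSIZE, epochs=10)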
class data_generator_5_k1:
def __init__(self,trueImg,fake01,txt1,labels,Image_size,BATCHSIZE,num):
self.index=0
self.batch_size=BATCHSIZE
self.image_size=Image_size
self.classes=num
self.load_data(img=trueImg,fakeImg01=fake01,capts=txt1,Labels=labels)
def load_data(self,img,fakeImg01,capts,Labels):
self.img=img
self.fake01=fakeImg01
self.capts=capts
self.labels=Labels
self.num_of_data=len(self.img)
def get_mini_batch(self):
while True:
batch_images=[]
batch_fake01=[]
batch_txt1=[]
batch_labels=[]
for i in range(self.batch_size):
if(self.index==len(self.img)):
self.index=0
batch_images.append(self.img[self.index])
batch_fake01.append(self.fake01[self.index])
batch_txt1.append(self.capts[self.index])
batch_labels.append(self.labels[self.index])
self.index=self.index+1  #advance to the next sample (otherwise every batch repeats the same item)
batch_images=np.array(batch_images)
batch_fake01=np.array(batch_fake01)
batch_labels=np.array(batch_labels)
batch_txt1=np.array(batch_txt1)
yield {'input_1':batch_images,'input_2_0':batch_fake01,'input_3_1':batch_txt1},{'dense_4':batch_labels}
num=200
#fp_model=Muit_fake1_k2_attent_model(num)
#fp_model=Muit_fake1_k2_attent1_model(num)
fp_model=Muit_fake1_k2_attent1_dot_model(num)
#fp_model=Muit_fake1_k1_attent_model(num)
#fp_model=Our_ResNet50_1(num)
print('read birds test data set')
fake1_direction='/home/ubuntu/data/birds-code/fake1/'
fake2_direction='/home/ubuntu/data/birds-code/fake2/'
imgInfo_file_path='birds-dataset/birds/Test/filenames.pickle'
classInfo_file_path='birds-dataset/birds/Test/class_info.pickle'
image_direction='birds-dataset/birds/images/'
txt_direction='birds-dataset/birds/text/'
width,height=224,224
image_size=(width,height,3)
BATCHSIZE=64
num=200
#val_gen=data_generator_3(imgInfo_file_path,classInfo_file_path,image_direction,fackimage_direction,txt_direction,image_size,BATCHSIZE,num)
val_images,val_fake01,val_fake02,val_fake03,val_fake04,val_fake05,val_fake11,val_fake12,val_fake13,val_fake14,val_fake15,val_txt1,val_txt2,val_txt3,val_txt4,val_txt5,val_labels=data_generator_5(imgInfo_file_path,classInfo_file_path,image_direction,txt_direction,fake1_direction,fake2_direction,image_size,BATCHSIZE,num)
print(len(val_images))
import random
LLL=len(val_images)
randnum = random.randint(0,LLL)
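# Re-seeding with the same value before every shuffle gives each array the same
# permutation, so images, fake images, captions and labels stay aligned by index.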
random.seed(randnum)
random.shuffle(val_images)
random.seed(randnum)
random.shuffle(val_fake01)
random.seed(randnum)
random.shuffle(val_fake02)
random.seed(randnum)
random.shuffle(val_fake03)
random.seed(randnum)
random.shuffle(val_fake04)
random.seed(randnum)
random.shuffle(val_fake05)
random.seed(randnum)
random.shuffle(val_fake11)
random.seed(randnum)
random.shuffle(val_fake12)
random.seed(randnum)
random.shuffle(val_fake13)
random.seed(randnum)
random.shuffle(val_fake14)
random.seed(randnum)
random.shuffle(val_fake15)
random.seed(randnum)
random.shuffle(val_txt1)
random.seed(randnum)
random.shuffle(val_txt2)
random.seed(randnum)
random.shuffle(val_txt3)
random.seed(randnum)
random.shuffle(val_txt4)
random.seed(randnum)
random.shuffle(val_txt5)
random.seed(randnum)
random.shuffle(val_labels)
print('read birds train data set')
fake1_direction='/home/ubuntu/data/birds-code/fake1/'
fake2_direction='/home/ubuntu/data/birds-code/fake2/'
imgInfo_file_path='birds-dataset/birds/Train/filenames.pickle'
classInfo_file_path='birds-dataset/birds/Train/class_info.pickle'
image_direction='birds-dataset/birds/images/'
txt_direction='birds-dataset/birds/text/'
width,height=224,224
image_size=(width,height,3)
BATCHSIZE=64
num=200
#train_gen=data_generator_3(imgInfo_file_path,classInfo_file_path,image_direction,fackimage_direction,txt_direction,image_size,BATCHSIZE,num)
train_images,train_fake01,train_fake02,train_fake03,train_fake04,train_fake05,train_fake11,train_fake12,train_fake13,train_fake14,train_fake15,train_txt1,train_txt2,train_txt3,train_txt4,train_txt5,train_labels=data_generator_5(imgInfo_file_path,classInfo_file_path,image_direction,txt_direction,fake1_direction,fake2_direction,image_size,BATCHSIZE,num)
import random
LLL=len(train_images)
randnum = random.randint(0,LLL)
random.seed(randnum)
random.shuffle(train_images)
random.seed(randnum)
random.shuffle(train_fake01)
random.seed(randnum)
random.shuffle(train_fake02)
random.seed(randnum)
random.shuffle(train_fake03)
random.seed(randnum)
random.shuffle(train_fake04)
random.seed(randnum)
random.shuffle(train_fake05)
random.seed(randnum)
random.shuffle(train_fake11)
random.seed(randnum)
random.shuffle(train_fake12)
random.seed(randnum)
random.shuffle(train_fake13)
random.seed(randnum)
random.shuffle(train_fake14)
random.seed(randnum)
random.shuffle(train_fake15)
random.seed(randnum)
random.shuffle(train_txt1)
random.seed(randnum)
random.shuffle(train_txt2)
random.seed(randnum)
random.shuffle(train_txt3)
random.seed(randnum)
random.shuffle(train_txt4)
random.seed(randnum)
random.shuffle(train_txt5)
random.seed(randnum)
random.shuffle(train_labels)
train_images=np.concatenate((train_images,val_images[1234:]), axis=0)
train_fake01=np.concatenate((train_fake01,val_fake01[1234:]), axis=0)
train_fake02=np.concatenate((train_fake02,val_fake02[1234:]), axis=0)
train_fake03=np.concatenate((train_fake03,val_fake03[1234:]), axis=0)
train_fake04=np.concatenate((train_fake04,val_fake04[1234:]), axis=0)
train_fake05=np.concatenate((train_fake05,val_fake05[1234:]), axis=0)
train_fake11=np.concatenate((train_fake11,val_fake11[1234:]), axis=0)
train_fake12=np.concatenate((train_fake12,val_fake12[1234:]), axis=0)
train_fake13=np.concatenate((train_fake13,val_fake13[1234:]), axis=0)
train_fake14=np.concatenate((train_fake14,val_fake14[1234:]), axis=0)
train_fake15=np.concatenate((train_fake15,val_fake15[1234:]), axis=0)
train_txt1=np.concatenate((train_txt1,val_txt1[1234:]), axis=0)
train_txt2=np.concatenate((train_txt2,val_txt2[1234:]), axis=0)
# -*- coding: utf-8 -*-
import os
import scipy.io as sio
import numpy as np
import time
from sklearn import linear_model
from sklearn import preprocessing
from joblib import Parallel, delayed
import statsmodels.formula.api as sm
def Ridge_KFold_Sort_Permutation(Subjects_Data, Subjects_Score, Times_IDRange, Fold_Quantity, Alpha_Range, ResultantFolder, Parallel_Quantity, Max_Queued, Queue, Permutation_RandIndex_File_List=''):
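# For every permutation index in Times_IDRange this writes a per-permutation folder with
# the shared data matrix, a Configuration.mat and a script.sh that re-invokes
# Ridge_KFold_Sort_Permutation_Sub; the scripts are then submitted with qsub, keeping at
# most Max_Queued jobs queued and topping the queue up as Res_NFold.mat result files appear.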
if not os.path.exists(ResultantFolder):
os.makedirs(ResultantFolder)
Subjects_Data_Mat = {'Subjects_Data': Subjects_Data}
Subjects_Data_Mat_Path = ResultantFolder + '/Subjects_Data.mat'
sio.savemat(Subjects_Data_Mat_Path, Subjects_Data_Mat)
Finish_File = []
Times_IDRange_Todo = np.int64(np.array([]))
for i in np.arange(len(Times_IDRange)):
ResultantFolder_I = ResultantFolder + '/Time_' + str(Times_IDRange[i])
if not os.path.exists(ResultantFolder_I):
os.makedirs(ResultantFolder_I)
if not os.path.exists(ResultantFolder_I + '/Res_NFold.mat'):
Times_IDRange_Todo = np.insert(Times_IDRange_Todo, len(Times_IDRange_Todo), Times_IDRange[i])
if Permutation_RandIndex_File_List != '':
Permutation_RandIndex_File = Permutation_RandIndex_File_List[i]
else:
Permutation_RandIndex_File = '';
Configuration_Mat = {'Subjects_Data_Mat_Path': Subjects_Data_Mat_Path, 'Subjects_Score': Subjects_Score, 'Fold_Quantity': Fold_Quantity, \
'Alpha_Range': Alpha_Range, 'ResultantFolder_I': ResultantFolder_I, 'Parallel_Quantity': Parallel_Quantity, 'Permutation_RandIndex_File': Permutation_RandIndex_File};
sio.savemat(ResultantFolder_I + '/Configuration.mat', Configuration_Mat)
system_cmd = 'python3 -c ' + '\'import sys;\
sys.path.append("/data/jux/BBL/projects/pncControlEnergy/scripts/Replication/8th_PredictAge");\
from Ridge_CZ_Sort import Ridge_KFold_Sort_Permutation_Sub;\
import os;\
import scipy.io as sio;\
configuration = sio.loadmat("' + ResultantFolder_I + '/Configuration.mat");\
Subjects_Data_Mat_Path = configuration["Subjects_Data_Mat_Path"];\
Subjects_Score = configuration["Subjects_Score"];\
Fold_Quantity = configuration["Fold_Quantity"];\
Alpha_Range = configuration["Alpha_Range"];\
ResultantFolder_I = configuration["ResultantFolder_I"];\
Permutation_RandIndex_File = configuration["Permutation_RandIndex_File"];\
Parallel_Quantity = configuration["Parallel_Quantity"];\
Ridge_KFold_Sort_Permutation_Sub(Subjects_Data_Mat_Path[0], Subjects_Score[0], Fold_Quantity[0][0], Alpha_Range[0], ResultantFolder_I[0], Parallel_Quantity[0][0], Permutation_RandIndex_File)\' ';
system_cmd = system_cmd + ' > "' + ResultantFolder_I + '/perm_' + str(Times_IDRange[i]) + '.log" 2>&1\n'
Finish_File.append(ResultantFolder_I + '/Res_NFold.mat')
script = open(ResultantFolder_I + '/script.sh', 'w')
script.write(system_cmd)
script.close()
if len(Times_IDRange_Todo) > Max_Queued:
Submit_First_Quantity = Max_Queued
else:
Submit_First_Quantity = len(Times_IDRange_Todo)
for i in np.arange(Submit_First_Quantity):
ResultantFolder_I = ResultantFolder + '/Time_' + str(Times_IDRange_Todo[i])
Option = ' -V -o "' + ResultantFolder_I + '/perm_' + str(Times_IDRange_Todo[i]) + '.o" -e "' + ResultantFolder_I + '/perm_' + str(Times_IDRange_Todo[i]) + '.e"';
os.system('qsub ' + ResultantFolder_I + '/script.sh' + ' -q ' + Queue + ' -N perm_' + str(Times_IDRange_Todo[i]) + Option)
if len(Times_IDRange_Todo) > Max_Queued:
Finished_Quantity = 0;
while 1:
for i in np.arange(len(Finish_File)):
if os.path.exists(Finish_File[i]):
Finished_Quantity = Finished_Quantity + 1
print(Finish_File[i])
del(Finish_File[i])
print(time.strftime('%Y-%m-%d-%H-%M-%S',time.localtime(time.time())))
print('Finish quantity = ' + str(Finished_Quantity))
time.sleep(8)
ResultantFolder_I = ResultantFolder + '/Time_' + str(Times_IDRange_Todo[Max_Queued + Finished_Quantity - 1])
Option = ' -V -o "' + ResultantFolder_I + '/perm_' + str(Times_IDRange_Todo[Max_Queued + Finished_Quantity - 1]) + '.o" -e "' + ResultantFolder_I + '/perm_' + str(Times_IDRange_Todo[Max_Queued + Finished_Quantity - 1]) + '.e"';
cmd = 'qsub ' + ResultantFolder_I + '/script.sh' + ' -q ' + Queue + ' -N perm_' + str(Times_IDRange_Todo[Max_Queued + Finished_Quantity - 1]) + Option
# print(cmd)
os.system(cmd)
break
if len(Finish_File) == 0:
break
if Max_Queued + Finished_Quantity >= len(Finish_File):
break
def Ridge_KFold_Sort_Permutation_Sub(Subjects_Data_Mat_Path, Subjects_Score, Fold_Quantity, Alpha_Range, ResultantFolder, Parallel_Quantity, Permutation_RandIndex_File=''):
data = sio.loadmat(Subjects_Data_Mat_Path)
Subjects_Data = data['Subjects_Data']
Ridge_KFold_Sort(Subjects_Data, Subjects_Score, Fold_Quantity, Alpha_Range, ResultantFolder, Parallel_Quantity, 1, Permutation_RandIndex_File);
def Ridge_KFold_Sort(Subjects_Data, Subjects_Score, Fold_Quantity, Alpha_Range, ResultantFolder, Parallel_Quantity, Permutation_Flag, Permutation_RandIndex_File=''):
#Parameters = {'Subjects_Data':Subjects_Data, 'Subjects_Score':Subjects_Score, 'Fold_Quantity':Fold_Quantity, 'Alpha_Range':Alpha_Range, 'ResultantFolder':ResultantFolder, 'Parallel_Quantity':Parallel_Quantity, 'Permutation_Flag':Permutation_Flag, 'Permutation_RandIndex_File':Permutation_RandIndex_File}
#Fold_J_FileName = 'Parameter.mat'
#ResultantFile = os.path.join(ResultantFolder, Fold_J_FileName)
#sio.savemat(ResultantFile, Parameters)
if not os.path.exists(ResultantFolder):
os.makedirs(ResultantFolder)
Subjects_Quantity = len(Subjects_Score)
# Sort the subjects score
Sorted_Index = np.argsort(Subjects_Score)
Subjects_Data = Subjects_Data[Sorted_Index, :]
Subjects_Score = Subjects_Score[Sorted_Index]
    EachFold_Size = int(np.fix(np.divide(Subjects_Quantity, Fold_Quantity)))
MaxSize = EachFold_Size * Fold_Quantity
    EachFold_Max = np.ones(Fold_Quantity, dtype=int) * MaxSize
tmp = np.arange(Fold_Quantity - 1, -1, -1)
EachFold_Max = EachFold_Max - tmp;
Remain = np.mod(Subjects_Quantity, Fold_Quantity)
for j in np.arange(Remain):
EachFold_Max[j] = EachFold_Max[j] + Fold_Quantity
Fold_Corr = [];
Fold_MAE = [];
Fold_Weight = [];
Features_Quantity = np.shape(Subjects_Data)[1];
for j in np.arange(Fold_Quantity):
Fold_J_Index = np.arange(j, EachFold_Max[j], Fold_Quantity)
Subjects_Data_test = Subjects_Data[Fold_J_Index, :]
Subjects_Score_test = Subjects_Score[Fold_J_Index]
Subjects_Data_train = np.delete(Subjects_Data, Fold_J_Index, axis=0)
Subjects_Score_train = np.delete(Subjects_Score, Fold_J_Index)
if Permutation_Flag:
            # If permutation testing is enabled, the training scores are permuted while the testing scores are left unchanged
print(Permutation_RandIndex_File);
if len(Permutation_RandIndex_File) == 0:
Subjects_Index_Random = np.arange(len(Subjects_Score_train))
np.random.shuffle(Subjects_Index_Random)
Subjects_Score_train = Subjects_Score_train[Subjects_Index_Random]
else:
#print(Permutation_RandIndex_File);
tmpData = sio.loadmat(Permutation_RandIndex_File[0]);
Subjects_Index_Random = tmpData['Fold_' + str(j)];
Subjects_Score_train = Subjects_Score_train[Subjects_Index_Random[0]]
if j == 0:
RandIndex = {'Fold_0': Subjects_Index_Random}
else:
RandIndex['Fold_' + str(j)] = Subjects_Index_Random
normalize = preprocessing.MinMaxScaler()
Subjects_Data_train = normalize.fit_transform(Subjects_Data_train)
Subjects_Data_test = normalize.transform(Subjects_Data_test)
Optimal_Alpha, Inner_Corr, Inner_MAE_inv = Ridge_OptimalAlpha_KFold(Subjects_Data_train, Subjects_Score_train, Fold_Quantity, Alpha_Range, ResultantFolder, Parallel_Quantity)
clf = linear_model.Ridge(alpha = Optimal_Alpha)
clf.fit(Subjects_Data_train, Subjects_Score_train)
Fold_J_Score = clf.predict(Subjects_Data_test)
Fold_J_Corr = np.corrcoef(Fold_J_Score, Subjects_Score_test)
Fold_J_Corr = Fold_J_Corr[0,1]
Fold_Corr.append(Fold_J_Corr)
Fold_J_MAE = np.mean(np.abs(np.subtract(Fold_J_Score,Subjects_Score_test)))
Fold_MAE.append(Fold_J_MAE)
Fold_J_result = {'Index':Sorted_Index[Fold_J_Index], 'Test_Score':Subjects_Score_test, 'Predict_Score':Fold_J_Score, 'Corr':Fold_J_Corr, 'MAE':Fold_J_MAE, 'alpha':Optimal_Alpha, 'Inner_Corr':Inner_Corr, 'Inner_MAE_inv':Inner_MAE_inv}
Fold_J_FileName = 'Fold_' + str(j) + '_Score.mat'
ResultantFile = os.path.join(ResultantFolder, Fold_J_FileName)
sio.savemat(ResultantFile, Fold_J_result)
Fold_Corr = [0 if np.isnan(x) else x for x in Fold_Corr]
Mean_Corr = np.mean(Fold_Corr)
Mean_MAE = np.mean(Fold_MAE)
Res_NFold = {'Mean_Corr':Mean_Corr, 'Mean_MAE':Mean_MAE};
ResultantFile = os.path.join(ResultantFolder, 'Res_NFold.mat')
sio.savemat(ResultantFile, Res_NFold)
if Permutation_Flag:
sio.savemat(ResultantFolder + '/RandIndex.mat', RandIndex)
return (Mean_Corr, Mean_MAE)
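# Illustrative sketch (not part of the original pipeline): how the sorted,
# interleaved fold assignment above distributes subjects, using the same
# EachFold_Max / step-by-Fold_Quantity indexing as Ridge_KFold_Sort.
def _demo_sorted_interleaved_folds(n_subjects=23, n_folds=5):
    import numpy as np
    scores = np.random.rand(n_subjects)
    sorted_index = np.argsort(scores)
    each_fold_size = int(np.fix(n_subjects / n_folds))
    max_size = each_fold_size * n_folds
    each_fold_max = np.ones(n_folds, dtype=int) * max_size - np.arange(n_folds - 1, -1, -1)
    for j in range(n_subjects % n_folds):
        each_fold_max[j] += n_folds
    folds = [sorted_index[np.arange(j, each_fold_max[j], n_folds)] for j in range(n_folds)]
    # Every subject lands in exactly one fold and fold sizes differ by at most one
    assert sorted(np.concatenate(folds).tolist()) == list(range(n_subjects))
    return folds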
def Ridge_OptimalAlpha_KFold(Training_Data, Training_Score, Fold_Quantity, Alpha_Range, ResultantFolder, Parallel_Quantity):
Subjects_Quantity = len(Training_Score)
Sorted_Index = np.argsort(Training_Score)
Training_Data = Training_Data[Sorted_Index, :]
Training_Score = Training_Score[Sorted_Index]
    Inner_EachFold_Size = int(np.fix(np.divide(Subjects_Quantity, Fold_Quantity)))
MaxSize = Inner_EachFold_Size * Fold_Quantity
    EachFold_Max = np.ones(Fold_Quantity, dtype=int) * MaxSize
tmp = np.arange(Fold_Quantity - 1, -1, -1)
EachFold_Max = EachFold_Max - tmp
Remain = np.mod(Subjects_Quantity, Fold_Quantity)
for j in np.arange(Remain):
EachFold_Max[j] = EachFold_Max[j] + Fold_Quantity
print(Alpha_Range);
Inner_Corr = np.zeros((Fold_Quantity, len(Alpha_Range)))
Inner_MAE_inv = np.zeros((Fold_Quantity, len(Alpha_Range)))
Alpha_Quantity = len(Alpha_Range)
for k in np.arange(Fold_Quantity):
Inner_Fold_K_Index = np.arange(k, EachFold_Max[k], Fold_Quantity)
Inner_Fold_K_Data_test = Training_Data[Inner_Fold_K_Index, :]
Inner_Fold_K_Score_test = Training_Score[Inner_Fold_K_Index]
Inner_Fold_K_Data_train = np.delete(Training_Data, Inner_Fold_K_Index, axis=0)
Inner_Fold_K_Score_train = np.delete(Training_Score, Inner_Fold_K_Index)
Scale = preprocessing.MinMaxScaler()
Inner_Fold_K_Data_train = Scale.fit_transform(Inner_Fold_K_Data_train)
Inner_Fold_K_Data_test = Scale.transform(Inner_Fold_K_Data_test)
Parallel(n_jobs=Parallel_Quantity,backend="threading")(delayed(Ridge_SubAlpha)(Inner_Fold_K_Data_train, Inner_Fold_K_Score_train, Inner_Fold_K_Data_test, Inner_Fold_K_Score_test, Alpha_Range[l], l, ResultantFolder) for l in np.arange(len(Alpha_Range)))
for l in np.arange(Alpha_Quantity):
print(l)
Fold_l_Mat_Path = ResultantFolder + '/Fold_' + str(l) + '.mat';
Fold_l_Mat = sio.loadmat(Fold_l_Mat_Path)
Inner_Corr[k, l] = Fold_l_Mat['Fold_Corr'][0][0]
Inner_MAE_inv[k, l] = Fold_l_Mat['Fold_MAE_inv']
os.remove(Fold_l_Mat_Path)
Inner_Corr = np.nan_to_num(Inner_Corr)
Inner_Corr_Mean = np.mean(Inner_Corr, axis=0)
Inner_Corr_Mean = (Inner_Corr_Mean - np.mean(Inner_Corr_Mean)) / np.std(Inner_Corr_Mean)
Inner_MAE_inv_Mean = np.mean(Inner_MAE_inv, axis=0)
Inner_MAE_inv_Mean = (Inner_MAE_inv_Mean - np.mean(Inner_MAE_inv_Mean)) / np.std(Inner_MAE_inv_Mean)
Inner_Evaluation = Inner_Corr_Mean + Inner_MAE_inv_Mean
Inner_Evaluation_Mat = {'Inner_Corr':Inner_Corr, 'Inner_MAE_inv':Inner_MAE_inv, 'Inner_Evaluation':Inner_Evaluation}
sio.savemat(ResultantFolder + '/Inner_Evaluation.mat', Inner_Evaluation_Mat)
Optimal_Alpha_Index = np.argmax(Inner_Evaluation)
Optimal_Alpha = Alpha_Range[Optimal_Alpha_Index]
return (Optimal_Alpha, Inner_Corr, Inner_MAE_inv)
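# Illustrative sketch (not from the original code): the optimal alpha above is
# chosen by z-scoring the per-alpha mean correlation and mean inverse-MAE
# across the alpha grid, summing them, and taking the argmax. The numbers
# below are made up purely for demonstration.
def _demo_alpha_selection():
    import numpy as np
    alpha_range = np.array([0.1, 1.0, 10.0, 100.0])
    mean_corr = np.array([0.20, 0.35, 0.32, 0.10])      # mean over inner folds
    mean_mae_inv = np.array([0.50, 0.80, 0.90, 0.40])   # mean over inner folds
    corr_z = (mean_corr - np.mean(mean_corr)) / np.std(mean_corr)
    mae_inv_z = (mean_mae_inv - np.mean(mean_mae_inv)) / np.std(mean_mae_inv)
    evaluation = corr_z + mae_inv_z
    return alpha_range[np.argmax(evaluation)]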
def Ridge_SubAlpha(Training_Data, Training_Score, Testing_Data, Testing_Score, Alpha, Alpha_ID, ResultantFolder):
clf = linear_model.Ridge(alpha=Alpha)
clf.fit(Training_Data, Training_Score)
Predict_Score = clf.predict(Testing_Data)
    Fold_Corr = np.corrcoef(Predict_Score, Testing_Score)
    Fold_Corr = Fold_Corr[0, 1]
    Fold_MAE_inv = np.divide(1, np.mean(np.abs(np.subtract(Predict_Score, Testing_Score))))
    # Save the per-alpha result where Ridge_OptimalAlpha_KFold reads it back
    # (Fold_MAE_inv assumed to be 1 / MAE; keys and path reconstructed from the consumer code)
    Fold_result = {'Fold_Corr': Fold_Corr, 'Fold_MAE_inv': Fold_MAE_inv}
    sio.savemat(ResultantFolder + '/Fold_' + str(Alpha_ID) + '.mat', Fold_result)
import numpy as np
def norm(im):
im = im.astype(np.float32)
min_v = np.min(im)
max_v = np.max(im)
im = (im - min_v) / (max_v - min_v)
return im
def read_scan_find_bbox(image, normalize=True, thresh=0.05):
st_x, en_x, st_y, en_y, st_z, en_z = 0, 0, 0, 0, 0, 0
if normalize:
image = norm(image)
for x in range(image.shape[0]):
if np.any(image[x, :, :] > thresh):
st_x = x
break
for x in range(image.shape[0] - 1, -1, -1):
if np.any(image[x, :, :] > thresh):
en_x = x
break
for y in range(image.shape[1]):
if np.any(image[:, y, :] > thresh):
st_y = y
break
for y in range(image.shape[1] - 1, -1, -1):
if np.any(image[:, y, :] > thresh):
en_y = y
break
for z in range(image.shape[2]):
if np.any(image[:, :, z] > thresh):
st_z = z
break
for z in range(image.shape[2] - 1, -1, -1):
if np.any(image[:, :, z] > thresh):
en_z = z
break
image = image[st_x:en_x, st_y:en_y, st_z:en_z]
    nbbox = np.array([st_x, en_x, st_y, en_y, st_z, en_z])
    return image, nbbox
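# Minimal usage sketch with synthetic data (not from the original project):
# build a dark volume containing one bright block and check that the crop and
# bounding box returned by read_scan_find_bbox (as reconstructed above) match.
def _demo_read_scan_find_bbox():
    vol = np.zeros((64, 64, 64), dtype=np.float32)
    vol[10:30, 20:40, 5:15] = 100.0
    cropped, bbox = read_scan_find_bbox(vol)
    st_x, en_x, st_y, en_y, st_z, en_z = bbox
    assert cropped.shape == (en_x - st_x, en_y - st_y, en_z - st_z)
    return bbox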
import numpy as np
import xarray as xr
import gcpy
# Must have:
# 1. extract_grid (returns an xarray Dataset)
# 2. grid_area (returns a 6xNxN array)
# 3. gen_grid (returns an xarray Dataset)
def extract_grid(ds,src_var='Xdim'):
# Extract grid from xarray dataset but return a cubed-sphere grid
n_cs = ds[src_var].shape[-1]
return gen_grid(n_cs)
def face_area(lon_b, lat_b, r_sphere = 6.375e6):
"""Calculate area of cubed-sphere grid cells on one face
Inputs must be in degrees. Edge arrays must be
shaped [N+1 x N+1]
"""
# Convert inputs to radians
lon_b_rad = lon_b * np.pi / 180.0
lat_b_rad = lat_b * np.pi / 180.0
r_sq = r_sphere * r_sphere
n_cs = lon_b.shape[1] - 1
# Allocate output array
cs_area = np.zeros((n_cs,n_cs))
# Ordering
valid_combo = np.array([[1,2,4],[2,3,1],[3,2,4],[4,1,3]]) - 1
for i_lon in range(n_cs):
for i_lat in range(n_cs):
lon_corner = np.zeros(4)
lat_corner = np.zeros(4)
xyz_corner = np.zeros((4,3))
for i_vert in range(4):
x_lon = i_lon + (i_vert > 1)
x_lat = i_lat + (i_vert == 0 or i_vert == 3)
lon_corner[i_vert] = lon_b_rad[x_lon,x_lat]
lat_corner[i_vert] = lat_b_rad[x_lon,x_lat]
for i_vert in range(4):
xyz_corner[i_vert,:] = ll2xyz(lon_corner[i_vert],lat_corner[i_vert])
tot_ang = 0.0
for i_corner in range(4):
curr_combo = valid_combo[i_corner,:]
xyz_mini = np.zeros((3,3))
for i_mini in range(3):
xyz_mini[i_mini,:] = xyz_corner[curr_combo[i_mini],:]
curr_ang = sphere_angle(xyz_mini[0,:],xyz_mini[1,:],xyz_mini[2,:])
tot_ang += curr_ang
cs_area[i_lon,i_lat] = r_sq * (tot_ang - (2.0*np.pi))
return cs_area
def ll2xyz(lon_pt,lat_pt):
"""Converts a lon/lat pair (in radians) to cartesian co-ordinates
Vector should point to the surface of the unit sphere"""
xPt = np.cos(lat_pt) * np.cos(lon_pt)
yPt = np.cos(lat_pt) * np.sin(lon_pt)
zPt = np.sin(lat_pt)
return [xPt,yPt,zPt]
def sphere_angle(e1,e2,e3):
# e1: Mid-point
# e2 and e3 to either side
pVec = np.ones(3)
qVec = np.ones(3)
pVec[0] = e1[1]*e2[2] - e1[2]*e2[1]
pVec[1] = e1[2]*e2[0] - e1[0]*e2[2]
pVec[2] = e1[0]*e2[1] - e1[1]*e2[0]
qVec[0] = e1[1]*e3[2] - e1[2]*e3[1]
qVec[1] = e1[2]*e3[0] - e1[0]*e3[2]
qVec[2] = e1[0]*e3[1] - e1[1]*e3[0]
ddd = np.sum(pVec*pVec) * np.sum(qVec*qVec)
if ddd <= 0.0:
angle = 0.0;
else:
ddd = np.sum(pVec*qVec)/np.sqrt(ddd);
if (np.abs(ddd)>1.0):
angle = np.pi/2.0;
else:
angle = np.arccos(ddd);
return angle
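# Quick sanity-check sketch (illustrative, not part of the original module):
# ll2xyz maps (lon=0, lat=0) onto the x-axis, and the spherical angle at that
# vertex between the y-axis and z-axis corners is 90 degrees.
def _demo_sphere_angle():
    e1 = np.array(ll2xyz(0.0, 0.0))         # -> [1, 0, 0]
    e2 = np.array(ll2xyz(np.pi / 2, 0.0))   # -> [0, 1, 0]
    e3 = np.array([0.0, 0.0, 1.0])          # north pole
    assert np.allclose(e1, [1.0, 0.0, 0.0])
    assert np.isclose(sphere_angle(e1, e2, e3), np.pi / 2)
    return sphere_angle(e1, e2, e3)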
def grid_area(cs_grid=None,cs_res=None):
"""Return area in m2 for each cell in a cubed-sphere grid
Uses GMAO indexing convention (6xNxN)
"""
# Calculate area on a cubed sphere
if cs_res is None:
cs_res = cs_grid['lon_b'].shape[-1] - 1
elif cs_grid is None:
cs_grid = gcpy.csgrid_GMAO(cs_res)
elif cs_grid is not None and cs_res is not None:
        assert cs_res == cs_grid['lon_b'].shape[-1] - 1, 'Routine grid_area received inconsistent inputs'
cs_area = np.zeros((6,cs_res,cs_res))
cs_area[0,:,:] = face_area(cs_grid['lon_b'][0,:,:],cs_grid['lat_b'][0,:,:])
for i_face in range(1,6):
cs_area[i_face,:,:] = cs_area[0,:,:].copy()
return cs_area
def gen_grid(n_cs, stretch_factor=None, target_lon=None, target_lat=None):
if stretch_factor is not None:
cs_temp, ignore = gcpy.make_grid_SG(n_cs,stretch_factor,target_lon,target_lat)
else:
cs_temp = gcpy.csgrid_GMAO(n_cs)
return xr.Dataset({'nf': (['nf'],np.array(range(6))),
'Ydim': (['Ydim'],np.array(range(n_cs))),
'Xdim': (['Xdim'],np.array(range(n_cs))),
'Ydim_b': (['Ydim_b'],np.array(range(n_cs+1))),
'Xdim_b': (['Xdim_b'],np.array(range(n_cs+1))),
'lat': (['nf','Ydim','Xdim'], cs_temp['lat']),
'lon': (['nf','Ydim','Xdim'], cs_temp['lon']),
'lat_b': (['nf','Ydim_b','Xdim_b'], cs_temp['lat_b']),
'lon_b': (['nf','Ydim_b','Xdim_b'], cs_temp['lon_b']),
'area': (['nf','Ydim','Xdim'], grid_area(cs_temp))})
def corners_to_xy(xc, yc):
""" Creates xy coordinates for each grid-box. The shape is (n, n, 5) where n is the cubed-sphere size.
Developed, tested, and supplied by <NAME>.
:param xc: grid-box corner longitudes; shape (n+1, n+1)
:param yc: grid-box corner latitudes; shape (n+1, n+1)
:return: grid-box xy coordinates
"""
p0 = slice(0, -1)
p1 = slice(1, None)
boxes_x = np.moveaxis(np.array([xc[p0, p0], xc[p1, p0], xc[p1, p1], xc[p0, p1], xc[p0, p0]]), 0, -1)
boxes_y = np.moveaxis(np.array([yc[p0, p0], yc[p1, p0], yc[p1, p1], yc[p0, p1], yc[p0, p0]]), 0, -1)
return np.moveaxis(np.array([boxes_x, boxes_y]), 0, -1)
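# Illustrative sketch: a single grid box with corners (0,0), (1,0), (1,1),
# (0,1) yields an array of shape (1, 1, 5, 2) whose ring closes back on the
# first corner.
def _demo_corners_to_xy():
    xc = np.array([[0.0, 0.0], [1.0, 1.0]])
    yc = np.array([[0.0, 1.0], [0.0, 1.0]])
    xy = corners_to_xy(xc, yc)
    assert xy.shape == (1, 1, 5, 2)
    assert np.array_equal(xy[0, 0, 0], xy[0, 0, -1])  # polygon ring is closed
    return xy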
def central_angle(x0, y0, x1, y1):
""" Returns the distance (central angle) between coordinates (x0, y0) and (x1, y1). This is vectorizable.
Developed, tested, and supplied by <NAME>.
:param x0: pt0's longitude (degrees)
:param y0: pt0's latitude (degrees)
:param x1: pt1's longitude (degrees)
:param y1: pt1's latitude (degrees)
:return: Distance (degrees)
"""
RAD2DEG = 180 / np.pi
DEG2RAD = np.pi / 180
x0 = x0 * DEG2RAD
x1 = x1 * DEG2RAD
y0 = y0 * DEG2RAD
y1 = y1 * DEG2RAD
return np.arccos(np.sin(y0) * np.sin(y1) + np.cos(y0) * np.cos(y1) * np.cos(np.abs(x0-x1))) * RAD2DEG
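# Illustrative check: two points 90 degrees apart, either along the equator or
# from the equator up to the pole, both give a central angle of 90 degrees.
def _demo_central_angle():
    assert np.isclose(central_angle(0.0, 0.0, 90.0, 0.0), 90.0)
    assert np.isclose(central_angle(0.0, 0.0, 0.0, 90.0), 90.0)
    return central_angle(0.0, 0.0, 90.0, 0.0)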
def find_index(lat,lon,grid):
# For point-finding
import pyproj
import shapely.ops
import shapely.geometry
# Based on a routine developed, tested, and supplied by <NAME>.
lon_vec = np.asarray(lon)
lat_vec = np.asarray(lat)
n_find = lon_vec.size
# Get the corners
x_corners = grid['lon_b'].values
y_corners = grid['lat_b'].values
x_centers = grid['lon'].values
y_centers = grid['lat'].values
x_centers_flat = x_centers.flatten()
y_centers_flat = y_centers.flatten()
cs_size = x_centers.shape[-1]
# Generate everything that will be reused
# Get XY polygon definitions for grid boxes
xy = np.zeros((6, cs_size, cs_size, 5, 2)) # 5 (x,y) points defining polygon corners (first and last are same)
for nf in range(6):
xy[nf, ...] = corners_to_xy(xc=x_corners[nf, :, :], yc=y_corners[nf, :, :])
latlon_crs = pyproj.Proj("+proj=latlon")
# Find 4 shortest distances to (x_find, y_find)
    idx = np.full((3, n_find), 0)
    for x_find, y_find, i_find in zip(np.nditer(lon_vec), np.nditer(lat_vec), np.arange(n_find)):
# coding: utf-8
# This is the combination of several scripts that download data from the
# NASA NSRDB database and process it.
# The purpose of this code is to run experiments around adaptive multitenancy
# driven ENO WSN for an EWSN submission.
import pandas as pd
import time
import math
import simplejson
from NREL import *
import numpy as np
import random
import matplotlib.pyplot as plt
#import matlab.engine
import os
import sys
sys.path.append('energy_prediction')
sys.path.append('eno')
from orchestrator import Orchestrator
from wcewma import WCEWMA
from eno_static import StaticENO
from eno_orchestrator import OrchestratorENO
from eno_less import LESSENO
from eno_kansal import KansalENO
""" Global store of the performance for later graphing """
output_jsons = [] # output file
refSolarPowerVector = [[]]
wcewma_pred_vector = []
# --------------------------------------------------------------------------- #
""" For loading in lighting data for energy harvesting calculation. """
def dfLoad(test):
df = pd.read_csv('datasets/env_data/{}_solarcalc_raw.csv'.format(test),
low_memory=False, index_col=0) # change file name to loop
return df
# --------------------------------------------------------------------------- #
""" This sanitizes the input data, there's some strange temperature artifacts
this removes """
def getTemplist(df):
temperature_list, result = df["Temperature"].tolist(), []
for item in temperature_list:
if (type(item) == str) and item.endswith('.1'):
item = item[:-2]
result.append(float(item))
else:
result.append(float(item))
return result
# --------------------------------------------------------------------------- #
# This function calls the orchestrator to find system requirements and adds
# them to the dataframe. For now it's a placeholder for the length of the file;
# it'll change to be dynamic.
def sysRequirements(df, test, orchest):
""" This takes the config file dt and multiplies it to be the length of
the dataframe in multiples of a day """
# print("Shape =>", df.shape[0]/ len(orchest))
# N = df.shape[0] / len(orchest)
# df['Orchastration Requirements'] = list(map(lambda x: x*N, orchest))
df['Orchastration Requirements'] = orchest * int(df.shape[0] / len(orchest))
return df
# --------------------------------------------------------------------------- #
# This function works out the energy generation of the target test
def panelEnergyGen(df, test,solar_panel_active_area_m2,initial_battery_capacity_mah):
DHI_list = df["DHI"].tolist()
DNI_list = df["DNI"].tolist()
Zenith_list = df["Solar Zenith Angle"].tolist()
E_list = [a * (math.cos(math.radians(c))) + b for a, b,
c in zip(DNI_list, DHI_list, Zenith_list)]
#if debug:
# print(" => E coefficient has been calculated ")
# reflection coefficients
rd = (1 + (math.cos(math.radians(sensor_angle_H))) / 2)
rr = (1 - (math.cos(math.radians(sensor_angle_H))) / 2)
# Where variable x is DNI*cos(theta)
x_list = [a * (math.cos(math.radians(sensor_angle_V - b)))
for a, b in zip(DNI_list, Zenith_list)]
# energy in w/m2 impinging on the surface of a solar cell
ES_list = [(a + (b * rd) + (foreground_albedo * c * rr))
for a, b, c in zip(x_list, DHI_list, E_list)]
#if debug:
# print(" => Energy hitting the cell has been calculated")
# including temperature as a function of solar cell efficiency in calculations
temperature_list = getTemplist(df)
tc_list = [(a + (((Noct - 20) / 800) * b))
for a, b in zip(temperature_list, ES_list)]
efficiency_pvg = [((solar_panel_efficiency * power_conditioning_efficiency)
* (1 - temperature_coefficient * (a - ref_cell_temp))) for a in tc_list]
#if debug:
# print(" => Efficiency of the solar cell over time has been calculated")
# conversion from w/m2 to energy generated by solar panel in mA
EG_list = [(abs(a * (solar_panel_active_area_m2 * b * 1000.00)) / (solar_panel_voltage))
for a, b in zip(ES_list, efficiency_pvg)] # change ABS here, can't be right
df['Energy Solar Gen'] = EG_list
return df
# function to take in environmental variables and return current consumption
# --------------------------------------------------------------------------- #
def NRELtoWindPower(df):
energy_type = 3
#if debug:
# print(" => Working out wind parameters")
pressure_list = df["Pressure"].tolist()
# Making an assumption that as the system is light it will have a fan to point in the direction of the wind
wind_speed_list = df["Wind Speed"].tolist()
temperature_list = getTemplist(df)
# 100 here convers millibar to pascal and 273.15 is c to kelvin conversion
air_density_list = [((a * 100) / (R_spec * (b + 273.15)))
for a, b in zip(pressure_list, temperature_list)]
power_e_list = [] # extractable energy by my wind turbine within usable conditions
for a, b in zip(wind_speed_list, air_density_list):
if Ve < a and a < Vo:
temp_power = ((0.5 * b * Area_wind * math.pow(a, 3)
* cp) / wind_turbine_voltage)
power_e_list.append(temp_power)
else:
power_e_list.append(0.0)
df['Energy Wind Gen'] = power_e_list
#if debug:
# print(" => Finished working out wind parameters")
return df
# sometimes returning negative values, which probably isn't right - need to check DC DC rectifies neg voltages
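# --------------------------------------------------------------------------- #
# Back-of-the-envelope sketch of the extractable wind power formula used above,
# P = 0.5 * rho * A * v^3 * cp, converted to a current by dividing by the
# turbine voltage. Every constant below is an illustrative stand-in, not a
# value defined in NREL.py.
def _demo_wind_current_ma(v_wind=6.0, rho=1.2, area_m2=0.05, cp_coeff=0.3, v_turbine=5.0):
    power_w = 0.5 * rho * area_m2 * math.pow(v_wind, 3) * cp_coeff  # watts
    current_a = power_w / v_turbine                                 # amps
    return current_a * 1000.0                                       # milliamps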
# --------------------------------------------------------------------------- #
def NRELtoTEGPower(df):
energy_type = 2
#if debug:
# print(" => Working out TEG parameters")
temperature_list = getTemplist(df)
vteg_list = []
for key in temperature_list:
temp = abs(key - T_ambient)
if temp > 5:
# Check to confirm that the temperature in reverse generates current with neg voltage also and DC/DC can handle that
vteg_list.append(
abs(((N_TEG * seedback_coeff) * (key - T_ambient) / 4)))
else:
vteg_list.append(0)
Iout_list = [(gm * (a - vmin)) for a in vteg_list]
# pout_list = [(ef*a*b) for a,b in zip(Iout_list,vteg_list)] # Original equation here says that Vout, make sure VTEG is Vout in this context
df['Energy TEG Gen'] = Iout_list
# PlotNREL(df,location,energy_type)
#if debug:
# print(" => Finished with TEG calculations")
return df
# --------------------------------------------------------------------------- #
""" This function creates a prediction of current energy generated. This is a
placeholder """
# def createPrediction(df):
# pred, predoutput, GHI_list, length = [0 for j in range(48)], [], df["GHI"].tolist(
# ), 1000 # This is updated at end of time window (next time window)
# for x in range(0, length):
# for a, b in zip(GHI_list, pred):
# predoutput.append((b + a) * 0.5)
# pred, predoutput = predoutput, []
# df['Prediction'] = pred
# return df
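# --------------------------------------------------------------------------- #
# A minimal EWMA-style predictor sketch in the spirit of the commented-out
# placeholder above (illustrative only; the real pipeline uses the WCEWMA class
# imported at the top of this file). A smoothing factor of 0.5 mirrors the
# placeholder's (b + a) * 0.5 update.
def _demo_ewma_prediction(ghi_series, alpha=0.5):
    prediction, estimate = [], 0.0
    for observation in ghi_series:
        estimate = alpha * observation + (1.0 - alpha) * estimate
        prediction.append(estimate)
    return prediction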
# --------------------------------------------------------------------------- #
# This function calculates the performance of a test
def calcPerf(df, test, name,eh,es):
# Calculate metrics for how well system performs
quarter_year_data = len(df['Sense Frequency'].tolist()) // 4
sens_freq_list = df['Sense Frequency'].tolist()
sens_freq_by_quarter_year = [sens_freq_list[i:i+quarter_year_data]
for i in range(0, len(sens_freq_list), quarter_year_data)]
batterylevelflag_list = df['Battery Level Flag'].tolist()
batterylevelflag_by_quarter_year = [batterylevelflag_list[i:i+quarter_year_data]
for i in range(0, len(batterylevelflag_list), quarter_year_data)]
orchastPlace_list = df['Orchastration Requirements'].tolist()
orchastPlace_by_quarter_year = [orchastPlace_list[i:i+quarter_year_data]
for i in range(0, len(orchastPlace_list), quarter_year_data)]
# print("orchastPlace_list size =>", len(orchastPlace_list))
# print("sens_freq_list =>", len(sens_freq_list))
orchestrator_fullfilment_pera = []
for i in range(0,4):
average = round(sum(sens_freq_by_quarter_year[i]) / len(sens_freq_by_quarter_year[i]), 2) # average sensing rate for ENO
dead_metric = batterylevelflag_by_quarter_year[i].count(0)
#print dead_metric
#print len(batterylevelflag_by_quarter_year[i])
dead_metric_per = (dead_metric*100 / len(batterylevelflag_by_quarter_year[i]))#*100)
#print ("--------------------------")
#print ("for method ",name),
#print ('season ',i),
#print ('eh',eh),
#print ('es',es),
#print ('%dead',dead_metric_per)
#print ('--------------------------')
waste_metric = batterylevelflag_by_quarter_year[i].count(2)
waste_metric_per = (waste_metric / len(batterylevelflag_by_quarter_year[i])*100)
varience = np.var(sens_freq_by_quarter_year[i])
orchestrator_fullfilment = []
for sense_freq, orch_reqs in zip(sens_freq_by_quarter_year[i],
orchastPlace_by_quarter_year[i]):
if(sense_freq < orch_reqs):
orchest_met_per = (sense_freq / orch_reqs) * 100
else:
orchest_met_per = 100.0
orchestrator_fullfilment.append(orchest_met_per)
# if name == 'static':
# print("sense_freq =>", sense_freq, "orch_reqs =>", orch_reqs, "orchest_met_per =>", orchest_met_per);
orchestrator_fullfilment_per = (round(sum(orchestrator_fullfilment) / len(orchestrator_fullfilment), 2))
if i == 0:
print ("for method ",name),
print ('sol panel',eh * 10000),
print ('bat size',es),
orchestrator_fullfilment_pera.append(orchestrator_fullfilment_per)
        # Time the orchestrator requirements were met - need to think about how this is represented (especially over-provisioning)
orchas = []
for a, b in zip(sens_freq_by_quarter_year[i], orchastPlace_by_quarter_year[i]):
orchas.append(a - b)
season = {0: 'jan-march', 1: 'april-jun', 2:'jul-sep', 3:'oct-dec'}
if storage:
output_jsons.append({'source': test, 'test': name, 'season': season[i], 'Dt_average': average, 'variance': varience, 'perTimeDead': dead_metric_per,
'perTimeWasted': waste_metric_per, 'orchFullfilment': orchestrator_fullfilment_per, 'orchas': orchastPlace_list, 'sense_freq': sens_freq_list, 'orchas_diff': orchas})
print ('%fullfilment',(sum(orchestrator_fullfilment_pera)/4))
# Performance here is described as the number of transmissions, time alive, time dead, variance, wasted energy.
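# --------------------------------------------------------------------------- #
# Illustrative sketch of the orchestrator-fulfilment metric computed in
# calcPerf: a slot scores 100% when the achieved duty cycle meets the request,
# and sense_freq / requirement * 100 otherwise. The inputs below are made up.
def _demo_orchestrator_fulfilment(sense_freq=(2, 4, 6, 8), requirements=(4, 4, 4, 4)):
    per_slot = [100.0 if s >= r else (s / r) * 100.0 for s, r in zip(sense_freq, requirements)]
    return round(sum(per_slot) / len(per_slot), 2)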
# --------------------------------------------------------------------------- #
def dumpData(test):
if output_jsons:
epoch_time = int(time.time())
resultFile = open(
"datasets/results/{}_{}_solartracking_results.json".format(epoch_time, test), 'w+')
simplejson.dump(output_jsons, resultFile)
resultFile.close()
# --------------------------------------------------------------------------- #
def graphData(df):
tests = ['orchas', 'static', 'LESS', 'eno']
static_graph, eno_graph, less_graph, orchas_graph, graph = [], [], [], [], []
# graph.append(min_tx_freq)
for name in tests:
for key in output_jsons:
if key['test'] in "orchas":
orchas_graph.append(key['sense_freq'])
# orchas_graph.append(key['orchas'])
if key['test'] in "eno":
eno_graph.append(key['sense_freq'])
if key['test'] in "LESS":
less_graph.append(key['sense_freq'])
if key['test'] in "static":
static_graph.append(key['sense_freq'])
graph.append(key['orchas'])
# print ('\n================================================'
# '=================================================')
# index=df.index.get_values()
# plt.plot(orchas_graph[0], c='blue', linewidth=1.5, label='Orchestrator')
#plt.plot(static_graph[0], c='green', linewidth=1.5, label='Static')
#plt.plot(eno_graph[0], c='red', linewidth=1.5, label='ENO')
less_graph[0].pop(0)
less_graph.append(2)
plt.plot(less_graph[0], c='orange', linewidth=1.5, label='LESS')
plt.plot(graph[0], '--', linewidth=1.0, c='violet', label='Target')
# plt.plot() plot the orchestration requirement as dotted line TD
legend = plt.legend(loc='upper right', shadow=True)
plt.xlabel('Time Slot, t', {'color': 'black',
'fontsize': 22})
plt.ylabel('Duty Cycle, D_t', {'color': 'black',
'fontsize': 22})
plt.grid(True, which='both')
plt.minorticks_on
plt.ylim(ymax=35, ymin=0)
plt.xlim(xmax=350, xmin=0)
plt.show()
# Add labelling automatically
# Change show graph to save graph
# --------------------------------------------------------------------------- #
# Adding function to take care of summing energy sources
def energyGenTotal(df, energy_source):
# if debug:
# print(" => Calculating Total Energy Production")
solar_list = df["Energy Solar Gen"].tolist()
wind_list = df["Energy Wind Gen"].tolist()
teg_list = df["Energy TEG Gen"].tolist()
currentgen_list = []
for a, b, c in zip(solar_list, wind_list, teg_list):
x = 0
if "s" in energy_source:
x += (a * (random.uniform(solar_prod_var[0], solar_prod_var[1])))
if "w" in energy_source:
x += (b * (random.uniform(wind_prod_var[0], wind_prod_var[1])))
if "t" in energy_source:
x += (c * (random.uniform(teg_prod_var[0], teg_prod_var[1])))
currentgen_list.append(x)
df['Energy Generation Total'] = currentgen_list
#if debug:
# print(" => Energy level calculated and added to dataframe")
return df
# --------------------------------------------------------------------------- #
def graphEg(df):
solar_list = df["Energy Solar Gen"].tolist()
wind_list = df["Energy Wind Gen"].tolist()
teg_list = df["Energy TEG Gen"].tolist()
plt.plot(solar_list, c='blue', linewidth=1.5, label='Solar')
plt.plot(wind_list, c='green', linewidth=1.5, label='Wind')
plt.plot(teg_list, c='red', linewidth=1.5, label='TEG')
# legend = plt.legend(loc='upper right', shadow=True)
plt.xlabel('Time Slot, t', fontsize='x-large')
plt.ylabel('Energy Generated (mAh)', fontsize='x-large')
plt.grid(True, which='both')
plt.minorticks_on
plt.ylim(ymax=33, ymin=0)
plt.xlim(xmax=350, xmin=0)
plt.show()
# --------------------------------------------------------------------------- #
def plotSolarEgen(df, wvList, wcewma_pred_vector):
# print(wcewma_pred_vector)
solar_list = df["Energy Solar Gen"].tolist()
pre_list = []
for i in range(len(refSolarPowerVector)):
for j in range(len(refSolarPowerVector[i])):
pre_list.append(refSolarPowerVector[i][j])
plt.figure(1)
# plt.subplot(211)
plt.plot(pre_list, c='red', linewidth=1.5, label='Pre')
plt.plot(solar_list, c='blue', linewidth=1.5, label='Real Solar Data')
plt.plot(wcewma_pred_vector, c='green', linewidth=1.5, label='WC-EWMA')
plt.xlabel('Time Slot, t', fontsize='x-large')
plt.ylabel('Energy Generated (mAh)', fontsize='x-large')
# plt.grid(True, which='both')
plt.minorticks_on
plt.ylim(ymax=70, ymin=0)
# plt.xlim(xmax=350, xmin=0)
plt.xlim(xmax=366, xmin=0)
# plt.subplot(212)
# x = np.arange(7)
# plt.bar(x, wvList, width=0.4)
# plt.xlabel('Day, t', fontsize='x-large')
# plt.ylabel('Weather volatility', fontsize='x-large')
# plt.gca().yaxis.grid(True)
# plt.minorticks_on
# plt.ylim(ymax=8, ymin=0)
# plt.xlim(xmax=6.5, xmin=-0.5)
plt.show()
# --------------------------------------------------------------------------- #
def plotWeatherVolatility(wvList):
    x = np.arange(7)
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 24 13:04:06 2018
@author: <NAME>
"""
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
from scipy.optimize import minimize
import scipy as sp
from sklearn.metrics import mean_squared_error
""" GCRF CLASS """
class GCRF:
def __init__(self):
pass
def muKov(alfa,R,Precison,Noinst,NodeNo):
        mu = np.zeros([Noinst, NodeNo])
import burer_monteiro as bm
import numpy as np
import spectral_gap_analysis as analysis
import aux
import noise_generator as gen
import sdp
if __name__ == '__main__':
n = 10
k = 2
level = 5
noise_type = 'uniform'
while True:
        z = np.ones(n)
import os
import tempfile
import numpy as np
import scipy.ndimage.measurements as meas
from functools import reduce
import warnings
import sys
sys.path.append(os.path.abspath(r'../lib'))
import NumCppPy as NumCpp # noqa E402
####################################################################################
def factors(n):
return set(reduce(list.__add__,
([i, n//i] for i in range(1, int(n**0.5) + 1) if n % i == 0)))
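####################################################################################
def test_factors():
    # Added sanity check for the helper above (illustrative, not part of the
    # original NumCpp test suite)
    assert factors(12) == {1, 2, 3, 4, 6, 12}
    assert factors(7) == {1, 7}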
####################################################################################
def test_seed():
np.random.seed(1)
####################################################################################
def test_abs():
randValue = np.random.randint(-100, -1, [1, ]).astype(np.double).item()
assert NumCpp.absScaler(randValue) == np.abs(randValue)
components = np.random.randint(-100, -1, [2, ]).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.absScaler(value), 9) == np.round(np.abs(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.absArray(cArray), np.abs(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols]) + \
1j * np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.absArray(cArray), 9), np.round(np.abs(data), 9))
####################################################################################
def test_add():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
####################################################################################
def test_alen():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.alen(cArray) == shape.rows
####################################################################################
def test_all():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.all(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.all(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.all(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.all(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.all(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.all(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.all(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.all(data, axis=1))
####################################################################################
def test_allclose():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
cArray3 = NumCpp.NdArray(shape)
tolerance = 1e-5
data1 = np.random.randn(shape.rows, shape.cols)
data2 = data1 + tolerance / 10
data3 = data1 + 1
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
assert NumCpp.allclose(cArray1, cArray2, tolerance) and not NumCpp.allclose(cArray1, cArray3, tolerance)
####################################################################################
def test_amax():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.amax(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.amax(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.ROW).flatten(), np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.ROW).flatten(), np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.COL).flatten(), np.max(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.COL).flatten(), np.max(data, axis=1))
####################################################################################
def test_amin():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.amin(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.amin(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.ROW).flatten(), np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.ROW).flatten(), np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.COL).flatten(), np.min(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.COL).flatten(), np.min(data, axis=1))
####################################################################################
def test_angle():
components = np.random.randint(-100, -1, [2, ]).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.angleScaler(value), 9) == np.round(np.angle(value), 9) # noqa
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols]) + \
1j * np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.angleArray(cArray), 9), np.round(np.angle(data), 9))
####################################################################################
def test_any():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.any(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.any(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.any(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.any(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.any(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.any(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.any(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.any(data, axis=1))
####################################################################################
def test_append():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols])
data2 = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.append(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
numRows = np.random.randint(1, 100, [1, ]).item()
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item() + numRows, shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(0, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(0, 100, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray(),
np.append(data1, data2, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
NumCppols = np.random.randint(1, 100, [1, ]).item()
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + NumCppols)
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(0, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(0, 100, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray(),
np.append(data1, data2, axis=1))
####################################################################################
def test_arange():
start = np.random.randn(1).item()
stop = np.random.randn(1).item() * 100
step = np.abs(np.random.randn(1).item())
if stop < start:
step *= -1
data = np.arange(start, stop, step)
assert np.array_equal(np.round(NumCpp.arange(start, stop, step).flatten(), 9), np.round(data, 9))
####################################################################################
def test_arccos():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arccosScaler(value), 9) == np.round(np.arccos(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arccosScaler(value), 9) == np.round(np.arccos(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccosArray(cArray), 9), np.round(np.arccos(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccosArray(cArray), 9), np.round(np.arccos(data), 9))
####################################################################################
def test_arccosh():
value = np.abs(np.random.rand(1).item()) + 1
assert np.round(NumCpp.arccoshScaler(value), 9) == np.round(np.arccosh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arccoshScaler(value), 9) == np.round(np.arccosh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) + 1
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccoshArray(cArray), 9), np.round(np.arccosh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccoshArray(cArray), 9), np.round(np.arccosh(data), 9))
####################################################################################
def test_arcsin():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arcsinScaler(value), 9) == np.round(np.arcsin(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arcsinScaler(value), 9) == np.round(np.arcsin(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arcsinArray(cArray), 9), np.round(np.arcsin(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
np.array_equal(np.round(NumCpp.arcsinArray(cArray), 9), np.round(np.arcsin(data), 9))
####################################################################################
def test_arcsinh():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arcsinhScaler(value), 9) == np.round(np.arcsinh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arcsinhScaler(value), 9) == np.round(np.arcsinh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arcsinhArray(cArray), 9), np.round(np.arcsinh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
np.array_equal(np.round(NumCpp.arcsinhArray(cArray), 9), np.round(np.arcsinh(data), 9))
####################################################################################
def test_arctan():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arctanScaler(value), 9) == np.round(np.arctan(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arctanScaler(value), 9) == np.round(np.arctan(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arctanArray(cArray), 9), np.round(np.arctan(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
np.array_equal(np.round(NumCpp.arctanArray(cArray), 9), np.round(np.arctan(data), 9))
####################################################################################
def test_arctan2():
xy = np.random.rand(2) * 2 - 1
assert np.round(NumCpp.arctan2Scaler(xy[1], xy[0]), 9) == np.round(np.arctan2(xy[1], xy[0]), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArrayX = NumCpp.NdArray(shape)
cArrayY = NumCpp.NdArray(shape)
xy = np.random.rand(*shapeInput, 2) * 2 - 1
xData = xy[:, :, 0].reshape(shapeInput)
yData = xy[:, :, 1].reshape(shapeInput)
cArrayX.setArray(xData)
cArrayY.setArray(yData)
assert np.array_equal(np.round(NumCpp.arctan2Array(cArrayY, cArrayX), 9), np.round(np.arctan2(yData, xData), 9))
####################################################################################
def test_arctanh():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arctanhScaler(value), 9) == np.round(np.arctanh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arctanhScaler(value), 9) == np.round(np.arctanh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arctanhArray(cArray), 9), np.round(np.arctanh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
np.array_equal(np.round(NumCpp.arctanhArray(cArray), 9), np.round(np.arctanh(data), 9))
####################################################################################
def test_argmax():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.NONE).item(), np.argmax(data))
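# numpy orders complex values by real part first, then imaginary part; the complex argmax cases below rely on NumCpp matching that ordering.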
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.NONE).item(), np.argmax(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.ROW).flatten(), np.argmax(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.ROW).flatten(), np.argmax(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.COL).flatten(), np.argmax(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.COL).flatten(), np.argmax(data, axis=1))
####################################################################################
def test_argmin():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.NONE).item(), np.argmin(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.NONE).item(), np.argmin(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.ROW).flatten(), np.argmin(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.ROW).flatten(), np.argmin(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.COL).flatten(), np.argmin(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.COL).flatten(), np.argmin(data, axis=1))
####################################################################################
def test_argsort():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
dataFlat = data.flatten()
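# Duplicate values make the exact argsort indices ambiguous, so compare the values the indices select rather than the index arrays themselves.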
assert np.array_equal(dataFlat[NumCpp.argsort(cArray, NumCpp.Axis.NONE).flatten().astype(np.uint32)],
dataFlat[np.argsort(data, axis=None)])
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
dataFlat = data.flatten()
assert np.array_equal(dataFlat[NumCpp.argsort(cArray, NumCpp.Axis.NONE).flatten().astype(np.uint32)],
dataFlat[np.argsort(data, axis=None)])
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
pIdx = np.argsort(data, axis=0)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.ROW).astype(np.uint16)
allPass = True
for idx, row in enumerate(data.T):
if not np.array_equal(row[cIdx[:, idx]], row[pIdx[:, idx]]):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
pIdx = np.argsort(data, axis=0)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.ROW).astype(np.uint16)
allPass = True
for idx, row in enumerate(data.T):
if not np.array_equal(row[cIdx[:, idx]], row[pIdx[:, idx]]):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
pIdx = np.argsort(data, axis=1)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.COL).astype(np.uint16)
allPass = True
for idx, row in enumerate(data):
if not np.array_equal(row[cIdx[idx, :]], row[pIdx[idx, :]]): # noqa
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
pIdx = np.argsort(data, axis=1)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.COL).astype(np.uint16)
allPass = True
for idx, row in enumerate(data):
if not np.array_equal(row[cIdx[idx, :]], row[pIdx[idx, :]]):
allPass = False
break
assert allPass
####################################################################################
def test_argwhere():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
randValue = np.random.randint(0, 100, [1, ]).item()
data2 = data > randValue
cArray.setArray(data2)
assert np.array_equal(NumCpp.argwhere(cArray).flatten(), np.argwhere(data.flatten() > randValue).flatten())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
randValue = np.random.randint(0, 100, [1, ]).item()
data2 = data > randValue
cArray.setArray(data2)
assert np.array_equal(NumCpp.argwhere(cArray).flatten(), np.argwhere(data.flatten() > randValue).flatten())
####################################################################################
def test_around():
value = np.abs(np.random.rand(1).item()) * np.random.randint(1, 10, [1, ]).item()
numDecimalsRound = np.random.randint(0, 10, [1, ]).astype(np.uint8).item()
assert NumCpp.aroundScaler(value, numDecimalsRound) == np.round(value, numDecimalsRound)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * np.random.randint(1, 10, [1, ]).item()
cArray.setArray(data)
numDecimalsRound = np.random.randint(0, 10, [1, ]).astype(np.uint8).item()
assert np.array_equal(NumCpp.aroundArray(cArray, numDecimalsRound), np.round(data, numDecimalsRound))
####################################################################################
def test_array_equal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
cArray3 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, shapeInput)
data2 = np.random.randint(1, 100, shapeInput)
cArray1.setArray(data1)
cArray2.setArray(data1)
cArray3.setArray(data2)
assert NumCpp.array_equal(cArray1, cArray2) and not NumCpp.array_equal(cArray1, cArray3)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
cArray3 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data1)
cArray3.setArray(data2)
assert NumCpp.array_equal(cArray1, cArray2) and not NumCpp.array_equal(cArray1, cArray3)
####################################################################################
def test_array_equiv():
shapeInput1 = np.random.randint(1, 100, [2, ])
shapeInput3 = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item())
shape2 = NumCpp.Shape(shapeInput1[1].item(), shapeInput1[0].item())
shape3 = NumCpp.Shape(shapeInput3[0].item(), shapeInput3[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
data1 = np.random.randint(1, 100, shapeInput1)
data3 = np.random.randint(1, 100, shapeInput3)
cArray1.setArray(data1)
cArray2.setArray(data1.reshape([shapeInput1[1].item(), shapeInput1[0].item()]))
cArray3.setArray(data3)
assert NumCpp.array_equiv(cArray1, cArray2) and not NumCpp.array_equiv(cArray1, cArray3)
shapeInput1 = np.random.randint(1, 100, [2, ])
shapeInput3 = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item())
shape2 = NumCpp.Shape(shapeInput1[1].item(), shapeInput1[0].item())
shape3 = NumCpp.Shape(shapeInput3[0].item(), shapeInput3[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape1)
cArray2 = NumCpp.NdArrayComplexDouble(shape2)
cArray3 = NumCpp.NdArrayComplexDouble(shape3)
real1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
imag1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data1 = real1 + 1j * imag1
real3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
imag3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data3 = real3 + 1j * imag3
cArray1.setArray(data1)
cArray2.setArray(data1.reshape([shapeInput1[1].item(), shapeInput1[0].item()]))
cArray3.setArray(data3)
assert NumCpp.array_equiv(cArray1, cArray2) and not NumCpp.array_equiv(cArray1, cArray3)
####################################################################################
def test_asarray():
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayArray1D(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayArray1D(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayArray1DCopy(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayArray1DCopy(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2DCopy(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2DCopy(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayVector1D(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayVector1D(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayVector1DCopy(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayVector1DCopy(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVector2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVector2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2DCopy(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2DCopy(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayDeque1D(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayDeque1D(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayDeque2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayDeque2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayList(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayList(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayIterators(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayIterators(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointerIterators(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointerIterators(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointer(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointer(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointer2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointer2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointerShell(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointerShell(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointerShellTakeOwnership(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointerShellTakeOwnership(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2DTakeOwnership(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2DTakeOwnership(*values), data)
####################################################################################
def test_astype():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
cArrayCast = NumCpp.astypeDoubleToUint32(cArray).getNumpyArray()
assert np.array_equal(cArrayCast, data.astype(np.uint32))
assert cArrayCast.dtype == np.uint32
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
cArrayCast = NumCpp.astypeDoubleToComplex(cArray).getNumpyArray()
assert np.array_equal(cArrayCast, data.astype(np.complex128))
assert cArrayCast.dtype == np.complex128
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
cArrayCast = NumCpp.astypeComplexToComplex(cArray).getNumpyArray()
assert np.array_equal(cArrayCast, data.astype(np.complex64))
assert cArrayCast.dtype == np.complex64
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
cArrayCast = NumCpp.astypeComplexToDouble(cArray).getNumpyArray()
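# Casting complex values to double drops the imaginary part, which numpy reports as a ComplexWarning; silence it just for this comparison.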
warnings.filterwarnings('ignore', category=np.ComplexWarning)
assert np.array_equal(cArrayCast, data.astype(np.double))
warnings.filters.pop() # noqa
assert cArrayCast.dtype == np.double
####################################################################################
def test_average():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.round(NumCpp.average(cArray, NumCpp.Axis.NONE).item(), 9) == np.round(np.average(data), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.round(NumCpp.average(cArray, NumCpp.Axis.NONE).item(), 9) == np.round(np.average(data), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.COL).flatten(), 9),
np.round(np.average(data, axis=1), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.COL).flatten(), 9),
np.round(np.average(data, axis=1), 9))
####################################################################################
def test_averageWeighted():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
cWeights = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
weights = np.random.randint(1, 5, [shape.rows, shape.cols])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.NONE).item(), 9) == \
np.round(np.average(data, weights=weights), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
cWeights = NumCpp.NdArray(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
weights = np.random.randint(1, 5, [shape.rows, shape.cols])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.NONE).item(), 9) == \
np.round(np.average(data, weights=weights), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
cWeights = NumCpp.NdArray(1, shape.cols)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
weights = np.random.randint(1, 5, [1, shape.rows])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, weights=weights.flatten(), axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
cWeights = NumCpp.NdArray(1, shape.cols)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
weights = np.random.randint(1, 5, [1, shape.rows])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, weights=weights.flatten(), axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
cWeights = NumCpp.NdArray(1, shape.rows)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
weights = np.random.randint(1, 5, [1, shape.cols])
cWeights.setArray(weights)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.COL).flatten(), 9),
np.round(np.average(data, weights=weights.flatten(), axis=1), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
cWeights = NumCpp.NdArray(1, shape.rows)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
weights = np.random.randint(1, 5, [1, shape.cols])
cWeights.setArray(weights)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.COL).flatten(), 9),
np.round(np.average(data, weights=weights.flatten(), axis=1), 9))
####################################################################################
def test_binaryRepr():
value = np.random.randint(0, np.iinfo(np.uint64).max, [1, ], dtype=np.uint64).item()
assert NumCpp.binaryRepr(np.uint64(value)) == np.binary_repr(value, np.iinfo(np.uint64).bits)
####################################################################################
def test_bincount():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
cArray.setArray(data)
assert np.array_equal(NumCpp.bincount(cArray, 0).flatten(), np.bincount(data.flatten(), minlength=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
cArray.setArray(data)
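# Use a minLength beyond the largest value so the counts are padded with trailing zeros.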
minLength = int(np.max(data) + 10)
assert np.array_equal(NumCpp.bincount(cArray, minLength).flatten(),
np.bincount(data.flatten(), minlength=minLength))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
cWeights = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
weights = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
cArray.setArray(data)
cWeights.setArray(weights)
assert np.array_equal(NumCpp.bincountWeighted(cArray, cWeights, 0).flatten(),
np.bincount(data.flatten(), minlength=0, weights=weights.flatten()))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
cWeights = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
weights = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
cArray.setArray(data)
cWeights.setArray(weights)
minLength = int(np.max(data) + 10)
assert np.array_equal(NumCpp.bincountWeighted(cArray, cWeights, minLength).flatten(),
np.bincount(data.flatten(), minlength=minLength, weights=weights.flatten()))
####################################################################################
def test_bitwise_and():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt64(shape)
cArray2 = NumCpp.NdArrayUInt64(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
data2 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.bitwise_and(cArray1, cArray2), np.bitwise_and(data1, data2))
####################################################################################
def test_bitwise_not():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt64(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray.setArray(data)
assert np.array_equal(NumCpp.bitwise_not(cArray), np.bitwise_not(data))
####################################################################################
def test_bitwise_or():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt64(shape)
cArray2 = NumCpp.NdArrayUInt64(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
data2 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.bitwise_or(cArray1, cArray2), np.bitwise_or(data1, data2))
####################################################################################
def test_bitwise_xor():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt64(shape)
cArray2 = NumCpp.NdArrayUInt64(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
data2 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.bitwise_xor(cArray1, cArray2), np.bitwise_xor(data1, data2))
####################################################################################
def test_byteswap():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt64(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray.setArray(data)
assert np.array_equal(NumCpp.byteswap(cArray).shape, shapeInput)
####################################################################################
def test_cbrt():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cbrtArray(cArray), 9), np.round(np.cbrt(data), 9))
####################################################################################
def test_ceil():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.ceilArray(cArray), 9), np.round(np.ceil(data), 9))
####################################################################################
def test_center_of_mass():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.centerOfMass(cArray, NumCpp.Axis.NONE).flatten(), 9),
np.round(meas.center_of_mass(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000
cArray.setArray(data)
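# Axis.ROW yields the center of mass of each column, so build the scipy reference one column at a time.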
coms = list()
for col in range(data.shape[1]):
coms.append(np.round(meas.center_of_mass(data[:, col])[0], 9))
assert np.array_equal(np.round(NumCpp.centerOfMass(cArray, NumCpp.Axis.ROW).flatten(), 9), np.round(coms, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000
cArray.setArray(data)
coms = list()
for row in range(data.shape[0]):
coms.append(np.round(meas.center_of_mass(data[row, :])[0], 9))
assert np.array_equal(np.round(NumCpp.centerOfMass(cArray, NumCpp.Axis.COL).flatten(), 9), np.round(coms, 9))
####################################################################################
def test_clip():
value = np.random.randint(0, 100, [1, ]).item()
minValue = np.random.randint(0, 10, [1, ]).item()
maxValue = np.random.randint(90, 100, [1, ]).item()
assert NumCpp.clipScaler(value, minValue, maxValue) == np.clip(value, minValue, maxValue)
value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
minValue = np.random.randint(0, 10, [1, ]).item() + 1j * np.random.randint(0, 10, [1, ]).item()
maxValue = np.random.randint(90, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
assert NumCpp.clipScaler(value, minValue, maxValue) == np.clip(value, minValue, maxValue) # noqa
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
minValue = np.random.randint(0, 10, [1, ]).item()
maxValue = np.random.randint(90, 100, [1, ]).item()
assert np.array_equal(NumCpp.clipArray(cArray, minValue, maxValue), np.clip(data, minValue, maxValue))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
minValue = np.random.randint(0, 10, [1, ]).item() + 1j * np.random.randint(0, 10, [1, ]).item()
maxValue = np.random.randint(90, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
assert np.array_equal(NumCpp.clipArray(cArray, minValue, maxValue), np.clip(data, minValue, maxValue)) # noqa
####################################################################################
def test_column_stack():
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape3 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape4 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.column_stack(cArray1, cArray2, cArray3, cArray4),
np.column_stack([data1, data2, data3, data4]))
####################################################################################
def test_complex():
real = np.random.rand(1).astype(np.double).item()
value = complex(real)
assert np.round(NumCpp.complexScaler(real), 9) == np.round(value, 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.complexScaler(components[0], components[1]), 9) == np.round(value, 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
realArray = NumCpp.NdArray(shape)
real = np.random.rand(shape.rows, shape.cols)
realArray.setArray(real)
assert np.array_equal(np.round(NumCpp.complexArray(realArray), 9), np.round(real + 1j * np.zeros_like(real), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
realArray = NumCpp.NdArray(shape)
imagArray = NumCpp.NdArray(shape)
real = np.random.rand(shape.rows, shape.cols)
imag = np.random.rand(shape.rows, shape.cols)
realArray.setArray(real)
imagArray.setArray(imag)
assert np.array_equal(np.round(NumCpp.complexArray(realArray, imagArray), 9), np.round(real + 1j * imag, 9))
####################################################################################
def test_concatenate():
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape3 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape4 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.concatenate(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.NONE).flatten(),
np.concatenate([data1.flatten(), data2.flatten(), data3.flatten(), data4.flatten()]))
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
shape3 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
shape4 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.concatenate(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.ROW),
np.concatenate([data1, data2, data3, data4], axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape3 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape4 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.concatenate(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.COL),
np.concatenate([data1, data2, data3, data4], axis=1))
####################################################################################
def test_conj():
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.conjScaler(value), 9) == np.round(np.conj(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.conjArray(cArray), 9), np.round(np.conj(data), 9))
####################################################################################
def test_contains():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
value = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
assert NumCpp.contains(cArray, value, NumCpp.Axis.NONE).getNumpyArray().item() == (value in data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
assert NumCpp.contains(cArray, value, NumCpp.Axis.NONE).getNumpyArray().item() == (value in data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
value = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
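# Axis.COL reduces across the columns of each row, so the expected result is one boolean per row.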
truth = list()
for row in data:
truth.append(value in row)
assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.COL).getNumpyArray().flatten(), np.asarray(truth))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
truth = list()
for row in data:
truth.append(value in row)
assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.COL).getNumpyArray().flatten(), np.asarray(truth))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
value = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
truth = list()
for row in data.T:
truth.append(value in row)
assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.asarray(truth))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
truth = list()
for row in data.T:
truth.append(value in row)
assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.asarray(truth))
####################################################################################
def test_copy():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.copy(cArray), data)
####################################################################################
def test_copysign():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.copysign(cArray1, cArray2), np.copysign(data1, data2))
####################################################################################
def test_copyto():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray()
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
assert np.array_equal(NumCpp.copyto(cArray2, cArray1), data1)
####################################################################################
def test_cos():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.cosScaler(value), 9) == np.round(np.cos(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.cosScaler(value), 9) == np.round(np.cos(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cosArray(cArray), 9), np.round(np.cos(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cosArray(cArray), 9), np.round(np.cos(data), 9))
####################################################################################
def test_cosh():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.coshScaler(value), 9) == np.round(np.cosh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.coshScaler(value), 9) == np.round(np.cosh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.coshArray(cArray), 9), np.round(np.cosh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.coshArray(cArray), 9), np.round(np.cosh(data), 9))
####################################################################################
def test_count_nonzero():
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 3, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert NumCpp.count_nonzero(cArray, NumCpp.Axis.NONE) == np.count_nonzero(data)
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 3, [shape.rows, shape.cols])
imag = np.random.randint(1, 3, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.count_nonzero(cArray, NumCpp.Axis.NONE) == np.count_nonzero(data)
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 3, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.ROW).flatten(), np.count_nonzero(data, axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 3, [shape.rows, shape.cols])
imag = np.random.randint(1, 3, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.ROW).flatten(), np.count_nonzero(data, axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 3, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.COL).flatten(), np.count_nonzero(data, axis=1))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 3, [shape.rows, shape.cols])
imag = np.random.randint(1, 3, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.COL).flatten(), np.count_nonzero(data, axis=1))
####################################################################################
def test_cross():
shape = NumCpp.Shape(1, 2)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.cross(cArray1, cArray2, NumCpp.Axis.NONE).item() == np.cross(data1, data2).item()
shape = NumCpp.Shape(1, 2)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.cross(cArray1, cArray2, NumCpp.Axis.NONE).item() == np.cross(data1, data2).item()
shape = NumCpp.Shape(2, np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.cross(data1, data2, axis=0))
shape = NumCpp.Shape(2, np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.cross(data1, data2, axis=0))
shape = NumCpp.Shape(np.random.randint(1, 100, [1, ]).item(), 2)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.cross(data1, data2, axis=1))
shape = NumCpp.Shape(np.random.randint(1, 100, [1, ]).item(), 2)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.cross(data1, data2, axis=1))
shape = NumCpp.Shape(1, 3)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.cross(data1, data2).flatten())
shape = NumCpp.Shape(1, 3)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.cross(data1, data2).flatten())
shape = NumCpp.Shape(3, np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray(),
np.cross(data1, data2, axis=0))
shape = NumCpp.Shape(3, np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray(),
np.cross(data1, data2, axis=0))
shape = NumCpp.Shape(np.random.randint(1, 100, [1, ]).item(), 3)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray(),
np.cross(data1, data2, axis=1))
shape = NumCpp.Shape(np.random.randint(1, 100, [1, ]).item(), 3)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray(),
np.cross(data1, data2, axis=1))
####################################################################################
def test_cube():
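    # cube(): element-wise x**3 for real and complex arrays, checked against data * data * data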
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cube(cArray), 9), np.round(data * data * data, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cube(cArray), 9), np.round(data * data * data, 9))
####################################################################################
def test_cumprod():
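    # cumprod(): cumulative product over the flattened array, down rows, and across columns, for real and complex data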
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.NONE).flatten(), data.cumprod())
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 4, [shape.rows, shape.cols])
imag = np.random.randint(1, 4, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.NONE).flatten(), data.cumprod())
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.ROW), data.cumprod(axis=0))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 4, [shape.rows, shape.cols])
imag = np.random.randint(1, 4, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.ROW), data.cumprod(axis=0))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.COL), data.cumprod(axis=1))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 4, [shape.rows, shape.cols])
imag = np.random.randint(1, 4, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.COL), data.cumprod(axis=1))
####################################################################################
def test_cumsum():
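    # cumsum(): cumulative sum over Axis.NONE, Axis.ROW, and Axis.COL, compared with numpy's cumsum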
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.NONE).flatten(), data.cumsum())
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.NONE).flatten(), data.cumsum())
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.ROW), data.cumsum(axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.ROW), data.cumsum(axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.COL), data.cumsum(axis=1))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.COL), data.cumsum(axis=1))
####################################################################################
def test_deg2rad():
value = np.abs(np.random.rand(1).item()) * 360
assert np.round(NumCpp.deg2radScaler(value), 9) == np.round(np.deg2rad(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 360
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.deg2radArray(cArray), 9), np.round(np.deg2rad(data), 9))
####################################################################################
def test_degrees():
value = np.abs(np.random.rand(1).item()) * 2 * np.pi
assert np.round(NumCpp.degreesScaler(value), 9) == np.round(np.degrees(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 2 * np.pi
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.degreesArray(cArray), 9), np.round(np.degrees(data), 9))
####################################################################################
def test_deleteIndices():
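    # deleteIndices(): remove elements, rows, or columns selected by a Slice or a single index, compared with np.delete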
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
indices = NumCpp.Slice(0, 100, 4)
indicesPy = slice(0, 99, 4)
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesSlice(cArray, indices, NumCpp.Axis.NONE).flatten(),
np.delete(data, indicesPy, axis=None))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
indices = NumCpp.Slice(0, 100, 4)
indicesPy = slice(0, 99, 4)
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesSlice(cArray, indices, NumCpp.Axis.ROW),
np.delete(data, indicesPy, axis=0))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
indices = NumCpp.Slice(0, 100, 4)
indicesPy = slice(0, 99, 4)
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesSlice(cArray, indices, NumCpp.Axis.COL),
np.delete(data, indicesPy, axis=1))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
index = np.random.randint(0, shape.size(), [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesScaler(cArray, index, NumCpp.Axis.NONE).flatten(),
np.delete(data, index, axis=None))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
index = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesScaler(cArray, index, NumCpp.Axis.ROW), np.delete(data, index, axis=0))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
index = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesScaler(cArray, index, NumCpp.Axis.COL), np.delete(data, index, axis=1))
####################################################################################
def test_diag():
shapeInput = np.random.randint(2, 25, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
k = np.random.randint(0, np.min(shapeInput), [1, ]).item()
elements = np.random.randint(1, 100, shapeInput)
cElements = NumCpp.NdArray(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diag(cElements, k).flatten(), np.diag(elements, k))
shapeInput = np.random.randint(2, 25, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
k = np.random.randint(0, np.min(shapeInput), [1, ]).item()
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
elements = real + 1j * imag
cElements = NumCpp.NdArrayComplexDouble(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diag(cElements, k).flatten(), np.diag(elements, k))
####################################################################################
def test_diagflat():
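    # diagflat(): build a 2D array with the 1D input placed on its k-th diagonal, real and complex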
numElements = np.random.randint(2, 25, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
k = np.random.randint(0, 10, [1, ]).item()
elements = np.random.randint(1, 100, [numElements, ])
cElements = NumCpp.NdArray(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diagflat(cElements, k), np.diagflat(elements, k))
numElements = np.random.randint(2, 25, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
k = np.random.randint(0, 10, [1, ]).item()
real = np.random.randint(1, 100, [numElements, ])
imag = np.random.randint(1, 100, [numElements, ])
elements = real + 1j * imag
cElements = NumCpp.NdArrayComplexDouble(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diagflat(cElements, k), np.diagflat(elements, k))
numElements = np.random.randint(1, 25, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
k = np.random.randint(0, 10, [1, ]).item()
elements = np.random.randint(1, 100, [numElements, ])
cElements = NumCpp.NdArray(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diagflat(cElements, k), np.diagflat(elements, k))
numElements = np.random.randint(1, 25, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
k = np.random.randint(0, 10, [1, ]).item()
real = np.random.randint(1, 100, [numElements, ])
imag = np.random.randint(1, 100, [numElements, ])
elements = real + 1j * imag
cElements = NumCpp.NdArrayComplexDouble(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diagflat(cElements, k), np.diagflat(elements, k))
####################################################################################
def test_diagonal():
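    # diagonal(): k-offset diagonals taken row-wise (axis1=0) and column-wise (axis1=1)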
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
offset = np.random.randint(0, min(shape.rows, shape.cols), [1, ]).item()
assert np.array_equal(NumCpp.diagonal(cArray, offset, NumCpp.Axis.ROW).flatten(),
np.diagonal(data, offset, axis1=0, axis2=1))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
offset = np.random.randint(0, min(shape.rows, shape.cols), [1, ]).item()
assert np.array_equal(NumCpp.diagonal(cArray, offset, NumCpp.Axis.ROW).flatten(),
np.diagonal(data, offset, axis1=0, axis2=1))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
offset = np.random.randint(0, min(shape.rows, shape.cols), [1, ]).item()
assert np.array_equal(NumCpp.diagonal(cArray, offset, NumCpp.Axis.COL).flatten(),
np.diagonal(data, offset, axis1=1, axis2=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
offset = np.random.randint(0, min(shape.rows, shape.cols), [1, ]).item()
assert np.array_equal(NumCpp.diagonal(cArray, offset, NumCpp.Axis.COL).flatten(),
np.diagonal(data, offset, axis1=1, axis2=0))
####################################################################################
def test_diff():
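    # diff(): first differences of the flattened array, down rows, and across columns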
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.NONE).flatten(),
np.diff(data.flatten()))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.NONE).flatten(),
np.diff(data.flatten()))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.ROW), np.diff(data, axis=0))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.ROW), np.diff(data, axis=0))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.COL).astype(np.uint32), np.diff(data, axis=1))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.COL), np.diff(data, axis=1))
####################################################################################
def test_divide():
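    # divide(): array/array, array/scalar, and scalar/array division for real, complex, and mixed operands; zero divisors are replaced before dividing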
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2[data2 == 0] = 1
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.divide(cArray1, cArray2), 9),
np.round(data1 / data2, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = 0
while value == 0:
value = np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(cArray, value), 9),
np.round(data / value, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
data[data == 0] = 1
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(value, cArray), 9),
np.round(value / data, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
data2[data2 == complex(0)] = complex(1)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.divide(cArray1, cArray2), 9),
np.round(data1 / data2, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = 0
while value == complex(0):
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(cArray, value), 9),
np.round(data / value, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
data[data == complex(0)] = complex(1)
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(value, cArray), 9),
np.round(value / data, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2[data2 == 0] = 1
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.divide(cArray1, cArray2), 9),
np.round(data1 / data2, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
data2[data2 == complex(0)] = complex(1)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.divide(cArray1, cArray2), 9),
np.round(data1 / data2, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
    value = 0
    while value == complex(0):
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(cArray, value), 9),
np.round(data / value, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
data[data == 0] = 1
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(value, cArray), 9),
np.round(value / data, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = 0
while value == 0:
value = np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(cArray, value), 9),
np.round(data / value, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
data[data == complex(0)] = complex(1)
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(value, cArray), 9),
np.round(value / data, 9))
####################################################################################
def test_dot():
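    # dot(): inner products of 1xN row vectors (real, complex, and mixed) and full 2D matrix products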
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 50, [shape.rows, shape.cols])
data2 = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.dot(cArray1, cArray2).item() == np.dot(data1, data2.T).item()
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 50, [shape.rows, shape.cols])
real2 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 50, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.dot(cArray1, cArray2).item() == np.dot(data1, data2.T).item()
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 50, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 50, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.dot(cArray1, cArray2).item() == np.dot(data1, data2.T).item()
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 50, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.dot(cArray1, cArray2).item() == np.dot(data1, data2.T).item()
shapeInput = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(1, 50, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 50, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.dot(cArray1, cArray2), np.dot(data1, data2))
shapeInput = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArrayComplexDouble(shape1)
cArray2 = NumCpp.NdArrayComplexDouble(shape2)
real1 = np.random.randint(1, 50, [shape1.rows, shape1.cols])
imag1 = np.random.randint(1, 50, [shape1.rows, shape1.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 50, [shape2.rows, shape2.cols])
imag2 = np.random.randint(1, 50, [shape2.rows, shape2.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.dot(cArray1, cArray2), np.dot(data1, data2))
####################################################################################
def test_empty():
shapeInput = np.random.randint(1, 100, [2, ])
cArray = NumCpp.emptyRowCol(shapeInput[0].item(), shapeInput[1].item())
assert cArray.shape[0] == shapeInput[0]
assert cArray.shape[1] == shapeInput[1]
assert cArray.size == shapeInput.prod()
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.emptyShape(shape)
assert cArray.shape[0] == shape.rows
assert cArray.shape[1] == shape.cols
assert cArray.size == shapeInput.prod()
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.empty_like(cArray1)
assert cArray2.shape().rows == shape.rows
assert cArray2.shape().cols == shape.cols
assert cArray2.size() == shapeInput.prod()
####################################################################################
def test_endianess():
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
assert NumCpp.endianess(cArray) == NumCpp.Endian.NATIVE
####################################################################################
def test_equal():
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 10, [shape.rows, shape.cols])
data2 = np.random.randint(0, 10, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.equal(cArray1, cArray2), np.equal(data1, data2))
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.equal(cArray1, cArray2), np.equal(data1, data2))
####################################################################################
def test_exp2():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.expScaler(value), 9) == np.round(np.exp(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.expScaler(value), 9) == np.round(np.exp(value), 9)
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.exp2Scaler(value), 9) == np.round(np.exp2(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.exp2Array(cArray), 9), np.round(np.exp2(data), 9))
####################################################################################
def test_exp():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.expArray(cArray), 9), np.round(np.exp(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.expArray(cArray), 9), np.round(np.exp(data), 9))
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.expm1Scaler(value), 9) == np.round(np.expm1(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.expm1Scaler(value), 9) == np.round(np.expm1(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.expm1Array(cArray), 9), np.round(np.expm1(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.rand(shape.rows, shape.cols)
imag = np.random.rand(shape.rows, shape.cols)
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.expm1Array(cArray), 9), np.round(np.expm1(data), 9))
####################################################################################
def test_eye():
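    # eye(): identity-like matrices with a k-offset diagonal, built from a size, a row/col pair, or a Shape, real and complex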
shapeInput = np.random.randint(1, 100, [1, ]).item()
randK = np.random.randint(0, shapeInput, [1, ]).item()
assert np.array_equal(NumCpp.eye1D(shapeInput, randK), np.eye(shapeInput, k=randK))
shapeInput = np.random.randint(1, 100, [1, ]).item()
randK = np.random.randint(0, shapeInput, [1, ]).item()
assert np.array_equal(NumCpp.eye1DComplex(shapeInput, randK),
np.eye(shapeInput, k=randK) + 1j * np.zeros([shapeInput, shapeInput]))
shapeInput = np.random.randint(10, 100, [2, ])
randK = np.random.randint(0, np.min(shapeInput), [1, ]).item()
assert np.array_equal(NumCpp.eye2D(shapeInput[0].item(), shapeInput[1].item(), randK),
np.eye(shapeInput[0].item(), shapeInput[1].item(), k=randK))
shapeInput = np.random.randint(10, 100, [2, ])
randK = np.random.randint(0, np.min(shapeInput), [1, ]).item()
assert np.array_equal(NumCpp.eye2DComplex(shapeInput[0].item(), shapeInput[1].item(), randK),
np.eye(shapeInput[0].item(), shapeInput[1].item(), k=randK) +
1j * np.zeros(shapeInput))
shapeInput = np.random.randint(10, 100, [2, ])
cShape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
randK = np.random.randint(0, np.min(shapeInput), [1, ]).item()
assert np.array_equal(NumCpp.eyeShape(cShape, randK), np.eye(shapeInput[0].item(), shapeInput[1].item(), k=randK))
shapeInput = np.random.randint(10, 100, [2, ])
cShape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
randK = np.random.randint(0, np.min(shapeInput), [1, ]).item()
assert np.array_equal(NumCpp.eyeShapeComplex(cShape, randK),
np.eye(shapeInput[0].item(), shapeInput[1].item(), k=randK) +
1j * np.zeros(shapeInput))
####################################################################################
def test_fill_diagonal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
NumCpp.fillDiagonal(cArray, 666)
np.fill_diagonal(data, 666)
assert np.array_equal(cArray.getNumpyArray(), data)
####################################################################################
def test_find():
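    # find(): flat indices of the true elements of a boolean mask, compared with np.nonzero on the flattened mask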
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
value = data.mean()
cMask = NumCpp.operatorGreater(cArray, value)
cMaskArray = NumCpp.NdArrayBool(cMask.shape[0], cMask.shape[1])
cMaskArray.setArray(cMask)
idxs = NumCpp.find(cMaskArray).astype(np.int64)
idxsPy = np.nonzero((data > value).flatten())[0]
assert np.array_equal(idxs.flatten(), idxsPy)
####################################################################################
def test_findN():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
value = data.mean()
cMask = NumCpp.operatorGreater(cArray, value)
cMaskArray = NumCpp.NdArrayBool(cMask.shape[0], cMask.shape[1])
cMaskArray.setArray(cMask)
idxs = NumCpp.findN(cMaskArray, 8).astype(np.int64)
idxsPy = np.nonzero((data > value).flatten())[0]
assert np.array_equal(idxs.flatten(), idxsPy[:8])
####################################################################################
def test_fix():
value = np.random.randn(1).item() * 100
assert NumCpp.fixScaler(value) == np.fix(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
assert np.array_equal(NumCpp.fixArray(cArray), np.fix(data))
####################################################################################
def test_flatten():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.flatten(cArray).getNumpyArray(), np.resize(data, [1, data.size]))
####################################################################################
def test_flatnonzero():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.flatnonzero(cArray).getNumpyArray().flatten(), np.flatnonzero(data))
####################################################################################
def test_flip():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.flip(cArray, NumCpp.Axis.NONE).getNumpyArray(),
np.flip(data.reshape(1, data.size), axis=1).reshape(shapeInput))
####################################################################################
def test_fliplr():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.fliplr(cArray).getNumpyArray(), np.fliplr(data))
####################################################################################
def test_flipud():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.flipud(cArray).getNumpyArray(), np.flipud(data))
####################################################################################
def test_floor():
value = np.random.randn(1).item() * 100
assert NumCpp.floorScaler(value) == np.floor(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
assert np.array_equal(NumCpp.floorArray(cArray), np.floor(data))
####################################################################################
def test_floor_divide():
value1 = np.random.randn(1).item() * 100 + 1000
value2 = np.random.randn(1).item() * 100 + 1000
assert NumCpp.floor_divideScaler(value1, value2) == np.floor_divide(value1, value2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data2 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.floor_divideArray(cArray1, cArray2), np.floor_divide(data1, data2))
####################################################################################
def test_fmax():
value1 = np.random.randn(1).item() * 100 + 1000
value2 = np.random.randn(1).item() * 100 + 1000
assert NumCpp.fmaxScaler(value1, value2) == np.fmax(value1, value2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data2 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.fmaxArray(cArray1, cArray2), np.fmax(data1, data2))
####################################################################################
def test_fmin():
value1 = np.random.randn(1).item() * 100 + 1000
value2 = np.random.randn(1).item() * 100 + 1000
assert NumCpp.fminScaler(value1, value2) == np.fmin(value1, value2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data2 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.fminArray(cArray1, cArray2), np.fmin(data1, data2))
####################################################################################
def test_fmod():
value1 = np.random.randint(1, 100, [1, ]).item() * 100 + 1000
value2 = np.random.randint(1, 100, [1, ]).item() * 100 + 1000
assert NumCpp.fmodScaler(value1, value2) == np.fmod(value1, value2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt32(shape)
cArray2 = NumCpp.NdArrayUInt32(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32) * 100 + 1000
data2 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32) * 100 + 1000
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.fmodArray(cArray1, cArray2), np.fmod(data1, data2))
####################################################################################
def test_fromfile():
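    # fromfile(): round-trip an array through a binary dump and through a '\n'-delimited text file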
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
    tempDir = tempfile.gettempdir()
tempFile = os.path.join(tempDir, 'NdArrayDump.bin')
NumCpp.dump(cArray, tempFile)
assert os.path.isfile(tempFile)
data2 = NumCpp.fromfile(tempFile, '').reshape(shape)
assert np.array_equal(data, data2)
os.remove(tempFile)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
tempDir = tempfile.gettempdir()
tempFile = os.path.join(tempDir, 'NdArrayDump')
NumCpp.tofile(cArray, tempFile, '\n')
assert os.path.exists(tempFile + '.txt')
data2 = NumCpp.fromfile(tempFile + '.txt', '\n').reshape(shape)
assert np.array_equal(data, data2)
os.remove(tempFile + '.txt')
####################################################################################
def test_fromiter():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.fromiter(cArray).flatten(), data.flatten())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.fromiter(cArray).flatten(), data.flatten())
####################################################################################
def test_full():
shapeInput = np.random.randint(1, 100, [1, ]).item()
value = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.fullSquare(shapeInput, value)
assert (cArray.shape[0] == shapeInput and cArray.shape[1] == shapeInput and
cArray.size == shapeInput**2 and np.all(cArray == value))
shapeInput = np.random.randint(1, 100, [1, ]).item()
value = np.random.randint(1, 100, [1, ]).item() + 1j * np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.fullSquareComplex(shapeInput, value)
assert (cArray.shape[0] == shapeInput and cArray.shape[1] == shapeInput and
cArray.size == shapeInput**2 and np.all(cArray == value))
shapeInput = np.random.randint(1, 100, [2, ])
value = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.fullRowCol(shapeInput[0].item(), shapeInput[1].item(), value)
assert (cArray.shape[0] == shapeInput[0] and cArray.shape[1] == shapeInput[1] and
cArray.size == shapeInput.prod() and np.all(cArray == value))
shapeInput = np.random.randint(1, 100, [2, ])
value = np.random.randint(1, 100, [1, ]).item() + 1j * np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.fullRowColComplex(shapeInput[0].item(), shapeInput[1].item(), value)
assert (cArray.shape[0] == shapeInput[0] and cArray.shape[1] == shapeInput[1] and
cArray.size == shapeInput.prod() and np.all(cArray == value))
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
value = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.fullShape(shape, value)
assert (cArray.shape[0] == shape.rows and cArray.shape[1] == shape.cols and
cArray.size == shapeInput.prod() and np.all(cArray == value))
####################################################################################
def test_full_like():
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
value = np.random.randint(1, 100, [1, ]).item()
cArray2 = NumCpp.full_like(cArray1, value)
assert (cArray2.shape().rows == shape.rows and cArray2.shape().cols == shape.cols and
cArray2.size() == shapeInput.prod() and np.all(cArray2.getNumpyArray() == value))
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
value = np.random.randint(1, 100, [1, ]).item() + 1j * np.random.randint(1, 100, [1, ]).item()
cArray2 = NumCpp.full_likeComplex(cArray1, value)
assert (cArray2.shape().rows == shape.rows and cArray2.shape().cols == shape.cols and
cArray2.size() == shapeInput.prod() and np.all(cArray2.getNumpyArray() == value))
####################################################################################
def test_gcd():
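    # gcd(): scalar gcd and reduction over an array, guarded by the Boost/STL gcd-lcm availability flags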
if not NumCpp.NUMCPP_NO_USE_BOOST or NumCpp.STL_GCD_LCM:
value1 = np.random.randint(1, 1000, [1, ]).item()
value2 = np.random.randint(1, 1000, [1, ]).item()
assert NumCpp.gcdScaler(value1, value2) == np.gcd(value1, value2)
if not NumCpp.NUMCPP_NO_USE_BOOST:
size = np.random.randint(20, 100, [1, ]).item()
cArray = NumCpp.NdArrayUInt32(1, size)
data = np.random.randint(1, 1000, [size, ], dtype=np.uint32)
cArray.setArray(data)
assert NumCpp.gcdArray(cArray) == np.gcd.reduce(data) # noqa
####################################################################################
def test_gradient():
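    # gradient(): numerical gradient down rows, across columns, and over the flattened array, real and complex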
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 1000, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.ROW), np.gradient(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 1000, [shape.rows, shape.cols])
imag = np.random.randint(1, 1000, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.ROW), np.gradient(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 1000, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.COL), np.gradient(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 1000, [shape.rows, shape.cols])
imag = np.random.randint(1, 1000, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.COL), np.gradient(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 1000, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.NONE).flatten(),
np.gradient(data.flatten(), axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 1000, [shape.rows, shape.cols])
imag = np.random.randint(1, 1000, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.NONE).flatten(),
np.gradient(data.flatten(), axis=0))
####################################################################################
def test_greater():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.greater(cArray1, cArray2).getNumpyArray(),
np.greater(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.greater(cArray1, cArray2).getNumpyArray(),
np.greater(data1, data2))
####################################################################################
def test_greater_equal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.greater_equal(cArray1, cArray2).getNumpyArray(),
np.greater_equal(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.greater_equal(cArray1, cArray2).getNumpyArray(),
np.greater_equal(data1, data2))
####################################################################################
def test_histogram():
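    # histogram(): bin counts and edges for a requested number of bins, and counts for user-supplied bin edges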
shape = NumCpp.Shape(1024, 1024)
cArray = NumCpp.NdArray(shape)
data = np.random.randn(1024, 1024) * np.random.randint(1, 10, [1, ]).item() + np.random.randint(1, 10, [1, ]).item()
cArray.setArray(data)
numBins = np.random.randint(10, 30, [1, ]).item()
histogram, bins = NumCpp.histogram(cArray, numBins)
h, b = np.histogram(data, numBins)
assert np.array_equal(histogram.getNumpyArray().flatten().astype(np.int32), h)
assert np.array_equal(np.round(bins.getNumpyArray().flatten(), 9), np.round(b, 9))
shape = NumCpp.Shape(1024, 1024)
cArray = NumCpp.NdArray(shape)
data = np.random.randn(1024, 1024) * np.random.randint(1, 10, [1, ]).item() + np.random.randint(1, 10, [1, ]).item()
cArray.setArray(data)
binEdges = np.linspace(data.min(), data.max(), 15, endpoint=True)
cBinEdges = NumCpp.NdArray(1, binEdges.size)
cBinEdges.setArray(binEdges)
histogram = NumCpp.histogram(cArray, cBinEdges)
h, _ = np.histogram(data, binEdges)
assert np.array_equal(histogram.flatten().astype(np.int32), h)
####################################################################################
def test_hstack():
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape3 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape4 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.hstack(cArray1, cArray2, cArray3, cArray4),
np.hstack([data1, data2, data3, data4]))
####################################################################################
def test_hypot():
value1 = np.random.randn(1).item() * 100 + 1000
value2 = np.random.randn(1).item() * 100 + 1000
assert NumCpp.hypotScaler(value1, value2) == np.hypot(value1, value2)
value1 = np.random.randn(1).item() * 100 + 1000
value2 = np.random.randn(1).item() * 100 + 1000
value3 = np.random.randn(1).item() * 100 + 1000
assert (np.round(NumCpp.hypotScalerTriple(value1, value2, value3), 9) ==
np.round(np.sqrt(value1**2 + value2**2 + value3**2), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data2 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.hypotArray(cArray1, cArray2), 9),
np.round(np.hypot(data1, data2), 9))
####################################################################################
def test_identity():
squareSize = np.random.randint(10, 100, [1, ]).item()
assert np.array_equal(NumCpp.identity(squareSize).getNumpyArray(), np.identity(squareSize))
squareSize = np.random.randint(10, 100, [1, ]).item()
assert np.array_equal(NumCpp.identityComplex(squareSize).getNumpyArray(),
np.identity(squareSize) + 1j * np.zeros([squareSize, squareSize]))
####################################################################################
def test_imag():
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.imagScaler(value), 9) == np.round(np.imag(value), 9) # noqa
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.imagArray(cArray), 9), np.round(np.imag(data), 9))
####################################################################################
def test_interp():
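    # interp(): linear interpolation of a sampled sine curve onto a finer grid, compared with np.interp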
endPoint = np.random.randint(10, 20, [1, ]).item()
numPoints = np.random.randint(50, 100, [1, ]).item()
resample = np.random.randint(2, 5, [1, ]).item()
xpData = np.linspace(0, endPoint, numPoints, endpoint=True)
fpData = np.sin(xpData)
xData = np.linspace(0, endPoint, numPoints * resample, endpoint=True)
cXp = NumCpp.NdArray(1, numPoints)
cFp = NumCpp.NdArray(1, numPoints)
cX = NumCpp.NdArray(1, numPoints * resample)
cXp.setArray(xpData)
cFp.setArray(fpData)
cX.setArray(xData)
assert np.array_equal(np.round(NumCpp.interp(cX, cXp, cFp).flatten(), 9),
np.round(np.interp(xData, xpData, fpData), 9))
####################################################################################
def test_intersect1d():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt32(shape)
cArray2 = NumCpp.NdArrayUInt32(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols]).astype(np.uint32)
data2 = np.random.randint(1, 100, [shape.rows, shape.cols]).astype(np.uint32)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.intersect1d(cArray1, cArray2).getNumpyArray().flatten(), np.intersect1d(data1, data2))
####################################################################################
def test_invert():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.invert(cArray).getNumpyArray(), np.invert(data))
####################################################################################
def test_isclose():
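    # isclose(): element-wise closeness of two nearly equal arrays with explicit rtol and atol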
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.rand(shape.rows, shape.cols)
data2 = data1 + np.random.randn(shape.rows, shape.cols) * 1e-5
cArray1.setArray(data1)
cArray2.setArray(data2)
rtol = 1e-5
atol = 1e-8
assert np.array_equal(NumCpp.isclose(cArray1, cArray2, rtol, atol).getNumpyArray(),
np.isclose(data1, data2, rtol=rtol, atol=atol))
####################################################################################
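# data is drawn from roughly N(1000, 100), so about half of the samples exceed 1000
# and are replaced with inf before comparing NumCpp.isinfArray against np.isinf.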
def test_isinf():
value = np.random.randn(1).item() * 100 + 1000
assert NumCpp.isinfScaler(value) == np.isinf(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data[data > 1000] = np.inf
cArray.setArray(data)
assert np.array_equal(NumCpp.isinfArray(cArray), np.isinf(data))
####################################################################################
def test_isnan():
value = np.random.randn(1).item() * 100 + 1000
assert NumCpp.isnanScaler(value) == np.isnan(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data[data > 1000] = np.nan
cArray.setArray(data)
assert np.array_equal(NumCpp.isnanArray(cArray), np.isnan(data))
####################################################################################
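# lcm is guarded by build flags exposed through the bindings: the scalar overload runs
# when Boost is enabled or std::lcm is available (NUMCPP_NO_USE_BOOST / STL_GCD_LCM),
# while the array overload, which reduces the whole array to one value, requires Boost.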
def test_lcm():
if not NumCpp.NUMCPP_NO_USE_BOOST or NumCpp.STL_GCD_LCM:
value1 = np.random.randint(1, 1000, [1, ]).item()
value2 = np.random.randint(1, 1000, [1, ]).item()
assert NumCpp.lcmScaler(value1, value2) == np.lcm(value1, value2)
if not NumCpp.NUMCPP_NO_USE_BOOST:
size = np.random.randint(2, 10, [1, ]).item()
cArray = NumCpp.NdArrayUInt32(1, size)
data = np.random.randint(1, 100, [size, ], dtype=np.uint32)
cArray.setArray(data)
assert NumCpp.lcmArray(cArray) == np.lcm.reduce(data) # noqa
####################################################################################
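# ldexp(x, n) computes x * 2**n (e.g. np.ldexp(3.0, 4) == 48.0); data2 holds small
# uint8 exponents in [1, 20) so the scaled values stay well within double range.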
def test_ldexp():
value1 = np.random.randn(1).item() * 100
value2 = np.random.randint(1, 20, [1, ]).item()
assert np.round(NumCpp.ldexpScaler(value1, value2), 9) == np.round(np.ldexp(value1, value2), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayUInt8(shape)
data1 = np.random.randn(shape.rows, shape.cols) * 100
data2 = np.random.randint(1, 20, [shape.rows, shape.cols], dtype=np.uint8)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.ldexpArray(cArray1, cArray2), 9), np.round(np.ldexp(data1, data2), 9))
####################################################################################
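# left_shift applies a bitwise shift to every uint32 element; the shift count is a
# single random value in [1, 32) shared across the whole array.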
def test_left_shift():
shapeInput = np.random.randint(20, 100, [2, ])
bitsToshift = np.random.randint(1, 32, [1, ]).item()
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(1, np.iinfo(np.uint32).max, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.left_shift(cArray, bitsToshift).getNumpyArray(),
np.left_shift(data, bitsToshift))
####################################################################################
def test_less():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.less(cArray1, cArray2).getNumpyArray(),
np.less(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.less(cArray1, cArray2).getNumpyArray(),
np.less(data1, data2))
####################################################################################
def test_less_equal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.less_equal(cArray1, cArray2).getNumpyArray(),
np.less_equal(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.less_equal(cArray1, cArray2).getNumpyArray(),
np.less_equal(data1, data2))
####################################################################################
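# Round-trip check: the array is dumped to a temporary binary file with NumCpp.dump,
# read back with NumCpp.load (reshaped here back to the original shape), compared
# element-for-element, and the temp file is removed so the test leaves nothing behind.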
def test_load():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
tempDir = tempfile.gettempdir()
tempFile = os.path.join(tempDir, 'NdArrayDump.bin')
NumCpp.dump(cArray, tempFile)
assert os.path.isfile(tempFile)
data2 = NumCpp.load(tempFile).reshape(shape)
assert np.array_equal(data, data2)
os.remove(tempFile)
####################################################################################
def test_linspace():
start = np.random.randint(1, 10, [1, ]).item()
end = np.random.randint(start + 10, 100, [1, ]).item()
numPoints = np.random.randint(1, 100, [1, ]).item()
assert np.array_equal(np.round(NumCpp.linspace(start, end, numPoints, True).getNumpyArray().flatten(), 9),
np.round(np.linspace(start, end, numPoints, endpoint=True), 9))
start = np.random.randint(1, 10, [1, ]).item()
end = np.random.randint(start + 10, 100, [1, ]).item()
numPoints = np.random.randint(1, 100, [1, ]).item()
assert np.array_equal(np.round(NumCpp.linspace(start, end, numPoints, False).getNumpyArray().flatten(), 9),
np.round(np.linspace(start, end, numPoints, endpoint=False), 9))
####################################################################################
def test_log():
value = np.random.randn(1).item() * 100 + 1000
assert np.round(NumCpp.logScaler(value), 9) == np.round(np.log(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.logScaler(value), 9) == np.round(np.log(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.logArray(cArray), 9), np.round(np.log(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.logArray(cArray), 9), np.round(np.log(data), 9))
####################################################################################
def test_log10():
value = np.random.randn(1).item() * 100 + 1000
assert np.round(NumCpp.log10Scaler(value), 9) == np.round(np.log10(value), 9)
components = np.random.randn(2).astype(np.double) * 100 + 100
value = complex(components[0], components[1])
assert np.round(NumCpp.log10Scaler(value), 9) == np.round(np.log10(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.log10Array(cArray), 9), np.round(np.log10(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.log10Array(cArray), 9), np.round(np.log10(data), 9))
####################################################################################
def test_log1p():
value = np.random.randn(1).item() * 100 + 1000
assert np.round(NumCpp.log1pScaler(value), 9) == np.round(np.log1p(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.log1pArray(cArray), 9), np.round(np.log1p(data), 9))
####################################################################################
def test_log2():
value = np.random.randn(1).item() * 100 + 1000
assert np.round(NumCpp.log2Scaler(value), 9) == np.round(np.log2(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.log2Array(cArray), 9), np.round(np.log2(data), 9))
####################################################################################
def test_logical_and():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 20, [shape.rows, shape.cols])
data2 = np.random.randint(0, 20, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.logical_and(cArray1, cArray2).getNumpyArray(), np.logical_and(data1, data2))
####################################################################################
def test_logical_not():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 20, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.logical_not(cArray).getNumpyArray(), np.logical_not(data))
####################################################################################
def test_logical_or():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 20, [shape.rows, shape.cols])
data2 = np.random.randint(0, 20, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.logical_or(cArray1, cArray2).getNumpyArray(), np.logical_or(data1, data2))
####################################################################################
def test_logical_xor():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 20, [shape.rows, shape.cols])
data2 = np.random.randint(0, 20, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.logical_xor(cArray1, cArray2).getNumpyArray(), np.logical_xor(data1, data2))
####################################################################################
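# matmul is exercised for every real/complex operand combination; the second shape is
# the transpose of the first (cols x rows), so the product is always well defined.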
def test_matmul():
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), shapeInput[0].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(0, 20, [shape1.rows, shape1.cols])
data2 = np.random.randint(0, 20, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.matmul(cArray1, cArray2), np.matmul(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), shapeInput[0].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArrayComplexDouble(shape2)
data1 = np.random.randint(0, 20, [shape1.rows, shape1.cols])
real2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
imag2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.matmul(cArray1, cArray2), np.matmul(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), shapeInput[0].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape1)
cArray2 = NumCpp.NdArrayComplexDouble(shape2)
real1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
imag1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
imag2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.matmul(cArray1, cArray2), np.matmul(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), shapeInput[0].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape1)
cArray2 = NumCpp.NdArray(shape2)
real1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
imag1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(0, 20, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.matmul(cArray1, cArray2), np.matmul(data1, data2))
####################################################################################
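# For complex arrays, NumPy's max/min use lexicographic ordering (real part first,
# imaginary part as tie-breaker); the NumCpp results are expected to agree with that.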
def test_max():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.max(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.max(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.max(cArray, NumCpp.Axis.ROW).flatten(),
np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.max(cArray, NumCpp.Axis.ROW).flatten(),
np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.max(cArray, NumCpp.Axis.COL).flatten(),
np.max(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.max(cArray, NumCpp.Axis.COL).flatten(),
np.max(data, axis=1))
####################################################################################
def test_maximum():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols])
data2 = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.maximum(cArray1, cArray2), np.maximum(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.maximum(cArray1, cArray2), np.maximum(data1, data2))
####################################################################################
def test_mean():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.round(NumCpp.mean(cArray, NumCpp.Axis.NONE).getNumpyArray().item(), 9) == \
np.round(np.mean(data, axis=None).item(), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.round(NumCpp.mean(cArray, NumCpp.Axis.NONE).getNumpyArray().item(), 9) == \
np.round(np.mean(data, axis=None).item(), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.mean(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 9),
np.round(np.mean(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.mean(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 9),
np.round(np.mean(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.mean(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 9),
np.round(np.mean(data, axis=1), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.mean(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 9),
np.round(np.mean(data, axis=1), 9))
####################################################################################
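# The while loop rerolls the shape until the total element count is odd, so the median
# is always an actual array element and can be compared exactly, with no averaging of
# two middle values involved.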
def test_median():
isEven = True
while isEven:
shapeInput = np.random.randint(20, 100, [2, ])
isEven = shapeInput.prod().item() % 2 == 0
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) # noqa
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.median(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten().item() == np.median(data, axis=None).item()
isEven = True
while isEven:
shapeInput = np.random.randint(20, 100, [2, ])
isEven = shapeInput.prod().item() % 2 == 0
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.median(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.median(data, axis=0))
isEven = True
while isEven:
shapeInput = np.random.randint(20, 100, [2, ])
isEven = shapeInput.prod().item() % 2 == 0
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.median(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), np.median(data, axis=1))
####################################################################################
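# NumCpp.Slice(start, end, step) mirrors np.arange(start, end, step) here; meshgrid is
# verified by building the same pair of coordinate grids on both sides.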
def test_meshgrid():
start = np.random.randint(0, 20, [1, ]).item()
end = np.random.randint(30, 100, [1, ]).item()
step = np.random.randint(1, 5, [1, ]).item()
dataI = np.arange(start, end, step)
iSlice = NumCpp.Slice(start, end, step)
start = np.random.randint(0, 20, [1, ]).item()
end = np.random.randint(30, 100, [1, ]).item()
step = np.random.randint(1, 5, [1, ]).item()
dataJ = np.arange(start, end, step)
jSlice = NumCpp.Slice(start, end, step)
iMesh, jMesh = np.meshgrid(dataI, dataJ)
iMeshC, jMeshC = NumCpp.meshgrid(iSlice, jSlice)
assert np.array_equal(iMeshC.getNumpyArray(), iMesh)
assert np.array_equal(jMeshC.getNumpyArray(), jMesh)
####################################################################################
def test_min():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.min(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.min(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.min(cArray, NumCpp.Axis.ROW).flatten(),
np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.min(cArray, NumCpp.Axis.ROW).flatten(),
np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.min(cArray, NumCpp.Axis.COL).flatten(),
np.min(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.min(cArray, NumCpp.Axis.COL).flatten(),
np.min(data, axis=1))
####################################################################################
def test_minimum():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols])
data2 = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.minimum(cArray1, cArray2), np.minimum(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.minimum(cArray1, cArray2), np.minimum(data1, data2))
####################################################################################
def test_mod():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt32(shape)
cArray2 = NumCpp.NdArrayUInt32(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
data2 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.mod(cArray1, cArray2).getNumpyArray(), np.mod(data1, data2))
####################################################################################
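# multiply is covered for array*array, array*scalar, and scalar*array in every
# real/complex combination, and should match plain elementwise `*` on the NumPy side.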
def test_multiply():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.multiply(cArray1, cArray2), data1 * data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(cArray, value), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(value, cArray), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.multiply(cArray1, cArray2), data1 * data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(cArray, value), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(value, cArray), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.multiply(cArray1, cArray2), data1 * data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.multiply(cArray1, cArray2), data1 * data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(cArray, value), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(value, cArray), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(cArray, value), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(value, cArray), data * value)
####################################################################################
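# Ten random elements each are overwritten with nan, +inf, and -inf (the index sets are
# drawn independently, so later assignments win on overlap); nan_to_num then replaces
# each class with its own finite value, mirroring np.nan_to_num's nan/posinf/neginf.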
def test_nan_to_num():
shapeInput = np.random.randint(50, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.size(), ]).astype(np.double)
nan_idx = np.random.choice(range(data.size), 10, replace=False)
pos_inf_idx = np.random.choice(range(data.size), 10, replace=False)
neg_inf_idx = np.random.choice(range(data.size), 10, replace=False)
data[nan_idx] = np.nan
data[pos_inf_idx] = np.inf
data[neg_inf_idx] = -np.inf
data = data.reshape(shapeInput)
cArray.setArray(data)
nan_replace = float(np.random.randint(100))
pos_inf_replace = float(np.random.randint(100))
neg_inf_replace = float(np.random.randint(100))
assert np.array_equal(NumCpp.nan_to_num(cArray, nan_replace, pos_inf_replace, neg_inf_replace),
np.nan_to_num(data, nan=nan_replace, posinf=pos_inf_replace, neginf=neg_inf_replace))
####################################################################################
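# NaN-injection pattern shared by the nan* tests below: flatten the data, set roughly
# 10% of randomly chosen positions to nan (duplicate indices are possible, so the exact
# count may be slightly lower), reshape back, and compare against NumPy's nan-aware
# counterpart.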
def test_nanargmax():
shapeInput = np.random.randint(10, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nanargmax(cArray, NumCpp.Axis.NONE).item() == np.nanargmax(data)
shapeInput = np.random.randint(10, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanargmax(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.nanargmax(data, axis=0))
shapeInput = np.random.randint(10, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanargmax(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.nanargmax(data, axis=1))
####################################################################################
def test_nanargmin():
shapeInput = np.random.randint(10, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nanargmin(cArray, NumCpp.Axis.NONE).item() == np.nanargmin(data)
shapeInput = np.random.randint(10, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanargmin(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.nanargmin(data, axis=0))
shapeInput = np.random.randint(10, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanargmin(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.nanargmin(data, axis=1))
####################################################################################
def test_nancumprod():
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nancumprod(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.nancumprod(data, axis=None))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nancumprod(cArray, NumCpp.Axis.ROW).getNumpyArray(), np.nancumprod(data, axis=0))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nancumprod(cArray, NumCpp.Axis.COL).getNumpyArray(), np.nancumprod(data, axis=1))
####################################################################################
def test_nancumsum():
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nancumsum(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.nancumsum(data, axis=None))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nancumsum(cArray, NumCpp.Axis.ROW).getNumpyArray(), np.nancumsum(data, axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nancumsum(cArray, NumCpp.Axis.COL).getNumpyArray(), np.nancumsum(data, axis=1))
####################################################################################
def test_nanmax():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nanmax(cArray, NumCpp.Axis.NONE).item() == np.nanmax(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanmax(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.nanmax(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanmax(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.nanmax(data, axis=1))
####################################################################################
def test_nanmean():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nanmean(cArray, NumCpp.Axis.NONE).item() == np.nanmean(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanmean(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.nanmean(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanmean(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.nanmean(data, axis=1))
####################################################################################
def test_nanmedian():
isEven = True
while isEven:
shapeInput = np.random.randint(20, 100, [2, ])
isEven = shapeInput.prod().item() % 2 == 0
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) # noqa
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert (NumCpp.nanmedian(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten().item() ==
np.nanmedian(data, axis=None).item())
# isEven = True
# while isEven:
# shapeInput = np.random.randint(20, 100, [2, ])
# isEven = shapeInput[0].item() % 2 == 0
# shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
# cArray = NumCpp.NdArray(shape)
# data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
# data = data.flatten()
# data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
# data = data.reshape(shapeInput)
# cArray.setArray(data)
# assert np.array_equal(NumCpp.nanmedian(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(),
# np.nanmedian(data, axis=0))
#
# isEven = True
# while isEven:
# shapeInput = np.random.randint(20, 100, [2, ])
# isEven = shapeInput[1].item() % 2 == 0
# shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
# cArray = NumCpp.NdArray(shape)
# data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
# data = data.flatten()
# data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
# data = data.reshape(shapeInput)
# cArray.setArray(data)
# assert np.array_equal(NumCpp.nanmedian(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(),
# np.nanmedian(data, axis=1))
####################################################################################
def test_nanmin():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nanmin(cArray, NumCpp.Axis.NONE).item() == np.nanmin(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanmin(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.nanmin(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanmin(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.nanmin(data, axis=1))
####################################################################################
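# nanpercentile is checked for all five interpolation modes ('lower', 'higher',
# 'nearest', 'midpoint', 'linear') over the flattened array and along each axis; the
# per-axis 'midpoint' and 'linear' checks round to 9 decimals since those modes can
# produce non-integer values.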
def test_nanpercentile():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.NONE, 'lower').item() ==
np.nanpercentile(data, percentile, axis=None, interpolation='lower'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.NONE, 'higher').item() ==
np.nanpercentile(data, percentile, axis=None, interpolation='higher'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.NONE, 'nearest').item() ==
np.nanpercentile(data, percentile, axis=None, interpolation='nearest'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.NONE, 'midpoint').item() ==
np.nanpercentile(data, percentile, axis=None, interpolation='midpoint'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.NONE, 'linear').item() ==
np.nanpercentile(data, percentile, axis=None, interpolation='linear'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.ROW, 'lower').getNumpyArray().flatten(),
np.nanpercentile(data, percentile, axis=0, interpolation='lower'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.ROW, 'higher').getNumpyArray().flatten(),
np.nanpercentile(data, percentile, axis=0, interpolation='higher'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.nanpercentile(cArray,
percentile,
NumCpp.Axis.ROW,
'nearest').getNumpyArray().flatten(),
np.nanpercentile(data, percentile, axis=0, interpolation='nearest'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(
np.round(NumCpp.nanpercentile(cArray,
percentile,
NumCpp.Axis.ROW,
'midpoint').getNumpyArray().flatten(), 9),
np.round(np.nanpercentile(data, percentile, axis=0, interpolation='midpoint'), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(np.round(NumCpp.nanpercentile(cArray,
percentile,
NumCpp.Axis.ROW,
'linear').getNumpyArray().flatten(), 9),
np.round(np.nanpercentile(data, percentile, axis=0, interpolation='linear'), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.COL, 'lower').getNumpyArray().flatten(),
np.nanpercentile(data, percentile, axis=1, interpolation='lower'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.COL, 'higher').getNumpyArray().flatten(),
np.nanpercentile(data, percentile, axis=1, interpolation='higher'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.nanpercentile(cArray,
percentile,
NumCpp.Axis.COL,
'nearest').getNumpyArray().flatten(),
np.nanpercentile(data, percentile, axis=1, interpolation='nearest'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(
np.round(NumCpp.nanpercentile(cArray,
percentile,
NumCpp.Axis.COL,
'midpoint').getNumpyArray().flatten(), 9),
np.round(np.nanpercentile(data, percentile, axis=1, interpolation='midpoint'), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(
np.round(NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.COL, 'linear').getNumpyArray().flatten(), 9),
np.round(np.nanpercentile(data, percentile, axis=1, interpolation='linear'), 9))
####################################################################################
def test_nanprod():
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nanprod(cArray, NumCpp.Axis.NONE).item() == np.nanprod(data, axis=None)
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanprod(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.nanprod(data, axis=0))
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanprod(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), np.nanprod(data, axis=1))
####################################################################################
def test_nans():
shapeInput = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.nansSquare(shapeInput)
assert (cArray.shape[0] == shapeInput and cArray.shape[1] == shapeInput and
cArray.size == shapeInput ** 2 and np.all(np.isnan(cArray)))
shapeInput = np.random.randint(20, 100, [2, ])
cArray = NumCpp.nansRowCol(shapeInput[0].item(), shapeInput[1].item())
assert (cArray.shape[0] == shapeInput[0] and cArray.shape[1] == shapeInput[1] and
cArray.size == shapeInput.prod() and np.all(np.isnan(cArray)))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.nansShape(shape)
assert (cArray.shape[0] == shape.rows and cArray.shape[1] == shape.cols and
cArray.size == shapeInput.prod() and np.all(np.isnan(cArray)))
####################################################################################
def test_nans_like():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.nans_like(cArray1)
assert (cArray2.shape().rows == shape.rows and cArray2.shape().cols == shape.cols and
cArray2.size() == shapeInput.prod() and np.all(np.isnan(cArray2.getNumpyArray())))
####################################################################################
def test_nanstd():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.round(NumCpp.nanstdev(cArray, NumCpp.Axis.NONE).item(), 9) == np.round(np.nanstd(data), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.nanstdev(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 9),
np.round(np.nanstd(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.nanstdev(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 9),
np.round(np.nanstd(data, axis=1), 9))
####################################################################################
def test_nansum():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nansum(cArray, NumCpp.Axis.NONE).item() == np.nansum(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nansum(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.nansum(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nansum(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), np.nansum(data, axis=1))
####################################################################################
def test_nanvar():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.round(NumCpp.nanvar(cArray, NumCpp.Axis.NONE).item(), 8) == np.round(np.nanvar(data), 8)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.nanvar(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 8),
np.round(np.nanvar(data, axis=0), 8))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.nanvar(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 8),
np.round(np.nanvar(data, axis=1), 8))
####################################################################################
def test_nbytes():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
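    # NdArray stores doubles, so each element should account for 8 bytes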
assert NumCpp.nbytes(cArray) == data.size * 8
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
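    # complex<double> elements occupy 16 bytes each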
assert NumCpp.nbytes(cArray) == data.size * 16
####################################################################################
def test_negative():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.negative(cArray).getNumpyArray(), 9),
np.round(np.negative(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.negative(cArray).getNumpyArray(), 9),
np.round(np.negative(data), 9))
####################################################################################
def test_newbyteorderArray():
value = np.random.randint(1, 100, [1, ]).item()
assert (NumCpp.newbyteorderScaler(value, NumCpp.Endian.BIG) ==
np.asarray([value], dtype=np.uint32).newbyteorder().item())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.newbyteorderArray(cArray, NumCpp.Endian.BIG),
data.newbyteorder())
####################################################################################
def test_none():
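    # none() should behave as the logical complement of any(): true only when no element is truthy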
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.none(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.logical_not(np.any(data).item())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.none(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.logical_not(np.any(data).item())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.none(cArray, NumCpp.Axis.ROW).flatten().astype(bool),
np.logical_not(np.any(data, axis=0)))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.none(cArray, NumCpp.Axis.ROW).flatten().astype(bool),
np.logical_not(np.any(data, axis=0)))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.none(cArray, NumCpp.Axis.COL).flatten().astype(bool),
np.logical_not(np.any(data, axis=1)))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.none(cArray, NumCpp.Axis.COL).flatten().astype(bool),
np.logical_not(np.any(data, axis=1)))
####################################################################################
def test_nonzero():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
row, col = np.nonzero(data)
rowC, colC = NumCpp.nonzero(cArray)
assert (np.array_equal(rowC.getNumpyArray().flatten(), row) and
np.array_equal(colC.getNumpyArray().flatten(), col))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
row, col = np.nonzero(data)
rowC, colC = NumCpp.nonzero(cArray)
assert (np.array_equal(rowC.getNumpyArray().flatten(), row) and
np.array_equal(colC.getNumpyArray().flatten(), col))
####################################################################################
def test_norm():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.norm(cArray, NumCpp.Axis.NONE).flatten() == np.linalg.norm(data.flatten())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.norm(cArray, NumCpp.Axis.NONE).item() is not None
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
norms = NumCpp.norm(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten()
allPass = True
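    # Axis.ROW yields one norm per column, so iterate the transpose to compare each column's norm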
for idx, row in enumerate(data.transpose()):
if norms[idx] != np.linalg.norm(row):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
norms = NumCpp.norm(cArray, NumCpp.Axis.COL).getNumpyArray().flatten()
allPass = True
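    # Axis.COL yields one norm per row of the original data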
for idx, row in enumerate(data):
if norms[idx] != np.linalg.norm(row):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
norms = NumCpp.norm(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten()
assert norms is not None
####################################################################################
def test_not_equal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.not_equal(cArray1, cArray2).getNumpyArray(), np.not_equal(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.not_equal(cArray1, cArray2).getNumpyArray(), np.not_equal(data1, data2))
####################################################################################
def test_ones():
shapeInput = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.onesSquare(shapeInput)
assert (cArray.shape[0] == shapeInput and cArray.shape[1] == shapeInput and
cArray.size == shapeInput ** 2 and np.all(cArray == 1))
shapeInput = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.onesSquareComplex(shapeInput)
assert (cArray.shape[0] == shapeInput and cArray.shape[1] == shapeInput and
cArray.size == shapeInput ** 2 and np.all(cArray == complex(1, 0)))
shapeInput = np.random.randint(20, 100, [2, ])
cArray = NumCpp.onesRowCol(shapeInput[0].item(), shapeInput[1].item())
assert (cArray.shape[0] == shapeInput[0] and cArray.shape[1] == shapeInput[1] and
cArray.size == shapeInput.prod() and np.all(cArray == 1))
shapeInput = np.random.randint(20, 100, [2, ])
cArray = NumCpp.onesRowColComplex(shapeInput[0].item(), shapeInput[1].item())
assert (cArray.shape[0] == shapeInput[0] and cArray.shape[1] == shapeInput[1] and
cArray.size == shapeInput.prod() and np.all(cArray == complex(1, 0)))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.onesShape(shape)
assert (cArray.shape[0] == shape.rows and cArray.shape[1] == shape.cols and
cArray.size == shapeInput.prod() and np.all(cArray == 1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.onesShapeComplex(shape)
assert (cArray.shape[0] == shape.rows and cArray.shape[1] == shape.cols and
cArray.size == shapeInput.prod() and np.all(cArray == complex(1, 0)))
####################################################################################
def test_ones_like():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.ones_like(cArray1)
assert (cArray2.shape().rows == shape.rows and cArray2.shape().cols == shape.cols and
cArray2.size() == shapeInput.prod() and np.all(cArray2.getNumpyArray() == 1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.ones_likeComplex(cArray1)
assert (cArray2.shape().rows == shape.rows and cArray2.shape().cols == shape.cols and
cArray2.size() == shapeInput.prod() and np.all(cArray2.getNumpyArray() == complex(1, 0)))
####################################################################################
def test_outer():
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 50, [shape.rows, shape.cols], dtype=np.uint32)
data2 = np.random.randint(1, 50, [shape.rows, shape.cols], dtype=np.uint32)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.outer(cArray1, cArray2), np.outer(data1, data2))
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 50, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 50, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.outer(cArray1, cArray2), np.outer(data1, data2))
####################################################################################
def test_pad():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
padWidth = np.random.randint(1, 10, [1, ]).item()
padValue = np.random.randint(1, 100, [1, ]).item()
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray = NumCpp.NdArray(shape)
cArray.setArray(data)
assert np.array_equal(NumCpp.pad(cArray, padWidth, padValue).getNumpyArray(),
np.pad(data, padWidth, mode='constant', constant_values=padValue))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
padWidth = np.random.randint(1, 10, [1, ]).item()
padValue = np.random.randint(1, 100, [1, ]).item() + 1j * np.random.randint(1, 100, [1, ]).item()
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray = NumCpp.NdArrayComplexDouble(shape)
cArray.setArray(data)
assert np.array_equal(NumCpp.pad(cArray, padWidth, padValue).getNumpyArray(),
np.pad(data, padWidth, mode='constant', constant_values=padValue))
####################################################################################
def test_partition():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
kthElement = np.random.randint(0, shapeInput.prod(), [1, ], dtype=np.uint32).item()
partitionedArray = NumCpp.partition(cArray, kthElement, NumCpp.Axis.NONE).getNumpyArray().flatten()
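    # partition invariant: everything before the kth position is <= the kth element, everything after is >=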
    assert (np.all(partitionedArray[:kthElement] <= partitionedArray[kthElement]) and
            np.all(partitionedArray[kthElement:] >= partitionedArray[kthElement]))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
kthElement = np.random.randint(0, shapeInput.prod(), [1, ], dtype=np.uint32).item()
partitionedArray = NumCpp.partition(cArray, kthElement, NumCpp.Axis.NONE).getNumpyArray().flatten()
    assert (np.all(partitionedArray[:kthElement] <= partitionedArray[kthElement]) and
            np.all(partitionedArray[kthElement:] >= partitionedArray[kthElement]))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
kthElement = np.random.randint(0, shapeInput[0], [1, ], dtype=np.uint32).item()
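    # Axis.ROW partitions each column independently, so transpose to check one column at a time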
partitionedArray = NumCpp.partition(cArray, kthElement, NumCpp.Axis.ROW).getNumpyArray().transpose()
allPass = True
for row in partitionedArray:
        if not (np.all(row[:kthElement] <= row[kthElement]) and
                np.all(row[kthElement:] >= row[kthElement])):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
kthElement = np.random.randint(0, shapeInput[0], [1, ], dtype=np.uint32).item()
partitionedArray = NumCpp.partition(cArray, kthElement, NumCpp.Axis.ROW).getNumpyArray().transpose()
allPass = True
for row in partitionedArray:
        if not (np.all(row[:kthElement] <= row[kthElement]) and
                np.all(row[kthElement:] >= row[kthElement])):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
kthElement = np.random.randint(0, shapeInput[1], [1, ], dtype=np.uint32).item()
partitionedArray = NumCpp.partition(cArray, kthElement, NumCpp.Axis.COL).getNumpyArray()
allPass = True
for row in partitionedArray:
        if not (np.all(row[:kthElement] <= row[kthElement]) and
                np.all(row[kthElement:] >= row[kthElement])):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
kthElement = np.random.randint(0, shapeInput[1], [1, ], dtype=np.uint32).item()
partitionedArray = NumCpp.partition(cArray, kthElement, NumCpp.Axis.COL).getNumpyArray()
allPass = True
for row in partitionedArray:
        if not (np.all(row[:kthElement] <= row[kthElement]) and
                np.all(row[kthElement:] >= row[kthElement])):
allPass = False
break
assert allPass
####################################################################################
def test_percentile():
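    # exercise every interpolation method ('lower', 'higher', 'nearest', 'midpoint', 'linear') for each axis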
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.percentile(cArray, percentile, NumCpp.Axis.NONE, 'lower').item() ==
np.percentile(data, percentile, axis=None, interpolation='lower'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.percentile(cArray, percentile, NumCpp.Axis.NONE, 'higher').item() ==
np.percentile(data, percentile, axis=None, interpolation='higher'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.percentile(cArray, percentile, NumCpp.Axis.NONE, 'nearest').item() ==
np.percentile(data, percentile, axis=None, interpolation='nearest'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.percentile(cArray, percentile, NumCpp.Axis.NONE, 'midpoint').item() ==
np.percentile(data, percentile, axis=None, interpolation='midpoint'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.percentile(cArray, percentile, NumCpp.Axis.NONE, 'linear').item() ==
np.percentile(data, percentile, axis=None, interpolation='linear'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.percentile(cArray, percentile, NumCpp.Axis.ROW, 'lower').getNumpyArray().flatten(),
np.percentile(data, percentile, axis=0, interpolation='lower'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.percentile(cArray, percentile, NumCpp.Axis.ROW, 'higher').getNumpyArray().flatten(),
np.percentile(data, percentile, axis=0, interpolation='higher'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.percentile(cArray, percentile, NumCpp.Axis.ROW, 'nearest').getNumpyArray().flatten(),
np.percentile(data, percentile, axis=0, interpolation='nearest'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(np.round(NumCpp.percentile(cArray,
percentile,
NumCpp.Axis.ROW,
'midpoint').getNumpyArray().flatten(), 9),
np.round(np.percentile(data, percentile, axis=0, interpolation='midpoint'), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(np.round(NumCpp.percentile(cArray,
percentile,
NumCpp.Axis.ROW,
'linear').getNumpyArray().flatten(), 9),
np.round(np.percentile(data, percentile, axis=0, interpolation='linear'), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.percentile(cArray, percentile, NumCpp.Axis.COL, 'lower').getNumpyArray().flatten(),
np.percentile(data, percentile, axis=1, interpolation='lower'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.percentile(cArray, percentile, NumCpp.Axis.COL, 'higher').getNumpyArray().flatten(),
np.percentile(data, percentile, axis=1, interpolation='higher'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.percentile(cArray, percentile, NumCpp.Axis.COL, 'nearest').getNumpyArray().flatten(),
np.percentile(data, percentile, axis=1, interpolation='nearest'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(np.round(NumCpp.percentile(cArray,
percentile,
NumCpp.Axis.COL,
'midpoint').getNumpyArray().flatten(), 9),
np.round(np.percentile(data, percentile, axis=1, interpolation='midpoint'), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(np.round(NumCpp.percentile(cArray,
percentile,
NumCpp.Axis.COL,
'linear').getNumpyArray().flatten(), 9),
np.round(np.percentile(data, percentile, axis=1, interpolation='linear'), 9))
####################################################################################
def test_polar():
components = np.random.rand(2).astype(np.double)
assert NumCpp.polarScaler(components[0], components[1])
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
magArray = NumCpp.NdArray(shape)
angleArray = NumCpp.NdArray(shape)
mag = np.random.rand(shape.rows, shape.cols)
angle = np.random.rand(shape.rows, shape.cols)
magArray.setArray(mag)
angleArray.setArray(angle)
assert NumCpp.polarArray(magArray, angleArray) is not None
####################################################################################
def test_power():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
exponent = np.random.randint(0, 5, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.powerArrayScaler(cArray, exponent), 9),
np.round(np.power(data, exponent), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
exponent = np.random.randint(0, 5, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.powerArrayScaler(cArray, exponent), 9),
np.round(np.power(data, exponent), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
cExponents = NumCpp.NdArrayUInt8(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
exponents = np.random.randint(0, 5, [shape.rows, shape.cols]).astype(np.uint8)
cArray.setArray(data)
cExponents.setArray(exponents)
assert np.array_equal(np.round(NumCpp.powerArrayArray(cArray, cExponents), 9),
np.round(np.power(data, exponents), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
cExponents = NumCpp.NdArrayUInt8(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
exponents = np.random.randint(0, 5, [shape.rows, shape.cols]).astype(np.uint8)
cArray.setArray(data)
cExponents.setArray(exponents)
assert np.array_equal(np.round(NumCpp.powerArrayArray(cArray, cExponents), 9),
np.round(np.power(data, exponents), 9))
####################################################################################
def test_powerf():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
exponent = np.random.rand(1).item() * 3
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.powerfArrayScaler(cArray, exponent), 9),
np.round(np.power(data, exponent), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
exponent = np.random.rand(1).item() * 3
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.powerfArrayScaler(cArray, exponent), 9),
np.round(np.power(data, exponent), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
cExponents = NumCpp.NdArray(shape)
data = np.random.randint(0, 20, [shape.rows, shape.cols])
exponents = np.random.rand(shape.rows, shape.cols) * 3
cArray.setArray(data)
cExponents.setArray(exponents)
assert np.array_equal(np.round(NumCpp.powerfArrayArray(cArray, cExponents), 9),
np.round(np.power(data, exponents), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
cExponents = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
exponents = np.random.rand(shape.rows, shape.cols) * 3 + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
cExponents.setArray(exponents)
assert np.array_equal(np.round(NumCpp.powerfArrayArray(cArray, cExponents), 9),
np.round(np.power(data, exponents), 9))
####################################################################################
def test_prod():
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert NumCpp.prod(cArray, NumCpp.Axis.NONE).item() == data.prod()
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 15, [shape.rows, shape.cols])
imag = np.random.randint(1, 15, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.prod(cArray, NumCpp.Axis.NONE).item() == data.prod()
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(NumCpp.prod(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), data.prod(axis=0))
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 15, [shape.rows, shape.cols])
imag = np.random.randint(1, 15, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.prod(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), data.prod(axis=0))
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(NumCpp.prod(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), data.prod(axis=1))
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 15, [shape.rows, shape.cols])
imag = np.random.randint(1, 15, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.prod(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), data.prod(axis=1))
####################################################################################
def test_proj():
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert NumCpp.projScaler(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cData = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cData.setArray(data)
assert NumCpp.projArray(cData) is not None
####################################################################################
def test_ptp():
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.ptp(cArray, NumCpp.Axis.NONE).getNumpyArray().item() == data.ptp()
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.ptp(cArray, NumCpp.Axis.NONE).getNumpyArray().item() == data.ptp()
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.ptp(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten().astype(np.uint32),
data.ptp(axis=0))
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.ptp(cArray, NumCpp.Axis.COL).getNumpyArray().flatten().astype(np.uint32),
data.ptp(axis=1))
####################################################################################
def test_put():
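    # put() assigns a scalar value, then an array of values, at the given flat indices (mirrors ndarray.put)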
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
numIndices = np.random.randint(0, shape.size())
indices = np.asarray(range(numIndices), np.uint32)
value = np.random.randint(1, 500)
cIndices = NumCpp.NdArrayUInt32(1, numIndices)
cIndices.setArray(indices)
NumCpp.put(cArray, cIndices, value)
data.put(indices, value)
assert np.array_equal(cArray.getNumpyArray(), data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
numIndices = np.random.randint(0, shape.size())
indices = np.asarray(range(numIndices), dtype=np.uint32)
values = np.random.randint(1, 500, [numIndices, ])
cIndices = NumCpp.NdArrayUInt32(1, numIndices)
cValues = NumCpp.NdArray(1, numIndices)
cIndices.setArray(indices)
cValues.setArray(values)
NumCpp.put(cArray, cIndices, cValues)
data.put(indices, values)
assert np.array_equal(cArray.getNumpyArray(), data)
####################################################################################
def test_rad2deg():
value = np.abs(np.random.rand(1).item()) * 2 * np.pi
assert np.round(NumCpp.rad2degScaler(value), 9) == np.round(np.rad2deg(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 2 * np.pi
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.rad2degArray(cArray), 9), np.round(np.rad2deg(data), 9))
####################################################################################
def test_radians():
value = np.abs(np.random.rand(1).item()) * 360
assert np.round(NumCpp.radiansScaler(value), 9) == np.round(np.radians(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 360
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.radiansArray(cArray), 9), np.round(np.radians(data), 9))
####################################################################################
def test_ravel():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
cArray2 = NumCpp.ravel(cArray)
assert np.array_equal(cArray2.getNumpyArray().flatten(), np.ravel(data))
####################################################################################
def test_real():
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.realScaler(value), 9) == np.round(np.real(value), 9) # noqa
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.realArray(cArray), 9), np.round(np.real(data), 9))
####################################################################################
def test_reciprocal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.reciprocal(cArray), 9), np.round(np.reciprocal(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols]).astype(np.double)
imag = np.random.randint(1, 100, [shape.rows, shape.cols]).astype(np.double)
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.reciprocal(cArray), 9), np.round(np.reciprocal(data), 9))
####################################################################################
def test_remainder():
# numpy and cmath remainders are calculated differently, so convert for testing purposes
values = np.random.rand(2) * 100
values = np.sort(values)
res = NumCpp.remainderScaler(values[1].item(), values[0].item())
if res < 0:
res += values[0].item()
assert np.round(res, 9) == np.round(np.remainder(values[1], values[0]), 9)
# numpy and cmath remainders are calculated differently, so convert for testing purposes
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.rand(shape.rows, shape.cols) * 100 + 10
data2 = data1 - np.random.rand(shape.rows, shape.cols) * 10
cArray1.setArray(data1)
cArray2.setArray(data2)
res = NumCpp.remainderArray(cArray1, cArray2)
res[res < 0] = res[res < 0] + data2[res < 0]
assert np.array_equal(np.round(res, 9), np.round(np.remainder(data1, data2), 9))
####################################################################################
def test_replace():
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
oldValue = np.random.randint(1, 100, 1).item()
newValue = np.random.randint(1, 100, 1).item()
dataCopy = data.copy()
dataCopy[dataCopy == oldValue] = newValue
assert np.array_equal(NumCpp.replace(cArray, oldValue, newValue), dataCopy)
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
oldValue = np.random.randint(1, 100, 1).item() + 1j * np.random.randint(1, 100, 1).item()
newValue = np.random.randint(1, 100, 1).item() + 1j * np.random.randint(1, 100, 1).item()
dataCopy = data.copy()
dataCopy[dataCopy == oldValue] = newValue
assert np.array_equal(NumCpp.replace(cArray, oldValue, newValue), dataCopy)
####################################################################################
def test_reshape():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
newShape = data.size
NumCpp.reshape(cArray, newShape)
assert np.array_equal(cArray.getNumpyArray(), data.reshape(1, newShape))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
newShape = NumCpp.Shape(shapeInput[1].item(), shapeInput[0].item())
NumCpp.reshape(cArray, newShape)
assert np.array_equal(cArray.getNumpyArray(), data.reshape(shapeInput[::-1]))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
newShape = NumCpp.Shape(shapeInput[1].item(), shapeInput[0].item())
NumCpp.reshapeList(cArray, newShape)
assert np.array_equal(cArray.getNumpyArray(), data.reshape(shapeInput[::-1]))
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
newNumCols = np.random.choice(np.array(list(factors(data.size))), 1).item()
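    # a dimension of -1 is inferred from the total size, as in numpy's reshape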
NumCpp.reshape(cArray, -1, newNumCols)
assert np.array_equal(cArray.getNumpyArray(), data.reshape(-1, newNumCols))
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
newNumRows = np.random.choice(np.array(list(factors(data.size))), 1).item()
NumCpp.reshape(cArray, newNumRows, -1)
assert np.array_equal(cArray.getNumpyArray(), data.reshape(newNumRows, -1))
####################################################################################
def test_resize():
shapeInput1 = np.random.randint(1, 100, [2, ])
shapeInput2 = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item())
shape2 = NumCpp.Shape(shapeInput2[0].item(), shapeInput2[1].item())
cArray = NumCpp.NdArray(shape1)
data = np.random.randint(1, 100, [shape1.rows, shape1.cols], dtype=np.uint32)
cArray.setArray(data)
NumCpp.resizeFast(cArray, shape2)
assert cArray.shape().rows == shape2.rows
assert cArray.shape().cols == shape2.cols
shapeInput1 = np.random.randint(1, 100, [2, ])
shapeInput2 = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item())
shape2 = NumCpp.Shape(shapeInput2[0].item(), shapeInput2[1].item())
cArray = NumCpp.NdArray(shape1)
data = np.random.randint(1, 100, [shape1.rows, shape1.cols], dtype=np.uint32)
cArray.setArray(data)
NumCpp.resizeSlow(cArray, shape2)
assert cArray.shape().rows == shape2.rows
assert cArray.shape().cols == shape2.cols
####################################################################################
def test_right_shift():
shapeInput = np.random.randint(20, 100, [2, ])
bitsToshift = np.random.randint(1, 32, [1, ]).item()
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(1, np.iinfo(np.uint32).max, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.right_shift(cArray, bitsToshift).getNumpyArray(),
np.right_shift(data, bitsToshift))
####################################################################################
def test_rint():
value = np.abs(np.random.rand(1).item()) * 2 * np.pi
assert NumCpp.rintScaler(value) == np.rint(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 2 * np.pi
cArray.setArray(data)
assert np.array_equal(NumCpp.rintArray(cArray), np.rint(data))
####################################################################################
def test_rms():
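    # root-mean-square: sqrt(mean(x**2)), with numpy used as the reference computation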
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert (np.round(NumCpp.rms(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten().item(), 9) ==
np.round(np.sqrt(np.mean(np.square(data), axis=None)).item(), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert (np.round(NumCpp.rms(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten().item(), 9) ==
np.round(np.sqrt(np.mean(np.square(data), axis=None)).item(), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.rms(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 9),
np.round(np.sqrt(np.mean(np.square(data), axis=0)), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.rms(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 9),
np.round(np.sqrt(np.mean(np.square(data), axis=0)), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.rms(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 9),
np.round(np.sqrt(np.mean(np.square(data), axis=1)), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.rms(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 9),
np.round(np.sqrt(np.mean(np.square(data), axis=1)), 9))
####################################################################################
def test_roll():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
amount = np.random.randint(0, data.size, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.roll(cArray, amount, NumCpp.Axis.NONE).getNumpyArray(),
np.roll(data, amount, axis=None))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
amount = np.random.randint(0, shape.cols, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.roll(cArray, amount, NumCpp.Axis.ROW).getNumpyArray(),
np.roll(data, amount, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
amount = np.random.randint(0, shape.rows, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.roll(cArray, amount, NumCpp.Axis.COL).getNumpyArray(),
np.roll(data, amount, axis=1))
####################################################################################
def test_rot90():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
amount = np.random.randint(1, 4, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.rot90(cArray, amount).getNumpyArray(), np.rot90(data, amount))
####################################################################################
def test_round():
value = np.abs(np.random.rand(1).item()) * 2 * np.pi
assert NumCpp.roundScaler(value, 10) == np.round(value, 10)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 2 * np.pi
cArray.setArray(data)
assert np.array_equal(NumCpp.roundArray(cArray, 9), np.round(data, 9))
####################################################################################
def test_row_stack():
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
shape3 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
shape4 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.row_stack(cArray1, cArray2, cArray3, cArray4),
np.row_stack([data1, data2, data3, data4]))
####################################################################################
def test_setdiff1d():
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt32(shape)
cArray2 = NumCpp.NdArrayUInt32(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
data2 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.setdiff1d(cArray1, cArray2).getNumpyArray().flatten(),
np.setdiff1d(data1, data2))
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.setdiff1d(cArray1, cArray2).getNumpyArray().flatten(),
np.setdiff1d(data1, data2))
####################################################################################
def test_shape():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
assert cArray.shape().rows == shape.rows and cArray.shape().cols == shape.cols
####################################################################################
def test_sign():
value = np.random.randn(1).item() * 100
assert NumCpp.signScaler(value) == np.sign(value)
value = np.random.randn(1).item() * 100 + 1j * np.random.randn(1).item() * 100
assert NumCpp.signScaler(value) == np.sign(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
assert np.array_equal(NumCpp.signArray(cArray), np.sign(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.signArray(cArray), np.sign(data))
####################################################################################
def test_signbit():
value = np.random.randn(1).item() * 100
assert NumCpp.signbitScaler(value) == np.signbit(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
assert np.array_equal(NumCpp.signbitArray(cArray), np.signbit(data))
####################################################################################
def test_sin():
value = np.random.randn(1).item()
assert np.round(NumCpp.sinScaler(value), 9) == np.round(np.sin(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.sinScaler(value), 9) == np.round(np.sin(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.sinArray(cArray), 9), np.round(np.sin(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.sinArray(cArray), 9), np.round(np.sin(data), 9))
####################################################################################
def test_sinc():
value = np.random.randn(1)
assert np.round(NumCpp.sincScaler(value.item()), 9) == np.round(np.sinc(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.sincArray(cArray), 9), np.round(np.sinc(data), 9))
####################################################################################
def test_sinh():
value = np.random.randn(1).item()
assert np.round(NumCpp.sinhScaler(value), 9) == np.round(np.sinh(value), 9)
value = np.random.randn(1).item() + 1j * np.random.randn(1).item()
assert np.round(NumCpp.sinhScaler(value), 9) == np.round(np.sinh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.sinhArray(cArray), 9), np.round(np.sinh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.randn(shape.rows, shape.cols) + 1j * np.random.randn(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.sinhArray(cArray), 9), np.round(np.sinh(data), 9))
####################################################################################
def test_size():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
assert cArray.size() == shapeInput.prod().item()
####################################################################################
def test_sort():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
d = data.flatten()
d.sort()
assert np.array_equal(NumCpp.sort(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten(), d)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
d = data.flatten()
d.sort()
assert np.array_equal(NumCpp.sort(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten(), d)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
pSorted = np.sort(data, axis=0)
cSorted = NumCpp.sort(cArray, NumCpp.Axis.ROW).getNumpyArray()
assert np.array_equal(cSorted, pSorted)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
pSorted = np.sort(data, axis=0)
cSorted = NumCpp.sort(cArray, NumCpp.Axis.ROW).getNumpyArray()
assert np.array_equal(cSorted, pSorted)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
pSorted = np.sort(data, axis=1)
cSorted = NumCpp.sort(cArray, NumCpp.Axis.COL).getNumpyArray()
assert np.array_equal(cSorted, pSorted)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
pSorted = np.sort(data, axis=1)
cSorted = NumCpp.sort(cArray, NumCpp.Axis.COL).getNumpyArray()
assert np.array_equal(cSorted, pSorted)
####################################################################################
def test_sqrt():
value = np.random.randint(1, 100, [1, ]).item()
assert np.round(NumCpp.sqrtScaler(value), 9) == np.round(np.sqrt(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.sqrtScaler(value), 9) == np.round(np.sqrt(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.sqrtArray(cArray), 9), np.round(np.sqrt(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.sqrtArray(cArray), 9), np.round(np.sqrt(data), 9))
####################################################################################
def test_square():
value = np.random.randint(1, 100, [1, ]).item()
assert np.round(NumCpp.squareScaler(value), 9) == np.round(np.square(value), 9)
value = np.random.randint(1, 100, [1, ]).item() + 1j * np.random.randint(1, 100, [1, ]).item()
assert np.round(NumCpp.squareScaler(value), 9) == np.round(np.square(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.squareArray(cArray), 9), np.round(np.square(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.squareArray(cArray), 9), np.round(np.square(data), 9))
####################################################################################
def test_stack():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
cArray3 = NumCpp.NdArray(shape)
cArray4 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data3 = np.random.randint(1, 100, [shape.rows, shape.cols])
data4 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.stack(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.ROW),
np.vstack([data1, data2, data3, data4]))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
cArray3 = NumCpp.NdArray(shape)
cArray4 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data3 = np.random.randint(1, 100, [shape.rows, shape.cols])
data4 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.stack(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.COL),
np.hstack([data1, data2, data3, data4]))
####################################################################################
def test_stdev():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.round(NumCpp.stdev(cArray, NumCpp.Axis.NONE).item(), 9) == np.round(np.std(data), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.stdev(cArray, NumCpp.Axis.NONE).item() is not None
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.stdev(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 9),
np.round(np.std(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.stdev(cArray, NumCpp.Axis.ROW) is not None
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.stdev(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 9),
np.round(np.std(data, axis=1), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.stdev(cArray, NumCpp.Axis.COL) is not None
####################################################################################
def test_subtract():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.subtract(cArray1, cArray2), data1 - data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
"""
By default GDAL uses Pillow, and Pillow bundles its own version of libtiff while GDAL uses the system one.
As a result we get segfaults on some TIFFs in Jupyter notebooks. This may be resolved by:
pip2 uninstall Pillow
pip2 install --no-binary :all: Pillow
pip3 uninstall Pillow
pip3 install --no-binary :all: Pillow
brew uninstall gdal
brew install gdal
#Homebrew: gdal 2.4.4_4 is already installed
pip3 install GDAL
pip3 uninstall psycopg2
pip3 install psycopg2
pip3.7 install vtk
pip3.7 install rasterio
"""
from osgeo import osr, gdal, ogr
import os, sys, ctypes
from numpy.ctypeslib import ndpointer
import numpy as np
import xarray as xr
import pandas as pd
# it's similar to xr.open_rasterio() but attributes are different
# function to load source GeoTIF image
def gdal_raster(src_filename, NoData=None):
ds = gdal.Open(src_filename)
datains = []
NoDatas = []
for bandidx in range(ds.RasterCount):
# read NoData value from raster (if possible)
band = ds.GetRasterBand(bandidx+1)
datain = np.array(band.ReadAsArray())
if NoData is None:
nodatain = band.GetNoDataValue()
if nodatain is not None and datain.dtype in ['float32','float64']:
NoData = nodatain
elif nodatain is not None:
# gdal returns float NoData value for integer bands
NoData = int(nodatain) if int(nodatain) == nodatain else nodatain
else:
NoData = 0
datains.append(datain)
NoDatas.append(NoData)
if len(datains) == 1:
NoDatas = NoDatas[0]
raster = xr.DataArray(datains[0],
coords=[range(ds.RasterYSize),range(ds.RasterXSize)],
dims=['y','x'])
else:
if np.all(np.array(NoDatas) == NoDatas[0]):
NoDatas = NoDatas[0]
else:
NoDatas = np.array(NoDatas)
raster = xr.DataArray(datains,
coords=[range(ds.RasterCount),range(ds.RasterYSize),range(ds.RasterXSize)],
dims=['band','y','x'])
wkt = ds.GetProjection()
srs = osr.SpatialReference()
srs.ImportFromWkt(wkt)
if 'EPSG' == srs.GetAttrValue("AUTHORITY", 0):
epsg = srs.GetAttrValue("AUTHORITY", 1)
else:
epsg = ''
ulx, xres, xskew, uly, yskew, yres = ds.GetGeoTransform()
lrx = ulx + (ds.RasterXSize - 1) * xres
lry = uly + (ds.RasterYSize - 1) * yres
raster['y'] = uly + yres*(raster.y.values + 0.5)
raster['x'] = ulx + xres*(raster.x.values + 0.5)
raster.attrs['nodata'] = NoDatas
raster.attrs['ulx'] = ulx
raster.attrs['xres'] = xres
raster.attrs['xskew'] = xskew
raster.attrs['uly'] = uly
raster.attrs['yskew'] = yskew
raster.attrs['yres'] = yres
raster.attrs['lrx'] = lrx
raster.attrs['lry'] = lry
raster.attrs['spatial_ref'] = wkt
raster.attrs['epsg'] = epsg
return raster
"""
raster = gdal_raster("IMAGE_HH_SRA_wide_001.tif")
raster
<xarray.DataArray (y: 17366, x: 20633)>
array([[0, 0, 0, ..., 0, 0, 0],
[0, 0, 0, ..., 0, 0, 0],
[0, 0, 0, ..., 0, 0, 0],
...,
[0, 0, 0, ..., 0, 0, 0],
[0, 0, 0, ..., 0, 0, 0],
[0, 0, 0, ..., 0, 0, 0]], dtype=uint16)
Coordinates:
* y (y) float64 4.66e+05 4.66e+05 4.66e+05 4.66e+05 4.659e+05 ...
* x (x) float64 3.69e+05 3.69e+05 3.69e+05 3.69e+05 3.691e+05 ...
Attributes:
nodata: 0
ulx: 368992.5
xres: 15.0
xskew: 0.0
uly: 466007.5
yskew: 0.0
yres: -15.0
lrx: 678487.5
lry: 205517.5
"""
# function to load 2D/3D points, lines, polygons from shapefile
# see output "proj" attribute to check projection
def ogr_vector(shapefile):
'Given a shapefile path, return a list of 3D points in GIS coordinates'
shapeData = ogr.Open(shapefile)
if not shapeData:
raise Exception('The shapefile is invalid')
# Make sure there is exactly one layer
if shapeData.GetLayerCount() != 1:
raise Exception('The shapefile must have exactly one layer')
# Get the first layer
layer = shapeData.GetLayer()
# get all field names
layerDefinition = layer.GetLayerDefn()
fieldnames = []
for i in range(layerDefinition.GetFieldCount()):
fieldname = layerDefinition.GetFieldDefn(i).GetName()
fieldnames.append(fieldname)
# process all features in the layer
points = []
# For each point,
for index in range(layer.GetFeatureCount()):
feature = layer.GetFeature(index)
geometry = feature.GetGeometryRef()
if geometry is None:
continue
gtype = geometry.GetGeometryType()
fields = {}
for fieldname in fieldnames:
fields[fieldname] = feature.GetField(fieldname)
#print fieldname, feature.GetField(fieldname)
if gtype in [ogr.wkbPoint25D, ogr.wkbPoint]:
pointCoordinates = dict(x=geometry.GetX(), y=geometry.GetY(), z=geometry.GetZ())
points.append(dict(pointCoordinates,**fields))
elif gtype in [ogr.wkbLineString, ogr.wkbLineString25D]:
for point in range(geometry.GetPointCount()):
pointCoordinates = dict(x=geometry.GetX(point), y=geometry.GetY(point), z=geometry.GetZ(point))
points.append(dict(pointCoordinates,**fields))
elif gtype in [ogr.wkbPolygon, ogr.wkbPolygon25D]:
# extract boundary box
(minX, maxX, minY, maxY, minZ, maxZ) = geometry.GetEnvelope3D()
pointCoordinates = dict(x=minX, y=minY, z=minZ)
points.append(dict(pointCoordinates,**fields))
pointCoordinates = dict(x=maxX, y=maxY, z=maxZ)
points.append(dict(pointCoordinates,**fields))
else:
raise Exception('This module can only load points, lines and polygons')
feature.Destroy()
# Get spatial reference as proj4
if layer.GetSpatialRef() is None:
proj4 = ''
else:
proj4 = layer.GetSpatialRef().ExportToProj4()
shapeData.Destroy()
#points = np.array(points)
#df = pd.DataFrame({'x': points[:,0], 'y': points[:,1], 'z': points[:,2]})
df = pd.DataFrame(points)
# add "proj" attribute to output dataframe
df.proj4 = proj4
return df
"""
df = ogr_vector("test_points/test_points.shp")
df.head()
Id gsAttrib x y z
0 0 0.040432 469827.964459 390884.634456 0.040432
1 1 0.434915 470083.763310 390884.634456 0.434915
2 2 0.758500 470339.562162 390884.634456 0.758500
3 3 0.488747 470595.361013 390884.634456 0.488747
4 4 0.945799 470851.159865 390884.634456 0.945799
"""
# cell center (0.5, 0.5) should be pixel (0,0) but not rounded (1,1)
def geomed_round(arr):
#return np.array([ (round(x,0)-1 if (x % 1 == 0.5) else round(x,0) ) for x in arr ]).astype(int)
return np.array(arr).astype(int)
# main geomed library function for statistics calculations
def geomed(lib, raster, grid, radius_min, radius_max, gridded=False, scale_factor=0.707):
# build mask of input points
_grid = grid.copy()
# use zero surface if z is not defined
if not 'z' in _grid:
_grid['z'] = 0
# prepare attributes
if 'nodata' not in raster:
raster.attrs['nodata'] = np.nan
# see also raster.attrs['res']
if 'transform' in raster.attrs:
raster.attrs['ulx'] = raster.attrs['transform'][2]
#float(raster.x.min()) - raster.attrs['transform'][0]/2
raster.attrs['xres'] = raster.attrs['transform'][0]
raster.attrs['lrx'] = raster.attrs['transform'][2]+raster.attrs['transform'][0]*raster.x.size
#float(raster.x.max()) + raster.attrs['transform'][0]/2
raster.attrs['yres'] = raster.attrs['transform'][4]
raster.attrs['uly'] = raster.attrs['transform'][5]
#float(raster.y.max()) - raster.attrs['transform'][4]/2
raster.attrs['lry'] = raster.attrs['transform'][5]+raster.attrs['transform'][4]*raster.y.size
#float(raster.y.min()) + raster.attrs['transform'][4]/2
if gridded:
mask = xr.Dataset.from_dataframe(_grid.set_index(['y','x']))
mask['pixelx'] = geomed_round((mask.x - raster.ulx)/raster.xres)
mask['pixely'] = geomed_round((mask.y - raster.uly)/raster.yres)
# use zero surface depth instead of missed values
mask.z.values = mask.z.fillna(0)
else:
_grid['pixelx'] = geomed_round((_grid.x - raster.ulx)/raster.xres)
_grid['pixely'] = geomed_round((_grid.y - raster.uly)/raster.yres)
mask = xr.Dataset.from_dataframe(_grid)
del _grid
if abs(np.round(raster.xres)) != abs(np.round(raster.yres)):
raise Exception('The raster pixel x and pixel y resolutions must be ± equal')
# define function to get stats count & names
pygeomed_stats = lib.pygeomed_stats
pygeomed_stat = lib.pygeomed_stat
pygeomed_stat.restype = ctypes.c_char_p
# define function to calculate focal statistics
pygeomed = lib.pygeomed
pygeomed.argtypes = [ndpointer(ctypes.c_float, flags="C_CONTIGUOUS"),
ctypes.c_uint32,
ctypes.c_uint32,
ndpointer(ctypes.c_int32, flags="C_CONTIGUOUS"),
ndpointer(ctypes.c_int32,flags="C_CONTIGUOUS"),
ndpointer(ctypes.c_int32,flags="C_CONTIGUOUS"),
ctypes.c_uint32,
ctypes.c_uint32,
ctypes.c_uint32,
ctypes.c_float,
ndpointer(ctypes.c_float, flags="C_CONTIGUOUS")]
pygeomed.restype = None
# prepare input points mask for calculation function
if gridded:
mask_length = len(mask.pixelx)*len(mask.pixely)
x, y = np.meshgrid(mask.pixelx, mask.pixely)
x = (x).reshape((-1,mask_length))
y = (y).reshape((-1,mask_length))
else:
mask_length = len(mask.pixelx)
x = mask.pixelx.values
y = mask.pixely.values
z = mask.z.values.reshape((-1,mask_length))
z = np.round(z/(abs(raster.xres)*0.7)).astype(int)
zmax = int(np.max(z))
# create output buffer for calculation function
dataout = np.empty((mask_length,pygeomed_stats(),(radius_max-radius_min+1)),dtype=np.float32)
# prepare source raster for calculation function
datain = raster.values.astype(ctypes.c_float)
# call calculation function
pygeomed(datain, ctypes.c_uint32(raster.shape[1]), ctypes.c_uint32(raster.shape[0]),
x.astype(ctypes.c_int32),y.astype(ctypes.c_int32),z.astype(ctypes.c_int32),ctypes.c_uint32(mask_length),
ctypes.c_uint32(radius_min),ctypes.c_uint32(radius_max),ctypes.c_float(raster.nodata),
dataout)
# prepared buffer for source raster is not required later
del datain
# define data variables for NetCDF dataset
statnames = []
datavars = {}
if gridded:
dataout = dataout.reshape((pygeomed_stats(),(radius_max-radius_min+1),len(mask.y),len(mask.x)))
dims = ['z','y','x']
else:
dataout = dataout.reshape((pygeomed_stats(),(radius_max-radius_min+1),mask_length))
dims = ['z','l']
datavars['y'] = (['l'],mask.y)
datavars['x'] = (['l'],mask.x)
datavars['surface'] = (['l'],mask.z)
for statidx in range(0,pygeomed_stats()):
if sys.version_info >= (3, 0):
statname = "".join(map(chr, pygeomed_stat(statidx)))
else:
statname = pygeomed_stat(statidx)
datavars[statname] = (dims,dataout[statidx,:,:])
del dataout
# build NetCDF dataset
if gridded:
ds = xr.Dataset(datavars,
coords={
'surface': mask.z,
'z': np.arange(radius_min,radius_max+1)[::-1]
}
)
else:
ds = xr.Dataset(datavars,
coords={
'l': 1.*np.arange(0,mask_length),
'z': np.arange(radius_min,radius_max+1)[::-1]
}
)
# change lat/lon variables to coordinates
ds.coords['y'] = ds.data_vars['y']
ds.coords['x'] = ds.data_vars['x']
ds.coords['surface']= ds.data_vars['surface']
# length per profile
ds.l.values[1:] = np.cumsum(np.sqrt(np.diff(ds.y.values)**2 + np.diff(ds.x.values)**2))
del datavars
# set real depth (negative)
ds['z'] = (scale_factor*abs(raster.xres))*(zmax-ds.z.values)
# add projection information from source raster to NetCDF dataset
epsg=np.int32(raster.epsg if 'epsg' in raster and raster.epsg is not None and raster.epsg != '' else 0)
ds.attrs['epsg'] = epsg
ds['projection']=''
if 'spatial_ref' in raster.attrs:
ds.projection.attrs['spatial_ref'] = raster.attrs['spatial_ref']
ds.coords['projection'] = ds.data_vars['projection']
for datavar in ds.data_vars:
ds[datavar].attrs = {'grid_mapping': 'projection', 'epsg': epsg}
# return NetCDF dataset
return ds
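"""
Example usage of geomed(): a minimal sketch, in which the shared library path, file names
and radii below are assumptions, not part of the library:
lib = ctypes.cdll.LoadLibrary('./libgeomed.so')
raster = gdal_raster('dem.tif')
grid = ogr_vector('profile/profile.shp')
ds = geomed(lib, raster, grid, radius_min=1, radius_max=50, gridded=False)
ds.to_netcdf('profile_stats.nc')
"""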
# libraries to work with PostgreSQL database
import psycopg2
# https://stackoverflow.com/questions/11914472/stringio-in-python3
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
def ds2postgis(conn, ds, table):
# PostgreSQL cursor to execute SQL commands
cur = conn.cursor()
cur.execute("""
DROP TABLE IF EXISTS """ + table + """;
CREATE TABLE """ + table + """ (
z double precision,
y double precision,
x double precision,
""" + ','.join([x + ' double precision' for x in ds.data_vars]) + """
)
WITH OIDS;
""")
# process dataset
data = np.array([ds[s].values for s in ds.data_vars])
zvals = ds.z.values
yvals = ds.y.values
xvals = ds.x.values
def _ds2postgis(datastring):
# Convert string to file
pg_input = StringIO( "\n".join(datastring) )
pg_input.seek(0)
# Load CSV data to PostgreSQL
cur.copy_expert("COPY " + table + " FROM STDIN DELIMITER AS ',' NULL 'nan'", pg_input)
if 'l' in ds.coords:
# 1D & 2D
lvals = ds.l.values
# build CSV datastring
datastring = []
for lidx,l in enumerate(lvals):
for zidx, z in enumerate(zvals):
line = ','.join([str(v) for v in (z,yvals[lidx],xvals[lidx])] + [str(v) for v in data[:,zidx,lidx]])
datastring.append( line )
_ds2postgis(datastring)
else:
# 3D
for zidx, z in enumerate(zvals):
# build CSV datastring
datastring = []
for yidx,y in enumerate(yvals):
for xidx,x in enumerate(xvals):
line = ','.join([str(v) for v in (z,y,x)] + [str(v) for v in data[:,zidx,yidx,xidx]])
datastring.append( line )
_ds2postgis(datastring)
# Add spatial column to the table
cur.execute("ALTER TABLE " + table + " ADD COLUMN geom GEOMETRY;")
cur.execute("UPDATE " + table + " SET geom=ST_SetSRID(ST_Point(x,y)," + str(ds.epsg.values) + ");")
conn.commit()
cur.close()
"""
# Save as PostGIS wide tables
import psycopg2
# String to connect to PostgreSQL database
connstring = "dbname='mbg' user='mbg' host='localhost' password=''"
# Connect to PostgreSQL
conn = psycopg2.connect(connstring)
ds2postgis(conn, ds1d, 'ds1d')
# Retrieve saved data from PostgreSQL
df1d = pd.read_sql_query("SELECT oid, * FROM ds1d ORDER BY oid LIMIT 10", conn, coerce_float=True)
df1d.head()
ds2postgis(conn, ds2d, 'ds2d')
# Retrieve saved data from PostgreSQL
df2d = pd.read_sql_query("SELECT oid, * FROM ds2d ORDER BY oid LIMIT 10", conn, coerce_float=True)
df2d.head()
ds2postgis(conn, ds3d, 'ds3d')
# Retrieve saved data from PostgreSQL
df3d = pd.read_sql_query("SELECT oid, * FROM ds3d ORDER BY oid LIMIT 10", conn, coerce_float=True)
df3d.head()
"""
#q = [25,75]
def ds_percentile(ds, q):
ds_q23 = ds.copy(deep=True)
for stat in ds.data_vars:
if stat == 'orig':
continue
pcnt = np.nanpercentile(ds_q23[stat].values.reshape(-1),q)
ds_q23[stat].values = np.clip(ds_q23[stat].values,pcnt[0],pcnt[1])
return ds_q23
# Z-Minus
#q = [25,75]
def ds_minus(ds, q=None):
ds_minus = ds.copy(deep=True)
for stat in ds.data_vars:
if stat == 'orig':
continue
# depth and r orders are reverted so X(R)-X(R-1) <=> X(z-1)-X(z)
arr0 = ds_minus[stat]
arr = np.nan*np.zeros(arr0.shape)
for z in range(1,arr0.shape[0]):
arr[z,:] = arr0[z-1,:] - arr0[z,:]
ds_minus[stat].values = arr
if q is not None:
pcnt = np.nanpercentile(ds_minus[stat].values.reshape(-1),q)
ds_minus[stat].values = np.clip(ds_minus[stat].values,pcnt[0],pcnt[1])
return ds_minus
# Z-Minus
#q = [25,75]
def da_minus(da, q=None):
da_minus = da.copy(deep=True)
# depth and r orders are reverted so X(R)-X(R-1) <=> X(z-1)-X(z)
arr = np.nan*np.zeros(da_minus.shape)
for z in range(1,da_minus.values.shape[0]):
arr[z,:] = da_minus.values[z-1,:] - da_minus.values[z,:]
if q is not None:
pcnt = np.nanpercentile(arr.reshape(-1),q)
arr = np.clip(arr,pcnt[0],pcnt[1])
da_minus.values = arr
return da_minus
# Z-Plus
#q = [25,75]
def ds_plus(ds, q=None):
ds_plus = ds.copy(deep=True)
for stat in ds.data_vars:
if stat == 'orig':
continue
arr0 = ds_plus[stat]
arr = np.nan*np.zeros(arr0.shape)
for z in range(1,arr0.shape[0]):
arr[z,:] = (arr0[z-1,:] + arr0[z,:])/2.
ds_plus[stat].values = arr
if q is not None:
pcnt = np.nanpercentile(ds_plus[stat].values.reshape(-1),q)
ds_plus[stat].values = np.clip(ds_plus[stat].values,pcnt[0],pcnt[1])
return ds_plus
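# Example usage of the depth-difference helpers above (dataset/variable names are hypothetical):
# ds2d_minus = ds_minus(ds2d, q=[1, 99])        # per-variable X(z-1)-X(z) with percentile clipping
# da2d_minus = da_minus(ds2d.rotstd, q=[1, 99]) # same, for a single DataArray
# ds2d_plus = ds_plus(ds2d)                     # mean of adjacent depth slices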
#https://en.wikipedia.org/wiki/Gaussian_filter
#https://docs.scipy.org/doc/scipy-0.16.1/reference/generated/scipy.ndimage.filters.gaussian_filter.html
from scipy.ndimage.filters import gaussian_filter
# raster = raster_gamma_range(raster0, 11, 20)
#def raster_gamma_range(raster0, g1, g2, compress=False):
# raster = raster0.copy()
# raster.values = raster.values.astype(np.float32)
# raster.values = gaussian_filter(raster.values,g1) - gaussian_filter(raster.values,g2)
# if compress:
# raise ValueError('"compress" option is disabled')
# #raster.values = np.sign(raster.values)*np.sqrt(np.abs(raster.values))
# return raster
def raster_gamma_range(raster0, g1, g2, backward=False):
raster = raster0.copy()
raster.values = raster.values.astype(np.float32)
if backward:
raster.values = gaussian_filter(raster.values,g1) \
- gaussian_filter(raster.values,g2)
else:
raster.values = gaussian_filter(raster.values,g1,mode='constant', cval=np.nan) \
- gaussian_filter(raster.values,g2,mode='constant', cval=np.nan)
return raster
# raster = raster_gamma(raster0, 11)
#def raster_gamma(raster0, g, compress=False):
# raster = raster0.copy()
# raster.values = gaussian_filter(raster.values.astype(np.float32),g)
# if compress:
# raise ValueError('"compress" option is disabled')
# #raster.values = np.sign(raster.values)*np.sqrt(np.abs(raster.values))
# return raster
def raster_gamma(raster0, g, backward=False):
raster = raster0.copy()
if backward:
raster.values = gaussian_filter(raster.values.astype(np.float32),g)
else:
raster.values = gaussian_filter(raster.values.astype(np.float32),g,mode='constant', cval=np.nan)
return raster
#https://en.wikipedia.org/wiki/Web_Mercator#EPSG:3785
#http://gis.stackexchange.com/questions/62343/how-can-i-convert-a-ascii-file-to-geotiff-using-python
def ds2gtif_south(data, filename):
coordz = list(data.coords)[0]
coordl = list(data.coords)[1]
shape = data.shape
pixelz = round(data[coordz].values[1]-data[coordz].values[0],5)
pixell = round(data[coordl].values[1]-data[coordl].values[0],5)
types = ['uint8','uint16','int16','uint32','int32','float32','float64']
gtypes = [gdal.GDT_Byte, gdal.GDT_UInt16, gdal.GDT_Int16, gdal.GDT_UInt32, gdal.GDT_Int32,
gdal.GDT_Float32, gdal.GDT_Float64]
dtype = data.values.dtype
tidx = types.index(dtype)
gtype = gtypes[tidx]
if tidx in [0,1,2,3,4]:
nodata = np.iinfo(dtype).min
else:
nodata = 170141000918780003225695629360656023552.000
driver = gdal.GetDriverByName("GTiff")
dst = driver.Create(filename, shape[1], shape[0], 1, gtype, options = [ 'COMPRESS=LZW' ])
# top left x, w-e pixel resolution, rotation, top left y, rotation, n-s pixel resolution
if data[coordz].values[0] < data[coordz].values[-1]:
zlim = min(data[coordz].values)-pixelz/2
else:
zlim = max(data[coordz].values)-pixelz/2
dst.SetGeoTransform( [ min(data[coordl].values)-pixell/2, pixell, 0, zlim, 0, pixelz ] )
if 'epsg' in data and data.epsg is not None:
srs = osr.SpatialReference()
#srs.SetWellKnownGeogCS("WGS84")
srs.ImportFromEPSG(int(data.epsg))
dst.SetProjection( srs.ExportToWkt() )
arr = data.values.copy()
arr[np.isnan(arr)] = nodata
dst.GetRasterBand(1).SetNoDataValue(nodata)
dst.GetRasterBand(1).WriteArray(arr)
# north semisphere, usually increasing x,y order
def ds2gtif_north(data, filename):
coordz = list(data.coords)[0]
coordl = list(data.coords)[1]
shape = data.shape
pixelz = round(data[coordz].values[1]-data[coordz].values[0],5)
pixell = round(data[coordl].values[1]-data[coordl].values[0],5)
types = ['uint8','uint16','int16','uint32','int32','float32','float64']
gtypes = [gdal.GDT_Byte, gdal.GDT_UInt16, gdal.GDT_Int16, gdal.GDT_UInt32, gdal.GDT_Int32,
gdal.GDT_Float32, gdal.GDT_Float64]
dtype = data.values.dtype
tidx = types.index(dtype)
gtype = gtypes[tidx]
if tidx in [0,1,2,3,4]:
nodata = np.iinfo(dtype).min
else:
nodata = 170141000918780003225695629360656023552.000
driver = gdal.GetDriverByName("GTiff")
dst = driver.Create(filename, shape[1], shape[0], 1, gtype, options = [ 'COMPRESS=LZW' ])
# top left x, w-e pixel resolution, rotation, top left y, rotation, n-s pixel resolution
if data[coordz].values[0] < data[coordz].values[-1]:
zlim = max(data[coordz].values)+pixelz/2
else:
zlim = min(data[coordz].values)+pixelz/2
dst.SetGeoTransform( [ min(data[coordl].values)-pixell/2, pixell, 0, zlim, 0, -pixelz ] )
if 'epsg' in data and data.epsg is not None:
srs = osr.SpatialReference()
#srs.SetWellKnownGeogCS("WGS84")
srs.ImportFromEPSG(int(data.epsg))
dst.SetProjection( srs.ExportToWkt() )
arr = np.flipud(data.values.copy())
arr[np.isnan(arr)] = nodata
dst.GetRasterBand(1).SetNoDataValue(nodata)
dst.GetRasterBand(1).WriteArray(arr)
#ds2gtif_north(ds3d.orig[10], 'TX_AllenDome/test.tif')
def ds2ascii(ds, stat, depth, filename):
#nodata = 1.70141000918780003225695629360656023552e38
nodata = 170141000918780003225695629360656023552.000
# ignore depth when there is no 'z' dimension
if 'z' in list(ds.dims):
plan = ds[stat].sel(z=depth,method='nearest')
else:
plan = ds[stat]
minx = np.min(plan.x.values)
miny = np.min(plan.y.values)
pixelx = np.diff(plan.x.values)[0]
pixely = np.diff(plan.y.values)[0]
assert( abs(pixelx) == abs(pixely) )
if pixely < 0:
values = np.flipud(plan.values)
else:
values = plan.values
height = plan.shape[0]
width = plan.shape[1]
f = open(filename, 'w')
f.write("ncols %i\r\n" % width);
f.write("nrows %i\r\n" % height);
f.write("xllcorner %f\r\n" % (minx-pixelx/2));
# TODO: CHECK FOR pixely > 0
if pixely < 0:
f.write("yllcorner %f\r\n" % (miny+pixely/2));
else:
f.write("yllcorner %f\r\n" % (miny-pixely/2));
f.write("cellsize %f\r\n" % pixelx);
f.write("NODATA_value %f\r\n" % nodata);
for h in range(0,height):
for w in range(0,width):
f.write(" %.8e" % values[height-1-h,w]);
f.write("\r\n")
f.close()
# save 2d sections as GeoTIFF with fake coordinates and true aspect ratio
# ds2fakegtif(ds2d_plus.rotstd, 'ds2d_plus_rotstd.tif')
def da2fakegtif(data, filename):
shape = data.shape
pixelz = round(data.z.values[1]-data.z.values[0],5)
pixell = round(data.l.values[1]-data.l.values[0],5)
types = ['uint8','uint16','int16','uint32','int32','float32','float64']
gtypes = [gdal.GDT_Byte, gdal.GDT_UInt16, gdal.GDT_Int16, gdal.GDT_UInt32, gdal.GDT_Int32,
gdal.GDT_Float32, gdal.GDT_Float64]
dtype = data.values.dtype
tidx = types.index(dtype)
gtype = gtypes[tidx]
if tidx in [0,1,2,3,4]:
nodata = np.iinfo(dtype).min
else:
nodata = 170141000918780003225695629360656023552.000
driver = gdal.GetDriverByName("GTiff")
dst = driver.Create(filename, shape[1], shape[0], 1, gtype, options = [ 'COMPRESS=LZW' ])
# top left x, w-e pixel resolution, rotation, top left y, rotation, n-s pixel resolution
dst.SetGeoTransform( [ 0, pixell, 0, max(data.z.values), 0, -pixelz ] )
if data.epsg != '':
srs = osr.SpatialReference()
#srs.SetWellKnownGeogCS("WGS84")
srs.ImportFromEPSG(int(data.epsg))
dst.SetProjection( srs.ExportToWkt() )
arr = np.nan*np.ones(data.values.shape)
h = arr.shape[0]
for z in range(0,h):
arr[z,:] = data.values[h-z-1,:]
arr[np.isnan(arr)] = nodata
dst.GetRasterBand(1).SetNoDataValue(nodata)
dst.GetRasterBand(1).WriteArray(arr)
# deprecated - use dem.interp_like(raster, 'linear') or dem.interp_like(raster, 'nearest') instead
def dem2df(dem, _df):
import numpy as np
df = _df.copy()
# calculate x,y indicies on DEM raster
xs = np.array((df.x - dem.ulx)/dem.xres, dtype=int)
ys = np.array((df.y - dem.uly)/dem.yres, dtype=int)
# ignore idices outside of DEM raster
# get z values from DEM
df['z'] = [dem.values[yidx,xidx] if (yidx>=0 and yidx<dem.shape[0] and xidx>=0 and xidx<dem.shape[1]) else 0
for (yidx,xidx) in zip(ys,xs)]
return df
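# A minimal xarray-based alternative for point sampling (a sketch; assumes "dem" has x/y
# coordinates and "df" has x/y columns as above):
# xs = xr.DataArray(df.x.values, dims='points')
# ys = xr.DataArray(df.y.values, dims='points')
# df['z'] = dem.interp(x=xs, y=ys, method='nearest').values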
def ogrline2grid(raster,line):
import numpy as np
prev = None
df = None
for idx in range(len(line)):
row = line.iloc[idx,:]
x = row['x']
y = row['y']
# get pixel coordinates
px = np.argmin(abs(raster.x.values-row['x']))
py = np.argmin(abs(raster.y.values-row['y']))
#print row
#print idx, px, py
if prev is not None:
#print '\tcalculate segment...'
if abs(px-prev[0]) >= abs(py-prev[1]):
#print '\tdx > dy'
maxlen = abs(prev[0]-px)+1
else:
#print '\tdy > dx'
maxlen = abs(prev[1]-py)+1
#xs = [int(round(x)) for x in np.linspace(prev[0],x,maxlen)]
#ys = [int(round(y)) for y in np.linspace(prev[1],y,maxlen)]
xs = np.linspace(prev[2],x,maxlen)
ys = np.linspace(prev[3],y,maxlen)
#print xs
#print ys
_df = pd.DataFrame.from_dict({'x':xs, 'y':ys})
#print df.head()
#print df.tail()
if df is None:
df = _df
else:
df = df.append([_df])
prev = (px,py,x,y)
df['z'] = 0
return df
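# Example usage (file names are hypothetical): densify a digitized profile line to raster resolution
# raster = gdal_raster('dem.tif')
# line = ogr_vector('profile/profile_line.shp')
# profile = ogrline2grid(raster, line)  # DataFrame of x,y points spaced about one raster cell apart, z=0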
# save 2d sections as TXT files with real coordinates
def da2txt(da, filename):
import numpy as np
vals = da.values
ls = da.l.values
xs = da.x.values
ys = da.y.values
zs = da.z.values
#print l,x,y,z
with open(filename, "w") as f:
f.write("x,y,z,%s\r\n" % da.name)
for lidx, l in enumerate(ls):
x = xs[lidx]
y = ys[lidx]
for zidx, z in enumerate(zs):
z = zs[zidx]
val = vals[zidx,lidx]
#print x, y, z, val
if np.isnan(val):
continue
f.write("%.1f,%.1f,%.1f,%f\r\n" % (x, y, z, val));
def da2ascii(da, filename):
import numpy as np
types = ['uint8','uint16','int16','uint32','int32','float32','float64']
dtype = da.values.dtype
tidx = types.index(dtype)
if tidx in [0,1,2,3,4]:
nodata = np.iinfo(dtype).min
nodata_str = "%d" % nodata
pattern = " %d"
else:
nodata = 170141000918780003225695629360656023552.000
nodata_str = "%f" % nodata
pattern = " %.8e"
minx = np.min(da.x.values)
miny = np.min(da.y.values)
pixelx = np.diff(da.x.values)[0]
pixely = np.diff(da.y.values)[0]
assert( abs(pixelx) == abs(pixely) )
if pixely < 0:
values = np.flipud(da.values)
else:
values = da.values
height = da.shape[0]
width = da.shape[1]
f = open(filename, 'w')
f.write("ncols %i\r\n" % width);
f.write("nrows %i\r\n" % height);
f.write("xllcorner %f\r\n" % (minx-pixelx/2));
# TODO: CHECK FOR pixely > 0
if pixely < 0:
f.write("yllcorner %f\r\n" % (miny+pixely/2));
else:
f.write("yllcorner %f\r\n" % (miny-pixely/2));
f.write("cellsize %f\r\n" % pixelx);
f.write("NODATA_value %s\r\n" % nodata_str);
for h in range(0,height):
for w in range(0,width):
f.write( pattern % values[height-1-h,w]);
f.write("\r\n")
f.close()
#q = [25,75]
def da_percentile(da, q):
import numpy as np
ds = da.copy(deep=True)
pcnt = np.nanpercentile(da.values.reshape(-1),q)
da.values = np.clip(da.values,pcnt[0],pcnt[1])
return da
#https://stackoverflow.com/questions/11727822/reading-a-vtk-file-with-python
def vtk2da(filename, varname='None'):
from vtk import vtkStructuredPointsReader
from vtk.util import numpy_support as VN
reader = vtkStructuredPointsReader()
reader.SetFileName(filename)
reader.ReadAllScalarsOn()
reader.Update()
data = reader.GetOutput()
dim = data.GetDimensions()
bnd = data.GetBounds()
values = VN.vtk_to_numpy(data.GetPointData().GetArray(varname))
values = values.reshape(dim,order='F')
da = xr.DataArray(values.transpose([2,1,0]),
coords=[np.linspace(bnd[4],bnd[5],dim[2]),
np.linspace(bnd[2],bnd[3],dim[1]),
np.linspace(bnd[0],bnd[1],dim[0])],
dims=['z','y','x'])
return da
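# Example usage (file and variable names are hypothetical):
# da = vtk2da('model.vtk', varname='density')  # -> DataArray with dims z, y, x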
### Save to VTK (version 1) files
def da2vtk1(da, filename, filter_by_output_range=None):
import numpy as np
import sys
vals = da.values
vals = 100.*(vals - np.nanmin(vals))/(np.nanmax(vals)-np.nanmin(vals))
if not filter_by_output_range is None:
vals[(vals<filter_by_output_range[0])|(vals>filter_by_output_range[1])] = np.nan
vals = 100.*(vals - np.nanmin(vals))/(np.nanmax(vals)-np.nanmin(vals))
# Use "A*(A/A)" expression in Voxler 4 "math" unit
#vals[np.isnan(vals)] = 0
#vals[vals==0] = np.nan
header = """# vtk DataFile Version 1.0
vtk output
BINARY
DATASET STRUCTURED_POINTS
DIMENSIONS %d %d %d
ASPECT_RATIO %f %f %f
ORIGIN %f %f %f
POINT_DATA %d
SCALARS %s float
LOOKUP_TABLE default
""" % (da.x.shape[0],da.y.shape[0],da.z.shape[0],
(np.nanmax(da.x.values)-np.nanmin(da.x.values))/(da.x.shape[0]-1),
(np.nanmax(da.y.values)-np.nanmin(da.y.values))/(da.y.shape[0]-1),
(np.nanmax(da.z.values)-np.nanmin(da.z.values))/(da.z.shape[0]-1),
np.nanmin(da.x.values),
np.nanmin(da.y.values),
np.nanmin(da.z.values),
da.x.shape[0]*da.y.shape[0]*da.z.shape[0],
da.name)
with open(filename, 'wb') as f:
if sys.version_info >= (3, 0):
f.write(bytes(header,'utf-8'))
else:
f.write(header)
np.array(vals, dtype=np.float32).byteswap().tofile(f)
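# Example usage (mirrors the ds2gtif_north example above; the variable name is hypothetical):
# da2vtk1(ds3d.orig, 'orig.vtk', filter_by_output_range=[5, 95])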
### Save vector with components (i,j,k) to VTK (version 4.2) binary files
# ds2vtk3(ds, 'velocity', fname + '.vtk')
def ds2vtk3(ds, name, filename):
import numpy as np
import sys
da = ds.transpose('z','y','x')
header = """# vtk DataFile Version 4.2
vtk output
BINARY
DATASET STRUCTURED_POINTS
DIMENSIONS %d %d %d
SPACING %f %f %f
ORIGIN %f %f %f
POINT_DATA %d
VECTORS %s float
""" % (da.x.shape[0],da.y.shape[0],da.z.shape[0],
(np.nanmax(da.x.values)-np.nanmin(da.x.values))/(da.x.shape[0]-1),
(np.nanmax(da.y.values)-np.nanmin(da.y.values))/(da.y.shape[0]-1),
(np.nanmax(da.z.values)-np.nanmin(da.z.values))/(da.z.shape[0]-1),
np.nanmin(da.x.values),
np.nanmin(da.y.values),
np.nanmin(da.z.values),
da.x.shape[0]*da.y.shape[0]*da.z.shape[0],
name)
with open(filename, 'wb') as f:
f.write(bytes(header,'utf-8'))
arr = np.stack([da.i.values, da.j.values, da.k.values],axis=-1)
np.array(arr, dtype=np.float32).byteswap().tofile(f)
def da2vtk1_int(da, filename):
import numpy as np
import sys
vals = da.values
header = """# vtk DataFile Version 1.0
vtk output
BINARY
DATASET STRUCTURED_POINTS
DIMENSIONS %d %d %d
ASPECT_RATIO %f %f %f
ORIGIN %f %f %f
POINT_DATA %d
SCALARS %s int32
LOOKUP_TABLE default
""" % (da.x.shape[0],da.y.shape[0],da.z.shape[0],
(np.nanmax(da.x.values)-np.nanmin(da.x.values))/(da.x.shape[0]-1),
(np.nanmax(da.y.values)-np.nanmin(da.y.values))/(da.y.shape[0]-1),
(np.nanmax(da.z.values)-np.nanmin(da.z.values))/(da.z.shape[0]-1),
np.nanmin(da.x.values),
np.nanmin(da.y.values),
np.nanmin(da.z.values),
da.x.shape[0]*da.y.shape[0]*da.z.shape[0],
da.name)
with open(filename, 'wb') as f:
if sys.version_info >= (3, 0):
f.write(bytes(header,'utf-8'))
else:
f.write(header)
np.array(vals, dtype=np.int32).byteswap().tofile(f)
#https://stackoverflow.com/questions/39073973/how-to-generate-a-matrix-with-circle-of-ones-in-numpy-scipy
def unit_circle_2d(r):
import numpy as np
A = np.arange(-r,r+1)**2
dists = np.sqrt( A[:,None] + A)
# circle
#return (np.abs(dists-r)<=0).astype(int)
# filled circle
if r <= 2:
return ((dists-r)<=0).astype(int)
return ((dists-r)<=0.5).astype(int)
# z, y, x
#footprint = np.array((2*rz+1)*[unit_circle_2d(r)])
#print (footprint.shape)
#plt.imshow(footprint[0], interpolation='None')
def unit_ring_2d(r):
import numpy as np
A = np.arange(-r,r+1)**2
dists = np.sqrt( A[:,None] + A)
if r <= 2:
return (np.abs(dists-r)<=0).astype(int)
return (np.abs(dists-r)<=0.5).astype(int)
# y, x
#footprint = unit_ring_2d(4)
#print (footprint.shape)
#plt.imshow(footprint, interpolation='None')
# GEE helper functions
#import urllib
#import shutil
#import ee
# create worldfile to define image coordinates
def worldfile_tofile(fname, area, dimensions):
import os
name, ext = os.path.splitext(fname)
# use QGIS worldfile names convention
jext = ext[1] + ext[-1] + 'w'
fname = os.path.join(str(os.extsep).join([name,jext]))
with open(fname, 'w') as outfile:
xres = (area[2]-area[0])/dimensions[0]
yres = (area[1]-area[3])/dimensions[1]
coefficients = [xres, 0, 0, yres, area[0], area[3]]
print('\n'.join(map(str, coefficients)), file=outfile)
# download GEE URL and save to file
def geeurl_tofile(GEEurl, fname):
import urllib
import shutil
with urllib.request.urlopen(GEEurl) as response, open(fname, 'wb') as outfile:
shutil.copyfileobj(response, outfile)
def gee_preview_tofile(GEEimage, vis, dimensions, fname=None):
import ee
import geopandas as gpd
from shapely.geometry import Polygon
# WGS84 coordinates
geom = Polygon(GEEimage.getInfo()['properties']['system:footprint']['coordinates'][0])
# define 1st band projection
proj = GEEimage.getInfo()['bands'][0]['crs']
# extract area bounds in the 1st band projection
area = gpd.GeoSeries(geom,crs='epsg:4326').to_crs(proj)[0].bounds
GEEurl = GEEimage\
.visualize(**vis)\
.getThumbURL({'dimensions':dimensions, 'format': 'jpg'})
#print (GEEurl)
if fname is not None:
geeurl_tofile(GEEurl, fname)
worldfile_tofile(fname, area, dimensions)
return {'url': GEEurl, 'width': dimensions[0], 'height': dimensions[1]}
def split_rect(rect, n):
import numpy as np
lats = np.linspace(rect[0], rect[2], n+1)
lons = np.linspace(rect[1], rect[3], n+1)
#print (lats, lons)
cells = []
for lt1, lt2 in zip(lats.ravel()[:-1], lats.ravel()[1:]):
for ll1, ll2 in zip(lons.ravel()[:-1], lons.ravel()[1:]):
cell = [lt1, ll1, lt2, ll2]
cells.append(cell)
return cells
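# Example: split_rect([0, 10, 1, 12], 2) returns 4 cells of 0.5 x 1 degrees,
# the first being [0.0, 10.0, 0.5, 11.0] (rect order is [lat1, lon1, lat2, lon2]).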
def zipsbands2image(files):
import xarray as xr
import zipfile
dss = []
# merge separate file areas
for fname in sorted(files):
#print ('fname', fname)
zip = zipfile.ZipFile(fname)
# merge separate file to dataset
ds = xr.Dataset()
for bandname in zip.namelist():
varname = bandname.split('.')[1]
da = xr.open_rasterio(f'/vsizip/{fname}/{bandname}').squeeze(drop=True)
ds[varname] = da
da.close()
dss.append(ds)
return xr.merge(dss)
def rasterize(image, areas, with_nodata=False):
import xarray as xr
from rasterio import features
# increment class value to use 0 as placeholder later
if 'class' in areas:
geoms = [(g,c+1) for g,c in zip(areas['geometry'], areas['class'])]
else:
geoms = [(g,1) for g in areas['geometry']]
# rasterio transform is broken, we need to build it from image extent
# note: gdal uses pixel borders and xarray uses pixel centers
if 'latitude' in image:
band = 'latitude'
else:
# suppose the same geometries per bands
band = list(image.data_vars)[0]
#res = image[band].attrs['res']
# be careful with ordering
res = [float(image[band].x.diff('x')[0]), float(image[band].y.diff('y')[0])]
xmin = image[band].x.values.min()
ymax = image[band].y.values.max()
transform = [res[0], 0, xmin - res[0]/2, 0, -res[1], ymax+res[1]/2]
# rasterize geometries
da = xr.zeros_like(image[band]).rename('class').astype(np.uint8)
da.values = np.flipud(features.rasterize(geoms,
dtype=np.uint8,
out_shape=image[band].shape,
transform=transform)) - 1
df = da.to_dataframe().reset_index()
if not with_nodata:
# remove placeholder zero value
df = df[df['class']<255]
# return dataarray with placeholder 255 and dataframe
return da, df
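# Example usage (a sketch; "image" is an xarray.Dataset of bands and "areas" a GeoDataFrame
# with 'geometry' and an optional 'class' column, both in the image projection):
# class_da, class_df = rasterize(image, areas)  # class_da: uint8 raster (255 = nodata), class_df: per-pixel table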
def vtkpoints2ds(filename):
import xarray as xr
import numpy as np
#from vtk import vtkStructuredGridReader
from vtk import vtkStructuredPointsReader
from vtk.util import numpy_support as VN
reader = vtkStructuredPointsReader()
reader.SetFileName(filename)
reader.ReadAllScalarsOn()
reader.Update()
data = reader.GetOutput()
dim = data.GetDimensions()
bnd = data.GetBounds()
points = data.GetPointData()
ds = xr.Dataset()
for idx in range(points.GetNumberOfArrays()):
arrayname = points.GetArrayName(idx)
values = VN.vtk_to_numpy(points.GetArray(arrayname))
values = values.reshape(dim,order='F')
da = xr.DataArray(values.transpose([2,1,0]),
coords=[np.linspace(bnd[4],bnd[5],dim[2]),
np.linspace(bnd[2],bnd[3],dim[1]),
np.linspace(bnd[0],bnd[1],dim[0])],
dims=['z','y','x'])
ds[arrayname] = da
return ds
import json
import numpy as np
from scipy.spatial.distance import cdist
import os
try:
import cPickle as pickle
except ImportError:
import pickle
import torch
import torch.nn.functional as F
import cv2
import argparse
name2id = {}
results = []
def morpho(mask, iter, bigger=True):
# return mask
mask = mask * 255
mask = mask.astype(np.uint8)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
# print(kernel)
if bigger:
mask = cv2.dilate(mask, kernel, iterations=iter)
else:
mask = cv2.erode(mask, kernel, iterations=iter)
return mask / 255
def TPS(P1, P2, _lambda=1e-3, width=768, height=1024, calc_new_pos=False):
def radius_basis(r):
epsilon = 1e-14
return r ** 2 * np.log(r ** 2 + epsilon)
def homogenization(P):
point_num = P.shape[0]
P_homo = np.ones((point_num, 3))
P_homo[:, 1:3] = P
return P_homo
point_num = P1.shape[0]
K = radius_basis(cdist(P1, P1)) + _lambda * np.eye(point_num)
L = np.zeros((point_num + 3, point_num + 3))
L[:point_num, :point_num] = K
L[:point_num, point_num:point_num + 3] = homogenization(P1)
L[point_num:point_num + 3, :point_num] = homogenization(P1).T
# target value, calculate in turn
v_x = np.zeros(point_num + 3)
v_y = np.zeros(point_num + 3)
v_x[:point_num] = P2[:, 0]
v_y[:point_num] = P2[:, 1]
w_x = np.linalg.solve(L, v_x)
a_x = w_x[point_num:]
w_x = w_x[:point_num]
w_y = np.linalg.solve(L, v_y)
a_y = w_y[point_num:]
w_y = w_y[:point_num]
if calc_new_pos:
points = np.zeros((width * height, 2))
for i in range(width):
points[i * height:(i + 1) * height, 0] = np.ones(height) * i / width
points[i * height:(i + 1) * height, 1] = np.arange(height) / height
h_points = homogenization(points)
new_x = np.matmul(h_points, a_x) + np.matmul(w_x.T, radius_basis(cdist(P1, points)))
new_y = np.matmul(h_points, a_y) + np.matmul(w_y.T, radius_basis(cdist(P1, points)))
new_x = new_x.reshape(width, height).T
new_y = new_y.reshape(width, height).T
new_x = np.stack((new_x, new_y), axis=2)
return None, (new_x if calc_new_pos else None)
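# A brief note on what TPS() above solves (a reading of the code, with U(r) = r^2*log(r^2)):
# per axis it solves [[K + lambda*I, P1_h], [P1_h.T, 0]] @ [w; a] = [P2_axis; 0], then maps a
# normalized point q to a0 + a1*q_x + a2*q_y + sum_i w_i * U(|P1_i - q|).
# Example (hypothetical keypoints, both arrays normalized to [0, 1]):
# src = np.random.rand(25, 2)
# dst = src + 0.01 * np.random.randn(25, 2)
# _, grid = TPS(src, dst, width=768, height=1024, calc_new_pos=True)  # grid shape: (1024, 768, 2)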
def normalize(p, w, h):
return p / np.array([w, h]).astype(np.float32)
def load_name_to_memory(keypoint_path):
global results, name2id, x
with open(keypoint_path, 'r') as f:
results += json.load(f)
for i in range(len(results)):
result = results[i]
name2id[result['image_id'].split('/')[-1]] = i
print(name2id)
def load_keypoints(source_keypoint_path='', target_keypoint_path='',
w=100, h=100, source_name='', target_name=''):
# print(source_keypoint_path, target_keypoint_path)
if len(name2id) == 0:
load_name_to_memory(keypoint_path=source_keypoint_path)
load_name_to_memory(keypoint_path=target_keypoint_path)
source_id = name2id[source_name]
target_id = name2id[target_name]
raw_source_keypoint = np.array(results[source_id]['keypoints'], dtype=np.float32).reshape((-1, 3))[:25, :2]
source_keypoint = normalize(raw_source_keypoint, w, h)
raw_target_keypoint = np.array(results[target_id]['keypoints'], dtype=np.float32).reshape((-1, 3))[:25, :2]
target_keypoint = normalize(raw_target_keypoint, w, h)
return source_keypoint, target_keypoint, raw_source_keypoint, raw_target_keypoint
def get_midpoint(point1, point2, x_val):
slope = (point2[1] - point1[1]) / (point2[0] - point1[0])
bias = point1[1] - slope * point1[0]
y_val = x_val * slope + bias
return np.array([x_val, y_val]).reshape(1, 2)
def get_slanted_x(point1, point2, shoulder, const=0.7):
delta = point2 - point1
if delta[1] == 0 or delta[0] == 0:
return point2[0]
tan_theta = delta[0] / delta[1]
return point2[0] + tan_theta * shoulder * const
def get_align_keypoint(keypoint, is_source=True):
if is_source:
for i in range(11, 15):
keypoint[i, 1] = (keypoint[i, 1] + keypoint[30 - i, 1]) / 2
keypoint[30 - i, 1] = keypoint[i, 1]
else:
point1 = get_midpoint(keypoint[14, :], keypoint[16, :], keypoint[11, 0])
point3 = get_midpoint(keypoint[14, :], keypoint[16, :], keypoint[19, 0])
keypoint[14, :] = point1
keypoint[16, :] = point3
point1 = get_midpoint(keypoint[13, :], keypoint[17, :], keypoint[11, 0])
point3 = get_midpoint(keypoint[13, :], keypoint[17, :], keypoint[19, 0])
keypoint[13, :] = point1
keypoint[17, :] = point3
x = get_slanted_x(keypoint[0, :], keypoint[3, :], keypoint[5, 0] - keypoint[1, 0])
point1 = get_midpoint(keypoint[13, :], keypoint[17, :], x)
point2 = get_midpoint(keypoint[14, :], keypoint[16, :], x)
point3 = get_midpoint(keypoint[13, :], keypoint[17, :], keypoint[3, 0])
point4 = get_midpoint(keypoint[14, :], keypoint[16, :], keypoint[3, 0])
# x = get_slanted_x(keypoint[0, :], keypoint[3, :], keypoint[5, 0] - keypoint[1, 0], const=0.9)
# point5 = get_midpoint(keypoint[12, :], keypoint[18, :], x)
# point6 = get_midpoint(keypoint[12, :], keypoint[18, :], keypoint[3, 0])
align_keypoint = point2
for i in [2, 4, 6, 11, 12, 13, 14, 16, 17, 18, 19, 24, 3, 0]:
align_keypoint = np.concatenate((align_keypoint, keypoint[i:i + 1, :]), axis=0)
align_keypoint = np.concatenate((align_keypoint, point4), axis=0)
return keypoint, align_keypoint
cnt = 0
def visualize(keypoint, img_path='', output_root='./visualize_landmark', prefix='black'):
if not os.path.exists(output_root):
os.mkdir(output_root)
global cnt
cnt += 1
img = cv2.imread(img_path)
for i in range(keypoint.shape[0]):
cv2.circle(img, (int(keypoint[i, 0]), int(keypoint[i, 1])), 4, [255, 0, 170], thickness=-1)
cv2.imwrite(os.path.join(output_root, f'{prefix}_{cnt}.jpg'), img)
def H_cosine(cloth, logo, base, name=''):
cv2.imwrite(f'./cloth{name}.jpg', cloth)
cv2.imwrite(f'./logo_{name}.jpg', logo)
cloth_hsv = cv2.cvtColor(cloth, cv2.COLOR_BGR2HSV)
logo_hsv = cv2.cvtColor(logo, cv2.COLOR_BGR2HSV)
base_hsv = cv2.cvtColor(base, cv2.COLOR_BGR2HSV)
cloth_h_rad = cloth_hsv[:, :, 0] / 255 * np.pi * 2
logo_h_rad = logo_hsv[:, :, 0] / 255 * np.pi * 2
base_h_rad = base_hsv[:, :, 0] / 255 * np.pi * 2
return np.arccos(np.cos(cloth_h_rad - base_h_rad)), np.arccos(np.cos(logo_h_rad - base_h_rad))
def HS_cosine(cloth_hsv, logo_hsv, base_hsv, dim=0, name=''):
if dim == 0:
cloth_h_rad = cloth_hsv[:, :, dim] / 255 * np.pi * 2
logo_h_rad = logo_hsv[:, :, dim] / 255 * np.pi * 2
base_h_rad = base_hsv[:, :, dim] / 255 * np.pi * 2
return np.arccos(np.cos(cloth_h_rad - base_h_rad)), np.arccos(np.cos(logo_h_rad - base_h_rad))
print('base_hsv', base_hsv)
return np.abs(cloth_hsv[:, :, dim].astype(int) - base_hsv[:, :, dim].astype(int)) / 255, np.abs(logo_hsv[:, :, dim].astype(int) - base_hsv[:, :, dim].astype(int)) / 255
def standardization(base, arr, mask):
x_arr, y_arr, _ = np.nonzero(mask)
val_arr = arr[x_arr, y_arr, :].astype(np.float32)
mu = np.mean(val_arr, axis=0)
scale = base[0, 0, :] / mu
print(mu, base[0, 0, :], scale)
arr = ((arr.astype(np.float32) - mu) * scale + base).astype(np.float32)
return np.clip(arr, 0, 255).astype(np.uint8), base, scale, mu
def inv_standardization(arr, base, scale, mu):
base[:, :, 0] = 0
scale[0] = 1
mu[0] = 0
arr = ((arr.astype(np.float32) - base) / scale + mu).astype(np.float32)
# x_arr, y_arr, _ = np.nonzero(mask)
# val_arr = arr[x_arr, y_arr, :]
# arr_mu = np.mean(val_arr, axis=0)
# scale = mu / arr_mu
# arr = (arr.astype(np.float32) - arr_mu) * scale + mu
return np.clip(arr, 0, 255).astype(np.uint8)
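# In short, standardization() shifts the masked pixels' mean HSV to the base color:
# new = (old - mu) * (base / mu) + base, and returns (base, scale, mu) so that
# inv_standardization() can undo the shift for S and V while keeping the transferred hue (channel 0).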
def main(source_img_root='./data', target_img_root='./data', source_name='image_2', target_name='image_1',
source_keypoint_path='', target_keypoint_path='', output_root='./output', target_folder=''):
print(target_name)
if not os.path.exists(output_root):
os.mkdir(output_root)
source_fn = os.path.join(source_img_root, source_name)
target_fn = os.path.join(target_img_root, target_name)
target_seg_fn = os.path.join('./segmentation/segmentation_model/gray_atr/' + target_folder,
'.'.join(target_name.split('.')[:-1]) + '.png')
source_img = cv2.imread(source_fn)
target_img = cv2.imread(target_fn)
target_seg = cv2.imread(target_seg_fn, 0)
print(type(target_seg))
target_seg = (target_seg == 4).astype(np.float64)
sh, sw, _ = source_img.shape
th, tw, _ = target_img.shape
w = max(sw, tw)
h = max(sh, th)
target_seg = np.pad(target_seg, ((0, h - th), (0, w - tw)), 'constant', constant_values=(0, 0))
target_seg = np.expand_dims(target_seg, axis=2)
source_img = np.pad(source_img, ((0, h - sh), (0, w - sw), (0, 0)), 'constant', constant_values=(255, 255))
target_img = np.pad(target_img, ((0, h - th), (0, w - tw), (0, 0)), 'constant', constant_values=(255, 255))
source_keypoint, target_keypoint, raw_source_keypoint, raw_target_keypoint = \
load_keypoints(w=w, h=h, source_name=source_name, target_name=target_name,
source_keypoint_path=source_keypoint_path, target_keypoint_path=target_keypoint_path)
raw_target_keypoint, target_keypoint = get_align_keypoint(raw_target_keypoint, is_source=False)
raw_source_keypoint, source_keypoint = get_align_keypoint(raw_source_keypoint, is_source=True)
visualize(target_keypoint, target_fn)
visualize(source_keypoint, source_fn)
target_keypoint = normalize(target_keypoint[:-2, :], w, h)
source_keypoint = normalize(source_keypoint[:-2, :], w, h)
"""
crop logo, output mask and source_mask
"""
left_down = raw_source_keypoint[13, :] / 5 + raw_source_keypoint[14, :] * 4 / 5
right_down = raw_source_keypoint[17, :] / 5 + raw_source_keypoint[16, :] * 4 / 5
raw_source_keypoint[14, :] = left_down
raw_source_keypoint[16, :] = right_down
convex_poly = raw_source_keypoint[[6, 11, 12, 13, 14, 16, 17, 18, 19, 24, 3], :].astype(int)
mask = np.zeros((h, w, 1)).astype(np.uint8)
cv2.fillPoly(mask, [convex_poly], 255)
mask = mask / 255
mask = morpho(mask, 15, False)
mask = mask[:, :, np.newaxis]
source_mask = np.copy(mask)
"""
calculate source base color
"""
new_mask = (mask - np.expand_dims(morpho(mask, 5, False), axis=2)).astype(int)
print(new_mask.shape)
cv2.imwrite(f'./mask_{target_name}.jpg', (new_mask * np.clip(source_img, 0, 255).astype(np.uint8)).astype(np.uint8))
pixels = np.sum(new_mask)
base_color = (np.sum((np.clip(source_img, 0, 255).astype(np.uint8) * new_mask).astype(int),
axis=(0, 1)) / pixels).astype(np.uint8).reshape(1, 1, 3).astype(np.uint8)
"""
use color hsv find cloth area
hsv_source: base hsv of source color
target_img_hsv: hsv of target image
"""
img_hsv = cv2.cvtColor(target_img, cv2.COLOR_BGR2HSV)
mask = np.where(np.logical_and(np.logical_and(35 < img_hsv[:, :, 0], img_hsv[:, :, 0] < 77), img_hsv[:, :, 1] > 70), 1, 0).astype(np.uint8)
mask = cv2.blur(cv2.blur(mask, (5, 5)), (3, 3))[:, :, np.newaxis]
target_img_hsv = img_hsv.copy()
cloth = mask * target_img_hsv
hsv_source = cv2.cvtColor(base_color, cv2.COLOR_BGR2HSV)
"""
transfer color of target cloth
"""
target_img_hsv, base, scale, mu = standardization(hsv_source, cloth, mask)
target_img = cv2.cvtColor(target_img_hsv, cv2.COLOR_HSV2BGR) * mask + target_img * (1 - mask)
cv2.imwrite(f'./target_transfer_{target_name}_mask.jpg', mask * 255)
arr = np.ones((h, w, 3)) * base_color
cv2.imwrite(f'./target_transfer_{target_name}.jpg', target_img)
cv2.imwrite(f'./source_color_{target_name}.jpg', arr.astype(np.uint8))
"""
crop target image cloth area
"""
left_down = raw_target_keypoint[13, :] / 5 + raw_target_keypoint[14, :] * 4 / 5
right_down = raw_target_keypoint[17, :] / 5 + raw_target_keypoint[16, :] * 4 / 5
raw_target_keypoint[14, :] = left_down
raw_target_keypoint[16, :] = right_down
convex_poly = raw_target_keypoint[[6, 11, 12, 13, 14, 16, 17, 18, 19, 24, 3], :].astype(int)
mask = np.zeros((h, w, 1)).astype(np.uint8)
cv2.fillPoly(mask, [convex_poly], 255)
mask = mask / 255
"""
calculate transferred target cloth color and fill the logo image
"""
new_mask = morpho(mask, 15, False)[:, :, np.newaxis]
pixels = np.sum(new_mask)
target_color = (np.sum((target_img * new_mask).astype(int), axis=(0, 1)) / pixels).astype(np.uint8).reshape(1, 1, 3).astype(np.uint8)
source_img = source_img * source_mask + target_color * (1 - source_mask)
arr = np.ones((h, w, 3)) * target_color
""""""
"""
align keypoint
"""
_, grid = TPS(target_keypoint, source_keypoint, width=w, height=h,
calc_new_pos=True)
grid = torch.from_numpy(grid)
source_img = torch.from_numpy(source_img.astype(np.float64)).unsqueeze(dim=0).permute(0, 3, 1, 2)
target_img = torch.from_numpy(target_img.astype(np.float64)).unsqueeze(dim=0).permute(0, 3, 1, 2)
grid = grid.unsqueeze(dim=0) * 2 - 1.0
warp_img = F.grid_sample(source_img, grid, mode='bilinear', padding_mode='border')
warp_img = warp_img.squeeze(dim=0).permute(1, 2, 0)
warp_img = warp_img.numpy().astype(np.uint8)
raw_target_keypoint = raw_target_keypoint.astype(int)
target_img = target_img.squeeze(dim=0).permute(1, 2, 0).numpy().astype(np.uint8)
warp_img = warp_img * mask + 255 * (1 - mask)
logo = np.copy(warp_img).astype(np.uint8)
warp_img = np.clip(warp_img, 0, 255)
#!/usr/bin/env python
import os, sys, cv2, json, pickle
import math, PIL, cairo
import copy, random, re
from copy import deepcopy
import numpy as np
import os.path as osp
from time import time
from datetime import datetime
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from collections import Counter
import matplotlib.pyplot as plt
from abstract_config import get_config
import torch, torchtext
import torch.nn as nn
###########################################################
## Directory
###########################################################
this_dir = osp.dirname(__file__)
def maybe_create(dir_path):
if not osp.exists(dir_path):
os.makedirs(dir_path)
def prepare_directories(config):
postfix = datetime.now().strftime("%m%d_%H%M%S")
model_name = '{}_{}'.format(config.exp_name, postfix)
config.model_name = model_name
config.model_dir = osp.join(config.log_dir, model_name)
maybe_create(config.model_dir)
def pickle_load(path):
with open(path, 'rb') as fid:
data_ = pickle.load(fid)
return data_
def pickle_save(path, data):
with open(path, 'wb') as fid:
pickle.dump(data, fid, pickle.HIGHEST_PROTOCOL)
def json_load(path):
with open(path, 'r') as fid:
data_ = json.load(fid)
return data_
def json_save(path, data):
with open(path, 'w') as fid:
json.dump(data, fid, indent=4, sort_keys=True)
###########################################################
## Discretization
###########################################################
class LocationMap(object):
def __init__(self, config):
self.cfg = config
self.cols, self.col_step = \
np.linspace(config.margin, config.image_size[0]-config.margin,
num=config.grid_size[0],
endpoint=True, retstep=True, dtype=float)
self.rows, self.row_step = \
np.linspace(config.margin, config.image_size[1]-config.margin,
num=config.grid_size[1],
endpoint=True, retstep=True, dtype=float)
Xs, Ys = np.meshgrid(self.cols, self.rows)
self.coords = np.vstack((Xs.flatten(), Ys.flatten())).transpose()
def index2coord(self, index):
return self.coords[index].copy()
def indices2coords(self, indices):
return self.coords[indices].copy()
def coord2index(self, coord):
col_idx = int(float(coord[0] - self.cfg.margin)/self.col_step + 0.5)
row_idx = int(float(coord[1] - self.cfg.margin)/self.row_step + 0.5)
col_idx = max(0, min(col_idx, self.cfg.grid_size[0]-1))
row_idx = max(0, min(row_idx, self.cfg.grid_size[1]-1))
return row_idx * self.cfg.grid_size[0] + col_idx
def coords2indices(self, coords):
grids = (coords - self.cfg.margin)/np.array([self.col_step, self.row_step]).reshape((1,2)).astype(float)
grids = (grids + 0.5).astype(int)
grids[:, 0] = np.maximum(0, np.minimum(grids[:, 0], self.cfg.grid_size[0]-1))
grids[:, 1] = np.maximum(0, np.minimum(grids[:, 1], self.cfg.grid_size[1]-1))
return grids[:, 1] * self.cfg.grid_size[0] + grids[:, 0]
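# Added usage sketch (not part of the original module): round-trip a coordinate
# through the grid discretization. The SimpleNamespace config is a hypothetical
# stand-in for the real config object from abstract_config.
def _demo_location_map():
    from types import SimpleNamespace
    cfg = SimpleNamespace(margin=8, image_size=(64, 64), grid_size=(4, 4))
    lmap = LocationMap(cfg)
    idx = lmap.coord2index((30.0, 40.0))
    coord = lmap.index2coord(idx)   # snaps back to the nearest grid centre
    return idx, coord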
###########################################################
## Vocabulary
###########################################################
import string
punctuation_table = str.maketrans('', '', string.punctuation)
stop_words = set(stopwords.words('english'))
# print('stop_words: ', stop_words)
def further_token_process(tokens):
tokens = [w.translate(punctuation_table) for w in tokens]
tokens = [w for w in tokens if w.isalpha()]
tokens = [w for w in tokens if not w in stop_words]
return tokens
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
'''
alist.sort(key=natural_keys) sorts in human order
http://nedbatchelder.com/blog/200712/human_sorting.html
(See Toothy's implementation in the comments)
'''
return [ atoi(c) for c in re.split('(\d+)', text) ]
class Vocab(object):
def __init__(self, name):
self.name = name
self.word2index = {}
self.word2count = {}
self.index2word = []
for idx, word in enumerate(['<pad>', '<sos>', '<eos>']):
self.word2index[word] = idx
self.index2word.append(word)
self.word2count[word] = 1
self.n_words = 3
self.glovec = torchtext.vocab.GloVe(cache=osp.join(this_dir, '..', 'data', 'caches'))
def get_glovec(self):
vectors = []
self.word2vector = {}
for i in range(len(self.index2word)):
w = self.index2word[i]
v_th = self.glovec[w].squeeze()
v_np = v_th.numpy()
vectors.append(v_th)
self.word2vector[w] = v_np
self.vectors = torch.stack(vectors, 0)
del self.glovec
self.glovec = None
def load(self, path):
with open(path, 'r') as fp:
vocab_info = json.loads(fp.read())
self.word2index = vocab_info['word2index']
self.word2count = vocab_info['word2count']
self.index2word = vocab_info['index2word']
self.n_words = len(self.index2word)
def save(self, path):
vocab_info = {}
vocab_info['word2index'] = self.word2index
vocab_info['word2count'] = self.word2count
vocab_info['index2word'] = self.index2word
with open(path, 'w') as fp:
json.dump(vocab_info, fp, indent=4, sort_keys=True)
def addSentence(self, sentence):
tokens = word_tokenize(sentence.lower())
tokens = further_token_process(tokens)
for word in tokens:
self.addWord(word)
def addWord(self, word):
if word not in self.word2index:
self.word2index[word] = self.n_words
self.word2count[word] = 1
self.index2word.append(word)
self.n_words += 1
else:
self.word2count[word] += 1
def filter_words(self, max_size=None, min_freq=1):
counter = Counter(self.word2count)
# rm special tokens before sorting
counter['<pad>'] = 0; counter['<sos>'] = 0; counter['<eos>'] = 0
# sort by frequency, then alphabetically
words_and_frequencies = sorted(counter.items(), key=lambda tup: tup[0])
words_and_frequencies.sort(key=lambda tup: tup[1], reverse=True)
# reset
self.index2word = []
self.word2index = {}
self.n_words = 0
for idx, word in enumerate(['<pad>', '<sos>', '<eos>']):
self.word2index[word] = idx
self.index2word.append(word)
self.n_words += 1
for word, freq in words_and_frequencies:
if freq < min_freq or len(self.index2word) == max_size:
break
self.index2word.append(word)
self.word2index[word] = self.n_words
self.n_words += 1
counter['<pad>'] = 1; counter['<sos>'] = 1; counter['<eos>'] = 1
self.word2count = dict(counter)
def word_to_index(self, w):
return self.word2index.get(w, -1)
###########################################################
## Pytorch
###########################################################
class Flatten(nn.Module):
def forward(self, input):
return input.view(input.size(0), -1)
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def get_n_params(model):
pp=0
for p in list(model.parameters()):
nn=1
for s in list(p.size()):
nn = nn*s
pp += nn
return pp
def indices2onehots(indices, out_dim):
bsize, slen = indices.size()
inds = indices.view(bsize, slen, 1)
onehots = torch.zeros(bsize, slen, out_dim).float()
onehots.scatter_(-1, inds, 1.0)
return onehots.float()
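# Added usage sketch (not part of the original module): one-hot encoding of a small
# index batch; the index values are illustrative only.
def _demo_indices2onehots():
    inds = torch.tensor([[0, 2], [1, 1]])        # batch of 2 sequences, length 2
    onehots = indices2onehots(inds, out_dim=3)   # shape (2, 2, 3)
    return onehots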
###########################################################
## Data
###########################################################
def normalize(input_img, mean=None, std=None):
if (mean is None) or (std is None):
mean = np.array([0.485, 0.456, 0.406]).reshape((1,1,3))
std = np.array([0.229, 0.224, 0.225]).reshape((1,1,3))
# [0, 255] --> [0, 1]
img_np = input_img.astype(np.float32)/255.0
# BGR --> RGB
img_np = img_np[:, :, ::-1].copy()
# Normalize
img_np = (img_np - mean)/std
# H x W x C --> C x H x W
img_np = img_np.transpose((2, 0, 1))
return img_np
def unnormalize(input_img, mean=None, std=None):
if (mean is None) or (std is None):
mean = np.array([0.485, 0.456, 0.406]).reshape((1,1,3))
std = np.array([0.229, 0.224, 0.225]).reshape((1,1,3))
# C x H x W --> H x W x C
img_np = input_img.transpose((1, 2, 0))
# Unnormalize
img_np = img_np * std + mean
# RGB --> BGR
img_np = img_np[:, :, ::-1].copy()
# [0, 1] --> [0, 255]
img_np = (255.0 * img_np).astype(int)
img_np = np.maximum(0, img_np)
img_np = np.minimum(255, img_np)
img_np = img_np.astype(np.uint8)
return img_np
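# Added usage sketch (not part of the original module): normalize/unnormalize
# round-trip on a random BGR image; the reconstruction differs from the input by at
# most one grey level because of integer truncation.
def _demo_normalize_roundtrip():
    img = np.random.randint(0, 256, (32, 32, 3)).astype(np.uint8)
    chw = normalize(img)      # C x H x W float array, ImageNet statistics
    back = unnormalize(chw)   # H x W x C uint8 array
    return int(np.abs(back.astype(int) - img.astype(int)).max())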
class image_normalize(object):
def __init__(self, field):
self.mean = np.array([0.485, 0.456, 0.406]).reshape((1,1,3))
self.std = np.array([0.229, 0.224, 0.225]).reshape((1,1,3))
self.field = field
def __call__(self, sample):
raws = sample[self.field]
imgs = []
for i in range(len(raws)):
img = normalize(raws[i], self.mean, self.std)
imgs.append(img)
imgs = np.stack(imgs, 0)
sample[self.field] = imgs
return sample
def img_to_tensor(input_imgs, mean=None, std=None):
imgs_np = []
for i in range(len(input_imgs)):
img_np = normalize(input_imgs[i], mean, std)
imgs_np.append(img_np)
imgs_np = np.stack(imgs_np, 0)
# to pytorch
imgs_th = torch.from_numpy(imgs_np).float()
return imgs_th
def tensor_to_img(input_imgs_th, mean=None, std=None):
imgs_np = []
for i in range(len(input_imgs_th)):
img_np = input_imgs_th[i].cpu().data.numpy()
img_np = unnormalize(img_np, mean, std)
imgs_np.append(img_np)
imgs_np = np.stack(imgs_np, 0)
return imgs_np
###########################################################
## Visualization
###########################################################
def surface_to_image(surface):
# get numpy data from cairo surface
pimg = PIL.Image.frombuffer("RGBA",
(surface.get_width(), surface.get_height()),
surface.get_data(), "raw", "RGBA", 0, 1)
frame = np.array(pimg)[:,:,:-1]
return frame
###########################################################
## Evaluation
###########################################################
def bb_iou(A, B):
eps = 1e-8
A_area = float(A[2] - A[0]) * (A[3] - A[1])
B_area = float(B[2] - B[0]) * (B[3] - B[1])
minx = max(A[0], B[0]); miny = max(A[1], B[1])
maxx = min(A[2], B[2]); maxy = min(A[3], B[3])
w = max(0, maxx - minx)
h = max(0, maxy - miny)
I_area = w * h
return I_area/(A_area + B_area - I_area + eps)
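# Added usage sketch (not part of the original module): bb_iou on two hand-picked
# boxes in (x1, y1, x2, y2) format.
def _demo_bb_iou():
    a = [0, 0, 2, 2]        # area 4
    b = [1, 1, 3, 3]        # area 4, intersection 1
    return bb_iou(a, b)     # 1 / (4 + 4 - 1) ~= 0.1429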
def gaussian2d(x, y, sigmas):
v = (x - y)/np.array(sigmas)
return np.exp(-0.5 * np.sum(v * v))
def batch_gaussian1d(x, y, sigma):
v = (x - y)/sigma
return np.exp(-0.5 * np.sum(v * v, -1))
###########################################################
## Bounding box
###########################################################
def clip_xyxy(box, width, height):
box[0] = max(0, box[0])
box[1] = max(0, box[1])
box[2] = min(box[2], width-1)
box[3] = min(box[3], height-1)
return box.astype(np.int32)
def clip_xyxys(boxes, width, height):
boxes[:, 0] = np.maximum(boxes[:, 0], 0)
boxes[:, 1] = np.maximum(boxes[:, 1], 0)
boxes[:, 2] = np.minimum(boxes[:, 2], width - 1)
boxes[:, 3] = np.minimum(boxes[:, 3], height - 1)
return boxes.astype(np.int32)
#
from __future__ import division
import timeit
from sklearn import preprocessing
import numpy as np
import pandas as pd
import multiprocessing
import matplotlib.pyplot as plt
from IOHMM import UnSupervisedIOHMM
from IOHMM import OLS, DiscreteMNL, CrossEntropyMNL
from IOHMM import forward_backward
from scipy.special import logsumexp
import pickle
from copy import deepcopy
import random
from sklearn.decomposition import PCA
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
from sklearn.feature_selection import SelectPercentile, chi2, SelectFromModel, f_regression
from sklearn.svm import LinearSVC
from sklearn.linear_model import LassoCV, Lasso
from sklearn.metrics import r2_score
from sklearn.ensemble import ExtraTreesClassifier
import os
Accurate_duration=[]
#filename1='data/activity_index_test.txt'
#file1=open(filename1,'r')
#activity_index_test=eval(file1.read())
activity_index_test = {}
def process_data(Card_ID, data, test_proportion, C, dependent_variables, percent_feature, test_last, model_based_select, SCALAR_DURATION):
data.loc[data['duration_last']==-1,'duration_last'] = 0 # first activity, assign to 0
data['if_first'] = 0
data.loc[data['act_ID'] == 0, 'if_first'] = 1
column_list = list(data.columns.values)
location_list = []
hour_list = []
for ele in column_list:
if 'location' in ele:
location_list.append(ele)
if 'hour' in ele:
hour_list.append(ele)
location_list.remove('location_o')
location_list.remove('location')
hour_list.remove('hour')
hour_list.remove('duration_hour')
# set covariates to this OLS model
weather_list=['rain','heavy_rain','sun','cloud','Avrg_Temp','fengli']
Weekday_list=['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday']
holiday_list=['National_holiday','Observance']
last_activity=['duration_last','duration_trip']
previous_trips = ['Last_trip_time_yesterday','N_days_withtrip_past20',
'N_consec_days_no_trips','N_trips_yesterday']
Other = ['if_first']
Ut_list=weather_list + hour_list + Weekday_list+ location_list + holiday_list +last_activity + previous_trips + Other
# U1_list=Weekday_list+weather_list + holiday_list
x_array = np.array(data.loc[:,Ut_list])
min_max_scaler = preprocessing.MinMaxScaler()
x_array_minmax = min_max_scaler.fit_transform(x_array)
y = np.array(data.loc[:,dependent_variables])
print(x_array_minmax.shape)
if C == -1 and percent_feature == -1:
Ut_list_1 = []
Ut_list_2 = []
Ut_list_new = Ut_list
else:
# ============
if model_based_select:
if len(dependent_variables) >0:
lsvc = LinearSVC(C=C, penalty="l1", dual=False).fit(x_array_minmax, y[:,1])
Feature_select2 = SelectFromModel(lsvc, prefit=True)
else:
lsvc = LinearSVC(C = C, penalty="l1", dual=False).fit(x_array_minmax, y)
Feature_select = SelectFromModel(lsvc, prefit=True)
#----------
# clf = ExtraTreesClassifier(n_estimators=50)
# clf = clf.fit(x_array_minmax, y)
# Feature_select = SelectFromModel(clf, prefit=True)
#----------
else:
if len(dependent_variables) > 0:
Feature_select2 = SelectPercentile(chi2, percentile=percent_feature).fit(x_array_minmax, y[:,1])
Feature_select1 = SelectPercentile(f_regression, percentile=percent_feature).fit(x_array_minmax, y[:,0])
a=1
else:
Feature_select = SelectPercentile(chi2, percentile=percent_feature).fit(x_array_minmax, y)
# ============
if len(dependent_variables) > 0:
# thresh2 = Feature_select2.threshold_
# X_new2 = Feature_select2.transform(x_array_minmax)
if model_based_select:
idx_features2 = Feature_select2.get_support(indices = True)
num_feature = len(idx_features2)
clf = LassoCV().fit(x_array_minmax, y[:, 0])
importance = np.abs(clf.coef_)
idx_thresh = importance.argsort()[-num_feature]
threshold = importance[idx_thresh]
sfm = SelectFromModel(clf, threshold=threshold)
sfm.fit(x_array_minmax, y[:, 0])
# X_new1 = sfm.transform(x_array_minmax)
idx_features1 = sfm.get_support(indices = True)
used_feature_index = list(set(idx_features2).union(idx_features1))
Ut_list_new = [Ut_list[i] for i in used_feature_index]
Ut_list_1 = [Ut_list[i] for i in idx_features1]
Ut_list_2 = [Ut_list[i] for i in idx_features2]
else:
idx_features2 = Feature_select2.get_support(indices = True)
idx_features1 = Feature_select1.get_support(indices = True)
# assert len(idx_features1) == len(idx_features2)
used_feature_index = list(set(idx_features2).union(idx_features1))
Ut_list_new = [Ut_list[i] for i in used_feature_index]
Ut_list_1 = [Ut_list[i] for i in idx_features1]
Ut_list_2 = [Ut_list[i] for i in idx_features2]
else:
X_new = Feature_select.transform(x_array_minmax)
# Ut_list_new = [Ut_list[i] for i in range(len(Ut_list)) if used_feature_index[i]]
# print(X_new.shape)
data.loc[:,Ut_list] = x_array_minmax
if SCALAR_DURATION:
min_max_scaler_dep = preprocessing.MinMaxScaler()
data[dependent_variables[0]] = min_max_scaler_dep.fit_transform(data[[dependent_variables[0]]])
else:
min_max_scaler_dep = None
total_days = data['seq_ID'].max()
train_days = int(total_days - round(total_days*test_proportion))
if test_last:
# last 30 days
data_train = data.loc[data['seq_ID']<=train_days]
data_test = data.loc[data['seq_ID']>train_days]
else:
random.seed(Card_ID)
test_seq = random.sample(list(range(1,total_days+1)), total_days - train_days)
data_train = data.loc[~data['seq_ID'].isin(test_seq)]
data_test = data.loc[data['seq_ID'].isin(test_seq)]
return min_max_scaler,min_max_scaler_dep, data, data_train, data_test, Ut_list_new, Ut_list_1, Ut_list_2
def predict(sequence, num_states, dependent_variables, Card_ID, data, SHMM, Ut_list, Ut_list_1,Ut_list_2,
save_info_list, C, percent_feature, save_predicted_rank, scaler_y, SCALAR_DURATION):
results={}
show_duration_predict = True
for info in save_info_list:
results[info] = []
Dt_h_2 = np.array(sorted(data.loc[:,dependent_variables[1]].unique()))
Dt_h_1 = np.array(np.arange(round(min(data['duration'])) - 0.5, round(max(data['duration'])) + 0.5, 0.01)) # candidate duration
for seq in sequence:
seq = seq.reset_index(drop=True)
for idx, row in seq.iterrows():
if idx == 0:
X_emi_1 = np.array([row[Ut_list_1]])
X_emi_2 = np.array([row[Ut_list_2]])
############################ location
X_ini = np.array([row[Ut_list]])
Log_ini_st = SHMM.model_initial.predict_log_proba(X_ini).reshape(num_states,)
log_Emission = np.zeros((len(Dt_h_2), num_states))
Ut_input = np.repeat(X_emi_2, len(Dt_h_2), axis=0)
for st in range(num_states):
# print(Dt_h.shape)
# print(X.shape)
log_Emission[:, st] = SHMM.model_emissions[st][1].loglike_per_sample(Ut_input, Dt_h_2)
log_P_temp = log_Emission + Log_ini_st
P_final = np.sum(np.exp(log_P_temp), axis=1)
Predict_value = Dt_h_2[np.argmax(P_final)]
True_value = row[dependent_variables[1]]
comb_results = [[P_final[i], Dt_h_2[i]] for i in range(len(Dt_h_2))]
comb_results = sorted(comb_results, reverse=True)
for i in range(save_predicted_rank):
if i >= len(comb_results):
rank_name = 'Predict' + str(i+1)
results[rank_name].append(-1) # no 20 candidates
else:
rank_name = 'Predict' + str(i+1)
results[rank_name].append(comb_results[i][1])
# plt.plot(Dt_h,P_final)
# plt.plot([True_value,True_value],[0,max(P_final)])
# plt.show()
results['ID'].append(row['ID'])
results['Card_ID'].append(Card_ID)
results['Ground_truth'].append(True_value)
if Predict_value == True_value:
results['Correct'].append(1)
else:
results['Correct'].append(0)
results['activity_index'].append(idx)
results['total_activity'].append(num_states)
results['percent_feature'].append(percent_feature)
results['C_reg'].append(C)
################################################################ continuous duration
Log_ini_st = SHMM.model_initial.predict_log_proba(X_ini).reshape(num_states,)
predict_Emission = np.zeros(num_states)
dispersion = np.zeros(num_states)
for st in range(num_states):
# print(Dt_h.shape)
# print(X.shape)
predict_Emission[st] = SHMM.model_emissions[st][0].predict(X_emi_1)
# dispersion[st] = SHMM.model_emissions[st][0].get_dispersion(Y_len = 1) #
# a=1
P_int_st = np.exp(Log_ini_st)
Predict_value_mean = sum(P_int_st * predict_Emission)
# Predict_value_var = sum((P_int_st**2) * dispersion)
True_value = row[dependent_variables[0]]
if SCALAR_DURATION:
predict_dur = scaler_y.inverse_transform(np.array([Predict_value_mean]))
import numpy
import numpy.matlib
import copy
import pandas
import wave
import struct
import os
import math
import ctypes
import multiprocessing
import warnings
import logging
import audioop
import scipy
from scipy import ndimage
import scipy.stats as stats
from scipy.fftpack import fft
from scipy.signal import decimate
from scipy.signal import lfilter
from scipy.fftpack.realtransforms import dct
def read_sph(input_file_name, mode='p'):
"""
Read a SPHERE audio file
:param input_file_name: name of the file to read
:param mode: specifies the following (\* =default)
.. note::
- Scaling:
- 's' Auto scale to make data peak = +-1 (use with caution if reading in chunks)
- 'r' Raw unscaled data (integer values)
- 'p' Scaled to make +-1 equal full scale
- 'o' Scale to bin centre rather than bin edge (e.g. 127 rather than 127.5 for 8 bit values,
can be combined with n+p,r,s modes)
- 'n' Scale to negative peak rather than positive peak (e.g. 128.5 rather than 127.5 for 8 bit values,
can be combined with o+p,r,s modes)
- Format
- 'l' Little endian data (Intel,DEC) (overrides indication in file)
- 'b' Big endian data (non Intel/DEC) (overrides indication in file)
- File I/O
- 'f' Do not close file on exit
- 'd' Look in data directory: voicebox('dir_data')
- 'w' Also read the annotation file \*.wrd if present (as in TIMIT)
- 't' Also read the phonetic transcription file \*.phn if present (as in TIMIT)
- NMAX maximum number of samples to read (or -1 for unlimited [default])
- NSKIP number of samples to skip from start of file (or -1 to continue from previous read when FFX
is given instead of FILENAME [default])
:return: a tupple such that (Y, FS)
.. note::
- Y data matrix of dimension (samples,channels)
- FS sample frequency in Hz
- WRD{\*,2} cell array with word annotations: WRD{\*,:)={[t_start t_end],'text'} where times are in seconds
only present if 'w' option is given
- PHN{\*,2} cell array with phoneme annotations: PHN{\*,:)={[t_start t_end],'phoneme'} where times
are in seconds only present if 't' option is present
- FFX Cell array containing
1. filename
2. header information
1. first header field name
2. first header field value
3. format string (e.g. NIST_1A)
4.
1. file id
2. current position in file
3. dataoff byte offset in file to start of data
4. order byte order (l or b)
5. nsamp number of samples
6. number of channels
7. nbytes bytes per data value
8. bits number of bits of precision
9. fs sample frequency
10. min value
11. max value
12. coding 0=PCM,1=uLAW + 0=no compression, 0=shorten,20=wavpack,30=shortpack
13. file not yet decompressed
5. temporary filename
If no output parameters are specified,
header information will be printed.
The code to decode shorten-encoded files, is
not yet released with this toolkit.
"""
codings = dict([('pcm', 1), ('ulaw', 2)])
compressions = dict([(',embedded-shorten-', 1),
(',embedded-wavpack-', 2),
(',embedded-shortpack-', 3)])
byteorder = 'l'
endianess = dict([('l', '<'), ('b', '>')])
if not mode == 'p':
mode = [mode, 'p']
k = list((m >= 'p') & (m <= 's') for m in mode)
# scale to input limits not output limits
mno = all([m != 'o' for m in mode])
sc = ''
if k[0]:
sc = mode[0]
# Get byte order (little/big endian)
if any([m == 'l' for m in mode]):
byteorder = 'l'
elif any([m == 'b' for m in mode]):
byteorder = 'b'
ffx = ['', '', '', '', '']
if isinstance(input_file_name, str):
if os.path.exists(input_file_name):
fid = open(input_file_name, 'rb')
elif os.path.exists("".join((input_file_name, '.sph'))):
input_file_name = "".join((input_file_name, '.sph'))
fid = open(input_file_name, 'rb')
else:
raise Exception('Cannot find file {}'.format(input_file_name))
ffx[0] = input_file_name
elif not isinstance(input_file_name, str):
ffx = input_file_name
else:
fid = input_file_name
# Read the header
if ffx[3] == '':
fid.seek(0, 0) # go to the begining of the file
l1 = fid.readline().decode("utf-8")
l2 = fid.readline().decode("utf-8")
if not (l1 == 'NIST_1A\n') & (l2 == ' 1024\n'):
logging.warning('File does not begin with a SPHERE header')
ffx[2] = l1.rstrip()
hlen = int(l2[3:7])
hdr = {}
while True: # Read the header and fill a dictionary
st = fid.readline().decode("utf-8").rstrip()
if st[0] != ';':
elt = st.split(' ')
if elt[0] == 'end_head':
break
if elt[1][0] != '-':
logging.warning('Missing ''-'' in SPHERE header')
break
if elt[1][1] == 's':
hdr[elt[0]] = elt[2]
elif elt[1][1] == 'i':
hdr[elt[0]] = int(elt[2])
else:
hdr[elt[0]] = float(elt[2])
if 'sample_byte_format' in list(hdr.keys()):
if hdr['sample_byte_format'][0] == '0':
bord = 'l'
else:
bord = 'b'
if (bord != byteorder) & all([m != 'b' for m in mode]) \
& all([m != 'l' for m in mode]):
byteorder = bord
icode = 0 # Get encoding, default is PCM
if 'sample_coding' in list(hdr.keys()):
icode = -1 # unknown code
for coding in list(codings.keys()):
if hdr['sample_coding'].startswith(coding):
# is the signal compressed
# if len(hdr['sample_coding']) > codings[coding]:
if len(hdr['sample_coding']) > len(coding):
for compression in list(compressions.keys()):
if hdr['sample_coding'].endswith(compression):
icode = 10 * compressions[compression] \
+ codings[coding] - 1
break
else: # if the signal is not compressed
icode = codings[coding] - 1
break
# initialize info of the files with default values
info = [fid, 0, hlen, ord(byteorder), 0, 1, 2, 16, 1, 1, -1, icode]
# Get existing info from the header
if 'sample_count' in list(hdr.keys()):
info[4] = hdr['sample_count']
if not info[4]: # if no info sample_count or zero
# go to the end of the file
fid.seek(0, 2) # Go to te end of the file
# get the sample count
info[4] = int(math.floor((fid.tell() - info[2]) / (info[5] * info[6]))) # get the sample_count
if 'channel_count' in list(hdr.keys()):
info[5] = hdr['channel_count']
if 'sample_n_bytes' in list(hdr.keys()):
info[6] = hdr['sample_n_bytes']
if 'sample_sig_bits' in list(hdr.keys()):
info[7] = hdr['sample_sig_bits']
if 'sample_rate' in list(hdr.keys()):
info[8] = hdr['sample_rate']
if 'sample_min' in list(hdr.keys()):
info[9] = hdr['sample_min']
if 'sample_max' in list(hdr.keys()):
info[10] = hdr['sample_max']
ffx[1] = hdr
ffx[3] = info
info = ffx[3]
ksamples = info[4]
if ksamples > 0:
fid = info[0]
if (icode >= 10) & (ffx[4] == ''): # read compressed signal
# need to use a script with SHORTEN
raise Exception('compressed signal, need to unpack in a script with SHORTEN')
info[1] = ksamples
# use modes o and n to determine effective peak
pk = 2 ** (8 * info[6] - 1) * (1 + (float(mno) / 2 - int(all([m != 'b'
for m in
mode]))) / 2 **
info[7])
fid.seek(1024) # jump after the header
nsamples = info[5] * ksamples
if info[6] < 3:
if info[6] < 2:
logging.debug('Sphere i1 PCM')
y = numpy.fromfile(fid, endianess[byteorder]+"i1", -1)
if info[11] % 10 == 1:
if y.shape[0] % 2:
y = numpy.frombuffer(audioop.ulaw2lin(
numpy.concatenate((y, numpy.zeros(1, 'int8'))), 2),
numpy.int16)[:-1]/32768.
else:
y = numpy.frombuffer(audioop.ulaw2lin(y, 2), numpy.int16)/32768.
pk = 1.
else:
y = y - 128
else:
logging.debug('Sphere i2')
y = numpy.fromfile(fid, endianess[byteorder]+"i2", -1)
else: # non verifie
if info[6] < 4:
y = numpy.fromfile(fid, endianess[byteorder]+"i1", -1)
y = y.reshape(nsamples, 3).transpose()
y = (numpy.dot(numpy.array([1, 256, 65536]), y) - (numpy.dot(y[2, :], 2 ** (-7)).astype(int) * 2 ** 24))
else:
y = numpy.fromfile(fid, endianess[byteorder]+"i4", -1)
if sc != 'r':
if sc == 's':
if info[9] > info[10]:
info[9] = numpy.min(y)
info[10] = numpy.max(y)
sf = 1 / numpy.max(list(list(map(abs, info[9:11]))), axis=0)
else:
sf = 1 / pk
y = sf * y
if info[5] > 1:
y = y.reshape(ksamples, info[5])
else:
y = numpy.array([])
if mode != 'f':
fid.close()
info[0] = -1
if not ffx[4] == '':
pass # VERIFY SCRIPT, WHICH CASE IS HANDLED HERE
return y.astype(numpy.float32), int(info[8]), int(info[6])
def read_wav(input_file_name):
"""
:param input_file_name:
:return:
"""
wfh = wave.open(input_file_name, "r")
(nchannels, sampwidth, framerate, nframes, comptype, compname) = wfh.getparams()
raw = wfh.readframes(nframes * nchannels)
out = struct.unpack_from("%dh" % nframes * nchannels, raw)
sig = numpy.reshape(numpy.array(out), (-1, nchannels)).squeeze()
wfh.close()
return sig.astype(numpy.float32), framerate, sampwidth
def read_pcm(input_file_name):
"""Read signal from single channel PCM 16 bits
:param input_file_name: name of the PCM file to read.
:return: the audio signal read from the file in a ndarray encoded on 16 bits, None and 2 (depth of the encoding in bytes)
"""
with open(input_file_name, 'rb') as f:
f.seek(0, 2) # Go to te end of the file
# get the sample count
sample_count = int(f.tell() / 2)
f.seek(0, 0) # got to the begining of the file
data = numpy.asarray(struct.unpack('<' + 'h' * sample_count, f.read()))
return data.astype(numpy.float32), None, 2
def read_audio(input_file_name, framerate=None):
""" Read a 1 or 2-channel audio file in SPHERE, WAVE or RAW PCM format.
The format is determined from the file extension.
If the sample rate read from the file is a multiple of the one given
as parameter, we apply a decimation function to subsample the signal.
:param input_file_name: name of the file to read from
:param framerate: frame rate, optional, if lower than the one read from the file, subsampling is applied
:return: the signal as a numpy array and the sampling frequency
"""
if framerate is None:
raise TypeError("Expected sampling frequency required in sidekit.frontend.io.read_audio")
ext = os.path.splitext(input_file_name)[-1]
if ext.lower() == '.sph':
sig, read_framerate, sampwidth = read_sph(input_file_name, 'p')
elif ext.lower() == '.wav' or ext.lower() == '.wave':
sig, read_framerate, sampwidth = read_wav(input_file_name)
elif ext.lower() == '.pcm' or ext.lower() == '.raw':
sig, read_framerate, sampwidth = read_pcm(input_file_name)
read_framerate = framerate
else:
raise TypeError("Unknown extension of audio file")
# Convert to 16 bit encoding if needed
sig *= (2**(15-sampwidth))
if framerate > read_framerate:
print("Warning in read_audio, up-sampling function is not implemented yet!")
elif read_framerate % float(framerate) == 0 and not framerate == read_framerate:
print("downsample")
sig = decimate(sig, int(read_framerate / float(framerate)), n=None, ftype='iir', axis=0)
return sig.astype(numpy.float32), framerate
def rasta_filt(x):
"""Apply RASTA filtering to the input signal.
:param x: the input audio signal to filter.
cols of x = critical bands, rows of x = frame
same for y but after filtering
default filter is single pole at 0.94
"""
x = x.T
numerator = numpy.arange(.2, -.3, -.1)
denominator = numpy.array([1, -0.94])
# Initialize the state. This avoids a big spike at the beginning
# resulting from the dc offset level in each band.
# (this is effectively what rasta/rasta_filt.c does).
# Because Matlab uses a DF2Trans implementation, we have to
# specify the FIR part to get the state right (but not the IIR part)
y = numpy.zeros(x.shape)
zf = numpy.zeros((x.shape[0], 4))
for i in range(y.shape[0]):
y[i, :4], zf[i, :4] = lfilter(numerator, 1, x[i, :4], axis=-1, zi=[0, 0, 0, 0])
# .. but don't keep any of these values, just output zero at the beginning
y = numpy.zeros(x.shape)
# Apply the full filter to the rest of the signal, append it
for i in range(y.shape[0]):
y[i, 4:] = lfilter(numerator, denominator, x[i, 4:], axis=-1, zi=zf[i, :])[0]
return y.T
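# Added usage sketch (not part of the original module): RASTA-filter a random
# (frames x bands) log-energy matrix; the shape is preserved.
def _demo_rasta_filt():
    feats = numpy.random.randn(50, 20).astype(numpy.float32)
    filtered = rasta_filt(feats)
    return filtered.shape   # (50, 20)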
def cms(features, label=None, global_mean=None):
"""Performs cepstral mean subtraction
:param features: a feature stream of dimension dim x nframes
where dim is the dimension of the acoustic features and nframes the
number of frames in the stream
:param label: a logical vector
:param global_mean: pre-computed mean to use for feature normalization if given
:return: a feature stream
"""
# If no label file as input: all speech are speech
if label is None:
label = numpy.ones(features.shape[0]).astype(bool)
if label.sum() == 0:
mu = numpy.zeros((features.shape[1]))
if global_mean is not None:
mu = global_mean
else:
mu = numpy.mean(features[label, :], axis=0)
features -= mu
def cmvn(features, label=None, global_mean=None, global_std=None):
"""Performs mean and variance normalization
:param features: a feature stream of dimension dim x nframes
where dim is the dimension of the acoustic features and nframes the
number of frames in the stream
:param global_mean: pre-computed mean to use for feature normalization if given
:param global_std: pre-computed standard deviation to use for feature normalization if given
:param label: a logical vector
:return: a sequence of features
"""
# If no label file as input: all speech are speech
if label is None:
label = numpy.ones(features.shape[0]).astype(bool)
if global_mean is not None and global_std is not None:
mu = global_mean
stdev = global_std
features -= mu
features /= stdev
elif not label.sum() == 0:
mu = numpy.mean(features[label, :], axis=0)
stdev = numpy.std(features[label, :], axis=0)
features -= mu
features /= stdev
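# Added usage sketch (not part of the original module): cms() and cmvn() normalize
# the feature matrix in place and return None; the random features are illustrative.
def _demo_cmvn_inplace():
    feats = numpy.random.randn(100, 13).astype(numpy.float32) * 5.0 + 2.0
    cmvn(feats)   # feats now has ~zero mean and ~unit variance per dimension
    return feats.mean(axis=0), feats.std(axis=0)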
def stg(features, label=None, win=301):
"""Performs feature warping on a sliding window
:param features: a feature stream of dimension dim x nframes
where dim is the dimension of the acoustic features and nframes the
number of frames in the stream
:param label: label of selected frames to compute the Short Term Gaussianization, by default, all frames are used
:param win: size of the frame window to consider, must be an odd number to get a symmetric context on left and right
:return: a sequence of features
"""
# If no label file as input: all speech are speech
if label is None:
label = numpy.ones(features.shape[0]).astype(bool)
speech_features = features[label, :]
add_a_feature = False
if win % 2 == 1:
# one feature per line
nframes, dim = numpy.shape(speech_features)
# If the number of frames is not enough for one window
if nframes < win:
# if the number of frames is not odd, duplicate the last frame
# if nframes % 2 == 1:
if not nframes % 2 == 1:
nframes += 1
add_a_feature = True
speech_features = numpy.concatenate((speech_features, [speech_features[-1, ]]))
win = nframes
# create the output feature stream
stg_features = numpy.zeros(numpy.shape(speech_features))
# Process first window
r = numpy.argsort(speech_features[:win, ], axis=0)
r = numpy.argsort(r, axis=0)
arg = (r[: int((win - 1) / 2)] + 0.5) / win
stg_features[: int((win - 1) / 2), :] = stats.norm.ppf(arg, 0, 1)
# process all following windows except the last one
for m in range(int((win - 1) / 2), int(nframes - (win - 1) / 2)):
idx = list(range(int(m - (win - 1) / 2), int(m + (win - 1) / 2 + 1)))
foo = speech_features[idx, :]
r = numpy.sum(foo < foo[int((win - 1) / 2)], axis=0) + 1
arg = (r - 0.5) / win
stg_features[m, :] = stats.norm.ppf(arg, 0, 1)
# Process the last window
r = numpy.argsort(speech_features[list(range(nframes - win, nframes)), ], axis=0)
r = numpy.argsort(r, axis=0)
arg = (r[int((win + 1) / 2): win, :] + 0.5) / win
stg_features[list(range(int(nframes - (win - 1) / 2), nframes)), ] = stats.norm.ppf(arg, 0, 1)
else:
# Raise an exception
raise Exception('Sliding window should have an odd length')
# wrapFeatures = np.copy(features)
if add_a_feature:
stg_features = stg_features[:-1]
features[label, :] = stg_features
def cep_sliding_norm(features, win=301, label=None, center=True, reduce=False):
"""
Performs cepstral mean subtraction and standard deviation normalization
in a sliding window. The MFCC matrix is modified in place.
:param features: the MFCC, a numpy array
:param win: the size of the sliding windows
:param label: vad label if available
:param center: performs mean subtraction
:param reduce: performs standard deviation division
"""
if label is None:
label = numpy.ones(features.shape[0]).astype(bool)
if numpy.sum(label) <= win:
if reduce:
cmvn(features, label)
else:
cms(features, label)
else:
d_win = win // 2
df = pandas.DataFrame(features[label, :])
r = df.rolling(window=win, center=True)
mean = r.mean().values
std = r.std().values
mean[0:d_win, :] = mean[d_win, :]
mean[-d_win:, :] = mean[-d_win-1, :]
std[0:d_win, :] = std[d_win, :]
std[-d_win:, :] = std[-d_win-1, :]
if center:
features[label, :] -= mean
if reduce:
features[label, :] /= std
def pre_emphasis(input_sig, pre):
"""Pre-emphasis of an audio signal.
:param input_sig: the input vector of signal to pre emphasize
:param pre: value that defines the pre-emphasis filter.
"""
if input_sig.ndim == 1:
return (input_sig - numpy.c_[input_sig[numpy.newaxis, :][..., :1],
input_sig[numpy.newaxis, :][..., :-1]].squeeze() * pre)
else:
return input_sig - numpy.c_[input_sig[..., :1], input_sig[..., :-1]] * pre
"""Generate a new array that chops the given array along the given axis
into overlapping frames.
This method has been implemented by <NAME>,
as part of the talk box toolkit
example::
segment_axis(arange(10), 4, 2)
array([[0, 1, 2, 3],
( [2, 3, 4, 5],
[4, 5, 6, 7],
[6, 7, 8, 9]])
:param a: the array to segment
:param length: the length of each frame
:param overlap: the number of array elements by which the frames should overlap
:param axis: the axis to operate on; if None, act on the flattened array
:param end: what to do with the last frame, if the array is not evenly
divisible into pieces. Options are:
- 'cut' Simply discard the extra values
- 'wrap' Copy values from the beginning of the array
- 'pad' Pad with a constant value
:param endvalue: the value to use for end='pad'
:return: a ndarray
The array is not copied unless necessary (either because it is unevenly
strided and being flattened or because end is set to 'pad' or 'wrap').
"""
if axis is None:
a = numpy.ravel(a) # may copy
axis = 0
l = a.shape[axis]
if overlap >= length:
raise ValueError("frames cannot overlap by more than 100%")
if overlap < 0 or length <= 0:
raise ValueError("overlap must be nonnegative and length must" +
"be positive")
if l < length or (l - length) % (length - overlap):
if l > length:
roundup = length + (1 + (l - length) // (length - overlap)) * (length - overlap)
rounddown = length + ((l - length) // (length - overlap)) * (length - overlap)
else:
roundup = length
rounddown = 0
assert rounddown < l < roundup
assert roundup == rounddown + (length - overlap) or (roundup == length and rounddown == 0)
a = a.swapaxes(-1, axis)
if end == 'cut':
a = a[..., :rounddown]
l = a.shape[0]
elif end in ['pad', 'wrap']: # copying will be necessary
s = list(a.shape)
s[-1] = roundup
b = numpy.empty(s, dtype=a.dtype)
b[..., :l] = a
if end == 'pad':
b[..., l:] = endvalue
elif end == 'wrap':
b[..., l:] = a[..., :roundup - l]
a = b
a = a.swapaxes(-1, axis)
if l == 0:
raise ValueError("Not enough data points to segment array " +
"in 'cut' mode; try 'pad' or 'wrap'")
assert l >= length
assert (l - length) % (length - overlap) == 0
n = 1 + (l - length) // (length - overlap)
s = a.strides[axis]
new_shape = a.shape[:axis] + (n, length) + a.shape[axis + 1:]
new_strides = a.strides[:axis] + ((length - overlap) * s, s) + a.strides[axis + 1:]
try:
return numpy.ndarray.__new__(numpy.ndarray, strides=new_strides,
shape=new_shape, buffer=a, dtype=a.dtype)
except TypeError:
a = a.copy()
# Shape doesn't change but strides does
new_strides = a.strides[:axis] + ((length - overlap) * s, s) + a.strides[axis + 1:]
return numpy.ndarray.__new__(numpy.ndarray, strides=new_strides,
shape=new_shape, buffer=a, dtype=a.dtype)
def speech_enhancement(X, Gain, NN):
"""This program is only to process the single file seperated by the silence
section if the silence section is detected, then a counter to number of
buffer is set and pre-processing is required.
Usage: SpeechENhance(wavefilename, Gain, Noise_floor)
:param X: input audio signal
:param Gain: default value is 0.9, suggestion range 0.6 to 1.4,
higher value means more subtraction or noise reduction
:param NN:
:return: a 1-dimensional array of boolean that
is True for high energy frames.
Copyright 2014 <NAME> and <NAME>
"""
if X.shape[0] < 512: # creer une exception
return X
num1 = 40 # disable buffer number
Alpha = 0.75 # original value is 0.9
FrameSize = 32 * 2 # 256*2
FrameShift = int(FrameSize / NN) # FrameSize/2=128
nfft = FrameSize # = FrameSize
Fmax = int(numpy.floor(nfft / 2) + 1) # 128+1 = 129
# arising hamming windows
Hamm = 1.08 * (0.54 - 0.46 * numpy.cos(2 * numpy.pi * numpy.arange(FrameSize) / (FrameSize - 1)))
y0 = numpy.zeros(FrameSize - FrameShift) # 128 zeros
Eabsn = numpy.zeros(Fmax)
Eta1 = Eabsn
###################################################################
# initial parameter for noise min
mb = numpy.ones((1 + FrameSize // 2, 4)) * FrameSize / 2 # 129x4 set four buffer * FrameSize/2
im = 0
Beta1 = 0.9024 # seems that small value is better;
pxn = numpy.zeros(1 + FrameSize // 2) # 1+FrameSize/2=129 zeros vector
###################################################################
old_absx = Eabsn
x = numpy.zeros(FrameSize)
x[FrameSize - FrameShift:FrameSize] = X[
numpy.arange(numpy.min((int(FrameShift), X.shape[0])))]
if x.shape[0] < FrameSize:
EOF = 1
return X
EOF = 0
Frame = 0
###################################################################
# add the pre-noise estimates
for i in range(200):
Frame += 1
fftn = fft(x * Hamm) # get its spectrum
absn = numpy.abs(fftn[0:Fmax]) # get its amplitude
# add the following part from noise estimation algorithm
pxn = Beta1 * pxn + (1 - Beta1) * absn # Beta=0.9231 recursive pxn
im = (im + 1) % 40 # noise_memory=47; im=0 (init) for noise level estimation
if im:
mb[:, 0] = numpy.minimum(mb[:, 0], pxn) # 129 by 4 im<>0 update the first vector from PXN
else:
mb[:, 1:] = mb[:, :3] # im==0 every 47 time shift pxn to first vector of mb
mb[:, 0] = pxn
# 0-2 vector shifted to 1 to 3
pn = 2 * numpy.min(mb, axis=1) # pn = 129x1po(9)=1.5 noise level estimate compensation
# over_sub_noise= oversubtraction factor
# end of noise detection algotihm
x[:FrameSize - FrameShift] = x[FrameShift:FrameSize]
index1 = numpy.arange(FrameShift * Frame, numpy.min((FrameShift * (Frame + 1), X.shape[0])))
In_data = X[index1] # fread(ifp, FrameShift, 'short');
if In_data.shape[0] < FrameShift: # to check file is out
EOF = 1
break
else:
x[FrameSize - FrameShift:FrameSize] = In_data # shift new 128 to position 129 to FrameSize location
# end of for loop for noise estimation
# end of prenoise estimation ************************
x = numpy.zeros(FrameSize)
x[FrameSize - FrameShift:FrameSize] = X[numpy.arange(numpy.min((int(FrameShift), X.shape[0])))]
if x.shape[0] < FrameSize:
EOF = 1
return X
EOF = 0
Frame = 0
X1 = numpy.zeros(X.shape)
Frame = 0
while EOF == 0:
Frame += 1
xwin = x * Hamm
fftx = fft(xwin, nfft) # FrameSize FFT
absx = numpy.abs(fftx[0:Fmax]) # Fmax=129,get amplitude of x
argx = fftx[:Fmax] / (absx + numpy.spacing(1)) # normalize x spectrum phase
absn = absx
# add the following part from rainer algorithm
pxn = Beta1 * pxn + (1 - Beta1) * absn # s Beta=0.9231 recursive pxn
im = int((im + 1) % (num1 * NN / 2)) # original =40 noise_memory=47; im=0 (init) for noise level estimation
if im:
mb[:, 0] = numpy.minimum(mb[:, 0], pxn) # 129 by 4 im<>0 update the first vector from PXN
else:
mb[:, 1:] = mb[:, :3] # im==0 every 47 time shift pxn to first vector of mb
mb[:, 0] = pxn
pn = 2 * numpy.min(mb, axis=1) # pn = 129x1po(9)=1.5 noise level estimate compensation
Eabsn = pn
Gaina = Gain
temp1 = Eabsn * Gaina
Eta1 = Alpha * old_absx + (1 - Alpha) * numpy.maximum(absx - temp1, 0)
new_absx = (absx * Eta1) / (Eta1 + temp1) # wiener filter
old_absx = new_absx
ffty = new_absx * argx # multiply amplitude with its normalized spectrum
y = numpy.real(numpy.fft.ifft(numpy.concatenate((ffty,
                                                 numpy.conj(ffty[numpy.arange(Fmax - 2, 0, -1)])))))
y[:FrameSize - FrameShift] = y[:FrameSize - FrameShift] + y0
y0 = y[FrameShift:FrameSize] # keep 129 to FrameSize point samples
x[:FrameSize - FrameShift] = x[FrameShift:FrameSize]
index1 = numpy.arange(FrameShift * Frame, numpy.min((FrameShift * (Frame + 1), X.shape[0])))
In_data = X[index1] # fread(ifp, FrameShift, 'short');
z = 2 / NN * y[:FrameShift] # left channel is the original signal
z /= 1.15
z = numpy.minimum(z, 32767)
z = numpy.maximum(z, -32768)
index0 = numpy.arange(FrameShift * (Frame - 1), FrameShift * Frame)
if not all(index0 < X1.shape[0]):
idx = 0
while (index0[idx] < X1.shape[0]) & (idx < index0.shape[0]):
X1[index0[idx]] = z[idx]
idx += 1
else:
X1[index0] = z
if In_data.shape[0] == 0:
EOF = 1
else:
x[numpy.arange(FrameSize - FrameShift, FrameSize + In_data.shape[0] - FrameShift)] = In_data
X1 = X1[X1.shape[0] - X.shape[0]:]
# }
# catch{
# }
return X1
def vad_percentil(log_energy, percent):
"""
:param log_energy:
:param percent:
:return:
"""
thr = numpy.percentile(log_energy, percent)
return log_energy > thr, thr
def vad_energy(log_energy,
distrib_nb=3,
nb_train_it=8,
flooring=0.0001, ceiling=1.0,
alpha=2):
# center and normalize the energy
log_energy = (log_energy - numpy.mean(log_energy)) / numpy.std(log_energy)
# Initialize a Mixture with 2 or 3 distributions
world = Mixture()
# set the covariance of each component to 1.0 and the mean to mu + meanIncrement
world.cst = numpy.ones(distrib_nb) / (numpy.pi / 2.0)
world.det = numpy.ones(distrib_nb)
world.mu = -2 + 4.0 * numpy.arange(distrib_nb) / (distrib_nb - 1)
world.mu = world.mu[:, numpy.newaxis]
world.invcov = numpy.ones((distrib_nb, 1))
# set equal weights for each component
world.w = numpy.ones(distrib_nb) / distrib_nb
world.cov_var_ctl = copy.deepcopy(world.invcov)
# Initialize the accumulator
accum = copy.deepcopy(world)
# Perform nbTrainIt iterations of EM
for it in range(nb_train_it):
accum._reset()
# E-step
world._expectation(accum, log_energy)
# M-step
world._maximization(accum, ceiling, flooring)
# Compute threshold
threshold = world.mu.max() - alpha * numpy.sqrt(1.0 / world.invcov[world.mu.argmax(), 0])
# Apply frame selection with the current threshold
label = log_energy > threshold
return label, threshold
def vad_snr(sig, snr, fs=16000, shift=0.01, nwin=256):
"""Select high energy frames based on the Signal to Noise Ratio
of the signal.
Input signal is expected encoded on 16 bits
:param sig: the input audio signal
:param snr: Signal to noise ratio to consider
:param fs: sampling frequency of the input signal in Hz. Default is 16000.
:param shift: shift between two frames in seconds. Default is 0.01
:param nwin: number of samples of the sliding window. Default is 256.
"""
overlap = nwin - int(shift * fs)
sig /= 32768.
sig = speech_enhancement(numpy.squeeze(sig), 1.2, 2)
# Compute Standard deviation
sig += 0.1 * numpy.random.randn(sig.shape[0])
std2 = segment_axis(sig, nwin, overlap, axis=None, end='cut', endvalue=0).T
std2 = numpy.std(std2, axis=0)
std2 = 20 * numpy.log10(std2) # convert the dB
# APPLY VAD
label = (std2 > numpy.max(std2) - snr) & (std2 > -75)
return label
def label_fusion(label, win=3):
"""Apply a morphological filtering on the label to remove isolated labels.
In case the input is a two channel label (2D ndarray of boolean of same
length) the labels of two channels are fused to remove
overlaping segments of speech.
:param label: input labels given in a 1D or 2D ndarray
:param win: parameter or the morphological filters
"""
channel_nb = len(label)
if channel_nb == 2:
overlap_label = numpy.logical_and(label[0], label[1])
label[0] = numpy.logical_and(label[0], ~overlap_label)
label[1] = numpy.logical_and(label[1], ~overlap_label)
for idx, lbl in enumerate(label):
cl = ndimage.grey_closing(lbl, size=win)
label[idx] = ndimage.grey_opening(cl, size=win)
return label
def hz2mel(f, htk=True):
"""Convert an array of frequency in Hz into mel.
:param f: frequency to convert
:return: the equivalence on the mel scale.
"""
if htk:
return 2595 * numpy.log10(1 + f / 700.)
else:
f = numpy.array(f)
# Mel fn to match Slaney's Auditory Toolbox mfcc.m
# Mel fn to match Slaney's Auditory Toolbox mfcc.m
f_0 = 0.
f_sp = 200. / 3.
brkfrq = 1000.
brkpt = (brkfrq - f_0) / f_sp
logstep = numpy.exp(numpy.log(6.4) / 27)
linpts = f < brkfrq
z = numpy.zeros_like(f)
# fill in parts separately
z[linpts] = (f[linpts] - f_0) / f_sp
z[~linpts] = brkpt + (numpy.log(f[~linpts] / brkfrq)) / numpy.log(logstep)
if z.shape == (1,):
return z[0]
else:
return z
def mel2hz(z, htk=True):
"""Convert an array of mel values in Hz.
:param z: ndarray of mel values to convert.
:return: the equivalent values in Hertz.
"""
if htk:
return 700. * (10**(z / 2595.) - 1)
else:
z = numpy.array(z, dtype=float)
f_0 = 0
f_sp = 200. / 3.
brkfrq = 1000.
brkpt = (brkfrq - f_0) / f_sp
logstep = numpy.exp(numpy.log(6.4) / 27)
linpts = (z < brkpt)
f = numpy.zeros_like(z)
# fill in parts separately
f[linpts] = f_0 + f_sp * z[linpts]
f[~linpts] = brkfrq * numpy.exp(numpy.log(logstep) * (z[~linpts] - brkpt))
if f.shape == (1,):
return f[0]
else:
return f
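# Added usage sketch (not part of the original module): HTK mel-scale round trip;
# 1000 Hz maps to roughly 1000 mel and mel2hz inverts hz2mel up to float error.
def _demo_mel_roundtrip():
    m = hz2mel(1000.0)       # ~1000 with the HTK formula
    return m, mel2hz(m)      # second value is ~1000.0 Hz again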
def hz2bark(f):
"""
Convert frequencies (Hertz) to Bark frequencies
:param f: the input frequency
:return:
"""
return 6. * numpy.arcsinh(f / 600.)
def bark2hz(z):
"""
Converts frequencies Bark to Hertz (Hz)
:param z:
:return:
"""
return 600. * numpy.sinh(z / 6.)
def compute_delta(features,
win=3,
method='filter',
filt=numpy.array([.25, .5, .25, 0, -.25, -.5, -.25])):
"""features is a 2D-ndarray each row of features is a a frame
:param features: the feature frames to compute the delta coefficients
:param win: parameter that set the length of the computation window.
The size of the window is (win x 2) + 1
:param method: method used to compute the delta coefficients
can be diff or filter
:param filt: definition of the filter to use in "filter" mode, default one
is similar to SPRO4: filt=numpy.array([.2, .1, 0, -.1, -.2])
:return: the delta coefficients computed on the original features.
"""
# First and last features are appended to the beginning and the end of the
# stream to avoid border effect
x = numpy.zeros((features.shape[0] + 2 * win, features.shape[1]), dtype=numpy.float32)
x[:win, :] = features[0, :]
x[win:-win, :] = features
x[-win:, :] = features[-1, :]
delta = numpy.zeros(x.shape, dtype=numpy.float32)
if method == 'diff':
filt = numpy.zeros(2 * win + 1, dtype=numpy.float32)
filt[0] = -1
filt[-1] = 1
for i in range(features.shape[1]):
delta[:, i] = numpy.convolve(features[:, i], filt)
return delta[win:-win, :]
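# Added usage sketch (not part of the original module): delta coefficients of a small
# random feature matrix keep the (frames x dims) shape of the input.
def _demo_compute_delta():
    feats = numpy.random.randn(20, 13).astype(numpy.float32)
    deltas = compute_delta(feats)
    return deltas.shape   # (20, 13)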
def pca_dct(cep, left_ctx=12, right_ctx=12, p=None):
"""Apply DCT PCA as in [McLaren 2015] paper:
<NAME> and <NAME>, 'Improved Speaker Recognition
Using DCT coefficients as features' in ICASSP, 2015
A 1D-dct is applied to the cepstral coefficients on a temporal
sliding window.
The resulting matrix is then flatten and reduced by using a Principal
Component Analysis.
:param cep: a matrix of cepstral cefficients, 1 line per feature vector
:param left_ctx: number of frames to consider for left context
:param right_ctx: number of frames to consider for right context
:param p: a PCA matrix trained on a developpment set to reduce the
dimension of the features. P is a portait matrix
"""
y = numpy.r_[numpy.resize(cep[0, :], (left_ctx, cep.shape[1])),
cep,
numpy.resize(cep[-1, :], (right_ctx, cep.shape[1]))]
ceps = framing(y, win_size=left_ctx + 1 + right_ctx).transpose(0, 2, 1)
dct_temp = (dct_basis(left_ctx + 1 + right_ctx, left_ctx + 1 + right_ctx)).T
if p is None:
p = numpy.eye(dct_temp.shape[0] * cep.shape[1], dtype=numpy.float32)
return (numpy.dot(ceps.reshape(-1, dct_temp.shape[0]),
dct_temp).reshape(ceps.shape[0], -1)).dot(p)
def shifted_delta_cepstral(cep, d=1, p=3, k=7):
"""
Compute the Shifted-Delta-Cepstral features for language identification
:param cep: matrix of feature, 1 vector per line
:param d: represents the time advance and delay for the delta computation
:param k: number of delta-cepstral blocks whose delta-cepstral
coefficients are stacked to form the final feature vector
:param p: time shift between consecutive blocks.
return: cepstral coefficient concatenated with shifted deltas
"""
y = numpy.r_[numpy.resize(cep[0, :], (d, cep.shape[1])),
cep,
numpy.resize(cep[-1, :], (k * 3 + d, cep.shape[1]))]
delta = compute_delta(y, win=d, method='diff')
sdc = numpy.empty((cep.shape[0], cep.shape[1] * k))
idx = numpy.zeros(delta.shape[0], dtype='bool')
for ii in range(k):
idx[d + ii * p] = True
for ff in range(len(cep)):
sdc[ff, :] = delta[idx, :].reshape(1, -1)
idx = numpy.roll(idx, 1)
return numpy.hstack((cep, sdc))
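# Added usage sketch (not part of the original module): the common 7-1-3-7 SDC
# configuration on 13-dimensional cepstra yields 13 * (1 + 7) = 104 output dimensions.
def _demo_shifted_delta_cepstral():
    cep = numpy.random.randn(50, 13).astype(numpy.float32)
    sdc = shifted_delta_cepstral(cep, d=1, p=3, k=7)
    return sdc.shape   # (50, 104)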
def trfbank(fs, nfft, lowfreq, maxfreq, nlinfilt, nlogfilt, midfreq=1000):
"""Compute triangular filterbank for cepstral coefficient computation.
:param fs: sampling frequency of the original signal.
:param nfft: number of points for the Fourier Transform
:param lowfreq: lower limit of the frequency band filtered
:param maxfreq: higher limit of the frequency band filtered
:param nlinfilt: number of linear filters to use in low frequencies
:param nlogfilt: number of log-linear filters to use in high frequencies
:param midfreq: frequency boundary between linear and log-linear filters
:return: the filter bank and the central frequencies of each filter
"""
# Total number of filters
nfilt = nlinfilt + nlogfilt
# ------------------------
# Compute the filter bank
# ------------------------
# Compute start/middle/end points of the triangular filters in spectral
# domain
frequences = numpy.zeros(nfilt + 2, dtype=numpy.float32)
if nlogfilt == 0:
linsc = (maxfreq - lowfreq) / (nlinfilt + 1)
frequences[:nlinfilt + 2] = lowfreq + numpy.arange(nlinfilt + 2) * linsc
elif nlinfilt == 0:
low_mel = hz2mel(lowfreq)
max_mel = hz2mel(maxfreq)
mels = numpy.zeros(nlogfilt + 2)
# mels[nlinfilt:]
melsc = (max_mel - low_mel) / (nfilt + 1)
mels[:nlogfilt + 2] = low_mel + numpy.arange(nlogfilt + 2) * melsc
# Back to the frequency domain
frequences = mel2hz(mels)
else:
# Compute linear filters on [0;1000Hz]
linsc = (min([midfreq, maxfreq]) - lowfreq) / (nlinfilt + 1)
frequences[:nlinfilt] = lowfreq + numpy.arange(nlinfilt) * linsc
# Compute log-linear filters on [1000;maxfreq]
low_mel = hz2mel(min([1000, maxfreq]))
max_mel = hz2mel(maxfreq)
mels = numpy.zeros(nlogfilt + 2, dtype=numpy.float32)
melsc = (max_mel - low_mel) / (nlogfilt + 1)
# Verify that mel2hz(melsc)>linsc
while mel2hz(melsc) < linsc:
# in this case, we add a linear filter
nlinfilt += 1
nlogfilt -= 1
frequences[:nlinfilt] = lowfreq + numpy.arange(nlinfilt) * linsc
low_mel = hz2mel(frequences[nlinfilt - 1] + 2 * linsc)
max_mel = hz2mel(maxfreq)
mels = numpy.zeros(nlogfilt + 2, dtype=numpy.float32)
melsc = (max_mel - low_mel) / (nlogfilt + 1)
mels[:nlogfilt + 2] = low_mel + numpy.arange(nlogfilt + 2) * melsc
# Back to the frequency domain
frequences[nlinfilt:] = mel2hz(mels)
heights = 2. / (frequences[2:] - frequences[0:-2])
# Compute filterbank coeff (in fft domain, in bins)
fbank = numpy.zeros((nfilt, int(numpy.floor(nfft / 2)) + 1), dtype=numpy.float32)
# FFT bins (in Hz)
n_frequences = numpy.arange(nfft) / (1. * nfft) * fs
for i in range(nfilt):
low = frequences[i]
cen = frequences[i + 1]
hi = frequences[i + 2]
        lid = numpy.arange(numpy.floor(low * nfft / fs) + 1, numpy.floor(cen * nfft / fs) + 1, dtype=int)
        left_slope = heights[i] / (cen - low)
        rid = numpy.arange(numpy.floor(cen * nfft / fs) + 1,
                           min(numpy.floor(hi * nfft / fs) + 1, nfft), dtype=int)
right_slope = heights[i] / (hi - cen)
fbank[i][lid] = left_slope * (n_frequences[lid] - low)
fbank[i][rid[:-1]] = right_slope * (hi - n_frequences[rid[:-1]])
return fbank, frequences
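# Hedged sketch (not part of the original library): builds a 24-filter log-spaced
# bank for a 512-point FFT at 16 kHz; each row is one triangular filter over the
# positive-frequency bins.
def _example_trfbank():
    fbank, freqs = trfbank(fs=16000, nfft=512, lowfreq=100, maxfreq=8000,
                           nlinfilt=0, nlogfilt=24)
    assert fbank.shape == (24, 512 // 2 + 1)
    return freqs  # filter edge frequencies in Hz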
def mel_filter_bank(fs, nfft, lowfreq, maxfreq, widest_nlogfilt, widest_lowfreq, widest_maxfreq,):
"""Compute triangular filterbank for cepstral coefficient computation.
:param fs: sampling frequency of the original signal.
:param nfft: number of points for the Fourier Transform
:param lowfreq: lower limit of the frequency band filtered
:param maxfreq: higher limit of the frequency band filtered
:param widest_nlogfilt: number of log filters
:param widest_lowfreq: lower frequency of the filter bank
    :param widest_maxfreq: higher frequency of the filter bank
:return: the filter bank and the central frequencies of each filter
"""
# ------------------------
# Compute the filter bank
# ------------------------
# Compute start/middle/end points of the triangular filters in spectral
# domain
widest_freqs = numpy.zeros(widest_nlogfilt + 2, dtype=numpy.float32)
low_mel = hz2mel(widest_lowfreq)
max_mel = hz2mel(widest_maxfreq)
mels = numpy.zeros(widest_nlogfilt+2)
melsc = (max_mel - low_mel) / (widest_nlogfilt + 1)
mels[:widest_nlogfilt + 2] = low_mel + numpy.arange(widest_nlogfilt + 2) * melsc
# Back to the frequency domain
widest_freqs = mel2hz(mels)
# Select filters in the narrow band
sub_band_freqs = numpy.array([fr for fr in widest_freqs if lowfreq <= fr <= maxfreq], dtype=numpy.float32)
heights = 2./(sub_band_freqs[2:] - sub_band_freqs[0:-2])
nfilt = sub_band_freqs.shape[0] - 2
# Compute filterbank coeff (in fft domain, in bins)
    fbank = numpy.zeros((nfilt, int(numpy.floor(nfft / 2)) + 1), dtype=numpy.float32)
# FFT bins (in Hz)
nfreqs = numpy.arange(nfft) / (1. * nfft) * fs
for i in range(nfilt):
low = sub_band_freqs[i]
cen = sub_band_freqs[i+1]
hi = sub_band_freqs[i+2]
        lid = numpy.arange(numpy.floor(low * nfft / fs) + 1, numpy.floor(cen * nfft / fs) + 1, dtype=int)
        left_slope = heights[i] / (cen - low)
        rid = numpy.arange(numpy.floor(cen * nfft / fs) + 1,
                           min(numpy.floor(hi * nfft / fs) + 1, nfft), dtype=int)
right_slope = heights[i] / (hi - cen)
fbank[i][lid] = left_slope * (nfreqs[lid] - low)
fbank[i][rid[:-1]] = right_slope * (hi - nfreqs[rid[:-1]])
return fbank, sub_band_freqs
def power_spectrum(input_sig,
fs=8000,
win_time=0.025,
shift=0.01,
prefac=0.97):
"""
Compute the power spectrum of the signal.
:param input_sig:
:param fs:
:param win_time:
:param shift:
:param prefac:
:return:
"""
window_length = int(round(win_time * fs))
overlap = window_length - int(shift * fs)
framed = framing(input_sig, window_length, win_shift=window_length-overlap).copy()
# Pre-emphasis filtering is applied after framing to be consistent with stream processing
framed = pre_emphasis(framed, prefac)
l = framed.shape[0]
n_fft = 2 ** int(numpy.ceil(numpy.log2(window_length)))
# Windowing has been changed to hanning which is supposed to have less noisy sidelobes
# ham = numpy.hamming(window_length)
window = numpy.hanning(window_length)
spec = numpy.ones((l, int(n_fft / 2) + 1), dtype=numpy.float32)
log_energy = numpy.log((framed**2).sum(axis=1) + 1e-5)
dec = 500000
start = 0
stop = min(dec, l)
while start < l:
ahan = framed[start:stop, :] * window
mag = numpy.fft.rfft(ahan, n_fft, axis=-1)
spec[start:stop, :] = mag.real**2 + mag.imag**2
start = stop
stop = min(stop + dec, l)
return spec, log_energy
def mfcc(input_sig,
lowfreq=100, maxfreq=8000,
nlinfilt=0, nlogfilt=24,
nwin=0.025,
fs=16000,
nceps=13,
shift=0.01,
get_spec=False,
get_mspec=False,
prefac=0.97):
"""Compute Mel Frequency Cepstral Coefficients.
:param input_sig: input signal from which the coefficients are computed.
Input audio is supposed to be RAW PCM 16bits
:param lowfreq: lower limit of the frequency band filtered.
Default is 100Hz.
:param maxfreq: higher limit of the frequency band filtered.
Default is 8000Hz.
:param nlinfilt: number of linear filters to use in low frequencies.
Default is 0.
:param nlogfilt: number of log-linear filters to use in high frequencies.
Default is 24.
:param nwin: length of the sliding window in seconds
Default is 0.025.
:param fs: sampling frequency of the original signal. Default is 16000Hz.
:param nceps: number of cepstral coefficients to extract.
Default is 13.
:param shift: shift between two analyses. Default is 0.01 (10ms).
:param get_spec: boolean, if true returns the spectrogram
:param get_mspec: boolean, if true returns the output of the filter banks
:param prefac: pre-emphasis filter value
:return: the cepstral coefficients in a ndaray as well as
the Log-spectrum in the mel-domain in a ndarray.
.. note:: MFCC are computed as follows:
- Pre-processing in time-domain (pre-emphasizing)
- Compute the spectrum amplitude by windowing with a Hamming window
- Filter the signal in the spectral domain with a triangular filter-bank, whose filters are approximatively
linearly spaced on the mel scale, and have equal bandwith in the mel scale
- Compute the DCT of the log-spectrom
- Log-energy is returned as first coefficient of the feature vector.
For more details, refer to [Davis80]_.
"""
# Compute power spectrum
spec, log_energy = power_spectrum(input_sig,
fs,
win_time=nwin,
shift=shift,
prefac=prefac)
# Filter the spectrum through the triangle filter-bank
n_fft = 2 ** int(numpy.ceil(numpy.log2(int(round(nwin * fs)))))
fbank = trfbank(fs, n_fft, lowfreq, maxfreq, nlinfilt, nlogfilt)[0]
mspec = numpy.log(numpy.dot(spec, fbank.T) + 1e-5) # A tester avec log10 et log
# Use the DCT to 'compress' the coefficients (spectrum -> cepstrum domain)
# The C0 term is removed as it is the constant term
ceps = dct(mspec, type=2, norm='ortho', axis=-1)[:, 1:nceps + 1]
lst = list()
lst.append(ceps)
lst.append(log_energy)
if get_spec:
lst.append(spec)
else:
lst.append(None)
del spec
if get_mspec:
lst.append(mspec)
else:
lst.append(None)
del mspec
return lst
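# Hedged usage sketch (not part of the original library): runs the MFCC front-end
# on one second of synthetic 16 kHz audio; real callers pass raw PCM samples.
# Relies on the framing/pre_emphasis/dct helpers imported elsewhere in this module.
def _example_mfcc():
    sig = numpy.random.randn(16000)
    ceps, log_energy, spec, mspec = mfcc(sig, fs=16000, nceps=13,
                                         get_spec=False, get_mspec=True)
    # one 13-dimensional cepstral vector per 10 ms frame, plus log filter-bank energies
    assert ceps.shape[1] == 13 and mspec.shape[0] == ceps.shape[0]
    return ceps, log_energy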
def fft2barkmx(n_fft, fs, nfilts=0, width=1., minfreq=0., maxfreq=8000):
"""
Generate a matrix of weights to combine FFT bins into Bark
bins. n_fft defines the source FFT size at sampling rate fs.
Optional nfilts specifies the number of output bands required
(else one per bark), and width is the constant width of each
band in Bark (default 1).
While wts has n_fft columns, the second half are all zero.
Hence, Bark spectrum is fft2barkmx(n_fft,fs) * abs(fft(xincols, n_fft));
2004-09-05 <EMAIL> based on rastamat/audspec.m
:param n_fft: the source FFT size at sampling rate fs
:param fs: sampling rate
:param nfilts: number of output bands required
:param width: constant width of each band in Bark (default 1)
:param minfreq:
:param maxfreq:
:return: a matrix of weights to combine FFT bins into Bark bins
"""
maxfreq = min(maxfreq, fs / 2.)
min_bark = hz2bark(minfreq)
nyqbark = hz2bark(maxfreq) - min_bark
if nfilts == 0:
        nfilts = int(numpy.ceil(nyqbark)) + 1
wts = numpy.zeros((nfilts, n_fft))
# bark per filt
step_barks = nyqbark / (nfilts - 1)
# Frequency of each FFT bin in Bark
binbarks = hz2bark(numpy.arange(n_fft / 2 + 1) * fs / n_fft)
for i in range(nfilts):
f_bark_mid = min_bark + i * step_barks
# Linear slopes in log-space (i.e. dB) intersect to trapezoidal window
lof = (binbarks - f_bark_mid - 0.5)
hif = (binbarks - f_bark_mid + 0.5)
wts[i, :n_fft // 2 + 1] = 10 ** (numpy.minimum(numpy.zeros_like(hif), numpy.minimum(hif, -2.5 * lof) / width))
return wts
def fft2melmx(n_fft,
fs=8000,
nfilts=0,
width=1.,
minfreq=0,
maxfreq=4000,
htkmel=False,
constamp=False):
"""
Generate a matrix of weights to combine FFT bins into Mel
bins. n_fft defines the source FFT size at sampling rate fs.
Optional nfilts specifies the number of output bands required
(else one per "mel/width"), and width is the constant width of each
band relative to standard Mel (default 1).
While wts has n_fft columns, the second half are all zero.
Hence, Mel spectrum is fft2melmx(n_fft,fs)*abs(fft(xincols,n_fft));
minfreq is the frequency (in Hz) of the lowest band edge;
default is 0, but 133.33 is a common standard (to skip LF).
maxfreq is frequency in Hz of upper edge; default fs/2.
You can exactly duplicate the mel matrix in Slaney's mfcc.m
as fft2melmx(512, 8000, 40, 1, 133.33, 6855.5, 0);
htkmel=1 means use HTK's version of the mel curve, not Slaney's.
constamp=1 means make integration windows peak at 1, not sum to 1.
frqs returns bin center frqs.
% 2004-09-05 <EMAIL> based on fft2barkmx
:param n_fft:
:param fs:
:param nfilts:
:param width:
:param minfreq:
:param maxfreq:
:param htkmel:
:param constamp:
:return:
"""
maxfreq = min(maxfreq, fs / 2.)
if nfilts == 0:
        nfilts = int(numpy.ceil(hz2mel(maxfreq, htkmel) / 2.))
wts = numpy.zeros((nfilts, n_fft))
# Center freqs of each FFT bin
fftfrqs = numpy.arange(n_fft / 2 + 1) / n_fft * fs
# 'Center freqs' of mel bands - uniformly spaced between limits
minmel = hz2mel(minfreq, htkmel)
maxmel = hz2mel(maxfreq, htkmel)
binfrqs = mel2hz(minmel + numpy.arange(nfilts + 2) / (nfilts + 1) * (maxmel - minmel), htkmel)
for i in range(nfilts):
_fs = binfrqs[i + numpy.arange(3, dtype=int)]
# scale by width
_fs = _fs[1] + width * (_fs - _fs[1])
# lower and upper slopes for all bins
        loslope = (fftfrqs - _fs[0]) / (_fs[1] - _fs[0])
        hislope = (_fs[2] - fftfrqs) / (_fs[2] - _fs[1])
        wts[i, 1 + numpy.arange(n_fft // 2 + 1)] = numpy.maximum(numpy.zeros_like(loslope), numpy.minimum(loslope, hislope))
if not constamp:
# Slaney-style mel is scaled to be approx constant E per channel
wts = numpy.dot(numpy.diag(2. / (binfrqs[2 + numpy.arange(nfilts)] - binfrqs[numpy.arange(nfilts)])) , wts)
# Make sure 2nd half of FFT is zero
wts[:, n_fft // 2 + 1: n_fft] = 0
return wts, binfrqs
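# Hedged sketch (not part of the original library): reproduces the Slaney-style
# weight matrix mentioned in the docstring (fft2melmx(512, 8000, 40, 1, 133.33, 6855.5, 0)).
def _example_fft2melmx():
    wts, binfrqs = fft2melmx(512, fs=8000, nfilts=40, width=1.,
                             minfreq=133.33, maxfreq=6855.5)
    # 40 mel bands over a 512-point FFT; columns above n_fft/2 stay zero by construction
    assert wts.shape == (40, 512)
    return binfrqs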
def audspec(power_spectrum,
fs=16000,
nfilts=None,
fbtype='bark',
minfreq=0,
maxfreq=8000,
sumpower=True,
bwidth=1.):
"""
:param power_spectrum:
:param fs:
:param nfilts:
:param fbtype:
:param minfreq:
:param maxfreq:
:param sumpower:
:param bwidth:
:return:
"""
if nfilts is None:
nfilts = int(numpy.ceil(hz2bark(fs / 2)) + 1)
if not fs == 16000:
maxfreq = min(fs / 2, maxfreq)
nframes, nfreqs = power_spectrum.shape
n_fft = (nfreqs -1 ) * 2
if fbtype == 'bark':
wts = fft2barkmx(n_fft, fs, nfilts, bwidth, minfreq, maxfreq)
elif fbtype == 'mel':
wts = fft2melmx(n_fft, fs, nfilts, bwidth, minfreq, maxfreq)
elif fbtype == 'htkmel':
wts = fft2melmx(n_fft, fs, nfilts, bwidth, minfreq, maxfreq, True, True)
elif fbtype == 'fcmel':
wts = fft2melmx(n_fft, fs, nfilts, bwidth, minfreq, maxfreq, True, False)
else:
        raise ValueError('fbtype {} not recognized'.format(fbtype))
wts = wts[:, :nfreqs]
if sumpower:
audio_spectrum = power_spectrum.dot(wts.T)
else:
audio_spectrum = numpy.dot(numpy.sqrt(power_spectrum), wts.T)**2
return audio_spectrum, wts
def postaud(x, fmax, fbtype='bark', broaden=0):
"""
do loudness equalization and cube root compression
:param x:
:param fmax:
:param fbtype:
:param broaden:
:return:
"""
nframes, nbands = x.shape
# Include frequency points at extremes, discard later
nfpts = nbands + 2 * broaden
if fbtype == 'bark':
bandcfhz = bark2hz(numpy.linspace(0, hz2bark(fmax), num=nfpts))
elif fbtype == 'mel':
        bandcfhz = mel2hz(numpy.linspace(0, hz2mel(fmax), num=nfpts))
elif fbtype == 'htkmel' or fbtype == 'fcmel':
bandcfhz = mel2hz(numpy.linspace(0, hz2mel(fmax,1), num=nfpts),1)
else:
        raise ValueError('unknown fbtype {}'.format(fbtype))
# Remove extremal bands (the ones that will be duplicated)
bandcfhz = bandcfhz[broaden:(nfpts - broaden)]
# Hynek's magic equal-loudness-curve formula
fsq = bandcfhz ** 2
ftmp = fsq + 1.6e5
eql = ((fsq / ftmp) ** 2) * ((fsq + 1.44e6) / (fsq + 9.61e6))
# weight the critical bands
z = numpy.matlib.repmat(eql.T,nframes,1) * x
# cube root compress
z = z ** .33
# replicate first and last band (because they are unreliable as calculated)
if broaden == 1:
y = z[:, numpy.hstack((0,numpy.arange(nbands), nbands - 1))]
else:
y = z[:, numpy.hstack((1,numpy.arange(1, nbands - 1), nbands - 2))]
return y, eql
def dolpc(x, model_order=8):
"""
compute autoregressive model from spectral magnitude samples
:param x:
:param model_order:
:return:
"""
nframes, nbands = x.shape
r = numpy.real(numpy.fft.ifft(numpy.hstack((x,x[:,numpy.arange(nbands-2,0,-1)]))))
# First half only
r = r[:, :nbands]
# Find LPC coeffs by Levinson-Durbin recursion
y_lpc = numpy.ones((r.shape[0], model_order + 1))
for ff in range(r.shape[0]):
y_lpc[ff, 1:], e, _ = levinson(r[ff, :-1].T, order=model_order, allow_singularity=True)
# Normalize each poly by gain
y_lpc[ff, :] /= e
return y_lpc
def lpc2cep(a, nout=None):
"""
Convert the LPC 'a' coefficients in each column of lpcas
into frames of cepstra.
nout is number of cepstra to produce, defaults to size(lpcas,1)
2003-04-11 <EMAIL>
:param a:
:param nout:
:return:
"""
ncol , nin = a.shape
order = nin - 1
if nout is None:
nout = order + 1
c = numpy.zeros((ncol, nout))
# First cep is log(Error) from Durbin
c[:, 0] = -numpy.log(a[:, 0])
# Renormalize lpc A coeffs
a /= numpy.tile(a[:, 0][:, None], (1, nin))
for n in range(1, nout):
sum = 0
for m in range(1, n):
sum += (n - m) * a[:, m] * c[:, n - m]
c[:, n] = -(a[:, n] + sum / n)
return c
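# Hedged sketch (not part of the original library): chains the helpers above the
# way a PLP-style front-end would -- auditory (Bark) spectrum, equal-loudness
# weighting and cube-root compression, LPC smoothing, then cepstra. Parameter
# choices are illustrative; it assumes the module-level framing/levinson helpers
# and numpy.matlib are available as used by the functions above.
def _example_plp_like_chain():
    spec, _ = power_spectrum(numpy.random.randn(16000), fs=8000)
    aspec, _ = audspec(spec, fs=8000, nfilts=21, fbtype='bark', minfreq=0, maxfreq=4000)
    post, _ = postaud(aspec, fmax=4000, fbtype='bark')
    lpcas = dolpc(post, model_order=12)
    ceps = lpc2cep(lpcas, nout=13)
    return ceps.shape  # (n_frames, 13)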
def lpc2spec(lpcas, nout=17):
"""
Convert LPC coeffs back into spectra
nout is number of freq channels, default 17 (i.e. for 8 kHz)
:param lpcas:
:param nout:
:return:
"""
[cols, rows] = lpcas.shape
order = rows - 1
gg = lpcas[:, 0]
aa = lpcas / numpy.tile(gg, (rows,1)).T
# Calculate the actual z-plane polyvals: nout points around unit circle
    zz = numpy.exp((-1j * numpy.pi / (nout - 1)) * numpy.outer(numpy.arange(nout), numpy.arange(order + 1)))
"""
Created on Dec 16 2021
@author: <NAME>
Poisson equation solver for the Hall effect.
Includes classes for Hall bars, Hall bars in a nonlocal geometry, and Corbino disks.
The Hall bar class has build in methods for longitudinal and Hall 4-probe resistance measurements.
Plotting functions assume coordinates are in microns, but the Poisson equation is scale-invariant.
"""
import time
import math
import numpy as np
import scipy.sparse as sp # import sparse matrix library
import matplotlib.pyplot as plt
from scipy.sparse.linalg import spsolve
# import the file where the differentiation matrix operators are defined
from diff_matrices import Diff_mat_1D, Diff_mat_2D
class hallbar():
"""The class for a Hall bar device
Source is the left terminal, drain is the right terminal.
Args:
Lx : length in x direction
Ly : length in y direction
Nx : number of points in grid along x
Ny : number of points in grid along y
"""
def __init__(self, Lx, Ly, Nx = 301, Ny = 201):
# Initiate with no contacts
self.contacts = []
# Define coordinate variables
self.Nx = Nx
self.Ny = Ny
self.Lx = Lx
self.Ly = Ly
self.x = np.linspace(0,self.Lx,self.Nx)
self.y = np.linspace(0,self.Ly,self.Ny)
self.dx = self.x[1] - self.x[0] # grid spacing along x direction
self.dy = self.y[1] - self.y[0] # grid spacing along y direction
self.X,self.Y = np.meshgrid(self.x,self.y) # 2D meshgrid
# 1D indexing
self.Xu = self.X.ravel() # Unravel 2D meshgrid to 1D array
self.Yu = self.Y.ravel()
# Search for boundary indices
start_time = time.time()
self.ind_unravel_L = np.squeeze(np.where(self.Xu==self.x[0])) # Left boundary
self.ind_unravel_R = np.squeeze(np.where(self.Xu==self.x[self.Nx-1])) # Right boundary
self.ind_unravel_B = np.squeeze(np.where(self.Yu==self.y[0])) # Bottom boundary
self.ind_unravel_T = np.squeeze(np.where(self.Yu==self.y[self.Ny-1])) # Top boundary
self.ind_boundary_unravel = np.squeeze(np.where((self.Xu==self.x[0]) | (self.Xu==self.x[self.Nx-1]) | (self.Yu==self.y[0]) | (self.Yu==self.y[self.Ny-1]))) # outer boundaries 1D unravel indices
self.ind_boundary = np.where((self.X==self.x[0]) | (self.X==self.x[self.Nx-1]) | (self.Y==self.y[0]) | (self.Y==self.y[self.Ny-1])) # outer boundary
print("Boundary search time = %1.4s" % (time.time()-start_time))
# Load finite difference matrix operators
self.Dx_2d, self.Dy_2d, self.D2x_2d, self.D2y_2d = Diff_mat_2D(self.Nx,self.Ny)
# Initiate empty solution matrix
self.u = 0
def solve(self, lmbda):
# constructs matrix problem and solves Poisson equation
# Args: lmbda : sigma_xy / sigma_xx. Must be finite
# Returns: self.u : electric potential
self.lmbda = lmbda
# Construct system matrix without boundary conditions
start_time = time.time()
I_sp = sp.eye(self.Nx*self.Ny).tocsr()
L_sys = self.D2x_2d/self.dx**2 + self.D2y_2d/self.dy**2
# Boundary operators
BD = I_sp # Dirichlet boundary operator
BNx = self.Dx_2d / (2 * self.dx) # Neumann boundary operator for x component
BNy = self.Dy_2d / (2 * self.dy) # Neumann boundary operator for y component
# DIRICHLET BOUNDARY CONDITIONS FOR CONTACTS
L_sys[self.ind_unravel_L,:] = BD[self.ind_unravel_L,:] # Boundaries at the left layer
L_sys[self.ind_unravel_R,:] = BD[self.ind_unravel_R,:] # Boundaries at the right edges
# CURRENT THROUGH EDGES
L_sys[self.ind_unravel_T,:] = BNy[self.ind_unravel_T,:] - lmbda * BNx[self.ind_unravel_T,:] # Boundaries at the top layer
L_sys[self.ind_unravel_B,:] = BNy[self.ind_unravel_B,:] - lmbda * BNx[self.ind_unravel_B,:] # Boundaries at the bottom layer
# Source function (right hand side vector)
g = np.zeros(self.Nx*self.Ny)
# Insert boundary values at the boundary points
g[self.ind_unravel_L] = 1 # Dirichlet boundary condition at source
g[self.ind_unravel_R] = 0 # Dirichlet boundary condition at drain
g[self.ind_unravel_T] = 0 # No current through top
g[self.ind_unravel_B] = 0 # No current through bottom
print("System matrix and right hand vector computation time = %1.6s" % (time.time()-start_time))
start_time = time.time()
self.u = spsolve(L_sys,g).reshape(self.Ny,self.Nx).T
print("spsolve() time = %1.6s" % (time.time()-start_time))
def voltage_measurement(self, x1, x2, side='top'):
# Args: x1 : point of V_A
# x2 : point of V_B
# side ('top', 'bottom', or 'hall') : which side of Hall bar to measure
# Returns: V_A - V_B
if np.all(self.u==0):
raise Exception('System has not been solved')
if x1 > self.Lx or x1 < 0 or x2 > self.Lx or x2 < 0:
raise Exception('Points out of bounds')
if side=='top':
ya = self.Ny-1
yb = self.Ny-1
elif side=='bottom':
ya = 0
yb = 0
elif side=='hall':
ya = 0
yb = self.Ny-1
else:
            raise Exception('Side must be top, bottom, or hall')
# Find nearest index value to input coordinates
xa = np.searchsorted(self.x, x1, side='left')
xb = np.searchsorted(self.x, x2, side='left')
return self.u[xa, ya] - self.u[xb, yb]
def plot_potential(self):
        if np.all(self.u == 0):
            raise Exception('System has not been solved')
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.v2.fluid.core as core
import unittest
import numpy as np
class TestFeedFetch(unittest.TestCase):
def test_feed_fetch(self):
scope = core.Scope()
place = core.CPUPlace()
input_array = np.ones((4, 4, 6)).astype("float32")
input_array[0, 0, 0] = 3
input_array[3, 3, 5] = 10
input_tensor = core.LoDTensor([[0, 2, 4]])
input_tensor.set(input_array, place)
core.set_feed_variable(scope, input_tensor, "feed", 0)
output_tensor = core.get_fetch_variable(scope, "feed", 0)
output_lod = output_tensor.lod()
self.assertEqual(0, output_lod[0][0])
self.assertEqual(2, output_lod[0][1])
self.assertEqual(4, output_lod[0][2])
        output_array = np.array(output_tensor)
# gradcam_experiments.py
# runs the experiments in section 5.2
# visualizes the disagreement and confusing input elements using GradCam
import cv2
import os
import torch
from torchvision.utils import save_image
import numpy as np
import torch.nn as nn
from shutil import copyfile
from torch.autograd import Variable
from deeplearning import aux_funcs as af
from deeplearning import network_architectures as arcs
def save_gradient_images(gradient, path_to_file):
gradient = gradient - gradient.min()
gradient /= gradient.max()
gradient = gradient.transpose(1, 2, 0) * 255
cv2.imwrite(path_to_file, np.uint8(gradient))
def preprocess_image(img):
means = [0.4802, 0.4481, 0.3975]
stds = [0.2302, 0.2265, 0.2262]
preprocessed_img = img.copy()[: , :, ::-1]
for i in range(3):
preprocessed_img[:, :, i] = preprocessed_img[:, :, i] - means[i]
preprocessed_img[:, :, i] = preprocessed_img[:, :, i] / stds[i]
preprocessed_img = np.ascontiguousarray(np.transpose(preprocessed_img, (2, 0, 1)))
preprocessed_img = torch.from_numpy(preprocessed_img)
preprocessed_img.unsqueeze_(0)
input = Variable(preprocessed_img, requires_grad = True)
return input
def save_gradcam(img, mask, path_to_file):
heatmap = cv2.applyColorMap(np.uint8(255*mask), cv2.COLORMAP_JET)
heatmap = np.float32(heatmap) / 255
cam = heatmap + np.float32(img)
cam = cam / np.max(cam)
    cv2.imwrite(path_to_file, np.uint8(255 * cam))
import matplotlib.pyplot as plt
from Model import Model
import numpy as np
class Boundary(Model):
def __init__(self):
Model.__init__(self)
        # Compute the auxiliary matrices A and C
        self.calc_A_C()
        # Compute the absorption factors
        self.calc_fat()
def calc_A_C(self):
        self.C = np.zeros((self.Nzz, self.Nxx))  # Matrix used to simplify the discretized wave equation
        self.A = np.zeros((self.Nzz, self.Nxx))  # Matrix used to simplify the non-reflective boundary equation
self.A[:,:] = self.extended_model[:,:] * (self.dt/self.h)
self.C[:,:] = -(self.A[:,:] ** 2) / 12
def calc_fat(self):
        self.fat_s = np.ones((self.Nzz, self.Nxx))
'''
Test basis, which represents the basis of the Legendre Polynomials
'''
import numpy as np
import sympy as sp
import unittest
from gsplines.basis.basis0010 import cBasis0010
class cMyTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
''' Initialize the symbolic expression of the Legendre Polynomials
'''
super(cMyTest, self).__init__(*args, **kwargs)
np.set_printoptions(linewidth=500, precision=4)
tau = sp.symbols('tau', positive=True)
s = sp.symbols('s', real=True)
basis = [0, 0, 0, 0, 0, 0]
basis[0] = sp.sympify(1.0)
basis[1] = s
for i in range(1, 5):
basis[i + 1] = 1.0 / (
i + 1.0) * ((2.0 * i + 1.0) * s * basis[i] - i * basis[i - 1])
self.Bsym_ = basis
self.tausym_ = tau
self.ssym_ = s
def test_value(self):
''' Compare the value of the symbolic and the implemented Legendre
Polynomials'''
for item in range(0, 100):
Bimpl = cBasis0010()
tau_ = 10.0 * np.random.rand()
s_ = np.random.rand() * 2.0 - 1.0
B = [Bi.subs({self.tausym_: tau_}) for Bi in self.Bsym_]
B = [sp.lambdify(self.ssym_, Bi) for Bi in B]
B = np.array([Bi(s_) for Bi in B])
e = np.max(np.abs(B - Bimpl.evalOnWindow(s_, tau_)))
# print('error = {:.3e}\r'.format(e), end='')
assert (e < 1.0e-10)
def test_derivatives_wrt_t(self):
''' Compare the derivative w.r.t. t of the symbolic and the implemented
Legendre Polynomials'''
for item in range(0, 100):
Bimpl = cBasis0010()
tau_ = 10.0 * np.random.rand()
s_ = np.random.rand() * 2.0 - 1.0
ddeg = np.random.randint(1, 6)
B = [
Bi.diff(self.ssym_, ddeg) * sp.Pow(2 / self.tausym_, ddeg)
for Bi in self.Bsym_
]
B = [Bi.subs({self.tausym_: tau_}) for Bi in B]
B = [sp.lambdify(self.ssym_, Bi) for Bi in B]
B = np.array([Bi(s_) for Bi in B])
e = np.max(np.abs(B - Bimpl.evalDerivOnWindow(s_, tau_, ddeg)))
assert (
e < 5.0e-3
), 'Large error on derivatives wrt t, error = {:+.3e}'.format(e)
def test_derivatives_wrt_tau(self):
''' Compare the derivative w.r.t. tau of the symbolic and the implemented
Legendre Polynomials'''
for item in range(0, 100):
Bimpl = cBasis0010()
tau_ = 10.0 * np.random.rand()
            s_ = np.random.rand() * 2.0 - 1.0
import numpy as np
from astropy.io import fits, ascii
import matplotlib.pyplot as plt
import warnings
try:
from ..utils import get_baseDir
except ImportError as err1:
warnings.warn("Could not import baseDir. Will save diagnostics to . ")
def get_baseDir():
return "."
import os
import pdb
from copy import deepcopy
def do_even_odd(thisAmp):
"""
Do an even-odd correction for a given amplifier
If only one amplifier is used, it can be the whole image
"""
even_odd_model = np.zeros_like(thisAmp)
even_offset = np.nanmedian(thisAmp[:,0::2])
even_odd_model[:,0::2] = even_offset
odd_offset = np.nanmedian(thisAmp[:,1::2])
even_odd_model[:,1::2] = odd_offset
return thisAmp - even_odd_model, even_odd_model
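# Hedged usage sketch (not part of the original package): removes a synthetic
# even/odd column offset from a fake amplifier image.
def _example_do_even_odd():
    rng = np.random.default_rng(0)
    amp = rng.normal(0., 1., (64, 128))
    amp[:, 0::2] += 5.0  # inject an offset on the even columns
    corrected, model = do_even_odd(amp)
    # after correction the even and odd column medians should (nearly) agree
    return np.nanmedian(corrected[:, 0::2]) - np.nanmedian(corrected[:, 1::2])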
def do_backsub(img,photObj=None,amplifiers=4,saveDiagnostics=False,
evenOdd=True,activePixMask=None,backgMask=None,
grismr=False):
"""
Do background subtraction amplifier-by-amplifier, row-by-row around the sources
Parameters
---------------
img: numpy 2D array
The input uncorrected image
photObj: (optional) a tshirt photometry pipeline object
If supplied, it will use the background apertures to mask out sources
amplifiers: int
        How many output amplifiers are used? 4 for NIRCam stripe mode and 1 for a single output amplifier
evenOdd: bool
Remove the even and odd offsets before doing row-by-row medians?
saveDiagnostics: bool
Save diagnostic files?
activePixMask: numpy 2D array or None
Mask of reference pixels to ignore (optionally). Pixels that are False
will be ignored in the background estimation, and pixels that are
true will be kept in the background estimation. If refpixMask is None
no extra points will be masked
backgMask: numpy 2D array or None
Mask for the background. Pixels that are False will be ignored in
row-by-row background estimation. Pixels that are true will
be kept for the background estimation.
grismr: bool
Is this NIRCam GRISMR data? Special treatment is needed for NIRCam
GRISMR, where the spectra run through multiple amplifiers
"""
## Npix Threshold
    ## this many pixels must be in a row in order to do a median
## otherwise, it attempts to interpolate from other amplifiers
## this only applies when noutputs=4
Npix_threshold = 3
## Start by including all pixels
useMask = np.ones_like(img,dtype=bool)
## make a mask with Tshirt object
if photObj is not None:
y, x = np.mgrid[0:img.shape[0],0:img.shape[1]]
xcen = photObj.srcApertures.positions[:,0]
ycen = photObj.srcApertures.positions[:,1]
useMask = np.ones_like(img,dtype=bool)
for srcInd in np.arange(photObj.nsrc):
            r = np.sqrt((x - xcen[srcInd])**2 + (y - ycen[srcInd])**2)
srcPts = r < photObj.param['backStart']
useMask[srcPts] = False
if backgMask is not None:
useMask = useMask & backgMask
## only include active pixels
if activePixMask is None:
activeMask = np.ones_like(img,dtype=bool)
else:
activeMask = activePixMask
useMask = useMask & activeMask
## let nanmedian do the work of masking
maskedPts = (useMask == False)
masked_img = deepcopy(img)
masked_img[maskedPts] = np.nan
    outimg = np.zeros_like(img)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 17 11:14:48 2020
@author: lorenz
"""
import numpy as np
def vecnorm(mat, ax = 1):
return np.sum( np.abs(mat)**2,axis=ax)**(1./2)
def rms(mat, ax = 0):
return (np.sum( np.abs(mat)**2,axis=ax) / mat.shape[ax] )**(1./2)
def skew(v) :
Q = np.zeros((3,3))
Q[0,0:3] = np.block( [ 0, -v[2], v[1]])
Q[1,0:3] = np.block( [ v[2], 0 , -v[0]])
Q[2,0:3] = np.block( [ -v[1], v[0], 0 ])
return Q
def matrix3_times_array3(a, matrix):
if matrix.ndim >2:
matrix = matrix.reshape((3,3))
out_array = np.empty(a.shape)
out_array[:,0] = matrix[0,0] * a[:,0] + matrix[0,1] * a[:,1] + matrix[0,2] * a[:,2]
out_array[:,1] = matrix[1,0] * a[:,0] + matrix[1,1] * a[:,1] + matrix[1,2] * a[:,2]
out_array[:,2] = matrix[2,0] * a[:,0] + matrix[2,1] * a[:,1] + matrix[2,2] * a[:,2]
return out_array
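# Hedged sketch (not part of the original module): the skew-symmetric matrix from
# skew(v) reproduces the cross product, i.e. skew(v) @ w equals np.cross(v, w).
def _example_skew_cross_product():
    v = np.array([1.0, 2.0, 3.0])
    w = np.array([-0.5, 0.4, 2.0])
    assert np.allclose(skew(v) @ w, np.cross(v, w))
    return skew(v)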
def correlation_lags(in1_len, in2_len, mode='full'):
r"""
Calculates the lag / displacement indices array for 1D cross-correlation.
Parameters
----------
in1_size : int
First input size.
in2_size : int
Second input size.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output.
See the documentation `correlate` for more information.
See Also
--------
correlate : Compute the N-dimensional cross-correlation.
Returns
-------
lags : array
Returns an array containing cross-correlation lag/displacement indices.
Indices can be indexed with the np.argmax of the correlation to return
the lag/displacement.
Notes
-----
Cross-correlation for continuous functions :math:`f` and :math:`g` is
defined as:
.. math ::
\left ( f\star g \right )\left ( \tau \right )
\triangleq \int_{t_0}^{t_0 +T}
\overline{f\left ( t \right )}g\left ( t+\tau \right )dt
Where :math:`\tau` is defined as the displacement, also known as the lag.
Cross correlation for discrete functions :math:`f` and :math:`g` is
defined as:
.. math ::
\left ( f\star g \right )\left [ n \right ]
\triangleq \sum_{-\infty}^{\infty}
\overline{f\left [ m \right ]}g\left [ m+n \right ]
Where :math:`n` is the lag.
Examples
--------
Cross-correlation of a signal with its time-delayed self.
>>> from scipy import signal
>>> rng = np.random.RandomState(0)
>>> x = rng.standard_normal(1000)
>>> y = np.concatenate([rng.standard_normal(100), x])
>>> correlation = signal.correlate(x, y, mode="full")
>>> lags = signal.correlation_lags(x.size, y.size, mode="full")
>>> lag = lags[np.argmax(correlation)]
"""
# calculate lag ranges in different modes of operation
if mode == "full":
# the output is the full discrete linear convolution
# of the inputs. (Default)
lags = np.arange(-in2_len + 1, in1_len)
elif mode == "same":
# the output is the same size as `in1`, centered
# with respect to the 'full' output.
# calculate the full output
lags = np.arange(-in2_len + 1, in1_len)
# determine the midpoint in the full output
mid = lags.size // 2
# determine lag_bound to be used with respect
# to the midpoint
lag_bound = in1_len // 2
# calculate lag ranges for even and odd scenarios
if in1_len % 2 == 0:
lags = lags[(mid-lag_bound):(mid+lag_bound)]
else:
lags = lags[(mid-lag_bound):(mid+lag_bound)+1]
elif mode == "valid":
# the output consists only of those elements that do not
# rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
# must be at least as large as the other in every dimension.
# the lag_bound will be either negative or positive
        # this lets us infer how to present the lag range
lag_bound = in1_len - in2_len
if lag_bound >= 0:
lags = np.arange(lag_bound + 1)
else:
            lags = np.arange(lag_bound, 1)
    return lags
# Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Measures on and between quantum states and operations."""
from typing import Optional, TYPE_CHECKING, Tuple
import numpy as np
import scipy
from cirq import protocols, value
from cirq.qis.states import (
QuantumState,
infer_qid_shape,
quantum_state,
validate_density_matrix,
validate_normalized_state_vector,
)
if TYPE_CHECKING:
import cirq
def _sqrt_positive_semidefinite_matrix(mat: np.ndarray) -> np.ndarray:
"""Square root of a positive semidefinite matrix."""
eigs, vecs = scipy.linalg.eigh(mat)
return vecs @ (np.sqrt(np.abs(eigs)) * vecs).T.conj()
def _validate_int_state(state: int, qid_shape: Optional[Tuple[int, ...]]) -> None:
if state < 0:
raise ValueError(
'Invalid state: A state specified as an integer must be non-negative, '
f'but {state} was given.'
)
if qid_shape is not None:
dim = np.prod(qid_shape)
if state >= dim:
raise ValueError(
'Invalid state for given qid shape: '
'The maximum computational basis state for qid shape '
f'{qid_shape} is {dim - 1}, but {state} was given.'
)
def _validate_product_state(
state: 'cirq.ProductState', qid_shape: Optional[Tuple[int, ...]]
) -> None:
if qid_shape is not None and qid_shape != (2,) * len(state):
raise ValueError(
'Invalid state for given qid shape: '
f'Specified shape {qid_shape} but product state '
f'has shape {(2,) * len(state)}.'
)
def fidelity(
state1: 'cirq.QUANTUM_STATE_LIKE',
state2: 'cirq.QUANTUM_STATE_LIKE',
qid_shape: Optional[Tuple[int, ...]] = None,
validate: bool = True,
atol: float = 1e-7,
) -> float:
"""Fidelity of two quantum states.
The fidelity of two density matrices ρ and σ is defined as
trace(sqrt(sqrt(ρ) σ sqrt(ρ)))^2.
The given states can be state vectors or density matrices.
Args:
state1: The first state.
state2: The second state.
qid_shape: The qid shape of the given states.
validate: Whether to check if the given states are valid quantum states.
atol: Absolute numerical tolerance to use for validation.
Returns:
The fidelity.
Raises:
ValueError: The qid shape of the given states was not specified and
could not be inferred.
ValueError: Invalid quantum state.
"""
# Two ints
if isinstance(state1, int) and isinstance(state2, int):
if validate:
_validate_int_state(state1, qid_shape)
_validate_int_state(state2, qid_shape)
return float(state1 == state2)
# Two ProductStates
if isinstance(state1, value.ProductState) and isinstance(state2, value.ProductState):
if len(state1) != len(state2):
raise ValueError(
'Mismatched number of qubits in product states: '
f'{len(state1)} and {len(state2)}.'
)
if validate:
_validate_product_state(state1, qid_shape)
_validate_product_state(state2, qid_shape)
prod = 1.0
for q, s1 in state1:
s2 = state2[q]
prod *= np.abs(np.vdot(s1.state_vector(), s2.state_vector()))
return prod ** 2
# Two numpy arrays that are either state vector, state tensor, or
# density matrix
if (
isinstance(state1, np.ndarray)
and state1.dtype.kind == 'c'
and isinstance(state2, np.ndarray)
and state2.dtype.kind == 'c'
):
state1, state2 = _numpy_arrays_to_state_vectors_or_density_matrices(
state1, state2, qid_shape=qid_shape, validate=validate, atol=atol
)
return _fidelity_state_vectors_or_density_matrices(state1, state2)
# Use QuantumState machinery for the general case
if qid_shape is None:
try:
qid_shape = infer_qid_shape(state1, state2)
except:
raise ValueError(
'Failed to infer the qid shape of the given states. '
'Please specify the qid shape explicitly using the `qid_shape` argument.'
)
state1 = quantum_state(state1, qid_shape=qid_shape, validate=validate, atol=atol)
state2 = quantum_state(state2, qid_shape=qid_shape, validate=validate, atol=atol)
state1_arr = state1.state_vector_or_density_matrix()
state2_arr = state2.state_vector_or_density_matrix()
return _fidelity_state_vectors_or_density_matrices(state1_arr, state2_arr)
def _numpy_arrays_to_state_vectors_or_density_matrices(
state1: np.ndarray,
state2: np.ndarray,
qid_shape: Optional[Tuple[int, ...]],
validate: bool,
atol: float,
) -> Tuple[np.ndarray, np.ndarray]:
if state1.ndim > 2 or (state1.ndim == 2 and state1.shape[0] != state1.shape[1]):
# State tensor, convert to state vector
state1 = np.reshape(state1, (np.prod(state1.shape).item(),))
if state2.ndim > 2 or (state2.ndim == 2 and state2.shape[0] != state2.shape[1]):
# State tensor, convert to state vector
state2 = np.reshape(state2, (np.prod(state2.shape).item(),))
if state1.ndim == 2 and state2.ndim == 2:
# Must be square matrices
if state1.shape == state2.shape:
if qid_shape is None:
# Ambiguous whether state tensor or density matrix
raise ValueError(
'The qid shape of the given states is ambiguous. '
'Try specifying the qid shape explicitly or '
'using a wrapper function like cirq.density_matrix.'
)
if state1.shape == qid_shape:
# State tensors, convert to state vectors
state1 = np.reshape(state1, (np.prod(qid_shape).item(),))
state2 = np.reshape(state2, (np.prod(qid_shape).item(),))
elif state1.shape[0] < state2.shape[0]:
# state1 is state tensor and state2 is density matrix.
# Convert state1 to state vector
state1 = np.reshape(state1, (np.prod(state1.shape).item(),))
else: # state1.shape[0] > state2.shape[0]
# state2 is state tensor and state1 is density matrix.
# Convert state2 to state vector
state2 = np.reshape(state2, (np.prod(state2.shape).item(),))
elif state1.ndim == 2 and state2.ndim < 2 and np.prod(state1.shape) == np.prod(state2.shape):
# state1 is state tensor, convert to state vector
state1 = np.reshape(state1, (np.prod(state1.shape).item(),))
elif state1.ndim < 2 and state2.ndim == 2 and np.prod(state1.shape) == np.prod(state2.shape):
# state2 is state tensor, convert to state vector
state2 = np.reshape(state2, (np.prod(state2.shape).item(),))
if validate:
dim1: int = state1.shape[0] if state1.ndim == 2 else np.prod(state1.shape).item()
dim2: int = state2.shape[0] if state2.ndim == 2 else np.prod(state2.shape).item()
if dim1 != dim2:
raise ValueError('Mismatched dimensions in given states: ' f'{dim1} and {dim2}.')
if qid_shape is None:
qid_shape = (dim1,)
else:
expected_dim = np.prod(qid_shape)
if dim1 != expected_dim:
raise ValueError(
'Invalid state dimension for given qid shape: '
f'Expected dimension {expected_dim} but '
f'got dimension {dim1}.'
)
for state in (state1, state2):
if state.ndim == 2:
validate_density_matrix(state, qid_shape=qid_shape, atol=atol)
else:
validate_normalized_state_vector(state, qid_shape=qid_shape, atol=atol)
return state1, state2
def _fidelity_state_vectors_or_density_matrices(state1: np.ndarray, state2: np.ndarray) -> float:
if state1.ndim == 1 and state2.ndim == 1:
# Both state vectors
return np.abs(np.vdot(state1, state2)) ** 2
elif state1.ndim == 1 and state2.ndim == 2:
# state1 is a state vector and state2 is a density matrix
return np.real(np.conjugate(state1) @ state2 @ state1)
elif state1.ndim == 2 and state2.ndim == 1:
# state1 is a density matrix and state2 is a state vector
return np.real(np.conjugate(state2) @ state1 @ state2)
elif state1.ndim == 2 and state2.ndim == 2:
# Both density matrices
state1_sqrt = _sqrt_positive_semidefinite_matrix(state1)
eigs = scipy.linalg.eigvalsh(state1_sqrt @ state2 @ state1_sqrt)
trace = np.sum(np.sqrt(np.abs(eigs)))
return trace ** 2
raise ValueError(
'The given arrays must be one- or two-dimensional. '
f'Got shapes {state1.shape} and {state2.shape}.'
)
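# Hedged sketch (not part of the Cirq source): for the pure states |0> and |+>,
# the overlap |<0|+>|^2 is 1/2, so fidelity() should return approximately 0.5.
def _example_fidelity_pure_states():
    ket0 = np.array([1, 0], dtype=complex)
    ket_plus = np.array([1, 1], dtype=complex) / np.sqrt(2)
    return fidelity(ket0, ket_plus, qid_shape=(2,))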
def von_neumann_entropy(
state: 'cirq.QUANTUM_STATE_LIKE',
qid_shape: Optional[Tuple[int, ...]] = None,
validate: bool = True,
atol: float = 1e-7,
) -> float:
"""Calculates the von Neumann entropy of a quantum state in bits.
If `state` is a square matrix, it is assumed to be a density matrix rather
than a (pure) state tensor.
Args:
state: The quantum state.
qid_shape: The qid shape of the given state.
validate: Whether to check if the given state is a valid quantum state.
atol: Absolute numerical tolerance to use for validation.
Returns:
The calculated von Neumann entropy.
Raises:
ValueError: Invalid quantum state.
"""
if isinstance(state, QuantumState) and state._is_density_matrix():
state = state.data
if isinstance(state, np.ndarray) and state.ndim == 2 and state.shape[0] == state.shape[1]:
if validate:
if qid_shape is None:
qid_shape = (state.shape[0],)
validate_density_matrix(state, qid_shape=qid_shape, dtype=state.dtype, atol=atol)
eigenvalues = np.linalg.eigvalsh(state)
# We import here to avoid a costly module level load time dependency on scipy.stats.
import scipy.stats
return scipy.stats.entropy(np.abs(eigenvalues), base=2)
if validate:
_ = quantum_state(state, qid_shape=qid_shape, copy=False, validate=True, atol=atol)
return 0.0
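# Hedged sketch (not part of the Cirq source): the maximally mixed qubit state
# I/2 has exactly one bit of von Neumann entropy.
def _example_von_neumann_entropy():
    rho = np.eye(2, dtype=complex) / 2
    return von_neumann_entropy(rho, qid_shape=(2,))  # == 1.0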
def entanglement_fidelity(operation: 'cirq.SupportsChannel') -> float:
r"""Returns entanglement fidelity of a given quantum channel.
Entanglement fidelity $F_e$ of a quantum channel $E: L(H) \to L(H)$ is the overlap between
the maximally entangled state $|\phi\rangle = \frac{1}{\sqrt{dim H}} \sum_i|i\rangle|i\rangle$
and the state obtained by sending one half of $|\phi\rangle$ through the channel $E$, i.e.
$$
F_e = \langle\phi|(E \otimes I)(|\phi\rangle\langle\phi|)|\phi\rangle
$$
where $I: L(H) \to L(H)$ is the identity map.
Args:
operation: Quantum channel whose entanglement fidelity is to be computed.
Returns:
Entanglement fidelity of the channel represented by operation.
"""
f = 0.0
for k in protocols.channel(operation):
        f += np.abs(np.trace(k)) ** 2
# Copyright (C) 2020-2022 <NAME>, <NAME>, and others
# SPDX-License-Identifier: MIT
from typing import Tuple
import numpy as np
from warnings import warn
from . import kernel as _kernel
from . import poly
from . import sve
class AbstractBasis:
"""Abstract base class for intermediate representation bases."""
@property
def u(self):
"""Basis functions on the (reduced) imaginary time axis.
Set of IR basis functions on the imaginary time (`tau`) or reduced
imaginary time (`x`) axis.
To obtain the value of all basis functions at a point or a array of
points `x`, you can call the function ``u(x)``. To obtain a single
basis function, a slice or a subset `l`, you can use ``u[l]``.
"""
raise NotImplementedError()
@property
def uhat(self):
"""Basis functions on the reduced Matsubara frequency (`wn`) axis.
To obtain the value of all basis functions at a Matsubara frequency
or a array of points `wn`, you can call the function ``uhat(wn)``.
Note that we expect reduced frequencies, which are simply even/odd
numbers for bosonic/fermionic objects. To obtain a single basis
function, a slice or a subset `l`, you can use ``uhat[l]``.
"""
raise NotImplementedError()
@property
def s(self):
"""Vector of singular values of the continuation kernel"""
raise NotImplementedError()
@property
def v(self):
"""Basis functions on the (reduced) real frequency axis.
Set of IR basis functions on the real frequency (`omega`) or reduced
real-frequency (`y`) axis.
To obtain the value of all basis functions at a point or a array of
points `y`, you can call the function ``v(y)``. To obtain a single
basis function, a slice or a subset `l`, you can use ``v[l]``.
"""
raise NotImplementedError()
@property
def statistics(self):
"""Quantum statistic (`"F"` for fermionic, `"B"` for bosonic)"""
raise NotImplementedError()
@property
def accuracy(self):
"""Accuracy of singular value cutoff"""
return self.s[-1] / self.s[0]
def __getitem__(self, index):
"""Return basis functions/singular values for given index/indices.
This can be used to truncate the basis to the n most significant
singular values: `basis[:3]`.
"""
raise NotImplementedError()
@property
def size(self):
"""Number of basis functions / singular values"""
return self.s.size
@property
def shape(self):
"""Shape of the basis function set"""
return self.s.shape
@property
def kernel(self):
"""Kernel of which this is the singular value expansion"""
raise NotImplementedError()
@property
def sve_result(self):
raise NotImplementedError()
@property
def lambda_(self):
"""Basis cutoff parameter Λ = β * ωmax"""
return self.kernel.lambda_
@property
def beta(self):
"""Inverse temperature or `None` because unscaled basis"""
raise NotImplementedError()
@property
def wmax(self):
"""Real frequency cutoff (this is `None` because unscaled basis)"""
raise NotImplementedError()
def default_tau_sampling_points(self):
"""Default sampling points on the imaginary time/x axis"""
return _default_sampling_points(self.u)
def default_omega_sampling_points(self):
"""Default sampling points on the real frequency axis"""
return self.v[-1].deriv().roots()
def default_matsubara_sampling_points(self, *, mitigate=True):
"""Default sampling points on the imaginary frequency axis"""
return _default_matsubara_sampling_points(self.uhat, mitigate)
@property
def is_well_conditioned(self):
"""Returns True if the sampling is expected to be well-conditioned"""
return True
class DimensionlessBasis(AbstractBasis):
"""Intermediate representation (IR) basis in reduced variables.
For a continuation kernel from real frequencies, ω ∈ [-ωmax, ωmax], to
imaginary time, τ ∈ [0, β], this class stores the truncated singular
value expansion or IR basis::
K(x, y) ≈ sum(u[l](x) * s[l] * v[l](y) for l in range(L))
The functions are given in reduced variables, ``x = 2*τ/β - 1`` and
``y = ω/ωmax``, which scales both sides to the interval ``[-1, 1]``. The
kernel then only depends on a cutoff parameter ``Λ = β * ωmax``.
Example:
The following example code assumes the spectral function is a single
pole at x = 0.2::
# Compute IR basis suitable for fermions and β*W <= 42
import sparse_ir
basis = sparse_ir.DimensionlessBasis(statistics='F', lambda_=42)
# Assume spectrum is a single pole at x = 0.2, compute G(iw)
# on the first few Matsubara frequencies
gl = basis.s * basis.v(0.2)
giw = gl @ basis.uhat([1, 3, 5, 7])
See also:
:class:`FiniteTempBasis` for a basis directly in time/frequency.
"""
def __init__(self, statistics, lambda_, eps=None, *, kernel=None,
sve_result=None):
if not (lambda_ >= 0):
raise ValueError("kernel cutoff lambda must be non-negative")
if eps is None and sve_result is None and not sve.HAVE_XPREC:
warn("xprec package is not available:\n"
"expect single precision (1.5e-8) only as both cutoff and\n"
"accuracy of the basis functions")
# Calculate basis functions from truncated singular value expansion
self._kernel = _get_kernel(statistics, lambda_, kernel)
if sve_result is None:
sve_result = sve.compute(self._kernel, eps)
u, s, v = sve_result
else:
u, s, v = sve_result
if u.shape != s.shape or s.shape != v.shape:
raise ValueError("mismatched shapes in SVE")
self._statistics = statistics
# The radius of convergence of the asymptotic expansion is Lambda/2,
# so for significantly larger frequencies we use the asymptotics,
# since it has lower relative error.
even_odd = {'F': 'odd', 'B': 'even'}[statistics]
self._u = u
self._uhat = u.hat(even_odd, n_asymp=self._kernel.conv_radius)
self._s = s
self._v = v
def __getitem__(self, index):
u, s, v = self.sve_result
sve_result = u[index], s[index], v[index]
return DimensionlessBasis(self._statistics, self._kernel.lambda_,
kernel=self._kernel, sve_result=sve_result)
@property
def statistics(self): return self._statistics
@property
def u(self) -> poly.PiecewiseLegendrePoly: return self._u
@property
def uhat(self) -> poly.PiecewiseLegendreFT: return self._uhat
@property
def s(self) -> np.ndarray: return self._s
@property
def v(self) -> poly.PiecewiseLegendrePoly: return self._v
@property
def kernel(self): return self._kernel
@property
def beta(self): return None
@property
def wmax(self): return None
@property
def sve_result(self):
return self._u, self._s, self._v
class FiniteTempBasis(AbstractBasis):
"""Intermediate representation (IR) basis for given temperature.
For a continuation kernel from real frequencies, ω ∈ [-ωmax, ωmax], to
imaginary time, τ ∈ [0, beta], this class stores the truncated singular
value expansion or IR basis::
K(τ, ω) ≈ sum(u[l](τ) * s[l] * v[l](ω) for l in range(L))
This basis is inferred from a reduced form by appropriate scaling of
the variables.
Example:
The following example code assumes the spectral function is a single
pole at ω = 2.5::
# Compute IR basis for fermions and β = 10, W <= 4.2
import sparse_ir
basis = sparse_ir.FiniteTempBasis(statistics='F', beta=10, wmax=4.2)
# Assume spectrum is a single pole at ω = 2.5, compute G(iw)
# on the first few Matsubara frequencies
gl = basis.s * basis.v(2.5)
giw = gl @ basis.uhat([1, 3, 5, 7])
"""
def __init__(self, statistics, beta, wmax, eps=None, *, kernel=None,
sve_result=None):
if not (beta > 0):
raise ValueError("inverse temperature beta must be positive")
if not (wmax >= 0):
raise ValueError("frequency cutoff must be non-negative")
if eps is None and sve_result is None and not sve.HAVE_XPREC:
warn("xprec package is not available:\n"
"expect single precision (1.5e-8) only as both cutoff and\n"
"accuracy of the basis functions")
# Calculate basis functions from truncated singular value expansion
self._kernel = _get_kernel(statistics, beta * wmax, kernel)
if sve_result is None:
sve_result = sve.compute(self._kernel, eps)
u, s, v = sve_result
else:
u, s, v = sve_result
if u.shape != s.shape or s.shape != v.shape:
raise ValueError("mismatched shapes in SVE")
if u.xmin != -1 or u.xmax != 1:
raise RuntimeError("u must be defined in the reduced variable.")
self._sve_result = sve_result
self._statistics = statistics
self._beta = beta
self._wmax = wmax
self._accuracy = s[-1] / s[0]
# The polynomials are scaled to the new variables by transforming the
# knots according to: tau = beta/2 * (x + 1), w = wmax * y. Scaling
# the data is not necessary as the normalization is inferred.
self._u = u.__class__(u.data, beta/2 * (u.knots + 1), beta/2 * u.dx, u.symm)
self._v = v.__class__(v.data, wmax * v.knots, wmax * v.dx, v.symm)
# The singular values are scaled to match the change of variables, with
# the additional complexity that the kernel may have an additional
# power of w.
        self._s = np.sqrt(beta/2 * wmax) * (wmax**(-self._kernel.ypower)) * s
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
"""分割检的封装
"""
from functools import wraps
import numpy as np
def nms_test(bounding_boxes, confidence_score, threshold):
picked_boxes = []
picked_score = []
picked_index = []
if len(bounding_boxes) == 0:
return picked_boxes, picked_score, picked_index
    # Bounding boxes
boxes = np.array(bounding_boxes)
    # Bounding box coordinates
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
    # Confidence scores of the bounding boxes
score = np.array(confidence_score)
    # Compute the areas of the bounding boxes
areas = (x2 - x1) * (y2 - y1)
    # Sort by the confidence scores of the bounding boxes
order = np.argsort(score)
while order.size > 0:
# 选择置信度最高的比边界框
index = order[-1]
picked_boxes.append(bounding_boxes[index])
picked_score.append(confidence_score[index])
picked_index.append(index)
        # Intersection region (overlap) coordinates for the IoU
        _x1 = np.maximum(x1[index], x1[order[:-1]])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 9 00:54:57 2017
@author: scott
"""
import os
import re
import numpy as np
# import matplotlib as mpl
from scipy.interpolate import interp1d
from matplotlib import pyplot as plt
import time
import pickle
try:
from moviepy.editor import VideoClip
from moviepy.video.io.bindings import mplfig_to_npimage
except ImportError:
print("you need the package moviepy to be able to make movies!")
from .import_data import (
load_from_file,
read_macro,
epoch_time_to_timestamp,
timestamp_to_epoch_time,
)
from .pilatus import Pilatus, calibration_0, shape_0
from .XRD import integrate_peak, get_background_line, get_peak_background
from .XRD import Peak
timestamp_matcher = "([0-9]{2}\:){2}[0-9]{2}"
def get_images(
directory,
tag,
shape=shape_0,
calibration=calibration_0,
slits=True,
xslits=None,
yslits=[60, 430],
pixelmax=None,
verbose=True,
vverbose=False,
):
if verbose:
print("\n\nfunction 'get_images' at your service!\n")
try:
lslist = os.listdir(directory)
except FileNotFoundError:
print(
"The directory doesn't exist. get_images is returning a blank dictionary."
)
return {}
# print(tag) # debugging
if verbose:
print(str(len(lslist)) + " items in " + directory)
imagenames = [f for f in lslist if f[-4:] == ".raw" and tag in f]
if verbose:
print(
" of which "
+ str(len(imagenames))
+ " are image files including '"
+ tag
+ "'"
)
images = {}
for f in imagenames:
n = int(f[-8:-4]) # this is the image number as SPEC saves them
filepath = directory + os.sep + f
images[n] = Pilatus(
filepath,
shape=shape,
calibration=calibration,
slits=slits,
xslits=xslits,
yslits=yslits,
pixelmax=pixelmax,
verbose=vverbose,
)
if verbose:
print("\nfunction 'get_images' finished!\n\n")
return images
def peak_colors(peak_list, colors=["k", "b", "r", "g", "c", "m"]):
"""
This is a fill-in function until I've got some kind of standard colors
implemented. It takes a list of integral ranges and returns an identically
indexed dictionary with each value of the form (integral_range, color)
"""
integrals = {}
for i, integral in enumerate(peak_list):
integrals[i] = (integral, colors[i])
return integrals
def get_direction_mask(x, direction=True):
"""
Returns a mask selecting the values of x that are greater than (direction
= True) or less than (direction = False) all previous values
"""
if type(direction) in [int, float]:
direction = direction > 0
mask = []
X = x[0]
for x_i in x:
mask += [(x_i > X) == direction]
if mask[-1]:
X = x_i
return np.array(mask)
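# Hedged sketch (not part of the original module): keeps only the points that
# extend the running maximum, e.g. a scan variable sweeping upward with jitter.
def _example_get_direction_mask():
    x = np.array([0.0, 1.0, 0.5, 2.0, 1.5, 3.0])
    mask = get_direction_mask(x, direction=True)
    # mask -> [False, True, False, True, False, True]
    return x[mask]  # array([1., 2., 3.])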
# ----------------- here comes the CLASS -----------------
class ScanImages:
# -------------- functions for defining the scan ----------
def __init__(
self,
name=None,
csvfile=None,
directory=None,
pilatusfilebase="default",
usecsv=True,
tag=None,
scan_type="time",
calibration=calibration_0,
macro=None,
tth=None,
alpha=None,
timestamp=None,
tstamp=None,
pixelmax=None,
timecol=None,
abstimecol=None,
tz=None,
slits=True,
xslits=None,
yslits=[60, 430],
scan=None,
copy=False,
load=False,
verbose=True,
vverbose=False,
):
"""
give EITHER a csvfile name with full path, or a directory and a tag.
pilatusfilebase can be constructed from this, and used to import the
Pilatus image objects.
The calibration is passed on to the Pilatus objects.
The macro is read to get the (tth, alpha) values which aren't scanned,
        though they can also be put in manually.
timestamp can be either a str like 'hh:mm:ss' or a pointer.
timestamp='abstimecol' uses the first value of the specified timecol in the csvfile
timestamp=None tries to get it from the file
"""
# ------- load a pickle, to save computing time and space -------- #
if load:
try:
with open(name, "rb") as f:
scan = pickle.load(f)
except FileNotFoundError:
print("Couldn't find " + name)
loadname = name + ".pkl"
print("Trying " + loadname + ".")
with open(loadname, "rb") as f:
scan = pickle.load(f)
print("Loaded " + name)
# ------ for development: new code with pre-loaded data -------#
if copy or load: # take all data (images, csv_data, etc) from another scan
for attr in dir(scan):
if attr not in dir(
self
): # because we don't want to replace e.g. functions
setattr(self, attr, getattr(scan, attr))
try:
self.copied += 1
except AttributeError:
self.copied = 1
return
# ---- parse inputs for name and, if csv used, csvname -----------------#
csvname = None
if usecsv:
if csvfile is None:
csv_directory = directory
# print(load) # debugging
if (tag is None or directory is None) and not load:
print("need a csv file name or a directory and a tag!")
return
lslist = os.listdir(directory)
try:
csvname = next(
f
for f in lslist
if f[-4:] == ".csv" and "_scan" in f and tag in f
)
except StopIteration:
if load:
pass
else:
print(lslist)
print(
"Cound not find a csvname containing "
+ tag
+ " in "
+ directory
+ "\n(ls above)"
)
else:
csv_directory, csvname = os.path.split(csvfile)
if len(csv_directory) == 0:
csv_directory = directory
if csvname is not None:
print(
"Loading Scan from directory = "
+ directory
+ "\n found csvname = "
+ str(csvname)
)
if name is None:
if tag is not None:
name = tag
elif csvname is not None:
name = csvname[:-4] # to drop the .csv
elif csvfile is not None:
name = csvfile
print("scan name = '" + name + "'")
# -------------- install easy metadata ------------- #
self.directory = directory
self.name = name
self.timecol = timecol
self.abstimecol = abstimecol
self.tz = tz
self.bg = False # stores whether background has been subtracted
self.verbose = verbose
self.vverbose = vverbose
if scan_type in ["time", "t"]:
self.scan_type = "t"
elif scan_type in ["tth", "TwoTheta"]:
self.scan_type = "tth"
elif scan_type in ["alpha", "a", "th", "Theta"]:
self.scan_type = "alpha"
if macro is not None:
self.macro = macro
self.settings = read_macro(macro)
if tth is None:
tth = self.settings["tth"][-1]
if alpha is None:
alpha = self.settings["alpha"][-1]
self.tth = tth
self.alpha = alpha
# try to read stuff from file name
if csvname is not None:
for match in re.findall("_[A-Za-z]+[n?][0-9]+[p[0-9]+]?", csvname):
attr = re.search("[A-Za-z]", match).group()
value = re.search("[0-9]+[n]?[p[0-9]+]?", match).group()
try:
value = float(value.replace("p", ".").replace("n", "-"))
except ValueError:
print("not sure what " + value + " is.")
if not hasattr(self, attr):
setattr(self, attr, value)
                elif getattr(self, attr) is None:
setattr(self, attr, value)
# --------- import csv if requested
if csvname is not None:
csvfilepath = csv_directory + os.sep + csvname
self.csv_data = load_from_file(
csvfilepath, data_type="SPEC", timestamp=timestamp, tstamp=tstamp, tz=tz
)
self.csvfilepath = csvfilepath
# -------------------- get images! ------------------------#
if pilatusfilebase == "default":
for foldername in ["images", "Pilatus"]:
pilatus_directory = directory + os.sep + foldername
if os.path.isdir(pilatus_directory):
break
else:
print("could not find pilatus directory!")
tag_pilatus = name
else:
pilatus_directory, tag_pilatus = os.path.split(pilatusfilebase)
self.images = get_images(
pilatus_directory,
tag=tag_pilatus,
calibration=calibration,
verbose=verbose,
pixelmax=pixelmax,
slits=slits,
xslits=xslits,
yslits=yslits,
vverbose=vverbose,
)
if len(self.images) == 0:
raise Warning("THIS SCAN IS EMPTY!!!!")
self.empty = True
return
else:
self.empty = False
# ------------------------- organize csvdata and metadata ---------- #
if hasattr(self, "csv_data"):
self.data = self.csv_data.copy()
            # self.csv_into_images() # this causes problems now. csv is more likely to be corrupt than images.
else:
self.data = {"title": name, "data_type": "spec"}
self.data["data_cols"] = []
for col, attr in [("tth_scan", "tth"), ("alpha", "alpha"), ("t_abs", "tstamp")]:
try:
self.data[col] = np.array(
[getattr(self.images[i], attr) for i in range(len(self))]
)
self.data["data_cols"] += [col]
if verbose:
print(
"got '"
+ attr
+ "' from Pilatus objects"
+ " and saved it as self.data['"
+ col
+ "']"
)
except AttributeError:
if verbose:
print("could not get " + col + ", (" + attr + ") from images.")
# this will conveniently store useful data, some from csv_data
if timecol is None and abstimecol is not None:
# put in the timecol!
self.get_timecol_from_abstimecol()
# ---------------------- get timestamp and timecol -----------------#
if verbose:
print(
"\nGetting tstamp and t according to inputs:\n\t"
+ "timestamp = "
+ str(timestamp)
+ ", tstamp = "
+ str(tstamp)
)
if timestamp in ["filename", "csv", "file"]:
tstamp = self.csv_data["tstamp"]
timestamp = epoch_time_to_timestamp(tstamp, tz=tz)
if verbose:
print("got self.tstamp from self.csv_data")
elif timestamp in ["pdi"]:
tstamp = self.images[0].tstamp
if verbose:
print("got self.tstamp from self.images[0]")
elif timestamp in ["abstimecol"]:
try:
value = self.csv_data[abstimecol][0]
try:
a = re.search(timestamp_matcher, value)
except TypeError:
print(
"ERROR: You're trying to get the timestamp from an absolute"
+ " time column.\n Inputs:\ttimestamp='abstimecol',\tabstimecol='"
+ str(abstimecol)
+ "'\n but self.csv_data[abstimecol] = "
+ str(value)
+ "."
)
raise
timestamp = a.group()
tstamp = timestamp_to_epoch_time(value, tz=tz)
# print('line 163: timestamp = ' + timestamp) # debugging
                if timecol is not None:
                    t = self.csv_data[timecol]
                    # correct for the fact that tstamp refers to the first datapoint
                    tstamp = tstamp - t[0]
                    timestamp = epoch_time_to_timestamp(tstamp, tz=tz)
if verbose:
print(
"got self.tstamp from self.csv_data['"
+ abstimecol
+ "'], i.e., abstimecol."
)
except OSError: # a dummy error... I want to actually get the error messages at first
pass
elif "tstamp" in self.csv_data:
tstamp = self.csv_data["tstamp"]
print("got tstamp from self.csv_data")
if "t" not in self.data:
if timecol is not None:
print("getting t from self.csv_data['" + timecol + "'].")
t = self.csv_data[timecol]
elif "t_abs" in self.data:
tstamp = self.data["t_abs"][0]
t = self.data["t_abs"] - tstamp
if verbose:
print("got self.tstamp and self.t from self.data['t_abs']")
else:
try:
t = self.csv_data[self.timecol]
if verbose:
print(
"got self.t from self.csv_data['"
+ self.timecol
+ "'], i.e. timecol."
)
except KeyError:
if self.timecol is not None:
print(
"self.timecol = "
+ str(self.timecol)
+ " is not in csv data. Check yo self."
)
else:
print(
"This is a timescan but there's no time "
+ "variable specified. \nConsider using "
+ "EC_Xray.time_cal() to calibrate and specify one."
)
return
# we can only reach here if 't' has been successfully put into self.data_cols
self.tstamp = tstamp
self.timestamp = timestamp
self.data["tstamp"] = tstamp
self.data["timestamp"] = timestamp
self.data["t"] = t
if "t" not in self.data["data_cols"]:
self.data["data_cols"] += ["t"]
# print('line 170: self.timestamp = ' + str(self.timestamp))
# This code is a bit of a mess, and timestamp is here only for sanity-checking
# purposes. All math will refer to tstamp
# ------- finished ---------- #
if self.verbose:
print("\nScanImages object with name " + self.name + " imported!\n\n")
def __len__(self):
try:
return len(self.images)
except AttributeError:
print(
"len(self) is tricky for scan named '"
+ self.name
+ "' which was loaded without images. Will try to use "
+ "len(self.data['t']) instead"
)
try:
return len(self.data["t"])
except AttributeError:
print("There is no self.data")
except KeyError:
print("self.data has no t.")
return None
def __getitem__(self, indices):
if type(indices) is int:
            if indices < 0:
indices = len(self) + indices
return self.images[indices]
elif type(indices) in [list, tuple]:
return [self.images[i] for i in indices]
print("indices must be an integer or sequence of integers")
def save(self, filename=None, with_images=False):
savescan = ScanImages(copy=True, scan=self)
if not with_images and hasattr(savescan, "images"):
del savescan.images
if filename is None:
filename = "./" + self.name + ".pkl"
with open(filename, "wb") as f:
pickle.dump(savescan, f)
def append(self, scan):
N = len(self)
for n in range(len(scan)):
self.images[N + n] = scan.images[n]
for col, attr in [("tth_scan", "tth"), ("alpha", "alpha"), ("t_abs", "tstamp")]:
self.data[col] = np.array(
[getattr(self.images[i], attr) for i in range(len(self))]
)
self.data["data_cols"] += [col]
tstamp = self.data["t_abs"][0]
t = self.data["t_abs"] - tstamp
self.data["t"] = t
def csv_into_images(self):
if self.scan_type == "t":
for i in range(len(self)):
# I don't like it, but that's where SPEC saves t.
# data columns 'TTIMER' and 'Seconds' contain nothing.
# If t is recorded, tth and alpha are constant, but...
# The tth and alpha are not saved anywhere. The user must
# input them, or input a macro to read. Done in self.__init__
# print('putting tth=' + str(self.tth) + ' into image!') #debugging
if self.images[i].tth is None:
self.images[i].tth = self.tth
if self.images[i].alpha is None:
self.images[i].alpha = self.alpha
elif self.scan_type == "tth":
self.data["tth_scan"] = self.csv_data["TwoTheta"]
self.data["data_cols"] += ["tth_scan"]
for i in range(len(self)):
# self.data['tth'] will be saved for when calculating the spectrum from the images
self.images[i].tth = self.data["tth_scan"][i]
self.images[i].alpha = self.alpha
elif self.scan_type == "alpha":
for i in range(len(self)):
self.images[i].tth = self.tth
def set_tth(self, tth, update_images=True):
self.tth = tth
if update_images:
for image in self.images.values():
image.tth = tth
def get_timecol_from_abstimecol(self):
abstimecol = self.csv_data[self.abstimecol]
t = []
# print('line 228: self.timestamp = ' + str(self.timestamp))
t0 = self.tstamp
        for abstime in abstimecol:
            t += [timestamp_to_epoch_time(abstime, tz=self.tz) - t0]
self.data["t"] = t
if "t" not in self.data["data_cols"]:
self.data["data_cols"] += ["t"]
# -------------- functions for calculating XRD spectra ----------
def get_combined_spectrum(
self,
stepsize=0.05,
override=False,
slits=True,
xslits=None,
yslits=None,
method="sum",
min_pixels=10,
tth=None,
scan_method="sum",
out="spectrum",
weight=None,
recalculate=False,
normalize=None,
):
"""
Calculates conventional tth spectrum (diffractogram) from the pixels
of each Pilatus image. If the image spectra have already been
calculated, they are used unless override is True.
scan_method says whether to add ('sum') or average ('average') the
contributions from each image.
stepsize, method, min_pixels, xslits, yslits, and weight are
all arguments which are passed on to Pilatus.tth_spectrum()
"""
if self.verbose:
print("\n\nfunction 'get_combined_spectrum' at your service!\n")
if hasattr(self, "spectrum") and not recalculate:
return self.spectrum
elif not hasattr(self, "images"):
print("scan '" + self.name + "' has no images! Can't calcualte spectrum")
return
if self.verbose:
t0 = time.time()
print("t = 0")
print(
"calculating tth spectrum for each of "
+ str(len(self))
+ " images, storing in Pilatus objects, and adding them all up."
)
if tth is not None:
            self.tth = tth
if normalize:
try:
normalizer = self.data[normalize]
if self.verbose:
print(
"normalizing spectra according to self.data['"
+ normalize
+ "']."
)
except KeyError:
normalize = False
raise Warning("normalize must be a key to self.data won't normalize")
bins = {}
contributors = {}
raw_spectra = {}
for i in range(len(self)):
bins_i = self.images[i].tth_spectrum(
out="bins",
override=override,
stepsize=stepsize,
method=method,
min_pixels=min_pixels,
tth=tth,
xslits=xslits,
yslits=yslits,
weight=weight,
verbose=self.vverbose,
)
raw_spectra[i] = self.images[i].spectrum
if normalize:
try:
norm = normalizer[i]
if type(norm) not in [int, float, np.float64]:
raise IndexError
except IndexError:
print(
"encountered a problem in normalizer for image #"
+ str(i)
+ ". Terminating."
)
break
else:
norm = 1
for n, counts in bins_i.items():
if type(n) is not int:
continue
if n in bins:
bins[n] += counts / norm
contributors[n] += [i]
else:
bins[n] = counts / norm
contributors[n] = [i]
if self.verbose:
print("Counts per tth interval calculated locally and globally. ")
tth_vec = []
counts_vec = []
n_min = min(bins.keys())
n_max = max(bins.keys())
for n in range(n_min, n_max + 1):
tth_vec += [(n + 0.5) * stepsize]
if scan_method == "average":
counts_vec += [bins[n] / len(contributors[n])]
else:
counts_vec += [bins[n]]
tth_vec = np.array(tth_vec)
counts_vec = np.array(counts_vec)
spectrum = np.stack([tth_vec, counts_vec], axis=0)
N_contributors = np.array([len(contributors[i]) for i in bins.keys()])
self.method = method
self.scan_method = scan_method
self.contributors = contributors
self.N_contributors = N_contributors
self.bins = bins
self.raw_spectra = raw_spectra
self.spectrum = spectrum
self.data.update({"tth": tth_vec, "counts": counts_vec})
if "counts" not in self.data["data_cols"]:
self.data["data_cols"] += ["counts"]
if self.verbose:
print("Converted to global tth spectrum and stored in ScanImages oject.")
print("t = " + str(time.time() - t0) + " seconds.")
print("\nfunction 'get_combined_spectrum' finished!\n\n")
if out == "spectrum":
return spectrum
elif out == "bins":
return bins
def get_stacked_spectra(
self,
stepsize=0.05,
override=None,
slits=True,
xslits=None,
yslits=None,
weight=None,
method="average",
min_pixels=10,
tth=None,
normalize=None,
):
if self.verbose:
print("\n\nfunction 'get_stacked_spectra' at your service!\n")
if override is False and hasattr(self, "spectra") and self.spectra is not None:
if hasattr(self, "spectrab"):
spectra = self.spectrab
else:
spectra = self.spectra
if self.verbose:
print("using the already-calculated image spectra")
return spectra
if not override and hasattr(self, "spectrum") and self.spectrum is not None:
combined_spectrum = self.spectrum
if self.verbose:
print("using the already-calculated spectrum for each image")
else:
combined_spectrum = self.get_combined_spectrum(
out="spectrum",
stepsize=stepsize,
method=method,
tth=tth,
min_pixels=min_pixels,
normalize=normalize,
xslits=xslits,
yslits=yslits,
weight=weight,
)
# this generates all the images' spectra, so they're saved when called later.
tth_vec = combined_spectrum[0]
spectrums = [] # collection of the individual spectrum from each image
if normalize:
try:
normalizer = self.data[normalize]
if self.verbose:
print(
"normalizing spectra according to self.data['"
+ normalize
+ "']."
)
except KeyError:
normalize = False
raise Warning("normalize must be a key to self.data won't normalize")
for i in range(len(self)):
tth_i, counts_i = self.raw_spectra[i]
if normalize:
norm = normalizer[i]
else:
norm = 1
# spectra were generated during call to self.get_combined_spectrum
spectrums += [np.interp(tth_vec, tth_i, counts_i, left=0, right=0) / norm]
# print(norm) # debugging
# spectra += [interp_with_zeros(tth_vec, tth_i, counts_i)] #may as well use numpy (above)
spectra = np.stack(spectrums, axis=0) # a 2-d spectrum space for the scan
self.spectra = spectra
if self.verbose:
print("self.spectra.shape = " + str(np.shape(spectra)))
print("\nfunction 'get_stacked_spectra' finished!\n\n")
return spectra
def slim(self):
"""
deletes image maps to save RAM space.
"""
try:
for i, im in self.images.items():
if hasattr(im, "map_xyz"):
del im.map_xyz
if hasattr(im, "map_xyz_prime"):
del im.map_xyz_prime
if hasattr(im, "map_tth"):
del im.map_tth
if hasattr(im, "map_bin"):
del im.map_bin
except AttributeError: # if there's no images, job is already done.
pass
def subtract_background(
self, background="endpoint", background_type="local", show=None, **kwargs
):
"""
Generates background-subtracted tth spectrum and image tth spectra,
to be saved as self.spectrumb and self.spectrab, respectively.
background can be:
'constant': subtracts the minimum non-zero value from
nonzero values
'linear': subtracts a linear interpolation. The interpolation
is a linear fit that is readjusted iteratively with outliers removed
until there are no outliers, defined by significance p. This will
not really work if peaks are large compared to the relevant tth range.
'endpoint': subtracts a simple line connecting endpoints. N_end
points are included in each endpoint, and endpoitns are moved inwards
if it looks (significance given by p) like an endpoint is on a peak.
A 1D array with length equal to self.spectra.shape[0]:
background is simply subtracted from each image spectrum
in spectra.
A 2D array with shape[0]=2 or list or tuple of two 1D arrays:
interpreted as a tth spectrum. This spectrum is subtracted by
        interpolation. background_type can be specified as 'global' or 'local'.
An integer or float: interpretation depends on background_type. if
background_type is 'global' or 'local', a constant value equal to
background is subtracted.
background_type can be:
'index': the spectra[background] is subtracted from all the spectra
a string corresponding to a column in self.data: The interpolated
spectrum corresponding to self.data[background_type] = background is
subtracted from all the spectra. Used to subtract, for example, the
        spectrum at a given time (background_type='t') or electrochemical
        potential (background_type='U vs RHE / [V]').
'global': background subtraction is done directly for spectrum,
indirectly for spectra
'local': background subtraction is done directly for spectra,
indirectly for spectrum.
        Additional keyword arguments are fed to get_background_line()
For tth scans, a background-subtracted total spectrum should perhaps
be calculated instead... this might not be implemented yet.
"""
# ---------- get spectra and spectrum ----------#
if self.verbose:
print("\n\nfunction 'subtract_background' at your service!\n")
from .combining import get_timecol
# get spectra and spectrum
try:
spectrum = self.spectrum
except AttributeError:
print(
"spectrum not calculated. Call get_combined_spectrum() or"
+ " get_stacked_spectra() before subtracting background."
)
return
spectra = self.get_stacked_spectra()
        # allocate space for background-subtracted spectrum and spectra
spectrumb = spectrum.copy()
spectrab = spectra.copy()
tth_vec = spectrumb[0]
# allocate space for actual backgrounds
b0 = None
bgs = None
b1 = None
# numerize
if type(background) is list:
background = np.array(background)
# print('background = ' + str(background) + ', background_type = ' + str(background_type))
# ---- calculate, if appropriate, the constant spectrum to
# subtract from all spectra ------
if type(background) is np.ndarray and background_type == "local":
if background.shape == (spectra.shape[1],):
if self.verbose:
print("will use the constant background to spectra as input.")
b0 = background
elif len(background.shape) == 1:
print("local spectrum input does not have the right shape!")
return
elif type(background) is int and background_type == "index":
if self.verbose:
print("will use the background of spectra[" + str(background) + "].")
b0 = spectra[background]
elif (
type(background) in [int, float, np.float64]
and background_type in self.data["data_cols"]
):
if self.verbose:
print(
"going to interpolate to "
+ background_type
+ " = "
+ str(background)
)
x = self.data[background_type]
diff = np.diff(x)
if not np.all(diff):
print(
"WARNING! self.data['"
+ background_type
+ "'] is not "
+ "monotonially increasing.\n I'll try to fix it but no guarantee..."
)
try:
interpolater = interp1d(x, spectra, axis=0, fill_value="extrapolate")
if self.verbose:
print("interpolater established with scipy.interpolate.interp1d.")
except ValueError as e:
print(
"got this error: "
+ str(e)
+ "\n...gonna try interpolating to 't' first."
)
t_i = self.data[get_timecol(background_type)]
t = self.data["t"]
print(
"t.shape = "
+ str(t.shape)
+ ", spectra.shape = "
+ str(spectra.shape)
)
interpolater = interp1d(t, spectra, axis=0, fill_value="extrapolate")
try:
background > x[0]
if not np.all(diff):
print(
"using a direction mask to interpolate on a "
+ "monotonically increasing list.\nThis will get "
+ "the first time "
+ background_type
+ " passes "
+ str(background)
)
direction = background > x[0]
mask = get_direction_mask(x, direction=direction)
x, t_i = x[mask], t_i[mask]
if not direction:
x, t_i = np.flipud(x), np.flipud(t_i)
background_type = get_timecol(background_type)
background = np.interp(background, x, t_i)
except:
raise
if self.verbose:
print(
"will use the image spectrum corresponding to "
+ background_type
+ " = "
+ str(background)
+ " as background."
)
b0 = interpolater(background)
elif type(background) in [int, float] and background_type == "local":
if self.verbose:
print(
"will subtract the same constant background"
+ " from each image spectrum"
)
b0 = background
elif self.verbose:
print(
"not a combination giving b0. The background for each"
+ " image will be calculated individually."
)
if b0 is not None and self.verbose:
print(
"Inputs make sense: \nA constant background spectrum will "
+ "be subtracted from each image spectrum in self.spectra"
)
# ----- find background to global spectrum directly, if appropriate ---------
if background_type == "global":
if type(background) is np.ndarray:
if background.shape == (spectrum.shape[1],):
b1 = background
elif background.shape[0] == 2:
b1 = np.interp(
spectrumb[0], background[0], background[1], left=0, right=0
)
elif background in ["linear", "endpoint"]:
b1 = get_background_line(
spectrum,
method=background,
name="global",
out="values",
lincutoff=False,
verbose=self.verbose,
**kwargs,
)
elif type(background) in [int, float, np.float64]:
b1 = np.tile(background, np.size(tth_vec))
if self.verbose and b1 is not None:
print(
"Inputs make sense!\n"
+ "A global background spectrum will be subtracted."
)
# --------- subtract directly calculated background
# from each image spectrum in spectra, if appropriate ----------
if b0 is None: # then the background to each spectrum must be found
bg = {}
for i, y_vec in enumerate(spectrab):
bg[i] = np.zeros(np.shape(y_vec)) # has the length of the full tth vec
bg_i = None # will only have the length of the image's tth vec
mask = ~(y_vec == 0)
tth, y = tth_vec[mask], y_vec[mask]
spec = np.array([tth, y])
if background_type == "global":
# print('tth = ' + str(tth) + ', \n spectrumb[0] = ' +
# str(spectrumb[0]) + ', \b and b1 = ' + str(b1)) #debugging
bg_i = np.interp(tth, spectrumb[0], b1, left=0, right=0)
if self.scan_method == "sum":
bg_i = (
bg_i / self.N_contributors[mask]
) # normalize background to one image
elif background in ["linear", "endpoint"]:
# print(i) # for debugging
bg_i = get_background_line(
spec,
method=background,
mode="match",
name=" image number " + str(i),
floor=True,
out="values",
verbose=self.vverbose,
**kwargs,
)
if bg_i is not None:
bg[i][mask] = bg_i
spectrab[i] = y_vec - bg[i]
            if b1 is None:  # calculate it from the individual backgrounds, bgs
bgs = np.stack([bg[i] for i in range(len(self))], axis=0)
if self.scan_method == "sum":
b1 = np.sum(bgs, axis=0)
else:
b1 = np.sum(bgs, axis=0) / self.N_contributors
else: # if there is a constant background, subtract it from them all!
spectrab = spectrab - np.tile(b0, (len(self), 1))
            if b1 is None:  # calculate it from b0
if self.scan_method == "sum":
b1 = len(self) * b0
else:
b1 = b0
if show:
i = show
fig, ax = plt.subplots()
x, y = spectrum[0], spectra[i]
ax.plot(x, y, "k")
yb = bgs[i]
ax.plot(x, yb, "r")
# ---------- finalize and save background-subtracted spectra ------
# print('b1.shape = ' + str(b1.shape) + ', and spectrumb.shape = ' + str(spectrumb.shape))
spectrumb[1] -= b1
self.b0 = b0
self.bgs = bgs
self.b1 = b1
self.spectrab = spectrab
self.spectrumb = spectrumb
self.background = background
self.background_type = background_type
self.bg = True
if self.verbose:
print("\nfunction 'subtract_background' finished!\n\n")
def correct_for_refraction(
self, delta_eff=None, beta_eff=None, alpha=None, delta_tth=None
):
from .XRD import refraction_correction
try:
corrected = self.corrected
except AttributeError:
corrected = False
if corrected:
print(
"scan has already been corrected once for refraction.\n"
+ "... correcting from original angles"
)
tth_0 = self.tth_0.copy()
else:
tth_0 = self.spectrum[0].copy()
self.tth_0 = tth_0
if alpha is None:
alpha = self.alpha
if delta_eff is None:
try:
delta_eff = self.delta_eff
except AttributeError:
delta_eff = 5.94e-6
if beta_eff is None:
try:
beta_eff = self.beta_eff
except AttributeError:
beta_eff = 2.37e-7
if delta_tth is None:
delta_tth = refraction_correction(
alpha=alpha, delta_eff=delta_eff, beta_eff=beta_eff, alpha_c=None
)
else:
print(f"SHIFTING TTH {-delta_tth} DEG!")
tth = tth_0 - delta_tth
self.data["tth_0"] = tth_0
self.data["tth"] = tth
self.spectrum[0] = tth
try:
self.spectrumb[0] = tth
except AttributeError:
pass
self.corrected = True
return delta_tth
# -------------- functions for integrating and characterizing peaks ----------
def integrate_spectrum(
self,
peaks={"Cu_111": ([19.65, 20.65], "brown"), "CuO_111": ([17.55, 18.55], "k")},
override_peaks=False,
bg=None,
background="linear",
background_type=None,
background_points=4,
):
if bg is None:
bg = self.bg
try:
if bg:
spectrum = self.spectrumb
else:
spectrum = self.spectrum
except AttributeError:
print(
"spectrum not calculated. Call get_combined_spectrum() or"
+ " get_stacked_spectra(). If you want background subtraction"
+ "(bg=True), also call subtract_background()"
)
raise
x, y = spectrum
if "peaks" in dir(self) and not override_peaks:
self.peaks.update(peaks)
else:
self.peaks = peaks
self.integrals = {}
integrals = {}
for name, props in peaks.items():
xspan = props[0]
I = integrate_peak(
x, y, xspan, background=background, background_points=background_points
)
if self.vverbose:
print(name)
integrals[name] = I
self.integrals.update(integrals)
if self.vverbose:
print("Integrated peaks!")
return integrals
def integrate_peaks(
self,
peaks={"Cu_111": ([19.65, 20.65], "brown"), "CuO_111": ([17.55, 18.55], "k")},
override_peaks=False,
bg=None,
background="linear",
background_type="global",
background_points=4,
show=None,
ax=None,
):
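        # Hedged usage sketch (peak windows and colors are illustrative only):
        #   integrals = scan.integrate_peaks(peaks={'Cu_111': ([19.65, 20.65], 'brown')}, bg=True)
        #   integrals['Cu_111']   # for a time scan: one integrated intensity per image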
print("\n\nfunction 'integrate_peaks' at your service!\n")
if self.scan_type == "tth":
return self.integrate_spectrum(
peaks=peaks,
override_peaks=override_peaks,
bg=bg,
background=background,
background_points=background_points,
)
if bg is None:
bg = self.bg
try:
if bg:
spectra = self.spectrab
else:
spectra = self.spectra
except AttributeError:
print(
"spectrum not calculated. Call get_combined_spectrum() or"
+ " get_stacked_spectra(). If you want background subtraction"
+ "(bg=True), also call subtract_background()"
)
raise
if "peaks" in dir(self) and not override_peaks:
self.peaks.update(peaks)
else:
self.peaks = peaks
self.integrals = {}
peak_background = {}
integrals = {}
x = self.spectrum[0]
if background == "existing":
peak_background = self.peak_background
else:
if self.verbose:
print("defining background conditions for peaks")
if type(background) is int:
y = spectra[background]
                if self.verbose:
print("using image " + str(background) + " for background")
elif background_type in ["average", "global"]:
y = np.sum(spectra, axis=0) / len(self)
if self.verbose:
print("using a global spectrum for background.")
for name, props in peaks.items():
if background is None or background is False:
bg = np.zeros(np.shape(x))
bg_type = None
elif type(background) is int or background_type in [
"average",
"global",
]:
xspan = props[0]
bg = get_peak_background(
x,
y,
xspan,
background=background,
background_points=background_points,
)
bg_type = "global"
else:
bg = background
bg_type = background_type
peak_background[name] = (bg, bg_type)
self.peak_background = peak_background
for i in range(len(self)):
if self.vverbose:
print("working on image " + str(i))
y = spectra[i]
plotit = show == i
for name, props in peaks.items():
xspan = props[0]
if plotit:
axi = ax
color = props[1]
else:
axi = None
color = None
bg, bg_type = peak_background[name]
I = integrate_peak(
x,
y,
xspan,
background=bg,
background_type=bg_type,
background_points=background_points,
ax=axi,
color=color,
returnax=False,
)
if self.vverbose:
print(name)
if name not in integrals:
integrals[name] = []
integrals[name] += [I]
if self.vverbose:
print("Integrated peaks!")
for key, value in integrals.items(): # numerize and save
value = np.array(value)
integrals[key] = value
self.integrals[key] = value
self.data[key] = value
if key not in self.data["data_cols"]:
self.data["data_cols"] += [key]
print("\nfunction 'integrate_peaks' finished!\n\n")
return integrals
def get_peaks(self, peaks):
x, y = self.get_combined_spectrum()
P = {}
for name, peak in peaks.items():
xspan, color = peaks[name]
try:
xspan[-1] - xspan[0]
except TypeError:
xspan = [xspan, color]
color = "k"
P[name] = Peak(x, y, name=name, xspan=xspan, color=color)
self.P = P
return P
def track_peaks(self, peaks):
spectra = self.get_stacked_spectra(override=False)
x = self.get_combined_spectrum(override=False)[0]
N = len(self)
P = {}
for name, (xspan, color) in peaks.items():
P[name] = []
for n in range(N):
y = spectra[n]
P[name] += [Peak(x, y, xspan=xspan, color=color, name=name)]
self.P = P
return P
# -------------- functions for plots and videos ----------
def plot_spectrum(
self,
bg=None,
ax="new",
fig=None,
color="k",
show_integrals=None,
tthspan=None,
**kwargs,
):
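        # Hedged usage sketch (illustrative; show_integrals assumes peaks have already been defined):
        #   fig, ax = scan.plot_spectrum(bg=True, tthspan=[15, 25], show_integrals=True)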
if ax == "new":
fig, ax = plt.subplots()
if bg is None:
bg = self.bg
try:
if bg:
tth, counts = self.spectrumb
else:
tth, counts = self.spectrum
except AttributeError:
print(
"spectrum not calculated. Call get_combined_spectrum() or"
+ " get_stacked_spectra(). If you want background subtraction"
+ "(bg=True), also call subtract_background()"
)
raise
        if tthspan is not None:
mask = np.logical_and(tthspan[0] < tth, tth < tthspan[-1])
tth = tth[mask]
counts = counts[mask]
ax.plot(tth, counts, color=color, **kwargs)
ax.set_ylabel("counts: " + self.scan_method + "-" + self.method)
ax.set_xlabel("tth / deg")
if show_integrals:
for (name, (xspan, color)) in self.peaks.items():
integrate_peak(tth, counts, xspan, ax=ax, color=None, fill_color=color)
if fig is None:
fig = ax.get_figure()
return fig, ax
def plot_integrals(
self, peaks="existing", fig=None, ax="new", legend=True, **kwargs
):
if ax == "new":
fig, ax = plt.subplots()
if peaks == "existing":
peaks = self.peaks
if self.scan_type == "t":
x = self.data["t"]
x_str = "time / [s]"
if "integrals" not in dir(self):
self.integrals = {}
for (name, (xspan, color)) in peaks.items():
print(name)
if name not in self.integrals.keys():
self.integrate_peaks(peaks={name: (xspan, color)}, **kwargs)
I = self.integrals[name]
ax.plot(x, I, color=color, label=name)
ax.set_ylabel("counts")
ax.set_xlabel(x_str)
if legend:
ax.legend()
if fig is None:
fig = ax.get_figure()
return fig, ax
def heat_plot(
self,
stepsize=0.05,
override=False,
tthspan=None,
slits=True,
xslits=None,
yslits=None,
tth=None,
method="average",
bg=None,
min_pixels=10,
N_x=300,
ax="new",
orientation="xy",
logscale=False,
zrange=None,
aspect="auto",
colormap="inferno",
split_tth=None,
splitspec={"color": "g", "linestyle": "-"},
tspan="all",
):
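        # Hedged usage sketch (illustrative values; spectra must already be calculated):
        #   ax = scan.heat_plot(tthspan=[17, 22], logscale=True, orientation='xy')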
# get the raw spectra
if bg is None:
bg = self.bg
try:
if bg:
spectra_raw = self.spectrab
else:
spectra_raw = self.spectra
except AttributeError:
print(
"spectrum not calculated. Call get_combined_spectrum() or"
+ " get_stacked_spectra(). If you want background subtraction"
+ "(bg=True), also call subtract_background()"
)
raise
# print('spectra_raw = \n' + str(spectra_raw)) # debugging
# Whatever we're scanning against is called x now.
if self.scan_type == "t":
if self.timecol is None:
timecol = "t"
else:
timecol = self.timecol
x_i = self.data[timecol]
x_str = "time / [s]"
if self.scan_type == "tth":
x_i = self.data["tth_scan"]
x_str = "center tth / deg"
# we want the scan axis to vary linearly, but the input might not.
f = interp1d(x_i, spectra_raw, axis=0, fill_value="extrapolate")
if tspan == "all":
x = np.linspace(x_i[0], x_i[-1], num=N_x)
else:
x = np.linspace(tspan[0], tspan[-1], num=N_x)
# print('interpolating to time vector x = ' + str(x)) # debugging
spectra = f(x)
# and of course the other dimension, which is tth:
tth_vec = self.spectrum[
0
] # I know this is linear, because it's defined here and in pilatus.py
if tthspan is not None:
mask = np.logical_and(tthspan[0] < tth_vec, tth_vec < tthspan[-1])
spectra = spectra[:, mask]
# print(spectra.shape) # debugging
tth_vec = tth_vec[mask]
if logscale:
spectra = np.log(spectra)
if zrange is None:
good = np.logical_and(~np.isnan(spectra), ~np.isinf(spectra))
# print('spectra = \n' + str(spectra)) # debugging
low = np.min(spectra[good])
high = np.max(spectra[good])
else:
low = zrange[0]
high = zrange[1]
spectra[spectra < low] = low
spectra[spectra > high] = high
spectra[np.isnan(spectra)] = low
spectra[np.isinf(spectra)] = low
if orientation == "xy":
spectra = np.swapaxes(spectra, 0, 1)
extent = [x[0], x[-1], tth_vec[0], tth_vec[-1]]
elif orientation == "yx":
extent = [tth_vec[0], tth_vec[-1], x[0], x[-1]]
if ax == "new":
fig, ax = plt.subplots()
if split_tth:
I_split = np.argmax(tth_vec > split_tth)
spectra1 = spectra[:I_split, :]
extent1 = [x[0], x[-1], tth_vec[0], tth_vec[I_split]]
spectra2 = spectra[I_split:, :]
extent2 = [x[0], x[-1], tth_vec[I_split], tth_vec[-1]]
ax.imshow(
spectra1, extent=extent1, aspect=aspect, origin="lower", cmap=colormap
)
ax.imshow(
spectra2, extent=extent2, aspect=aspect, origin="lower", cmap=colormap
)
ax.plot([x[0], x[-1]], [tth_vec[I_split], tth_vec[I_split]], **splitspec)
else:
ax.imshow(
spectra, extent=extent, aspect=aspect, origin="lower", cmap=colormap
)
if orientation == "xy":
ax.set_xlabel(x_str)
ax.set_ylabel("TwoTheta / deg")
elif orientation == "yx":
ax.set_ylabel(x_str)
ax.set_xlabel("TwoTheta / deg")
return ax
def plot_experiment(self, *args, **kwargs):
from .plotting import plot_experiment
if "plot_type" not in kwargs:
kwargs["plot_type"] = "heat"
return plot_experiment(self, *args, **kwargs)
def make_spectrum_movie(
self,
duration=20,
fps=24,
title="default",
peaks="existing",
bg=None,
slits=True,
xslits=None,
yslits=[60, 430],
xlims=None,
ylims=None,
tspan=None,
full=False,
spectrum_specs={},
):
"""
# tspan is the time/tth/index interval for which the movie is made
"""
if self.scan_type == "t":
t_vec = self.data["t"]
elif self.scan_type == "tth":
t_vec = self.csv_data["TwoTheta"]
else:
t_vec = np.arange(len(self))
if tspan is None: # then use the whole interval
tspan = [t_vec[0], t_vec[-1]]
if peaks == "existing":
try:
peaks = self.peaks
except AttributeError:
peaks = None
elif type(peaks) is list:
peaks = peak_colors(peaks)
if bg is None:
bg = self.bg
try:
if bg:
spectra = self.spectrab
else:
spectra = self.spectra
except AttributeError:
print(
"spectrum not calculated. Call get_combined_spectrum() or"
+ " get_stacked_spectra(). If you want background subtraction"
+ "(bg=True), also call subtract_background()"
)
def make_frame(T):
t = tspan[0] + T / duration * (tspan[-1] - tspan[0])
try:
n = next(i for i, t_i in enumerate(t_vec) if t_i > t) - 1
except StopIteration:
n = len(self) - 1
n = max(n, 0)
if full:
x, y = self.spectrum[0], spectra[n]
else:
y_vec = spectra[n]
mask = ~(y_vec == 0)
x, y = self.spectrum[0][mask], y_vec[mask]
fig, ax = plt.subplots()
ax.plot(
x, y,
)
if peaks is not None:
for (name, (xspan, color)) in peaks.items():
integrate_peak(x, y, xspan, ax=ax, color=None, fill_color=color)
if xlims is not None:
ax.set_xlim(xlims)
if ylims is not None:
ax.set_ylim(ylims)
if self.scan_type == "t":
ax.text(
0,
0.95,
"t = " + str(np.round(t_vec[n], 2)) + " s",
bbox={"facecolor": "white"},
transform=ax.transAxes,
)
return mplfig_to_npimage(fig)
if title == "default":
title = self.name + "_spectrum.mp4"
# mpl.use('Agg') # So that it doesn't print the figures #doesn't seem to work
# imp.reload(plt)
animation = VideoClip(make_frame, duration=duration)
animation.write_videofile(title, fps=fps)
def make_movie(
self,
title="default",
duration=20,
fps=24,
norm="default",
tspan=None,
slits=True,
xslits=None,
yslits=[60, 430],
):
"""
# tspan is the time/tth/index interval for which the movie is made
"""
if self.scan_type == "t":
t_vec = self.data["t"]
elif self.scan_type == "tth":
t_vec = self.csv_data["TwoTheta"]
else:
t_vec = np.arange(len(self))
if tspan is None:
tspan = [t_vec[0], t_vec[-1]]
if norm == "default":
if slits:
immin = None
immax = None
for image in self.images.values():
image.apply_slits(xslits=xslits, yslits=yslits)
if immin is None:
immin = np.min(image.im1)
else:
                        immin = min(immin, np.min(image.im1))
# Functions that would be used to create the topography of the neural net
# The neurons and weights are taken as matrices
# Neurons are 1D arrays or lists of the dimension 1 X col
import numpy as np
import pandas as pa
def collectData (sheet_name):
""" Returns an array (numpy) of the input data from the excel sheet """
Input = pa.read_excel(sheet_name) # I is the input data matrix, data has to be extracted one by one from the columns
Input = np.array(Input)
return Input
def getError(Input, row, Output):
""" Returns the error value of the network """
col = int(np.shape(Input)[1]) - 1
    error = 0.5 * ((Input[row][col] - Output)**2)  # 0.5 * squared error for this data set
return error
def inputNeurons (Input, row):
""" Returns an input matrix based on the data matrix with data set present in column 'column' """
n_row = int(np.shape(Input)[0])
n_col = int(np.shape(Input)[1])
I = [0]*(n_col - 1)
for c in range(n_col-1):
I[c] = Input[row][c]
return I
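# Hedged usage sketch for the helpers above (the file name and values are illustrative only):
#   Input = collectData('training_data.xlsx')   # full data matrix; last column holds the target
#   I = inputNeurons(Input, row=0)              # input-layer values for the first data set
#   err = getError(Input, 0, Output=0.5)        # 0.5 * (target - 0.5)**2 for that row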
def transposeMat (M):
""" Returns the transpose of matrix M, used for neurons in the next layer """
# Not used in the current program
M = np.array(M)
row = int(np.shape(M)[0])
col = int(np.shape(M)[1])
M_trans = np.zeros((col, row))
for r in range(row):
for c in range(col):
M_trans[c][r] = M[r][c].copy()
M_trans = M_trans.tolist()
return M_trans
def getNextLayer (n_out_mat, w_mat):
""" Gets the next layer from output matrix of neurons and weights """
# The layer is in the form of 1 X col array/matrix
N = np.array(n_out_mat)
W = np.array(w_mat)
if (W.ndim == 1):
col_wt = int(np.shape(W)[0])
else:
col_wt = int(np.shape(W)[1])
    col_n = int(np.shape(N)[0])  # number of neurons in the current layer (assumed 1D, as described above)
#!/usr/bin/env python
import os
import sys
import numpy as np
import warnings
from astropy.io import fits
from astropy.utils.exceptions import AstropyWarning
from astropy.table import Table, vstack, Column
from astropy.time import Time
import healpy as hp
from dlnpyutils import utils as dln, coords, bindata, db, job_daemon as jd
import subprocess
import time
from argparse import ArgumentParser
import socket
from dustmaps.sfd import SFDQuery
from astropy.coordinates import SkyCoord
from sklearn.cluster import DBSCAN
from scipy.optimize import least_squares
from scipy.interpolate import interp1d
import sqlite3
import gc
import psutil
def writecat2db(cat,dbfile):
""" Write a catalog to the database """
ncat = dln.size(cat)
sqlite3.register_adapter(np.int16, int)
sqlite3.register_adapter(np.int64, int)
sqlite3.register_adapter(np.float64, float)
sqlite3.register_adapter(np.float32, float)
db = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
#db = sqlite3.connect('test.db')
#db.text_factory = lambda x: str(x, 'latin1')
#db.row_factory = sqlite3.Row
c = db.cursor()
# Create the table
# the primary key ROWID is automatically generated
if len(c.execute('SELECT name from sqlite_master where type= "table" and name="meas"').fetchall()) < 1:
c.execute('''CREATE TABLE meas(measid TEXT, objlabel INTEGER, exposure TEXT, ccdnum INTEGER, filter TEXT, mjd REAL,
ra REAL, raerr REAL, dec REAL, decerr REAL, mag_auto REAL, magerr_auto REAL, asemi REAL, asemierr REAL,
bsemi REAL, bsemierr REAL, theta REAL, thetaerr REAL, fwhm REAL, flags INTEGER, class_star REAL)''')
data = list(zip(cat['measid'],np.zeros(ncat,int)-1,cat['exposure'],cat['ccdnum'],cat['filter'],cat['mjd'],cat['ra'],
cat['raerr'],cat['dec'],cat['decerr'],cat['mag_auto'],cat['magerr_auto'],cat['asemi'],cat['asemierr'],
cat['bsemi'],cat['bsemierr'],cat['theta'],cat['thetaerr'],cat['fwhm'],cat['flags'],cat['class_star']))
c.executemany('''INSERT INTO meas(measid,objlabel,exposure,ccdnum,filter,mjd,ra,raerr,dec,decerr,mag_auto,magerr_auto,
asemi,asemierr,bsemi,bsemierr,theta,thetaerr,fwhm,flags,class_star)
VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)''', data)
db.commit()
db.close()
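# Hedged usage sketch (the database path is hypothetical): given a measurement catalog 'cat'
# with the columns referenced in the INSERT above, write it out and index the measid column:
#   writecat2db(cat, '/tmp/meas.db')
#   createindexdb('/tmp/meas.db', col='measid', table='meas', unique=True)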
def getdbcoords(dbfile):
""" Get the coordinates and ROWID from the database """
sqlite3.register_adapter(np.int16, int)
sqlite3.register_adapter(np.int64, int)
sqlite3.register_adapter(np.float64, float)
sqlite3.register_adapter(np.float32, float)
db = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
c = db.cursor()
c.execute('''SELECT rowid,ra,dec FROM meas''')
data = c.fetchall()
db.close()
# Convert to nump structured array
dtype = np.dtype([('ROWID',int),('RA',np.float64),('DEC',np.float64)])
cat = np.zeros(len(data),dtype=dtype)
cat[...] = data
del data
return cat
def createindexdb(dbfile,col='measid',table='meas',unique=True):
""" Index a column in the database """
t0 = time.time()
db = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
c = db.cursor()
index_name = 'idx_'+col+'_'+table
# Check if the index exists first
c.execute('select name from sqlite_master')
d = c.fetchall()
for nn in d:
if nn[0]==index_name:
print(index_name+' already exists')
return
# Create the index
print('Indexing '+col)
if unique:
c.execute('CREATE UNIQUE INDEX '+index_name+' ON '+table+'('+col+')')
else:
c.execute('CREATE INDEX '+index_name+' ON '+table+'('+col+')')
data = c.fetchall()
db.close()
print('indexing done after '+str(time.time()-t0)+' sec')
def insertobjlabelsdb(rowid,labels,dbfile):
""" Insert objectlabel values into the database """
print('Inserting object labels')
t0 = time.time()
sqlite3.register_adapter(np.int16, int)
sqlite3.register_adapter(np.int64, int)
sqlite3.register_adapter(np.float64, float)
sqlite3.register_adapter(np.float32, float)
db = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
c = db.cursor()
data = list(zip(labels,rowid))
c.executemany('''UPDATE meas SET objlabel=? WHERE rowid=?''', data)
db.commit()
db.close()
print('inserting done after '+str(time.time()-t0)+' sec')
def updatecoldb(selcolname,selcoldata,updcolname,updcoldata,table,dbfile):
""" Update column in database """
print('Updating '+updcolname+' column in '+table+' table using '+selcolname)
t0 = time.time()
sqlite3.register_adapter(np.int16, int)
sqlite3.register_adapter(np.int64, int)
sqlite3.register_adapter(np.float64, float)
sqlite3.register_adapter(np.float32, float)
db = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
c = db.cursor()
data = list(zip(updcoldata,selcoldata))
c.executemany('''UPDATE '''+table+''' SET '''+updcolname+'''=? WHERE '''+selcolname+'''=?''', data)
db.commit()
db.close()
print('updating done after '+str(time.time()-t0)+' sec')
def deleterowsdb(colname,coldata,table,dbfile):
""" Delete rows from the database using rowid"""
print('Deleting rows from '+table+' table using '+colname)
t0 = time.time()
sqlite3.register_adapter(np.int16, int)
sqlite3.register_adapter(np.int64, int)
sqlite3.register_adapter(np.float64, float)
sqlite3.register_adapter(np.float32, float)
db = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
c = db.cursor()
data = list(zip(coldata))
c.executemany('''DELETE from '''+table+''' WHERE '''+colname+'''=?''', data)
db.commit()
db.close()
print('deleting done after '+str(time.time()-t0)+' sec')
def writeidstr2db(cat,dbfile):
""" Insert IDSTR database values """
t0 = time.time()
sqlite3.register_adapter(np.int16, int)
sqlite3.register_adapter(np.int64, int)
sqlite3.register_adapter(np.float64, float)
sqlite3.register_adapter(np.float32, float)
db = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
c = db.cursor()
# Create the table
# the primary key ROWID is automatically generated
if len(c.execute('SELECT name from sqlite_master where type= "table" and name="idstr"').fetchall()) < 1:
c.execute('''CREATE TABLE idstr(measid TEXT, exposure TEXT, objectid TEXT, objectindex INTEGER)''')
data = list(zip(cat['measid'],cat['exposure'],cat['objectid'],cat['objectindex']))
c.executemany('''INSERT INTO idstr(measid,exposure,objectid,objectindex)
VALUES(?,?,?,?)''', data)
db.commit()
db.close()
#print('inserting done after '+str(time.time()-t0)+' sec')
def readidstrdb(dbfile):
""" Get data from IDSTR database"""
data = querydb(dbfile,table='idstr',cols='*')
# Put in catalog
dtype_idstr = np.dtype([('measid',np.str,200),('exposure',np.str,200),('objectid',np.str,200),('objectindex',int)])
cat = np.zeros(len(data),dtype=dtype_idstr)
cat[...] = data
del data
return cat
def querydb(dbfile,table='meas',cols='rowid,*',where=None):
""" Query database table """
sqlite3.register_adapter(np.int16, int)
sqlite3.register_adapter(np.int64, int)
sqlite3.register_adapter(np.float64, float)
sqlite3.register_adapter(np.float32, float)
db = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
cur = db.cursor()
cmd = 'SELECT '+cols+' FROM '+table
if where is not None: cmd += ' WHERE '+where
cur.execute(cmd)
data = cur.fetchall()
db.close()
# Return results
return data
def executedb(dbfile,cmd):
""" Execute a database command """
sqlite3.register_adapter(np.int16, int)
sqlite3.register_adapter(np.int64, int)
sqlite3.register_adapter(np.float64, float)
sqlite3.register_adapter(np.float32, float)
db = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
cur = db.cursor()
cur.execute(cmd)
data = cur.fetchall()
db.close()
return data
def getdatadb(dbfile,table='meas',cols='rowid,*',objlabel=None,rar=None,decr=None,verbose=False):
""" Get measurements for an object(s) from the database """
t0 = time.time()
sqlite3.register_adapter(np.int16, int)
sqlite3.register_adapter(np.int64, int)
sqlite3.register_adapter(np.float64, float)
sqlite3.register_adapter(np.float32, float)
db = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
cur = db.cursor()
cmd = 'SELECT '+cols+' FROM '+table
# OBJLABEL constraints
if objlabel is not None:
if cmd.find('WHERE') == -1:
cmd += ' WHERE '
else:
cmd += ' AND '
if dln.size(objlabel)==2:
cmd += 'objlabel>='+str(objlabel[0])+' AND objlabel<='+str(objlabel[1])
else:
cmd += 'objlabel='+str(objlabel)
# RA constraints
if rar is not None:
if cmd.find('WHERE') == -1:
cmd += ' WHERE '
else:
cmd += ' AND '
cmd += 'ra>='+str(rar[0])+' AND ra<'+str(rar[1])
# DEC constraints
if decr is not None:
if cmd.find('WHERE') == -1:
cmd += ' WHERE '
else:
cmd += ' AND '
cmd += 'dec>='+str(decr[0])+' AND dec<'+str(decr[1])
# Execute the select command
#print('CMD = '+cmd)
cur.execute(cmd)
data = cur.fetchall()
db.close()
# No results
if len(data)==0:
return np.array([])
# Convert to numpy structured array
dtype_hicat = np.dtype([('ROWID',int),('MEASID',np.str,30),('OBJLABEL',int),('EXPOSURE',np.str,40),('CCDNUM',int),('FILTER',np.str,3),
('MJD',float),('RA',float),('RAERR',float),('DEC',float),('DECERR',float),
('MAG_AUTO',float),('MAGERR_AUTO',float),('ASEMI',float),('ASEMIERR',float),('BSEMI',float),('BSEMIERR',float),
('THETA',float),('THETAERR',float),('FWHM',float),('FLAGS',int),('CLASS_STAR',float)])
cat = np.zeros(len(data),dtype=dtype_hicat)
cat[...] = data
del data
if verbose: print('got data in '+str(time.time()-t0)+' sec.')
return cat
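# Hedged usage sketch (path and query values are illustrative only):
#   cat = getdatadb('/tmp/meas.db', objlabel=5)                            # all measurements of object 5
#   box = getdatadb('/tmp/meas.db', rar=[10.0, 10.5], decr=[-2.0, -1.5])   # spatial box query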
def getradecrangedb(dbfile):
""" Get RA/DEC ranges from database """
sqlite3.register_adapter(np.int16, int)
sqlite3.register_adapter(np.int64, int)
sqlite3.register_adapter(np.float64, float)
sqlite3.register_adapter(np.float32, float)
db = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
c = db.cursor()
c.execute('''SELECT MIN(ra),MAX(ra),MIN(dec),MAX(dec) FROM meas''')
data = c.fetchall()
db.close()
return data[0]
def add_elements(cat,nnew=300000):
""" Add more elements to a catalog"""
ncat = len(cat)
old = cat.copy()
nnew = dln.gt(nnew,ncat)
cat = np.zeros(ncat+nnew,dtype=old.dtype)
cat[0:ncat] = old
del old
return cat
def seqcluster(cat,dcr=0.5,iter=False,inpobj=None,trim=False):
""" Sequential clustering of measurements in exposures. This was the old method."""
ncat = len(cat)
labels = np.zeros(ncat)-1
# Iterate
if iter is not False:
done = False
niter = 1
maxiter = 10
lastlabels = np.zeros(ncat)-1
while (done is False):
# First time
if niter==1:
inpobj = None
# Second and up
else:
del labels1, obj1
inpobj = obj2
# Cluster
labels1,obj1 = seqcluster(cat,dcr=dcr,iter=False,inpobj=inpobj)
print('Iter='+str(niter)+' '+str(int(np.max(labels1)))+' clusters')
# Calculate average ra/dec
obj2 = propermotion(cat,labels1)
#print(labels1-lastlabels)
# Are we done?
if (niter==maxiter) | (np.sum(labels1-lastlabels)==0): done=True
lastlabels = labels1
niter += 1
return labels1, obj2
# Create exposures index
index = dln.create_index(cat['EXPOSURE'])
nexp = len(index['value'])
# Create object catalog
dtype_obj = np.dtype([('label',int),('ra',np.float64),('dec',np.float64),('ndet',int)])
# Is there an input object catalog that we are starting with?
if inpobj is not None:
obj = inpobj
cnt = len(obj)
else:
obj = np.zeros(np.min([500000,ncat]),dtype=dtype_obj)
cnt = 0
nobj = len(obj)
# Loop over exposures
for i in range(nexp):
#print(str(i)+' '+index['value'][i])
indx = index['index'][index['lo'][i]:index['hi'][i]+1]
cat1 = cat[indx]
ncat1 = len(cat1)
if dln.size(dcr)>1:
dcr1 = dcr[indx]
else:
dcr1 = dcr
# First exposure
if cnt==0:
ind1 = np.arange(ncat1)
obj['label'][ind1] = ind1
obj['ra'][ind1] = cat1['RA']
obj['dec'][ind1] = cat1['DEC']
obj['ndet'][ind1] = 1
labels[indx] = ind1
cnt += ncat1
# Second and up
else:
# Match new sources to the objects
#ind1,ind2,dist = coords.xmatch(obj[0:cnt]['ra'],obj[0:cnt]['dec'],cat1['RA'],cat1['DEC'],dcr,unique=True)
ind2,ind1,dist = coords.xmatch(cat1['RA'],cat1['DEC'],obj[0:cnt]['ra'],obj[0:cnt]['dec'],dcr1,unique=True)
nmatch = dln.size(ind1)
# Some matches, add data to existing record for these sources
if nmatch>0:
obj['ndet'][ind1] += 1
labels[indx[ind2]] = ind1
if nmatch<ncat1:
indx0 = indx.copy()
indx = np.delete(indx,ind2)
cat1 = np.delete(cat1,ind2)
ncat1 = dln.size(cat1)
else:
cat1 = np.array([])
ncat1 = 0
# Some left, add records for these sources
if ncat1>0:
# Add new elements
if (cnt+ncat1)>nobj:
obj = add_elements(obj)
nobj = len(obj)
ind1 = np.arange(ncat1)+cnt
obj['label'][ind1] = ind1
obj['ra'][ind1] = cat1['RA']
obj['dec'][ind1] = cat1['DEC']
obj['ndet'][ind1] = 1
labels[indx] = ind1
cnt += ncat1
# Trim off the excess elements
obj = obj[0:cnt]
# Trim off any objects that do not have any detections
# could happen if an object catalog was input
if trim is True:
bd, nbd = dln.where(obj['ndet']<1)
if nbd>0: obj = np.delete(obj,bd)
# Maybe iterate
# -measure mean ra/dec for each object and go through the process again
return labels, obj
def meancoords(cat,labels):
""" Measure mean RA/DEC."""
# Make object index
index = dln.create_index(labels)
nobj = len(index['value'])
radeg = np.float64(180.00) / np.pi
dtype_obj = np.dtype([('label',int),('ndet',int),('ra',np.float64),('dec',np.float64),('raerr',np.float32),
('decerr',np.float32),('asemi',np.float32),('bsemi',np.float32),('theta',np.float32),('fwhm',np.float32)])
obj = np.zeros(nobj,dtype=dtype_obj)
# Loop over the objects
for i in range(nobj):
indx = index['index'][index['lo'][i]:index['hi'][i]+1]
ncat1 = dln.size(indx)
obj['label'][i] = index['value'][i]
obj['ndet'][i] = ncat1
# Computing quantities
# Mean RA/DEC, RAERR/DECERR
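        # Inverse-variance weighted mean: ra = sum(w_i*ra_i)/sum(w_i) with w_i = 1/raerr_i**2,
        # and the propagated uncertainty is raerr = sqrt(1/sum(w_i)); same for dec.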
if ncat1>1:
wt_ra = 1.0/cat['RAERR'][indx]**2
wt_dec = 1.0/cat['DECERR'][indx]**2
obj['ra'][i] = np.sum(cat['RA'][indx]*wt_ra)/np.sum(wt_ra)
obj['raerr'][i] = np.sqrt(1.0/np.sum(wt_ra))
obj['dec'][i] = np.sum(cat['DEC'][indx]*wt_dec)/np.sum(wt_dec)
obj['decerr'][i] = np.sqrt(1.0/np.sum(wt_dec))
else:
obj['ra'][i] = cat['RA'][indx]
obj['dec'][i] = cat['DEC'][indx]
obj['raerr'][i] = cat['RAERR'][indx]
obj['decerr'][i] = cat['DECERR'][indx]
# Compute median FWHM
if ncat1>1:
obj['asemi'][i] = np.median(cat['ASEMI'][indx])
obj['bsemi'][i] = np.median(cat['BSEMI'][indx])
obj['theta'][i] = np.median(cat['THETA'][indx])
obj['fwhm'][i] = np.median(cat['FWHM'][indx])
else:
obj['asemi'][i] = cat['ASEMI'][indx]
obj['bsemi'][i] = cat['BSEMI'][indx]
obj['theta'][i] = cat['THETA'][indx]
obj['fwhm'][i] = cat['FWHM'][indx]
return obj
def propermotion(cat,labels):
""" Measure proper motions."""
# Make object index
index = dln.create_index(labels)
nobj = len(index['value'])
radeg = np.float64(180.00) / np.pi
obj = meancoords(cat,labels)
dtype_pm = np.dtype([('pmra',np.float32),('pmdec',np.float32),('pmraerr',np.float32),('pmdecerr',np.float32),('mjd',np.float64)])
obj = dln.addcatcols(obj,dtype_pm)
# Loop over the objects
for i in range(nobj):
indx = index['index'][index['lo'][i]:index['hi'][i]+1]
ncat1 = dln.size(indx)
# Mean proper motion and errors
if ncat1>1:
raerr = np.array(cat['RAERR'][indx]*1e3,np.float64) # milli arcsec
ra = np.array(cat['RA'][indx],np.float64)
ra -= np.mean(ra)
ra *= 3600*1e3 * np.cos(obj['dec'][i]/radeg) # convert to true angle, milli arcsec
t = cat['MJD'][indx].copy()
t -= np.mean(t)
t /= 365.2425 # convert to year
# Calculate robust slope
pmra, pmraerr = dln.robust_slope(t,ra,raerr,reweight=True)
obj['pmra'][i] = pmra # mas/yr
obj['pmraerr'][i] = pmraerr # mas/yr
decerr = np.array(cat['DECERR'][indx]*1e3,np.float64) # milli arcsec
dec = np.array(cat['DEC'][indx],np.float64)
dec -= np.mean(dec)
dec *= 3600*1e3 # convert to milli arcsec
# Calculate robust slope
pmdec, pmdecerr = dln.robust_slope(t,dec,decerr,reweight=True)
obj['pmdec'][i] = pmdec # mas/yr
obj['pmdecerr'][i] = pmdecerr # mas/yr
return obj
def moments(cat,labels):
    # Measure XX, YY, XY moments of multiple measurements of an object:
# Make object index
index = dln.create_index(labels)
nobj = len(index['value'])
radeg = np.float64(180.00) / np.pi
obj = meancoords(cat,labels)
dtype_mom = np.dtype([('x2',np.float32),('y2',np.float32),('xy',np.float32),('asemi',np.float32),('bsemi',np.float32),('theta',np.float32)])
obj = dln.addcatcols(obj,dtype_mom)
# Loop over the objects
for i in range(nobj):
indx = index['index'][index['lo'][i]:index['hi'][i]+1]
ncat1 = dln.size(indx)
# Measure moments
if ncat1>1:
# See sextractor.pdf pg. 30
x2 = np.sum( ((cat['RA'][indx]-obj['ra'][i])*np.cos(np.deg2rad(obj['dec'][i])))**2 ) / (ncat1-1) * 3600**2
y2 = np.sum( (cat['DEC'][indx]-obj['dec'][i])**2 ) / (ncat1-1) * 3600**2
xy = np.sum( (cat['RA'][indx]-obj['ra'][i])*np.cos(np.deg2rad(obj['dec'][i])) * (cat['DEC'][indx]-obj['dec'][i]) ) / (ncat1-1) * 3600**2
obj['x2'][i] = x2
obj['y2'][i] = y2
obj['xy'][i] = xy
# See sextractor.pdf pg. 31
obj['asemi'][i] = np.sqrt( 0.5*(x2+y2) + np.sqrt(((x2-y2)*0.5)**2 + xy**2) )
obj['bsemi'][i] = np.sqrt( 0.5*(x2+y2) - np.sqrt(((x2-y2)*0.5)**2 + xy**2) )
if (x2==y2):
obj['theta'][i] = 0.0
else:
obj['theta'][i] = np.rad2deg(np.arctan(2*xy/(x2-y2))*0.5)
else:
obj['x2'][i] = obj['raerr'][i]**2
obj['y2'][i] = obj['decerr'][i]**2
obj['xy'][i] = 0.0
obj['asemi'][i] = obj['x2'][i]
obj['bsemi'][i] = obj['y2'][i]
obj['theta'][i] = 0.0
return obj
def ellipsecoords(pars,npoints=100):
""" Create coordinates of an ellipse."""
# [x,y,asemi,bsemi,theta]
# copied from ellipsecoords.pro
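    # Hedged usage sketch (numbers are illustrative):
    #   xp, yp = ellipsecoords([0.0, 0.0, 2.0, 1.0, 30.0])   # 100 points on an ellipse centered
    #   at the origin with semi-axes 2 and 1, rotated 30 degrees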
xc = pars[0]
yc = pars[1]
asemi = pars[2]
bsemi = pars[3]
pos_ang = pars[4]
phi = 2*np.pi*(np.arange(npoints,dtype=float)/(npoints-1)) # Divide circle into Npoints
ang = np.deg2rad(pos_ang) # Position angle in radians
cosang = np.cos(ang)
sinang = np.sin(ang)
x = asemi*np.cos(phi) # Parameterized equation of ellipse
y = bsemi*np.sin(phi)
xprime = xc + x*cosang - y*sinang # Rotate to desired position angle
yprime = yc + x*sinang + y*cosang
return xprime, yprime
def checkboundaryoverlap(metafiles,buffdict,verbose=False):
""" Check a list of fits files against a buffer and return metadata of overlapping exposures."""
# New meta-data format
dtype_meta = np.dtype([('file',np.str,500),('base',np.str,200),('instrument',np.str,3),('expnum',int),('ra',np.float64),
('dec',np.float64),('dateobs',np.str,100),('mjd',np.float64),('filter',np.str,50),
('exptime',float),('airmass',float),('nsources',int),('fwhm',float),
('nchips',int),('badchip31',bool),('rarms',float),('decrms',float),
('ebv',float),('gaianmatch',int),('zpterm',float),('zptermerr',float),
('zptermsig',float),('refmatch',int)])
allmeta = None
for m,mfile in enumerate(np.atleast_1d(metafiles)):
noverlap = 0
if os.path.exists(mfile) is False:
if verbose: print(mfile+' NOT FOUND')
continue
meta = fits.getdata(mfile,1)
if verbose: print(str(m+1)+' Loading '+mfile)
t = Time(meta['dateobs'], format='isot', scale='utc')
meta['mjd'] = t.mjd # recompute because some MJD are bad
chmeta = fits.getdata(mfile,2) # chip-level meta-data structure
# Convert META to new format
newmeta = np.zeros(1,dtype=dtype_meta)
# Copy over the meta information
for n in newmeta.dtype.names:
if n.upper() in meta.dtype.names: newmeta[n]=meta[n]
# Get the name
fdir = os.path.dirname(mfile)
fbase, ext = os.path.splitext(os.path.basename(mfile))
fbase = fbase[:-5] # remove _meta at end
# Loop over the chip files
for j in range(len(chmeta)):
# Check that this overlaps the healpix region
inside = True
vra = chmeta['vra'][j]
vdec = chmeta['vdec'][j]
vlon, vlat = coords.rotsphcen(vra,vdec,buffdict['cenra'],buffdict['cendec'],gnomic=True)
if coords.doPolygonsOverlap(buffdict['lon'],buffdict['lat'],vlon,vlat) is False:
if verbose: print('This chip does NOT overlap the HEALPix region+buffer')
inside = False
if inside is True:
#chfile1 = chmeta['FILENAME'][j]
#if os.path.exists(chfile1) is True: chfiles.append(chfile1)
noverlap += 1
if verbose: print(' FILTER='+meta['filter'][0]+' EXPTIME='+str(meta['exptime'][0])+' sec '+str(noverlap)+' chips overlap')
if noverlap>0:
if allmeta is None:
allmeta = newmeta
else:
allmeta = np.hstack((allmeta,newmeta))
if allmeta is None:
nallmeta = 0
else:
nallmeta = len(allmeta)
if verbose: print(str(nallmeta)+' exposures overlap')
return allmeta
def find_obj_parent(obj):
""" Find objects that have other objects "inside" them. """
# Use crossmatch
X1 = np.vstack((obj['ra'],obj['dec'])).T
X2 = np.vstack((obj['ra'],obj['dec'])).T
X1 = X1 * (np.pi / 180.)
X2 = X2 * (np.pi / 180.)
max_distance = (np.max(obj['fwhm']) / 3600) * (np.pi / 180.)
# Convert 2D RA/DEC to 3D cartesian coordinates
Y1 = np.transpose(np.vstack([np.cos(X1[:, 0]) * np.cos(X1[:, 1]),
np.sin(X1[:, 0]) * np.cos(X1[:, 1]),
np.sin(X1[:, 1])]))
Y2 = np.transpose(np.vstack([np.cos(X2[:, 0]) * np.cos(X2[:, 1]),
np.sin(X2[:, 0]) * np.cos(X2[:, 1]),
np.sin(X2[:, 1])]))
# law of cosines to compute 3D distance
max_y = np.sqrt(2 - 2 * np.cos(max_distance))
dist, ind = coords.crossmatch(Y1, Y2, max_y, k=2)
# convert distances back to angles using the law of tangents
not_inf = ~np.isinf(dist)
x = 0.5 * dist[not_inf]
dist[not_inf] = (180. / np.pi * 2 * np.arctan2(x,
np.sqrt(np.maximum(0, 1 - x ** 2))))
dist[not_inf] *= 3600.0 # in arcsec
# Add "parent" column if necessary
if 'parent' not in obj.dtype.names:
obj = dln.addcatcols(obj,np.dtype([('parent',bool)]))
# Check if there are any objects within FWHM
# the closest object will be itself, so check the second one
bd,nbd = dln.where( dist[:,1] <= np.minimum(0.5*obj['fwhm'],obj['asemi']))
# Check that they are inside their ellipse footprint
obj['parent'] = False # all false to start
if nbd>0:
for i in range(nbd):
ind1 = bd[i]
ind2 = ind[bd[i],1]
lon1,lat1 = (0.0, 0.0)
cenra = obj['ra'][ind1]
cendec = obj['dec'][ind1]
lon2,lat2 = coords.rotsphcen(obj['ra'][ind2],obj['dec'][ind2],cenra,cendec,gnomic=True)
pars = [lon1*3600,lon1*3600,obj['asemi'][ind1],obj['bsemi'][ind1],obj['theta'][ind1]]
ll,bb = ellipsecoords(pars,npoints=10)
obj['parent'][ind1] = coords.doPolygonsOverlap(ll,bb,np.atleast_1d(lon2*3600),np.atleast_1d(lat2*3600))
return obj
def hybridcluster(cat):
""" use both DBSCAN and sequential clustering to cluster the data"""
# Hybrid clustering algorithm
# 1) Find "object" centers by using DBSCAN with a smallish eps (~0.2-0.3") and maybe minclusters of 2-3
# 2) Do sequential clustering using the object centers on the leftover measurements.
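    # Hedged usage sketch ('cat' is a measurement catalog such as the one returned by getdatadb()):
    #   labels, obj = hybridcluster(cat)
    #   # labels[i] is the object label of measurement i; obj holds the mean coordinates per object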
# Empty catalog input
if len(cat)==0:
return np.array([]), np.array([])
# Only one exposure, don't cluster
expindex = dln.create_index(cat['EXPOSURE'])
nexp = len(expindex['value'])
if nexp==1:
print('Only one exposure. Do not need to cluster')
labels = np.arange(len(cat))
obj = np.zeros(len(cat),dtype=np.dtype([('label',int),('ndet',int),('ra',np.float64),('dec',np.float64),('raerr',np.float32),
('decerr',np.float32),('asemi',np.float32),('bsemi',np.float32),('theta',np.float32),('fwhm',np.float32)]))
obj['label'] = labels
obj['ndet'] = 1
for n in ['ra','dec','raerr','decerr','asemi','bsemi','theta','fwhm']: obj[n]=cat[n.upper()]
return labels, obj
# Step 1: Find object centers using DBSCAN with a small eps
t0 = time.time()
# DBSCAN does not deal with cos(dec), convert to a different projection
cenra = np.mean(cat['RA'])
cendec = np.mean(cat['DEC'])
# Deal with RA=0 wrap
if (np.max(cat['RA'])-np.min(cat['RA']))>100:
rr = cat['RA']
bb,nbb = dln.where(rr>180)
if nbb>0: rr[bb]-=360
cenra = np.mean(rr)
if cenra<0: cenra+=360
lon,lat = coords.rotsphcen(cat['RA'],cat['DEC'],cenra,cendec,gnomic=True)
X1 = np.column_stack((lon,lat))
err = np.sqrt(cat['RAERR']**2+cat['DECERR']**2)
eps = np.maximum(3*np.median(err),0.3)
print('DBSCAN eps=%4.2f' % eps)
# Minimum number of measurements needed to define a cluster/object
minsamples = 3
if nexp<3: minsamples=nexp
dbs1 = DBSCAN(eps=eps/3600, min_samples=minsamples).fit(X1)
gdb,ngdb,bdb,nbdb = dln.where(dbs1.labels_ >= 0,comp=True)
# No clusters, lower minsamples
while (ngdb==0):
minsamples -= 1
print('No clusters. Lowering min_samples to '+str(minsamples))
dbs1 = DBSCAN(eps=eps/3600, min_samples=minsamples).fit(X1)
gdb,ngdb,bdb,nbdb = dln.where(dbs1.labels_ >= 0,comp=True)
print('DBSCAN after %5.2f sec. ' % (time.time()-t0))
# Get mean coordinates for each object
# only use the measurements that were clustered
obj1 = meancoords(cat[gdb],dbs1.labels_[gdb])
inpobj = obj1
print(str(ngdb)+' measurements clustered into '+str(len(obj1))+' objects. '+str(nbdb)+' remaining.')
# Step 2: sequential clustering with original list of objects with the outliers
# this allows more distance measurements with larger errors to be clustered as well
# the RA/DEC uncertainties can be very small, set a lower threshold of EPS
if (nbdb>0):
print('Sequential Clustering the remaining measurements')
dcr = np.maximum(3*err[bdb],eps)
catrem = cat[bdb]
labels2, obj2 = seqcluster(catrem,dcr=dcr,inpobj=inpobj)
# Add these new labels to the original list
# offset the numbers so they don't overlap
labels = dbs1.labels_
labels[bdb] = labels2+np.max(labels)+1
obj = meancoords(cat,labels) # Get mean coordinates again
else:
obj = obj1
labels = dbs1.labels_
print(str(len(obj))+' final objects')
return labels, obj
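# Hedged sketch of step 1 of hybridcluster (the helper below is hypothetical): seed
# object centers with DBSCAN in a gnomonic tangent plane so that eps can be given in
# angular units; coords.rotsphcen and DBSCAN are the same routines used above.
def _dbscan_seed_demo(ra, dec, eps_arcsec=0.3, min_samples=3):
    """Return DBSCAN labels (-1 = unclustered) for RA/DEC arrays in degrees."""
    cenra, cendec = np.mean(ra), np.mean(dec)
    lon, lat = coords.rotsphcen(ra, dec, cenra, cendec, gnomic=True)
    X = np.column_stack((lon, lat))        # tangent-plane coordinates in degrees
    dbs = DBSCAN(eps=eps_arcsec/3600.0, min_samples=min_samples).fit(X)
    return dbs.labels_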
def loadmeas(metafile=None,buffdict=None,dbfile=None,verbose=False):
t0 = time.time()
if metafile is None:
print('Need metafile')
return np.array([]), 0, np.array([])  # match the (cat, catcount, allmeta) return signature below
# New meta-data format
dtype_meta = np.dtype([('file',np.str,500),('base',np.str,200),('expnum',int),('ra',np.float64),
('dec',np.float64),('dateobs',np.str,100),('mjd',np.float64),('filter',np.str,50),
('exptime',float),('airmass',float),('nsources',int),('fwhm',float),
('nchips',int),('badchip31',bool),('rarms',float),('decrms',float),
('ebv',float),('gaianmatch',int),('zpterm',float),('zptermerr',float),
('zptermsig',float),('refmatch',int)])
# All columns in MEAS catalogs (32)
#dtype_cat = np.dtype([('MEASID',np.str,200),('OBJECTID',np.str,200),('EXPOSURE',np.str,200),('CCDNUM',int),('FILTER',np.str,10),
# ('MJD',float),('X',float),('Y',float),('RA',float),('RAERR',float),('DEC',float),('DECERR',float),
# ('MAG_AUTO',float),('MAGERR_AUTO',float),('MAG_APER1',float),('MAGERR_APER1',float),('MAG_APER2',float),
# ('MAGERR_APER2',float),('MAG_APER4',float),('MAGERR_APER4',float),('MAG_APER8',float),('MAGERR_APER8',float),
# ('KRON_RADIUS',float),('ASEMI',float),('ASEMIERR',float),('BSEMI',float),('BSEMIERR',float),('THETA',float),
# ('THETAERR',float),('FWHM',float),('FLAGS',int),('CLASS_STAR',float)])
# All the columns that we need (20)
#dtype_cat = np.dtype([('MEASID',np.str,30),('EXPOSURE',np.str,40),('CCDNUM',int),('FILTER',np.str,3),
# ('MJD',float),('RA',float),('RAERR',float),('DEC',float),('DECERR',float),
# ('MAG_AUTO',float),('MAGERR_AUTO',float),('ASEMI',float),('ASEMIERR',float),('BSEMI',float),('BSEMIERR',float),
# ('THETA',float),('THETAERR',float),('FWHM',float),('FLAGS',int),('CLASS_STAR',float)])
dtype_cat = np.dtype([('MEASID',np.str,30),('EXPOSURE',np.str,40),('CCDNUM',np.int8),('FILTER',np.str,3),
('MJD',float),('RA',float),('RAERR',np.float16),('DEC',float),('DECERR',np.float16),
('MAG_AUTO',np.float16),('MAGERR_AUTO',np.float16),('ASEMI',np.float16),('ASEMIERR',np.float16),
('BSEMI',np.float16),('BSEMIERR',np.float16),('THETA',np.float16),('THETAERR',np.float16),
('FWHM',np.float16),('FLAGS',np.int16),('CLASS_STAR',np.float16)])
# Loop over exposures
cat = None
ncat = 0
allmeta = None
catcount = 0
metafile = np.atleast_1d(metafile)
for m,mfile in enumerate(metafile):
expcatcount = 0
if os.path.exists(mfile) is False:
print(mfile+' NOT FOUND')
continue
meta = fits.getdata(mfile,1)
print(str(m+1)+' Loading '+mfile)
t = Time(meta['dateobs'], format='isot', scale='utc')
meta['mjd'] = t.mjd # recompute because some MJD are bad
chmeta = fits.getdata(mfile,2) # chip-level meta-data structure
print(' FILTER='+meta['filter'][0]+' EXPTIME='+str(meta['exptime'][0])+' sec')
v = psutil.virtual_memory()
process = psutil.Process(os.getpid())
print('%6.1f Percent of memory used. %6.1f GB available. Process is using %6.2f GB of memory.' % (v.percent,v.available/1e9,process.memory_info()[0]/1e9))
# Convert META to new format
newmeta = np.zeros(1,dtype=dtype_meta)
# Copy over the meta information
for n in newmeta.dtype.names:
if n.upper() in meta.dtype.names: newmeta[n]=meta[n]
# Get the name
fdir = os.path.dirname(mfile)
fbase, ext = os.path.splitext(os.path.basename(mfile))
fbase = fbase[:-5] # remove _meta at end
# Loop over the chip files
for j in range(len(chmeta)):
# Check that this chip was astrometrically calibrated
# and falls in to HEALPix region
# Also check for issues with my astrometric corrections
astokay = True
if (chmeta['ngaiamatch'][j] == 0) | (np.max(np.abs(chmeta['racoef'][j]))>1) | (np.max(np.abs(chmeta['deccoef'][j]))>1):
if verbose: print('This chip was not astrometrically calibrated or has astrometric issues')
astokay = False
# Check that this overlaps the healpix region
inside = True
if buffdict is not None:
vra = chmeta['vra'][j]
vdec = chmeta['vdec'][j]
vlon, vlat = coords.rotsphcen(vra,vdec,buffdict['cenra'],buffdict['cendec'],gnomic=True)
if coords.doPolygonsOverlap(buffdict['lon'],buffdict['lat'],vlon,vlat) is False:
if verbose: print('This chip does NOT overlap the HEALPix region+buffer')
inside = False
# Check if the chip-level file exists
chfile = fdir+'/'+fbase+'_'+str(chmeta['ccdnum'][j])+'_meas.fits'
chfile_exists = os.path.exists(chfile)
if chfile_exists is False:
print(chfile+' NOT FOUND')
# Load this one
if (chfile_exists is True) and (inside is True) and (astokay is True):
# Load the chip-level catalog
cat1 = fits.getdata(chfile,1)
ncat1 = len(cat1)
#print(' chip '+str(chmeta[j]['ccdnum'])+' '+str(ncat1)+' sources')
# Fix negative FWHM values
# use A_WORLD and B_WORLD which are never negative
bd,nbd = dln.where(cat1['FWHM']<0.1)
if nbd>0:
cat1['FWHM'][bd] = np.sqrt(cat1['ASEMI'][bd]**2+cat1['BSEMI'][bd]**2)*2.35
# Fix RAERR=DECERR=0
bd,nbd = dln.where(cat1['RAERR']<0.0001)
if nbd>0:
snr = 1.087/cat1['MAGERR_AUTO'][bd]
coorderr = 0.664*cat1['FWHM'][bd]/snr
cat1['RAERR'][bd] = coorderr
cat1['DECERR'][bd] = coorderr
# Make sure it's in the right format
if len(cat1.dtype.fields) != 32:
if verbose: print(' This catalog does not have the right format. Skipping')
del cat1
ncat1 = 0
# Only include sources inside Boundary+Buffer zone
# -use ROI_CUT
# -reproject to tangent plane first so we don't have to deal
# with RA=0 wrapping or pole issues
if buffdict is not None:
lon, lat = coords.rotsphcen(cat1['ra'],cat1['dec'],buffdict['cenra'],buffdict['cendec'],gnomic=True)
ind_out, ind_in = dln.roi_cut(buffdict['lon'],buffdict['lat'],lon,lat)
nmatch = dln.size(ind_in)
# Only want source inside this pixel
if nmatch>0:
cat1 = cat1[ind_in]
ncat1 = len(cat1)
else:
cat1 = None
ncat1 = 0
#if verbose: print(' '+str(nmatch)+' sources are inside this pixel')
# Combine the catalogs
if ncat1 > 0:
# Keep it all in memory
if dbfile is None:
if cat is None:
#dtype_cat = cat1.dtype
#ncat_init = np.sum(chmeta['nsources'])*dln.size(metafile)
ncat_init = np.maximum(100000,ncat1)
cat = np.zeros(ncat_init,dtype=dtype_cat)
catcount = 0
# Add more elements if necessary
if (catcount+ncat1)>ncat:
cat = add_elements(cat,np.maximum(100000,ncat1))
ncat = len(cat)
# Add it to the main CAT catalog
for n in dtype_cat.names: cat[n][catcount:catcount+ncat1] = cat1[n.upper()]
# Use the database
else:
writecat2db(cat1,dbfile)
if verbose: print(' chip '+str(chmeta['ccdnum'][j])+' '+str(ncat1)+' measurements')
catcount += ncat1
expcatcount += ncat1
# Add metadata to ALLMETA, only if some measurements overlap
if expcatcount>0:
if allmeta is None:
allmeta = newmeta
else:
allmeta = np.hstack((allmeta,newmeta))
# Total measurements for this exposure
print(' '+str(expcatcount)+' measurements')
print(str(catcount)+' measurements total so far')
#print('all exposures loaded. trimming now')
if (cat is not None) & (catcount<ncat): cat=cat[0:catcount] # delete excess elements
if cat is None: cat=np.array([]) # empty cat
if allmeta is None: allmeta=np.array([])
print('loading measurements done after '+str(time.time()-t0))
return cat, catcount, allmeta
def clusterdata(cat,ncat,dbfile=None):
""" Perform spatial clustering """
t00 = time.time()
print('Spatial clustering')
# Divide into subregions
if (ncat>1000000) & (dbfile is not None):
print('Dividing clustering problem into subregions')
# Index RA and DEC
createindexdb(dbfile,'ra',unique=False)
createindexdb(dbfile,'dec',unique=False)
db.analyzetable(dbfile,'meas')
# Subdivide
nsub = int(np.ceil(ncat/100000))
print(str(nsub)+' sub regions')
nx = int(np.ceil(np.sqrt(nsub))) # divide RA and DEC into nx regions
# Get RA/DEC ranges from the database
ranges = getradecrangedb(dbfile) # [min(ra),max(ra),min(dec),max(dec)]
xr = [ranges[0]-0.001, ranges[1]+0.001] # extend slightly
print('RA: '+str(xr[0])+' '+str(xr[1]))
dx = (xr[1]-xr[0])/nx
if (xr[1]-xr[0])>180: # across RA=0
dx = (xr[0]-(xr[1]-360))/nx
yr = [ranges[2]-0.001, ranges[3]+0.001] # extend slightly
mndec = np.mean(yr)
print('DEC: '+str(yr[0])+' '+str(yr[1]))
dy = (yr[1]-yr[0])/nx
buff = 10./3600.0 # 10 arcsec buffer, in degrees
rabuff = buff/np.cos(np.deg2rad(mndec)) # correct for cos(dec)
objstr = np.zeros(100000,dtype=np.dtype([('OBJLABEL',int),('RA',float),('DEC',float),('NMEAS',int)]))
nobjstr = len(objstr)
# Loop over sub regions
lastobjlabel = -1
objcount = 0
# RA loop
for r in range(nx):
r0 = xr[0]+r*dx
r1 = xr[0]+(r+1)*dx
# DEC loop
for d in range(nx):
d0 = yr[0]+d*dy
d1 = yr[0]+(d+1)*dy
print(str(r+1)+' '+str(d+1))
print('RA: '+str(r0)+' '+str(r1)+' DEC: '+str(d0)+' '+str(d1))
cat1 = getdatadb(dbfile,rar=[r0-rabuff,r1+rabuff],decr=[d0-buff,d1+buff],verbose=True)
ncat1 = len(cat1)
if ncat1>0:
gcat1,ngcat1 = dln.where(cat1['OBJLABEL']==-1) # only want ones that haven't been taken yet
if ngcat1>0:
cat1 = cat1[gcat1]
ncat1 = len(cat1)
print(str(ncat1)+' measurements with no labels')
v = psutil.virtual_memory()
process = psutil.Process(os.getpid())
print('%6.1f Percent of memory used. %6.1f GB available. Process is using %6.2f GB of memory.' % (v.percent,v.available/1e9,process.memory_info()[0]/1e9))
# Some measurements to work with
if ncat1>0:
# Cluster
t0 = time.time()
# Cluster labels are integers and in ascending order, but there are gaps
objlabels1, initobj1 = hybridcluster(cat1)
objlabels1 += lastobjlabel+1 # add offset to labels
labelindex1 = dln.create_index(objlabels1) # create index
nobj1 = len(labelindex1['value'])
print(str(ncat1)+' measurements for '+str(nobj1)+' objects')
# Compute mean positions
obj1 = np.zeros(nobj1,dtype=np.dtype([('OBJLABEL',int),('RA',float),('DEC',float),('NMEAS',int)]))
obj1['OBJLABEL'] = labelindex1['value']
obj1['NMEAS'] = labelindex1['num']
for k in range(nobj1):
indx = labelindex1['index'][labelindex1['lo'][k]:labelindex1['hi'][k]+1]
wt_ra = 1.0/cat1['RAERR'][indx]**2
wt_dec = 1.0/cat1['DECERR'][indx]**2
obj1['RA'][k] = np.sum(cat1['RA'][indx]*wt_ra)/np.sum(wt_ra)
obj1['DEC'][k] = np.sum(cat1['DEC'][indx]*wt_dec)/np.sum(wt_dec)
# Only keep objects (and measurements) inside the box region
# keep objects on LOWER boundary in RA/DEC
gdobj, ngdobj = dln.where((obj1['RA']>=r0) & (obj1['RA']<r1) & (obj1['DEC']>=d0) & (obj1['DEC']<d1))
print(str(ngdobj)+' objects are inside the boundary')
# Some objects in the region
if ngdobj>0:
obj1 = obj1[gdobj]
nobj1 = ngdobj
# Arrays of rowid and objlabels to add
add_rowid1 = np.zeros(np.sum(labelindex1['num'][gdobj]),int)
add_objlabels1 = np.zeros(np.sum(labelindex1['num'][gdobj]),int)
cnt1 = 0
for k in range(ngdobj):
indx = labelindex1['index'][labelindex1['lo'][gdobj[k]]:labelindex1['hi'][gdobj[k]]+1]
nmeas1 = labelindex1['num'][gdobj[k]]
add_rowid1[cnt1:cnt1+nmeas1] = cat1['ROWID'][indx]
add_objlabels1[cnt1:cnt1+nmeas1] = labelindex1['value'][gdobj[k]]
cnt1 += nmeas1
# Add the object labels into the database
# much faster if in rowid order
si = np.argsort(add_rowid1)
insertobjlabelsdb(add_rowid1[si],add_objlabels1[si],dbfile)
# Add OBJ1 to OBJSTR
if (objcount+nobj1>nobjstr): # add new elements
print('Adding more elements to OBJSTR')
t1 = time.time()
objstr = add_elements(objstr,np.max([nobj1,100000]))
nobjstr = len(objstr)
print('more elements added in '+str(time.time()-t1)+' sec.')
objstr[objcount:objcount+nobj1] = obj1
objcount += nobj1
# Keep track of last label
lastobjlabel = np.max(obj1['OBJLABEL'])
#import pdb; pdb.set_trace()
# Trim extra elements
if nobjstr>objcount:
objstr = objstr[0:objcount]
# No subdividing
else:
# Get MEASID, RA, DEC from database
if dbfile is not None:
#cat = getdbcoords(dbfile)
cat = getdatadb(dbfile,verbose=True)
objlabels, initobj = hybridcluster(cat)
labelindex = dln.create_index(objlabels) # create index
nobj = len(labelindex['value'])
print(str(ncat)+' measurements for '+str(nobj)+' objects')
# Make structure
objstr = np.zeros(nobj,dtype=np.dtype([('OBJLABEL',int),('NMEAS',int),('LO',int),('HI',int)]))
objstr['OBJLABEL'] = labelindex['value']
objstr['NMEAS'] = labelindex['num']
nobjstr = len(objstr)
# Insert object label into database
if dbfile is not None:
insertobjlabelsdb(cat['ROWID'],objlabels,dbfile)
# Resort CAT, and use index LO/HI
cat = cat[labelindex['index']]
objstr['LO'] = labelindex['lo']
objstr['HI'] = labelindex['hi']
print(str(len(objstr))+' final objects')
# Index objlabel in database
if dbfile is not None:
createindexdb(dbfile,'objlabel',unique=False)
print('clustering done after '+str(time.time()-t00)+' sec.')
return objstr, cat
def breakup_idstr(dbfile):
""" Break-up idstr file into separate measid/objectid lists per exposure on /data0."""
t00 = time.time()
outdir = '/data0/dnidever/nsc/instcal/v3/idstr/'
# Load the exposures table
expcat = fits.getdata('/net/dl2/dnidever/nsc/instcal/v3/lists/nsc_v3_exposure_table.fits.gz',1)
# Make sure it's a list
if type(dbfile) is str: dbfile=[dbfile]
print('Breaking up '+str(len(dbfile))+' database files')
# Loop over files
for i,dbfile1 in enumerate(dbfile):
print(str(i+1)+' '+dbfile1)
if os.path.exists(dbfile1):
t0 = time.time()
dbbase1 = os.path.basename(dbfile1)[0:-9] # remove _idstr.db ending
# Get existing index names for this database
d = sqlite3.connect(dbfile1, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
cur = d.cursor()
cmd = 'select measid,exposure,objectid from idstr'
t1 = time.time()
data = cur.execute(cmd).fetchall()
print(' '+str(len(data))+' rows read in %5.1f sec. ' % (time.time()-t1))
# Break up data into lists
measid,exposure,objectid = list(zip(*data))
measid = np.array(measid)
objectid = np.array(objectid)
exposure = np.array(exposure)
eindex = dln.create_index(exposure)
# Match exposures to exposure catalog
ind1,ind2 = dln.match(expcat['EXPOSURE'],eindex['value'])
# Loop over exposures and write output files
nexp = len(eindex['value'])
print(' '+str(nexp)+' exposures')
measid_maxlen = np.max(dln.strlen(measid))
objectid_maxlen = np.max(dln.strlen(objectid))
df = np.dtype([('measid',np.str,measid_maxlen+1),('objectid',np.str,objectid_maxlen+1)])
# Loop over the exposures and write out the files
for k in range(nexp):
if nexp>100:
if k % 100 == 0: print(' '+str(k+1))
ind = eindex['index'][eindex['lo'][k]:eindex['hi'][k]+1]
cat = np.zeros(len(ind),dtype=df)
cat['measid'] = measid[ind]
cat['objectid'] = objectid[ind]
instcode = expcat['INSTRUMENT'][ind1[k]]
dateobs = expcat['DATEOBS'][ind1[k]]
night = dateobs[0:4]+dateobs[5:7]+dateobs[8:10]
if os.path.exists(outdir+instcode+'/'+night+'/'+eindex['value'][k]) is False:
# Sometimes this crashes because another process is making the directory at the same time
try:
os.makedirs(outdir+instcode+'/'+night+'/'+eindex['value'][k])
except:
pass
outfile = outdir+instcode+'/'+night+'/'+eindex['value'][k]+'/'+eindex['value'][k]+'__'+dbbase1+'.npy'
np.save(outfile,cat)
print(' dt = %6.1f sec. ' % (time.time()-t0))
else:
print(' '+dbfile1+' NOT FOUND')
print('dt = %6.1f sec.' % (time.time()-t00))
# Combine data for one NSC healpix region
def combine(pix,version,nside=128,redo=False,verbose=False,multilevel=True,outdir=None,nmulti=None):
t0 = time.time()
hostname = socket.gethostname()
host = hostname.split('.')[0]
radeg = np.float64(180.00) / np.pi
tmpdir = '/tmp/' # default
# on thing/hulk use
if (host == "thing") or (host == "hulk"):
dir = "/net/dl1/users/dnidever/nsc/instcal/"+version+"/"
mssdir = "/mss1/"
localdir = "/d0/"
tmproot = localdir+"dnidever/nsc/instcal/"+version+"/tmp/"
# on gp09 use
if (host == "gp09") or (host == "gp08") or (host == "gp07") or (host == "gp06") or (host == "gp05"):
dir = "/net/dl1/users/dnidever/nsc/instcal/"+version+"/"
mssdir = "/net/mss1/"
localdir = "/data0/"
tmproot = localdir+"dnidever/nsc/instcal/"+version+"/tmp/"
t0 = time.time()
# Only nside>=128 supported right now
if nside<128:
print('Only nside=>128 supported')
sys.exit()
print('*** KLUDGE: Forcing output to /net/dl2 ***')
outdir = '/net/dl2/dnidever/nsc/instcal/'+version+'/combine/'
if os.path.exists(outdir) is False: os.mkdir(outdir)
# nside>128
if nside > 128:
# Get parent nside=128 pixel
pra,pdec = hp.pix2ang(nside,pix,lonlat=True)
parentpix = hp.ang2pix(128,pra,pdec,lonlat=True)
print('The nside=128 parent pixel is '+str(parentpix))
# Output filenames
outbase = str(parentpix)+'_n'+str(int(nside))+'_'+str(pix)
subdir = str(int(parentpix)//1000) # use the thousands to create subdirectory grouping
if os.path.exists(outdir+'/'+subdir) is False: os.mkdir(outdir+'/'+subdir)
outfile = outdir+'/'+subdir+'/'+outbase+'.fits'
# nside=128
else:
# Output filenames
outbase = str(pix)
subdir = str(int(pix)//1000) # use the thousands to create subdirectory grouping
if os.path.exists(outdir+'/'+subdir) is False: os.mkdir(outdir+'/'+subdir)
outfile = outdir+'/'+subdir+'/'+str(pix)+'.fits'
# Check if output file already exists
if (os.path.exists(outfile) or os.path.exists(outfile+'.gz')) & (not redo):
print(outfile+' EXISTS already and REDO not set')
sys.exit()
print("Combining InstCal SExtractor catalogs for Healpix pixel = "+str(pix))
# Use the healpix list, nside=128
listfile = localdir+'dnidever/nsc/instcal/'+version+'/nsc_instcal_combine_healpix_list.db'
if os.path.exists(listfile) is False:
print(listfile+" NOT FOUND")
sys.exit()
# nside>128
if nside > 128:
# Find our pixel
hlist = db.query(listfile,'hlist',where='PIX='+str(parentpix))
nlist = len(hlist)
if nlist == 0:
print("No entries for Healpix pixel '"+str(parentpix)+"' in the list")
sys.exit()
hlist = Table(hlist)
# GET EXPOSURES FOR NEIGHBORING PIXELS AS WELL
# so we can deal with the edge cases
neipix = hp.get_all_neighbours(128,parentpix)
for neip in neipix:
hlist1 = db.query(listfile,'hlist',where='PIX='+str(neip))
nhlist1 = len(hlist1)
if nhlist1>0:
hlist1 = Table(hlist1)
hlist = vstack([hlist,hlist1])
# nside=128
else:
parentpix = pix
# Find our pixel
hlist = db.query(listfile,'hlist',where='PIX='+str(pix))
nlist = len(hlist)
if nlist == 0:
print("No entries for Healpix pixel '"+str(pix)+"' in the list")
sys.exit()
hlist = Table(hlist)
# GET EXPOSURES FOR NEIGHBORING PIXELS AS WELL
# so we can deal with the edge cases
neipix = hp.get_all_neighbours(nside,pix)
for neip in neipix:
hlist1 = db.query(listfile,'hlist',where='PIX='+str(neip))
nhlist1 = len(hlist1)
if nhlist1>0:
hlist1 = Table(hlist1)
hlist = vstack([hlist,hlist1])
# Rename to be consistent with the FITS file
hlist['file'].name = 'FILE'
hlist['base'].name = 'BASE'
hlist['pix'].name = 'PIX'
# Use entire exposure files
# Get unique values
u, ui = np.unique(hlist['FILE'],return_index=True)
hlist = hlist[ui]
nhlist = len(hlist)
print(str(nhlist)+' exposures that overlap this pixel and neighbors')
# Get the boundary coordinates
# healpy.boundaries but not sure how to do it in IDL
# pix2vec_ring/nest can optionally return vertices but only 4
# maybe subsample myself between the vectors
# Expand the boundary to include a "buffer" zone
# to deal with edge cases
vecbound = hp.boundaries(nside,pix,step=100)
rabound, decbound = hp.vec2ang(np.transpose(vecbound),lonlat=True)
# Expand the boundary by the buffer size
cenra, cendec = hp.pix2ang(nside,pix,lonlat=True)
# reproject onto tangent plane
lonbound, latbound = coords.rotsphcen(rabound,decbound,cenra,cendec,gnomic=True)
# expand by a fraction, it's not an exact boundary but good enough
buffsize = 10.0/3600. # in deg
radbound = np.sqrt(lonbound**2+latbound**2)
frac = 1.0 + 1.5*np.max(buffsize/radbound)
lonbuff = lonbound*frac
latbuff = latbound*frac
rabuff, decbuff = coords.rotsphcen(lonbuff,latbuff,cenra,cendec,gnomic=True,reverse=True)
if (np.max(rabuff)-np.min(rabuff))>100: # deal with RA=0 wraparound
bd,nbd = dln.where(rabuff>180)
if nbd>0:rabuff[bd] -=360.0
buffdict = {'cenra':cenra,'cendec':cendec,'rar':dln.minmax(rabuff),'decr':dln.minmax(decbuff),'ra':rabuff,'dec':decbuff,\
'lon':lonbuff,'lat':latbuff,'lr':dln.minmax(lonbuff),'br':dln.minmax(latbuff)}
# IDSTR schema
dtype_idstr = np.dtype([('measid',np.str,200),('exposure',np.str,200),('objectid',np.str,200),('objectindex',int)])
# OBJ schema
dtype_obj = np.dtype([('objectid',np.str,100),('pix',int),('ra',np.float64),('dec',np.float64),('raerr',np.float32),('decerr',np.float32),
('pmra',np.float32),('pmdec',np.float32),('pmraerr',np.float32),('pmdecerr',np.float32),('mjd',np.float64),
('deltamjd',np.float32),('ndet',np.int16),('nphot',np.int16),
('ndetu',np.int16),('nphotu',np.int16),('umag',np.float32),('urms',np.float32),('uerr',np.float32),
('uasemi',np.float32),('ubsemi',np.float32),('utheta',np.float32),
('ndetg',np.int16),('nphotg',np.int16),('gmag',np.float32),('grms',np.float32),('gerr',np.float32),
('gasemi',np.float32),('gbsemi',np.float32),('gtheta',np.float32),
('ndetr',np.int16),('nphotr',np.int16),('rmag',np.float32),('rrms',np.float32),('rerr',np.float32),
('rasemi',np.float32),('rbsemi',np.float32),('rtheta',np.float32),
('ndeti',np.int16),('nphoti',np.int16),('imag',np.float32),('irms',np.float32),('ierr',np.float32),
('iasemi',np.float32),('ibsemi',np.float32),('itheta',np.float32),
('ndetz',np.int16),('nphotz',np.int16),('zmag',np.float32),('zrms',np.float32),('zerr',np.float32),
('zasemi',np.float32),('zbsemi',np.float32),('ztheta',np.float32),
('ndety',np.int16),('nphoty',np.int16),('ymag',np.float32),('yrms',np.float32),('yerr',np.float32),
('yasemi',np.float32),('ybsemi',np.float32),('ytheta',np.float32),
('ndetvr',np.int16),('nphotvr',np.int16),('vrmag',np.float32),('vrrms',np.float32),('vrerr',np.float32),
('vrasemi',np.float32),('vrbsemi',np.float32),('vrtheta',np.float32),
('asemi',np.float32),('asemierr',np.float32),('bsemi',np.float32),('bsemierr',np.float32),
('theta',np.float32),('thetaerr',np.float32),('fwhm',np.float32),('flags',np.int16),('class_star',np.float32),
('ebv',np.float32),('rmsvar',np.float32),('madvar',np.float32),('iqrvar',np.float32),('etavar',np.float32),
('jvar',np.float32),('kvar',np.float32),('chivar',np.float32),('romsvar',np.float32),
('variable10sig',np.int16),('nsigvar',np.float32),('overlap',bool)])
# Estimate number of measurements in pixel
metafiles = [m.replace('_cat','_meta').strip() for m in hlist['FILE']]
metastr = checkboundaryoverlap(metafiles,buffdict,verbose=False)
nmeasperarea = np.zeros(dln.size(metastr),int)
areadict = {'c4d':3.0, 'k4m':0.3, 'ksb':1.0} # total area
for j in range(dln.size(metastr)):
nmeasperarea[j] = metastr['nsources'][j]/areadict[metastr['instrument'][j]]
pixarea = hp.nside2pixarea(nside,degrees=True)
nmeasperpix = nmeasperarea * pixarea
totmeasest = np.sum(nmeasperpix)
# Break into smaller healpix regions
if (multilevel is True) & (nside == 128):
nsub = int(np.ceil(totmeasest/500000))
bestval,bestind = dln.closest([1,4,16,64],nsub)
hinside = [128,256,512,1024][bestind]
# Break into multiple smaller healpix
if hinside>128:
print('')
print('----- Breaking into smaller HEALPix using nside='+str(hinside)+' ------')
vecbound = hp.boundaries(nside,pix)
allpix = hp.query_polygon(hinside,np.transpose(vecbound))
print('Pix = '+','.join(allpix.astype(str)))
outfiles = []
# Check if any healpix need to be run/rerun
dopix = []
for i in range(len(allpix)):
pix1 = allpix[i]
# check the output file
outbase1 = str(parentpix)+'_n'+str(int(hinside))+'_'+str(pix1)
subdir1 = str(int(parentpix)//1000) # use the thousands to create subdirectory grouping
outfile1 = outdir+'/'+subdir1+'/'+outbase1+'.fits.gz'
outfiles.append(outfile1)
if (os.path.exists(outfile1) is False) | redo:
dopix.append(pix1)
print(str(len(dopix))+' nside='+str(hinside)+' healpix to run')
# Some healpix to run
if len(dopix)>0:
# Single process, just use subprocess
if nmulti==1:
for i in range(len(dopix)):
pix1 = dopix[i]
print('')
print('########### '+str(i+1)+' '+str(pix1)+' ###########')
print('')
# check the output file
outbase1 = str(parentpix)+'_n'+str(int(hinside))+'_'+str(pix1)
subdir1 = str(int(parentpix)//1000) # use the thousands to create subdirectory grouping
outfile1 = outdir+'/'+subdir1+'/'+outbase1+'.fits.gz'
if redo is True:
retcode = subprocess.call(['python',os.path.abspath(__file__),str(pix1),version,'--nside',str(hinside),'-r'],shell=False)
else:
retcode = subprocess.call(['python',os.path.abspath(__file__),str(pix1),version,'--nside',str(hinside)],shell=False)
# Multiple parallel processes, Running job daemon
else:
cmd = []
for i in range(len(dopix)):
cmd1 = os.path.abspath(__file__)+' '+str(dopix[i])+' '+version+' --nside '+str(hinside)
if redo: cmd1 = cmd1+' -r'
cmd.append(cmd1)
dirs = np.zeros(len(dopix),(np.str,200))
dirs[:] = tmpdir
jobs = jd.job_daemon(cmd,dirs,hyperthread=True,prefix='nsccmb',nmulti=nmulti)
# Load and concatenate all of the files
print('Combining all of the object catalogs')
allmeta = None
allobj = None
nobjects = []
totobjects = 0
for i in range(len(allpix)):
pix1 = allpix[i]
outfile1 = outfiles[i]
if os.path.exists(outfile1) is False:
print(outfile1+' NOT FOUND')
sys.exit()
# meta columns differ (nobjects); there'll be repeated exposures
meta1 = fits.getdata(outfile1,1)
if allmeta is None:
allmeta = meta1
else:
allmeta = np.hstack((allmeta,meta1))
hd1 = fits.getheader(outfile1,2)
print(str(i+1)+' '+outfile1+' '+str(hd1['naxis2']))
obj1 = fits.getdata(outfile1,2)
nobj1 = len(obj1)
# Update the objectIDs
dbfile_idstr1 = outfile1.replace('.fits.gz','_idstr.db')
objectid_orig = obj1['objectid']
objectid_new = dln.strjoin( str(parentpix)+'.', ((np.arange(nobj1)+1+totobjects).astype(np.str)) )
#updatecoldb(selcolname,selcoldata,updcolname,updcoldata,table,dbfile):
updatecoldb('objectid',objectid_orig,'objectid',objectid_new,'idstr',dbfile_idstr1)
# Update objectIDs in catalog
obj1['objectid'] = objectid_new
# Update objectIDs in high resolution HEALPix output file
print('Updating objectIDs in '+outfile1)
outfile1fits = outfile1.replace('.fits.gz','.fits')
if os.path.exists(outfile1fits): os.remove(outfile1fits)
Table(meta1).write(outfile1fits) # first, summary table
# append other fits binary tables
hdulist = fits.open(outfile1fits)
hdu = fits.table_to_hdu(Table(obj1)) # second, catalog
hdulist.append(hdu)
hdulist.writeto(outfile1fits,overwrite=True)
hdulist.close()
if os.path.exists(outfile1): os.remove(outfile1)
ret = subprocess.call(['gzip',outfile1fits]) # compress final catalog
if allobj is None:
allobj = obj1.copy()
else:
allobj = np.hstack((allobj,obj1.copy()))
nobjects.append(nobj1)
totobjects += nobj1
# Deal with duplicate metas
metaindex = dln.create_index(allmeta['base'])
for i in range(len(metaindex['value'])):
indx = metaindex['index'][metaindex['lo'][i]:metaindex['hi'][i]+1]
meta1 = allmeta[indx[0]].copy()
if len(indx)>1:
meta1['nobjects'] = np.sum(allmeta['nobjects'][indx])
if i==0:
sumstr = meta1
else:
sumstr = np.hstack((sumstr,meta1))
sumstr = Table(sumstr)
# Write the output file
print('Writing combined catalog to '+outfile)
if os.path.exists(outfile): os.remove(outfile)
sumstr.write(outfile) # first, summary table
# append other fits binary tables
hdulist = fits.open(outfile)
hdu = fits.table_to_hdu(Table(allobj)) # second, catalog
hdulist.append(hdu)
hdulist.writeto(outfile,overwrite=True)
hdulist.close()
if os.path.exists(outfile+'.gz'): os.remove(outfile+'.gz')
ret = subprocess.call(['gzip',outfile]) # compress final catalog
dt = time.time()-t0
print('dt = '+str(dt)+' sec.')
print('Breaking-up IDSTR information')
dbfiles_idstr = []
for i in range(len(allpix)):
outfile1 = outfiles[i]
dbfile_idstr1 = outfile1.replace('.fits.gz','_idstr.db')
dbfiles_idstr.append(dbfile_idstr1)
breakup_idstr(dbfiles_idstr)
sys.exit()
# Decide whether to load everything into RAM or use temporary database
usedb = False
if totmeasest>500000: usedb=True
dbfile = None
if usedb:
dbfile = tmproot+outbase+'_combine.db'
print('Using temporary database file = '+dbfile)
if os.path.exists(dbfile): os.remove(dbfile)
else:
print('Keeping all measurement data in memory')
#import pdb; pdb.set_trace()
# IDSTR database file
dbfile_idstr = outdir+'/'+subdir+'/'+outbase+'_idstr.db'
if os.path.exists(dbfile_idstr): os.remove(dbfile_idstr)
# Load the measurement catalog
# this will contain excess rows at the end, if all in RAM
# if using database, CAT is empty
cat, catcount, allmeta = loadmeas(metafiles,buffdict,dbfile=dbfile)
ncat = catcount
print(str(ncat))
# No measurements
if ncat==0:
print('No measurements for this healpix')
if (dbfile is not None):
if os.path.exists(dbfile): os.remove(dbfile)
if os.path.exists(dbfile_idstr): os.remove(dbfile_idstr)
print('Writing blank output file to '+outfile)
fits.PrimaryHDU().writeto(outfile)
if os.path.exists(outfile+'.gz'): os.remove(outfile+'.gz')
ret = subprocess.call(['gzip',outfile]) # compress final catalog
sys.exit()
# Spatially cluster the measurements with DBSCAN
# this might also resort CAT
objstr, cat = clusterdata(cat,ncat,dbfile=dbfile)
nobj = dln.size(objstr)
meascumcount = np.cumsum(objstr['NMEAS'])
print(str(nobj)+' unique objects clustered')
# Initialize the OBJ structured array
obj = np.zeros(nobj,dtype=dtype_obj)
# if nside>128 then we need unique IDs, so use PIX and *not* PARENTPIX
# add nside as well to make it truly unique
if nside>128:
obj['objectid'] = dln.strjoin( str(nside)+'.'+str(pix)+'.', ((np.arange(nobj)+1).astype(np.str)) )
else:
obj['objectid'] = dln.strjoin( str(pix)+'.', ((np.arange(nobj)+1).astype(np.str)) )
obj['pix'] = parentpix # use PARENTPIX
# all bad to start
for f in ['pmra','pmraerr','pmdec','pmdecerr','asemi','bsemi','theta','asemierr',
'bsemierr','thetaerr','fwhm','class_star','rmsvar','madvar','iqrvar',
'etavar','jvar','kvar','chivar','romsvar']: obj[f]=np.nan
for f in ['u','g','r','i','z','y','vr']:
obj[f+'mag'] = 99.99
obj[f+'err'] = 9.99
obj[f+'rms'] = np.nan
obj[f+'asemi'] = np.nan
obj[f+'bsemi'] = np.nan
obj[f+'theta'] = np.nan
obj['variable10sig'] = 0
obj['nsigvar'] = np.nan
#idstr = np.zeros(ncat,dtype=dtype_idstr)
# Initialize temporary IDSTR structure
idstr = np.zeros(100000,dtype=dtype_idstr)
nidstr = dln.size(idstr)
# Higher precision catalog
dtype_hicat = np.dtype([('MEASID',np.str,30),('EXPOSURE',np.str,40),('CCDNUM',int),('FILTER',np.str,3),
('MJD',float),('RA',float),('RAERR',float),('DEC',float),('DECERR',float),
('MAG_AUTO',float),('MAGERR_AUTO',float),('ASEMI',float),('ASEMIERR',float),('BSEMI',float),('BSEMIERR',float),
('THETA',float),('THETAERR',float),('FWHM',float),('FLAGS',int),('CLASS_STAR',float)])
# Convert to numpy structured array
dtype_hicatdb = np.dtype([('MEASID',np.str,30),('OBJLABEL',int),('EXPOSURE',np.str,40),('CCDNUM',int),('FILTER',np.str,3),
('MJD',float),('RA',float),('RAERR',float),('DEC',float),('DECERR',float),
('MAG_AUTO',float),('MAGERR_AUTO',float),('ASEMI',float),('ASEMIERR',float),('BSEMI',float),('BSEMIERR',float),
('THETA',float),('THETAERR',float),('FWHM',float),('FLAGS',int),('CLASS_STAR',float)])
t1 = time.time()
# Loop over the objects
meascount = 0
ngroup = -1
grpcount = 0
maxmeasload = 50000
ngrpcat = 0
ncat1 = 0
idstr_count = 0
idstr_grpcount = 0
fidmag = np.zeros(nobj,float)+np.nan # fiducial magnitude
for i,lab in enumerate(objstr['OBJLABEL']):
if (i % 1000)==0: print(i)
if (i % 1000)==0:
v = psutil.virtual_memory()
process = psutil.Process(os.getpid())
print('%6.1f Percent of memory used. %6.1f GB available. Process is using %6.2f GB of memory.' % (v.percent,v.available/1e9,process.memory_info()[0]/1e9))
# Get meas data for this object
if usedb is False:
oindx = np.arange(objstr['LO'][i],objstr['HI'][i]+1) # this fails if start,stop are the same
if objstr['NMEAS'][i]==1: oindx=np.atleast_1d(objstr['LO'][i])
ncat1 = dln.size(oindx)
cat1_orig = cat[oindx]
# Upgrade precisions of catalog
cat1 = np.zeros(ncat1,dtype=dtype_hicat)
cat1[...] = cat1_orig # stuff in the data
#for n in dtype_hicat.names: cat1[n] = cat1_orig[n]
del cat1_orig
# Get from the database
else:
# Get next group of object measurements
if grpcount>=ngroup:
# Use maxmeasload to figure out how many objects we can load
if i==0:
ngroup = np.max(np.where(meascumcount[i:]<=maxmeasload)[0])+1
else:
ngroup = np.max(np.where((meascumcount[i:]-meascumcount[i-1])<=maxmeasload)[0])+1
ngroup = np.max([1,ngroup]) # need to load at least 1
lab0 = lab
lab1 = objstr['OBJLABEL'][np.min([i+ngroup-1,nobj-1])]
#lab1 = labelindex['value'][np.min([i+ngroup-1,nobj-1])]
if ngrpcat>0: del grpcat
if ncat1>0: del cat1
grpcat = getdatadb(dbfile,objlabel=[lab0,lab1])
ngrpcat = dln.size(grpcat)
grpindex = dln.create_index(grpcat['OBJLABEL'])
#ngroup = len(grpindex['value'])
grpcount = 0
# Get the measurement data for this object
gindx = grpindex['index'][grpindex['lo'][grpcount]:grpindex['hi'][grpcount]+1]
cat1 = np.atleast_1d(grpcat[gindx])
ncat1 = len(cat1)
grpcount += 1
oindx = np.arange(ncat1)+meascount
meascount += ncat1
obj['ndet'][i] = ncat1
# Add IDSTR information to IDSTR structure/database
# update in groups to database so it takes less time
if idstr_count+ncat1 > nidstr:
print(' Adding more elements to temporary IDSTR structure')
idstr = add_elements(idstr,50000) # add more elements if necessary
# Add information to temporary IDSTR structure for this object
idstr['measid'][idstr_count:idstr_count+ncat1] = cat1['MEASID']
idstr['exposure'][idstr_count:idstr_count+ncat1] = cat1['EXPOSURE']
idstr['objectid'][idstr_count:idstr_count+ncat1] = obj['objectid'][i]
idstr['objectindex'][idstr_count:idstr_count+ncat1] = i
idstr_count += ncat1
idstr_grpcount += 1
# Write to database and reinitialize the temporary IDSTR structure
if (idstr_grpcount>5000) | (idstr_count>30000) | (i==(nobj-1)):
print(' Writing data to IDSTR database')
writeidstr2db(idstr[0:idstr_count],dbfile_idstr)
idstr = np.zeros(100000,dtype=dtype_idstr)
nidstr = dln.size(idstr)
idstr_count = 0
idstr_grpcount = 0
# Computing quantities
# Mean RA/DEC, RAERR/DECERR
if ncat1>1:
wt_ra = 1.0/cat1['RAERR']**2
wt_dec = 1.0/cat1['DECERR']**2
obj['ra'][i] = np.sum(cat1['RA']*wt_ra)/np.sum(wt_ra)
obj['raerr'][i] = np.sqrt(1.0/np.sum(wt_ra))
obj['dec'][i] = np.sum(cat1['DEC']*wt_dec)/np.sum(wt_dec)
obj['decerr'][i] = np.sqrt(1.0/np.sum(wt_dec))
obj['mjd'][i] = np.mean(cat1['MJD'])
obj['deltamjd'][i] = np.max(cat1['MJD'])-np.min(cat1['MJD'])
else:
obj['ra'][i] = cat1['RA']
obj['dec'][i] = cat1['DEC']
obj['raerr'][i] = cat1['RAERR']
obj['decerr'][i] = cat1['DECERR']
obj['mjd'][i] = cat1['MJD']
obj['deltamjd'][i] = 0
# Check for negative RA values
if obj['ra'][i] < 0:
obj['ra'][i] += 360
# Mean proper motion and errors
if ncat1>1:
raerr = np.array(cat1['RAERR']*1e3,np.float64) # milli arcsec
ra = np.array(cat1['RA'],np.float64)
ra -= np.mean(ra)
ra *= 3600*1e3 * np.cos(obj['dec'][i]/radeg) # convert to true angle, milli arcsec
t = cat1['MJD'].copy()
t -= np.mean(t)
t /= 365.2425 # convert to year
# Calculate robust slope
pmra, pmraerr = dln.robust_slope(t,ra,raerr,reweight=True)
obj['pmra'][i] = pmra # mas/yr
obj['pmraerr'][i] = pmraerr # mas/yr
decerr = np.array(cat1['DECERR']*1e3,np.float64) # milli arcsec
dec = np.array(cat1['DEC'],np.float64)
dec -= np.mean(dec)
dec *= 3600*1e3 # convert to milli arcsec
# Calculate robust slope
pmdec, pmdecerr = dln.robust_slope(t,dec,decerr,reweight=True)
obj['pmdec'][i] = pmdec # mas/yr
obj['pmdecerr'][i] = pmdecerr # mas/yr
# Mean magnitudes
# Convert totalwt and totalfluxwt to MAG and ERR
# and average the morphology parameters PER FILTER
filtindex = dln.create_index(cat1['FILTER'].astype(np.str))
nfilters = len(filtindex['value'])
resid = np.zeros(ncat1)+np.nan # residual mag
relresid = np.zeros(ncat1)+np.nan # residual mag relative to the uncertainty
for f in range(nfilters):
filt = filtindex['value'][f].lower()
findx = filtindex['index'][filtindex['lo'][f]:filtindex['hi'][f]+1]
obj['ndet'+filt][i] = filtindex['num'][f]
gph,ngph = dln.where(cat1['MAG_AUTO'][findx]<50)
obj['nphot'+filt][i] = ngph
if ngph==1:
obj[filt+'mag'][i] = cat1['MAG_AUTO'][findx[gph]]
obj[filt+'err'][i] = cat1['MAGERR_AUTO'][findx[gph]]
if ngph>1:
newmag, newerr = dln.wtmean(cat1['MAG_AUTO'][findx[gph]], cat1['MAGERR_AUTO'][findx[gph]],magnitude=True,reweight=True,error=True)
obj[filt+'mag'][i] = newmag
obj[filt+'err'][i] = newerr
# Calculate RMS
obj[filt+'rms'][i] = np.sqrt(np.mean((cat1['MAG_AUTO'][findx[gph]]-newmag)**2))
# Residual mag
resid[findx[gph]] = cat1['MAG_AUTO'][findx[gph]]-newmag
# Residual mag relative to the uncertainty
# set a lower threshold of 0.02 in the uncertainty
relresid[findx[gph]] = np.sqrt(ngph/(ngph-1)) * (cat1['MAG_AUTO'][findx[gph]]-newmag)/np.maximum(cat1['MAGERR_AUTO'][findx[gph]],0.02)
# Calculate mean morphology parameters
obj[filt+'asemi'][i] = np.mean(cat1['ASEMI'][findx])
obj[filt+'bsemi'][i] = np.mean(cat1['BSEMI'][findx])
obj[filt+'theta'][i] = np.mean(cat1['THETA'][findx])
# Calculate variability indices
gdresid = np.isfinite(resid)
ngdresid = np.sum(gdresid)
if ngdresid>0:
resid2 = resid[gdresid]
sumresidsq = np.sum(resid2**2)
tsi = np.argsort(cat1['MJD'][gdresid])
resid2tsi = resid2[tsi]
quartiles = np.percentile(resid2,[25,50,75])
# RMS
rms = np.sqrt(sumresidsq/ngdresid)
# MAD
madvar = 1.4826*np.median(np.abs(resid2-quartiles[1]))
# IQR
iqrvar = 0.741289*(quartiles[2]-quartiles[0])
# 1/eta
etavar = sumresidsq / np.sum((resid2tsi[1:]-resid2tsi[0:-1])**2)
obj['rmsvar'][i] = rms
obj['madvar'][i] = madvar
obj['iqrvar'][i] = iqrvar
obj['etavar'][i] = etavar
# Calculate variability indices wrt to uncertainties
gdrelresid = np.isfinite(relresid)
ngdrelresid = np.sum(gdrelresid)
if ngdrelresid>0:
relresid2 = relresid[gdrelresid]
pk = relresid2**2-1
jvar = np.sum( np.sign(pk)*np.sqrt(np.abs(pk)) )
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 14 21:31:56 2017
@author: Franz
"""
import scipy.signal
import numpy as np
import scipy.io as so
import os.path
import re
import matplotlib.pylab as plt
import h5py
import matplotlib.patches as patches
import numpy.random as rand
import seaborn as sns
import pandas as pd
from functools import reduce
import random
import pdb
class Mouse :
def __init__(self, idf, list=None, typ='') :
self.recordings = []
self.recordings.append(list)
self.typ = typ
self.idf = idf
def add(self, rec) :
self.recordings.append(rec)
def __len__(self) :
return len(self.recordings)
def __repr__(self) :
return ", ".join(self.recordings)
### PROCESSING OF RECORDING DATA ##############################################
def load_stateidx(ppath, name, ann_name=''):
""" load the sleep state file of recording (folder) $ppath/$name
@Return:
M,K sequence of sleep states, sequence of
0's and 1's indicating non-annotated and annotated states
"""
ddir = os.path.join(ppath, name)
ppath, name = os.path.split(ddir)
if ann_name == '':
ann_name = name
sfile = os.path.join(ppath, name, 'remidx_' + ann_name + '.txt')
f = open(sfile, 'r')
lines = f.readlines()
f.close()
n = 0
for l in lines:
if re.match('\d', l):
n += 1
M = np.zeros(n, dtype='int')
K = np.zeros(n, dtype='int')
i = 0
for l in lines :
if re.search('^\s+$', l) :
continue
if re.search('\s*#', l) :
continue
if re.match('\d+\s+-?\d+', l) :
a = re.split('\s+', l)
M[i] = int(a[0])
K[i] = int(a[1])
i += 1
return M,K
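# Hedged usage sketch (hypothetical helper, not part of the original module): fraction
# of annotated bins spent in each brain state. The numeric coding 1=REM, 2=Wake,
# 3=NREM is an assumption and should be checked against the annotation files used.
def _state_fraction_demo(ppath, name):
    M, K = load_stateidx(ppath, name)
    M = M[K > 0]                                  # keep only annotated bins
    names = {1: 'REM', 2: 'Wake', 3: 'NREM'}
    return {names.get(s, str(s)): np.mean(M == s) for s in np.unique(M)}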
def load_recordings(ppath, rec_file) :
"""
load_recordings(ppath, rec_file)
load recording listing with syntax:
[E|C] \s+ recording_name
#COMMENT
@RETURN:
(list of controls, list of experiments)
"""
exp_list = []
ctr_list = []
rfile = os.path.join(ppath, rec_file)
f = open(rfile, newline=None)
lines = f.readlines()
f.close()
for l in lines :
if re.search('^\s+$', l) :
continue
if re.search('^\s*#', l) :
continue
a = re.split('\s+', l)
if re.search('E', a[0]) :
exp_list.append(a[1])
if re.search('C', a[0]) :
ctr_list.append(a[1])
return ctr_list, exp_list
def load_dose_recordings(ppath, rec_file):
"""
load recording list with following syntax:
A line is either control or experiments; Control recordings look like:
C \s recording_name
Experimental recordings also come with an additional dose parameter
(allowing for comparison of multiple doses with controls)
E \s recording_name \s dose_1
E \s recording_name \s dose_2
"""
rfile = os.path.join(ppath, rec_file)
f = open(rfile, newline=None)
lines = f.readlines()
f.close()
# first get all potential doses
doses = {}
ctr_list = []
for l in lines :
if re.search('^\s+$', l):
continue
if re.search('^\s*#', l):
continue
a = re.split('\s+', l)
if re.search('E', a[0]):
if a[2] in doses:
doses[a[2]].append(a[1])
else:
doses[a[2]] = [a[1]]
if re.search('C', a[0]):
ctr_list.append(a[1])
return ctr_list, doses
def get_snr(ppath, name):
"""
read and return sampling rate (SR) from file $ppath/$name/info.txt
"""
fid = open(os.path.join(ppath, name, 'info.txt'), newline=None)
lines = fid.readlines()
fid.close()
values = []
for l in lines :
a = re.search("^" + 'SR' + ":" + "\s+(.*)", l)
if a :
values.append(a.group(1))
return float(values[0])
def get_infoparam(ifile, field):
"""
NOTE: field is a single string
and the function does not check for the type
of the values for field.
In fact, it just returns the string following field
"""
fid = open(ifile, newline=None)
lines = fid.readlines()
fid.close()
values = []
for l in lines :
a = re.search("^" + field + ":" + "\s+(.*)", l)
if a :
values.append(a.group(1))
return values
def add_infoparam(ifile, field, vals):
"""
:param ifile: info file
:param field: Parameters specifier, e.g. 'SR'
:param vals: list with parameters
"""
fid = open(ifile, 'a')
vals = [str(s) for s in vals]
param = " ".join(vals)
fid.write('%s:\t%s' % (field, param))
fid.write(os.linesep)
fid.close()
def laser_start_end(laser, SR=1525.88, intval=5):
"""laser_start_end(ppath, name)
print start and end index of laser stimulation trains: For example,
if you were stimulated for 2 min every 20 min with 20 Hz, return the
start and end index of each 2 min stimulation period (train)
returns the tuple (istart, iend), both indices are inclusive,
i.e. part of the sequence
@Param:
laser - laser, vector of 0s and 1s
intval - minimum time separation [s] between two laser trains
@Return:
(istart, iend) - tuple of two np.arrays with laser start and end indices
"""
idx = np.where(laser > 0.5)[0]
if len(idx) == 0 :
return ([], [])
idx2 = np.nonzero(np.diff(idx)*(1./SR) > intval)[0]
istart = np.hstack([idx[0], idx[idx2+1]])
iend = np.hstack([idx[idx2], idx[-1]])
return (istart, iend)
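# Hedged usage sketch (hypothetical helper): load the laser signal of a recording and
# report the duration of each stimulation train in seconds, using only functions
# defined in this module (get_snr, load_laser, laser_start_end).
def _laser_train_durations_demo(ppath, name):
    sr = get_snr(ppath, name)
    laser = load_laser(ppath, name)
    istart, iend = laser_start_end(laser, SR=sr)
    return (np.array(iend) - np.array(istart) + 1) / sr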
def load_laser(ppath, name):
"""
load laser from recording ppath/name
@RETURN:
@laser, vector of 0's and 1's
"""
# laser might be .mat or h5py file
# perhaps we could find a better way of testing that
file = os.path.join(ppath, name, 'laser_'+name+'.mat')
try:
laser = np.array(h5py.File(file,'r').get('laser'))
except:
laser = so.loadmat(file)['laser']
return np.squeeze(laser)
def laser_protocol(ppath, name):
"""
What was the stimulation frequency and the inter-stimulation interval for recording
$ppath/$name?
@Return:
inter-stimulation intervals, avg. inter-stimulation interval, frequency
"""
laser = load_laser(ppath, name)
SR = get_snr(ppath, name)
# first get inter-stimulation interval
(istart, iend) = laser_start_end(laser, SR)
intv = np.diff(np.array(istart/float(SR)))
d = intv/60.0
print("The laser was turned on in average every %.2f min," % (np.mean(d)))
print("with a min. interval of %.2f min and max. interval of %.2f min." % (np.min(d), np.max(d)))
print("Laser stimulation lasted for %f s." % (np.mean(np.array(iend/float(SR)-istart/float(SR)).mean())))
# print laser start times
print("Start time of each laser trial:")
j=1
for t in istart:
print("trial %d: %.2f" % (j, (t / float(SR)) / 60))
j += 1
# for each laser stimulation interval, check laser stimulation frequency
dt = 1/float(SR)
freq = []
laser_up = []
laser_down = []
for (i,j) in zip(istart, iend):
part = laser[i:j+1]
(a,b) = laser_start_end(part, SR, 0.005)
dur = (j-i+1)*dt
freq.append(len(a) / dur)
up_dur = (b-a+1)*dt*1000
down_dur = (a[1:]-b[0:-1]-1)*dt*1000
laser_up.append(np.mean(up_dur))
laser_down.append(np.mean(down_dur))
print(os.linesep + "Laser stimulation freq. was %.2f Hz," % np.mean(np.array(freq)))
print("with laser up and down duration of %.2f and %.2f ms." % (np.mean(np.array(laser_up)), np.mean(np.array(laser_down))))
return d, np.mean(d), np.mean(np.array(freq))
def swap_eeg(ppath, rec, ch='EEG'):
"""
swap EEG and EEG2 or EMG with EMG2 if $ch='EMG'
"""
if ch == 'EEG':
name = 'EEG'
else:
name = ch
EEG = so.loadmat(os.path.join(ppath, rec, name+'.mat'))[name]
EEG2 = so.loadmat(os.path.join(ppath, rec, name+'2.mat'))[name + '2']
tmp = EEG
EEG = EEG2
EEG2 = tmp
file_eeg1 = os.path.join(ppath, rec, '%s.mat' % name)
file_eeg2 = os.path.join(ppath, rec, '%s2.mat' % name)
so.savemat(file_eeg1, {name : EEG})
so.savemat(file_eeg2, {name+'2' : EEG2})
def eeg_conversion(ppath, rec, conv_factor=0.195):
"""
multiply all EEG and EMG channels with the given
conversion factor and write the conversion factor
as parameter (conversion:) into the info file.
The conversion is only executed if there's no conversion factor
specified in the info file yet.
:param ppath: base folder
:param rec: recording
:param conv_factor: conversion factor
:return: n/s
"""
ifile = os.path.join(ppath, rec, 'info.txt')
conv = get_infoparam(ifile, 'conversion')
if len(conv) > 0:
print("found conversion: parameter in info file")
print("returning: no conversion necessary!!!")
return
else:
files = os.listdir(os.path.join(ppath, rec))
files = [f for f in files if re.match('^EEG', f)]
for f in files:
name = re.split('\.', f)[0]
EEG = so.loadmat(os.path.join(ppath, rec, name+'.mat'), squeeze_me=True)[name]
if EEG[0].dtype == 'int16':
EEG = EEG * conv_factor
file_eeg = os.path.join(ppath, rec, '%s.mat' % name)
print(file_eeg)
so.savemat(file_eeg, {name: EEG})
else:
print('Wrong datatype! probably already converted; returning...')
return
files = os.listdir(os.path.join(ppath, rec))
files = [f for f in files if re.match('^EMG', f)]
for f in files:
name = re.split('\.', f)[0]
EMG = so.loadmat(os.path.join(ppath, rec, name+'.mat'), squeeze_me=True)[name]
if EMG[0].dtype == 'int16':
EMG = EMG * conv_factor
file_emg = os.path.join(ppath, rec, '%s.mat' % name)
print(file_emg)
so.savemat(file_emg, {name: EMG})
else:
print('Wrong datatype! probably already converted; returning...')
return
add_infoparam(ifile, 'conversion', [conv_factor])
calculate_spectrum(ppath, rec)
### DEPRECATED ############################################
def video_pulse_detection(ppath, rec, SR=1000, iv = 0.01):
"""
return index of each video frame onset
ppath/rec - recording
@Optional
SR - sampling rate of EEG(!) recording
iv - minimum time interval (in seconds) between two frames
@Return
index of each video frame onset
"""
V = np.squeeze(so.loadmat(os.path.join(ppath, rec, 'videotime_' + rec + '.mat'))['video'])
TS = np.arange(0, len(V))
# indices where there's a jump in the signal
t = TS[np.where(V<0.5)];
if len(t) == 0:
idx = []
return idx
# time points where the interval between jumps is longer than iv
t2 = np.where(np.diff(t)*(1.0/SR)>=iv)[0]
idx = np.concatenate(([t[0]],t[t2+1]))
return idx
# SIGNAL PROCESSING ###########################################################
def my_lpfilter(x, w0, N=4):
"""
create a lowpass Butterworth filter with a cutoff of w0 * the Nyquist rate.
The nice thing about this filter is that it has zero-phase distortion.
A conventional lowpass filter would introduce a phase lag.
w0 - filter cutoff; value between 0 and 1, where 1 corresponds to the Nyquist frequency.
So if you want a filter with cutoff at x Hz, the corresponding w0 value is given by
w0 = 2 * x / sampling_rate
N - order of filter
@Return:
low-pass filtered signal
See also my hp_filter, or my_bpfilter
"""
from scipy import signal
b,a = signal.butter(N, w0)
y = signal.filtfilt(b,a, x)
return y
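# Hedged example (hypothetical helper): the docstring's w0 = 2 * cutoff / sampling_rate
# conversion made explicit, so callers can think in Hz instead of normalized units.
def _lowpass_hz_demo(x, cutoff_hz, sampling_rate, N=4):
    """Low-pass filter x at cutoff_hz [Hz] given its sampling rate [Hz]."""
    w0 = 2.0 * cutoff_hz / sampling_rate   # normalized cutoff, 1.0 = Nyquist
    return my_lpfilter(x, w0, N=N)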
def my_hpfilter(x, w0, N=4):
"""
create an N-th order highpass Butterworth filter with cutoff frequency w0 * sampling_rate/2
"""
from scipy import signal
# use scipy.signal.firwin to generate filter
#taps = signal.firwin(numtaps, w0, pass_zero=False)
#y = signal.lfilter(taps, 1.0, x)
b,a = signal.butter(N, w0, 'high')
y = signal.filtfilt(b,a, x, padlen = x.shape[0]-1)
return y
def my_bpfilter(x, w0, w1, N=4,bf=True):
"""
create N-th order bandpass Butterworth filter with corner frequencies
w0*sampling_rate/2 and w1*sampling_rate/2
"""
#from scipy import signal
#taps = signal.firwin(numtaps, w0, pass_zero=False)
#y = signal.lfilter(taps, 1.0, x)
#return y
from scipy import signal
b,a = signal.butter(N, [w0, w1], 'bandpass')
if bf:
y = signal.filtfilt(b,a, x)
else:
y = signal.lfilter(b,a, x)
return y
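# Hedged example (hypothetical helper): band-pass in Hz using the same normalized
# corner-frequency convention as above; the 5-10 Hz defaults are illustrative only.
def _bandpass_hz_demo(x, sampling_rate, f0=5.0, f1=10.0, N=4):
    w0 = 2.0 * f0 / sampling_rate
    w1 = 2.0 * f1 / sampling_rate
    return my_bpfilter(x, w0, w1, N=N)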
def my_notchfilter(x, sr=1000, band=5, freq=60, ripple=10, order=3, filter_type='butter'):
from scipy.signal import iirfilter,lfilter
fs = sr
nyq = fs/2.0
low = freq - band/2.0
high = freq + band/2.0
low = low/nyq
high = high/nyq
b, a = iirfilter(order, [low, high], rp=ripple, btype='bandstop',
analog=False, ftype=filter_type)
filtered_data = lfilter(b, a, x)
return filtered_data
def downsample_vec(x, nbin):
"""
y = downsample_vec(x, nbin)
downsample the vector x by replacing nbin consecutive \
bin by their mean \
@RETURN: the downsampled vector
"""
n_down = int(np.floor(len(x) / nbin))
x = x[0:n_down*nbin]
x_down = np.zeros((n_down,))
# 0 1 2 | 3 4 5 | 6 7 8
for i in range(nbin) :
idx = list(range(i, int(n_down*nbin), int(nbin)))
x_down += x[idx]
return x_down / nbin
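# Hedged sketch (hypothetical helper): downsample a signal together with its time axis;
# dt is the original sampling interval in seconds.
def _downsample_with_time_demo(x, dt, nbin):
    x_down = downsample_vec(np.asarray(x, dtype=float), nbin)
    t_down = np.arange(len(x_down)) * dt * nbin   # time stamp of each averaged bin
    return t_down, x_down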
def smooth_data(x, sig):
"""
y = smooth_data(x, sig)
smooth data vector @x with gaussian kernel
with standard deviation $sig
"""
sig = float(sig)
if sig == 0.0:
return x
# gaussian:
gauss = lambda x, sig : (1/(sig*np.sqrt(2.*np.pi)))*np.exp(-(x*x)/(2.*sig*sig))
bound = 1.0/10000
L = 10.
p = gauss(L, sig)
while (p > bound):
L = L+10
p = gauss(L, sig)
#F = map(lambda x: gauss((x, sig)), np.arange(-L, L+1.))
# py3:
F = [gauss(x, sig) for x in np.arange(-L, L+1.)]
F = F / np.sum(F)
return scipy.signal.fftconvolve(x, F, 'same')
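# Hedged note as a tiny example (hypothetical helper): sig is in units of samples, so
# smoothing by roughly sig_sec seconds at sampling rate sr uses sig = sig_sec * sr.
def _smooth_seconds_demo(x, sr, sig_sec):
    return smooth_data(x, sig_sec * sr)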
def power_spectrum(data, length, dt):
"""
scipy's implementation of Welch's method using hanning window to estimate
the power spectrum
The function returns power density with units V**2/Hz
see also https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.signal.welch.html
The label on the y-axis should say PSD [V**2/Hz]
@Parameters
data - time series; float vector!
length - length of hanning window, even integer!
@Return:
power density, frequencies
The function returns power density in units V^2 / Hz
Note that
np.var(data) ~ np.sum(power density) * (frequencies[1]-frequencies[0])
"""
f, pxx = scipy.signal.welch(data, fs=1.0/dt, window='hanning', nperseg=int(length), noverlap=int(length/2))
return pxx, f
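# Hedged sketch (hypothetical helper): numerically checks the relation quoted in the
# docstring, np.var(data) ~ np.sum(power density) * df, on synthetic white noise.
def _power_spectrum_check_demo(sr=1000.0, nsec=10, seed=0):
    data = np.random.RandomState(seed).randn(int(sr * nsec))
    pxx, f = power_spectrum(data, length=int(2 * sr), dt=1.0 / sr)
    df = f[1] - f[0]
    return np.var(data), np.sum(pxx) * df   # the two values should nearly agree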
def spectral_density(data, length, nfft, dt):
"""
calculate the spectrogram for the time series given by data with time resolution dt
The powerspectrum for each window of length $length is computed using
Welch's method.
The windows for the powerspectrum calculation are half-overlapping. If length contains 5s of data,
then the first window goes from 0s to 5s, the second window from 2.5 to 7.5s, ...
The last window ends at ceil(len(data)/length)*5s
Another example: assume we have 13 s of data with 5 s windows, then the power density is calculated for the following
time windows:
0 -- 5, 2.5 -- 7.5, 5 -- 10, 7.5 -- 12.5, 10 -- 15
In total there are thus 2*ceil(13/5)-1 = 5 windows
The last window starts at 2*3-2 * (5/2) = 10 s
Note: the returned time axis goes from 0 to 10 s in 2.5 s steps
@Parameters:
data - time series
length - window length of data used to calculate powerspectrum.
Note that the time resolution of the spectrogram is length/2
nfft - size of the window used to calculate the powerspectrum.
determines the frequency resolution.
@Return:
Powspectrum, frequencies, time axis
"""
n = len(data)
k = int(np.ceil((1.0*n)/length))
data = np.concatenate((data, np.zeros((length*k-n,))))
fdt = length*dt/2 # time step for spectrogram
t = np.arange(0, fdt*(2*k-2)+fdt/2.0, fdt)
# frequency axis of spectrogram
f = np.linspace(0, 1, int(np.ceil(nfft/2.0))+1) * (0.5/dt)
# the power spectrum is calculated for 2*k-1 time points
Pow = np.zeros((len(f), k*2-1))
j = 0
for i in range(0, k-2+1):
w1=data[(length*i):(i+1)*length]
w2=data[length*i+int(length/2):(i+1)*length+int(length/2)]
Pow[:,j] = power_spectrum(w1, nfft, dt)[0]
Pow[:,j+1] = power_spectrum(w2, nfft, dt)[0]
j += 2
# last time point
Pow[:,j],f = power_spectrum(data[length*(k-1):k*length], nfft, dt)
return Pow, f, t
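# Hedged sketch (hypothetical helper): reproduces the window bookkeeping described in
# the docstring above, e.g. 13 s of data with 5 s windows -> 2*ceil(13/5)-1 = 5
# half-overlapping windows starting at 0, 2.5, 5, 7.5 and 10 s.
def _spectrogram_window_starts_demo(nsec=13.0, win_sec=5.0):
    k = int(np.ceil(nsec / win_sec))
    nwin = 2 * k - 1
    starts = np.arange(nwin) * win_sec / 2.0
    return nwin, starts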
def calculate_spectrum(ppath, name, fres=0.5):
"""
calculate EEG and EMG spectrogram used for sleep stage detection.
Function assumes that data vectors EEG.mat and EMG.mat exist in recording
folder ppath/name; these are used to calculate the powerspectrum
fres - resolution of frequency axis
all data saved in "true" mat files
:return EEG Spectrogram, EMG Spectrogram, frequency axis, time axis
"""
SR = get_snr(ppath, name)
swin = round(SR)*5
fft_win = round(swin/5) # approximate number of data points per second
if (fres == 1.0) or (fres == 1):
fft_win = int(fft_win)
elif fres == 0.5:
fft_win = 2*int(fft_win)
else:
print("Resolution %f not allowed; please use either 1 or 0.5" % fres)
(peeg2, pemg2) = (False, False)
# Calculate EEG spectrogram
EEG = np.squeeze(so.loadmat(os.path.join(ppath, name, 'EEG.mat'))['EEG'])
Pxx, f, t = spectral_density(EEG, int(swin), int(fft_win), 1/SR)
if os.path.isfile(os.path.join(ppath, name, 'EEG2.mat')):
peeg2 = True
EEG = np.squeeze(so.loadmat(os.path.join(ppath, name, 'EEG2.mat'))['EEG2'])
Pxx2, f, t = spectral_density(EEG, int(swin), int(fft_win), 1/SR)
#save the stuff to a .mat file
spfile = os.path.join(ppath, name, 'sp_' + name + '.mat')
if peeg2 == True:
so.savemat(spfile, {'SP':Pxx, 'SP2':Pxx2, 'freq':f, 'dt':t[1]-t[0],'t':t})
else:
so.savemat(spfile, {'SP':Pxx, 'freq':f, 'dt':t[1]-t[0],'t':t})
# Calculate EMG spectrogram
EMG = np.squeeze(so.loadmat(os.path.join(ppath, name, 'EMG.mat'))['EMG'])
Qxx, f, t = spectral_density(EMG, int(swin), int(fft_win), 1/SR)
if os.path.isfile(os.path.join(ppath, name, 'EMG2.mat')):
pemg2 = True
EMG = np.squeeze(so.loadmat(os.path.join(ppath, name, 'EMG2.mat'))['EMG2'])
Qxx2, f, t = spectral_density(EMG, int(swin), int(fft_win), 1/SR)
# save the stuff to .mat file
spfile = os.path.join(ppath, name, 'msp_' + name + '.mat')
if pemg2 == True:
so.savemat(spfile, {'mSP':Qxx, 'mSP2':Qxx2, 'freq':f, 'dt':t[1]-t[0],'t':t})
else:
so.savemat(spfile, {'mSP':Qxx, 'freq':f, 'dt':t[1]-t[0],'t':t})
return Pxx, Qxx, f, t
def whiten_spectrogram(ppath, name, fmax=50):
"""
experimental
:param ppath:
:param name:
:param fmax:
:return:
"""
P = so.loadmat(os.path.join(ppath, name, 'sp_' + name + '.mat'), squeeze_me=True)
SPE = P['SP']
freq = P['freq']
ifreq = np.where(freq <= fmax)[0]
SPE = SPE[ifreq,:]
nfilt = 5
filt = np.ones((nfilt, nfilt))
filt = np.divide(filt, filt.sum())
#SPE = scipy.signal.convolve2d(SPE, filt, boundary='symm', mode='same')
m = np.mean(SPE,axis=1)
SPE -= np.tile(m, (SPE.shape[1], 1)).T
SPE = SPE.T
C = np.dot(SPE.T, SPE)
[evals, L] = np.linalg.eigh(C)
idx = np.argsort(evals)
D = np.diag(np.sqrt(evals[idx]))
L = L[:,idx]
W = np.dot(L, np.dot(np.linalg.inv(D),np.dot(L.T,SPE.T)))
nfilt = 2
filt = np.ones((nfilt,nfilt))
filt = np.divide(filt, filt.sum())
W = scipy.signal.convolve2d(W, filt, boundary='symm', mode='same')
return W, D, L
def normalize_spectrogram(ppath, name, fmax=0, band=[], vm=5, pplot=True, sptype='', filt_dim=[]):
"""
    Normalize EEG spectrogram by dividing each frequency band by its average value.
:param ppath, name: base folder, recording name
:param fmax: maximum frequency; frequency axis of spectrogram goes from 0 to fmax
if fmax=0, use complete frequency axis
:param band: list or tuple, define lower and upper range of a frequency band,
if pplot=True, plot band, along with spectrogram;
if band=[], disregard
:param vm: color range for plotting spectrogram
:pplot: if True, plot spectrogram along with power band
:sptype: if sptype='fine' plot 'special' spectrogram, save under sp_fine_$name.mat;
otherwise plot 'normal' spectrogram sp_$name.mat
:filt_dim: list or tuple; the two values define the dimensions of box filter
used to filter the normalized spectrogram; if filt_dim=[], then no filtering
:return SPE, t, freq: normalized spectrogram (np.array), time axis, frequency axis
"""
if (len(sptype) == 0) or (sptype=='std'):
P = so.loadmat(os.path.join(ppath, name, 'sp_' + name + '.mat'), squeeze_me=True)
elif sptype == 'fine':
P = so.loadmat(os.path.join(ppath, name, 'sp_fine_' + name + '.mat'), squeeze_me=True)
SPE = P['SP']
freq = P['freq']
t = P['t']
if fmax > 0:
ifreq = np.where(freq <= fmax)[0]
else:
ifreq = np.arange(0, len(freq))
freq = freq[ifreq]
nfilt = 4
filt = np.ones((nfilt,nfilt))
filt = np.divide(filt, filt.sum())
SPE = SPE[ifreq,:]
# before
#SPE = SPE[ifreq]
#W = scipy.signal.convolve2d(SPE, filt, boundary='symm', mode='same')
#sp_mean = W.mean(axis=1)
sp_mean = SPE.mean(axis=1)
SPE = np.divide(SPE, np.tile(sp_mean, (SPE.shape[1], 1)).T)
if len(filt_dim) > 0:
filt = np.ones(filt_dim)
filt = np.divide(filt, filt.sum())
SPE = scipy.signal.convolve2d(SPE, filt, boundary='symm', mode='same')
# get high gamma peaks
if len(band) > 0:
iband = np.where((freq >= band[0]) & (freq <= band[-1]))[0]
pow_band = SPE[iband,:].mean(axis=0)
thr = pow_band.mean() + pow_band.std()
idx = np.where(pow_band > thr)[0]
# plot normalized spectrogram, along with band
if pplot:
plt.ion()
plt.figure()
if len(band) > 0:
med = np.median(SPE.mean(axis=0))
ax1 = plt.subplot(211)
plt.pcolormesh(t, freq, SPE, vmin=0, vmax=vm*med, cmap='jet')
plt.subplot(212, sharex=ax1)
plt.plot(t,SPE[iband,:].mean(axis=0))
plt.plot(t[idx], pow_band[idx], '.')
plt.draw()
return SPE, t, freq[ifreq]
def recursive_spectrogram(ppath, name, sf=0.3, alpha=0.3, pplot=True):
"""
calculate EEG/EMG spectrogram in a way that can be implemented by a closed-loop system.
The spectrogram is temporally filtered using a recursive implementation of a lowpass filter
@Parameters:
ppath/name - mouse EEG recording
sf - smoothing factor along frequency axis
alpha - temporal lowpass filter time constant
pplot - if pplot==True, plot figure
@Return:
SE, SM - EEG, EMG spectrogram
"""
EEG = np.squeeze(so.loadmat(os.path.join(ppath, name, 'EEG.mat'))['EEG'])
EMG = np.squeeze(so.loadmat(os.path.join(ppath, name, 'EMG.mat'))['EMG'])
len_eeg = len(EEG)
fdt = 2.5
SR = get_snr(ppath, name)
# we calculate the powerspectrum for 5s windows
swin = int(np.round(SR) * 5.0)
# but we sample new data each 2.5 s
swinh = int(swin/2.0)
fft_win = int(swin / 5.0)
# number of 2.5s long samples
spoints = int(np.floor(len_eeg / swinh))
SE = np.zeros((int(fft_win/2+1), spoints))
SM = np.zeros((int(fft_win/2+1), spoints))
print("Starting calculating spectrogram for %s..." % name)
for i in range(2, spoints):
# we take the last two swinh windows (the new 2.5 s long sample and the one from
# the last iteration)
x = EEG[(i-2)*swinh:i*swinh]
[p, f] = power_spectrum(x.astype('float'), fft_win, 1.0/SR)
p = smooth_data(p, sf)
# recursive low pass filtering of spectrogram:
# the current state is an estimate of the current sample and the previous state
SE[:,i] = alpha*p + (1-alpha) * SE[:,i-1]
# and the same of EMG
x = EMG[(i-2)*swinh:i*swinh]
[p, f] = power_spectrum(x.astype('float'), fft_win, 1.0/SR)
p = smooth_data(p, sf)
SM[:,i] = alpha*p + (1-alpha) * SM[:,i-1]
if pplot:
# plot EEG spectrogram
t = np.arange(0, SM.shape[1])*fdt
plt.figure()
ax1 = plt.subplot(211)
im = np.where((f>=0) & (f<=30))[0]
med = np.median(SE.max(axis=0))
ax1.imshow(np.flipud(SE[im,:]), vmin=0, vmax=med*2)
plt.xticks(())
ix = list(range(0, 30, 10))
fi = f[im][::-1]
plt.yticks(ix, list(map(int, fi[ix])))
box_off(ax1)
plt.axis('tight')
plt.ylabel('Freq (Hz)')
# plot EMG amplitude
ax2 = plt.subplot(212)
im = np.where((f>=10) & (f<100))[0]
df = np.mean(np.diff(f))
# amplitude is the square root of the integral
ax2.plot(t, np.sqrt(SM[im,:].sum(axis=0)*df)/1000.0)
plt.xlim((0, t[-1]))
plt.ylabel('EMG Ampl (mV)')
plt.xlabel('Time (s)')
box_off(ax2)
plt.show(block=False)
return SE, SM, f
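
# Hypothetical sketch (not part of the original module): the temporal smoothing in
# recursive_spectrogram() is a first-order recursive (exponential) low-pass filter,
# S[:,i] = alpha * p + (1 - alpha) * S[:,i-1]. The toy input below is made up.
def _demo_recursive_lowpass(alpha=0.3):
    p = np.array([0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0])  # made-up power values
    s = np.zeros_like(p)
    for i in range(1, len(p)):
        # each estimate mixes the current sample with the previous state
        s[i] = alpha * p[i] + (1 - alpha) * s[i - 1]
    return s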
def recursive_sleepstate_rem(ppath, recordings, sf=0.3, alpha=0.3, past_mu=0.2, std_thdelta = 1.5, past_len=120, sdt=2.5, psave=False, xemg=False):
"""
predict a REM period only based on EEG/EMG history; the same algorithm is also used for
closed-loop REM sleep manipulation.
The algorithm uses for REM sleep detection a threshold on delta power, EMG power, and theta/delta power.
For theta/delta I use two thresholds: A hard (larger) threshold and a soft (lower) threshold. Initially,
    theta/delta has to cross the hard threshold to initiate a REM period. Then, as long as
    theta/delta is above the soft threshold (and EMG power stays low), REM sleep continues.
@Parameters:
ppath base folder with recordings
recordings list of recordings
sf smoothing factor for each powerspectrum
alpha smoothing factor along time dimension
past_mu percentage (0 .. 1) of brain states that are allowed to have EMG power larger than threshold
during the last $past_len seconds
past_len window to calculate $past_mu
std_thdelta the hard theta/delta threshold is given by, mean(theta/delta) + $std_thdelta * std(theta/delta)
        sdt time bin for brain state, typically 2.5s
psave if True, save threshold parameters to file.
"""
idf = re.split('_', recordings[0])[0]
# 02/05/2020 changed from int to float:
past_len = float(np.round(past_len/sdt))
# calculate spectrogram
(SE, SM) = ([],[])
for rec in recordings:
A,B, freq = recursive_spectrogram(ppath, rec, sf=sf, alpha=alpha)
SE.append(A)
SM.append(B)
# fuse lists SE and SM
SE = np.squeeze(reduce(lambda x,y: np.concatenate((x,y)), SE))
if not xemg:
SM = np.squeeze(reduce(lambda x,y: np.concatenate((x,y)), SM))
else:
SM = SE
# EEG, EMG bands
ntbins = SE.shape[1]
r_delta = [0.5, 4]
r_theta = [5,12]
# EMG band
r_mu = [300, 500]
i_delta = np.where((freq >= r_delta[0]) & (freq <= r_delta[1]))[0]
i_theta = np.where((freq >= r_theta[0]) & (freq <= r_theta[1]))[0]
i_mu = np.where((freq >= r_mu[0]) & (freq <= r_mu[1]))[0]
pow_delta = np.sum(SE[i_delta,:], axis=0)
pow_theta = np.sum(SE[i_theta,:], axis=0)
pow_mu = np.sum(SM[i_mu,:], axis=0)
# theta/delta
th_delta = np.divide(pow_theta, pow_delta)
thr_th_delta1 = np.nanmean(th_delta) + std_thdelta*np.nanstd(th_delta)
thr_th_delta2 = np.nanmean(th_delta) + 0.0*np.nanstd(th_delta)
thr_delta = pow_delta.mean()
thr_mu = pow_mu.mean() + 0.5*np.nanstd(pow_mu)
### The actual algorithm for REM detection
rem_idx = np.zeros((ntbins,))
prem = 0 # whether or not we are in REM
for i in range(ntbins):
if prem == 0 and pow_delta[i] < thr_delta and pow_mu[i] < thr_mu:
### could be REM
if th_delta[i] > thr_th_delta1:
### we are potentially entering REM
if (i - past_len) >= 0:
sstart = int(i-past_len)
else:
sstart = 0
# count the percentage of brainstate bins with elevated EMG power
c_mu = np.sum( np.where(pow_mu[sstart:i]>thr_mu)[0] ) / (past_len*1.0)
if c_mu < past_mu:
### we are in REM
prem = 1 # turn laser on
rem_idx[i] = 1
# We are currently in REM; do we stay there?
if prem == 1:
### REM continues, if theta/delta is larger than soft threshold and if there's
### no EMG activation
if (th_delta[i] > thr_th_delta2) and (pow_mu[i] < thr_mu):
rem_idx[i] = 1
else:
prem = 0 #turn laser off
# for loop ends
# Determine which channel is EEG, EMG
ch_alloc = get_infoparam(os.path.join(ppath, recordings[0], 'info.txt'), 'ch_alloc')[0]
# plot the whole stuff:
# (1) spectrogram
# (2) EMG Power
# (3) Delta
# (4) TH_Delta
plt.figure()
t = np.arange(0, sdt*(ntbins-1)+sdt/2.0, sdt)
ax1 = plt.subplot(411)
im = np.where((freq>=0) & (freq<=30))[0]
med = np.median(SE.max(axis=0))
ax1.imshow(np.flipud(SE[im,:]), vmin=0, vmax=med*2)
plt.yticks(list(range(0, 31, 10)), list(range(30, -1, -10)))
plt.ylabel('Freq. (Hz)')
plt.axis('tight')
ax2 = plt.subplot(412)
ax2.plot(t, pow_mu, color='black')
ax2.plot(t, np.ones((len(t),))*thr_mu, color='red')
plt.ylabel('EMG Pow.')
plt.xlim((t[0], t[-1]))
ax3 = plt.subplot(413, sharex=ax2)
ax3.plot(t, pow_delta, color='black')
ax3.plot(t, np.ones((len(t),))*thr_delta, color='red')
plt.ylabel('Delta Pow.')
plt.xlim((t[0], t[-1]))
ax4 = plt.subplot(414, sharex=ax3)
ax4.plot(t, th_delta, color='black')
ax4.plot(t, np.ones((len(t),))*thr_th_delta1, color='red')
ax4.plot(t, np.ones((len(t),))*thr_th_delta2, color='pink')
ax4.plot(t, rem_idx*thr_th_delta1, color='blue')
plt.ylabel('Theta/Delta')
plt.xlabel('Time (s)')
plt.xlim((t[0], t[-1]))
plt.show(block=False)
# write config file
if psave:
cfile = os.path.join(ppath, idf + '_rem.txt')
fid = open(cfile, 'w')
fid.write(('IDF: %s'+os.linesep) % idf)
fid.write(('ch_alloc: %s'+os.linesep) % ch_alloc)
fid.write(('THR_DELTA: %.2f'+os.linesep) % thr_delta)
fid.write(('THR_MU: %.2f'+os.linesep) % thr_mu)
fid.write(('THR_TH_DELTA: %.2f %.2f'+os.linesep) % (thr_th_delta1, thr_th_delta2))
fid.write(('STD_THDELTA: %.2f'+os.linesep) % std_thdelta)
fid.write(('PAST_MU: %.2f'+os.linesep) % past_mu)
fid.write(('SF: %.2f'+os.linesep) % sf)
fid.write(('ALPHA: %.2f'+os.linesep) % alpha)
fid.write(('Bern: %.2f' + os.linesep) % 0.5)
if xemg:
fid.write(('XEMG: %d'+os.linesep) % 1)
else:
fid.write(('XEMG: %d' + os.linesep) % 0)
fid.close()
print('wrote file %s' % cfile)
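
# Hypothetical usage sketch (not part of the original module): typical order of
# calls for the closed-loop REM experiments. Folder and recording names are
# placeholders; recursive_sleepstate_rem() with psave=True writes the
# MOUSEID_rem.txt parameter file that recursive_sleepstate_rem_control() reads.
def _demo_closed_loop_rem(ppath='/path/to/recordings', recordings=('M1_rec1',)):
    recordings = list(recordings)
    recursive_sleepstate_rem(ppath, recordings, psave=True)
    recursive_sleepstate_rem_control(ppath, recordings, delay=120)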
def recursive_sleepstate_rem_control(ppath, recordings, past_len=120, sdt=2.5, delay=120):
"""
    algorithm running laser control for REM sleep dependent activation/inhibition.
$delay s after a detected REM sleep period, the laser is turned on for the same duration. If a new REM period starts,
    the laser stops, but we keep track of the missing time. The next time the laser turns on again,
it stays on for the duration of the most recent REM period + the remaining time.
    The algorithm for REM detection is the same as used for closed-loop REM sleep manipulation.
The function reads in the required parameters from the configuration file (MOUSEID_rem.txt)
The algorithm uses for REM sleep detection a threshold on delta power, EMG power, and theta/delta power.
For theta/delta I use two thresholds: A hard (larger) threshold and a soft (lower) threshold. Initially,
    theta/delta has to cross the hard threshold to initiate a REM period. Then, as long as
    theta/delta is above the soft threshold (and EMG power stays low), REM sleep continues.
@Parameters:
ppath base folder with recordings
recordings list of recordings
past_len window to calculate $past_mu
        sdt time bin for brain state, typically 2.5s
delay delay to wait after a REM sleep periods ends, till the laser is turned on.
"""
idf = re.split('_', recordings[0])[0]
past_len = int(np.round(past_len/sdt))
# load parameters
cfile = os.path.join(ppath, idf + '_rem.txt')
params = load_sleep_params(ppath, cfile)
thr_th_delta1 = params['THR_TH_DELTA'][0]
thr_th_delta2 = params['THR_TH_DELTA'][1]
thr_delta = params['THR_DELTA'][0]
thr_mu = params['THR_MU'][0]
alpha = params['ALPHA'][0]
sf = params['SF'][0]
past_mu = params['PAST_MU'][0]
xemg = params['XEMG'][0]
# calculate spectrogram
(SE, SM) = ([], [])
for rec in recordings:
A, B, freq = recursive_spectrogram(ppath, rec, sf=sf, alpha=alpha)
SE.append(A)
SM.append(B)
# fuse lists SE and SM
SE = np.squeeze(reduce(lambda x, y: np.concatenate((x, y)), SE))
if not xemg:
SM = np.squeeze(reduce(lambda x, y: np.concatenate((x, y)), SM))
else:
SM = SE
# EEG, EMG bands
ntbins = SE.shape[1]
r_delta = [0.5, 4]
r_theta = [5, 12]
# EMG band
r_mu = [300, 500]
i_delta = np.where((freq >= r_delta[0]) & (freq <= r_delta[1]))[0]
i_theta = np.where((freq >= r_theta[0]) & (freq <= r_theta[1]))[0]
i_mu = np.where((freq >= r_mu[0]) & (freq <= r_mu[1]))[0]
pow_delta = np.sum(SE[i_delta, :], axis=0)
pow_theta = np.sum(SE[i_theta, :], axis=0)
pow_mu = np.sum(SM[i_mu, :], axis=0)
th_delta = np.divide(pow_theta, pow_delta)
### The actual algorithm for REM detection
rem_idx = np.zeros((ntbins,))
prem = 0 # whether or not we are in REM
# NEW variables:
laser_idx = np.zeros((ntbins,))
delay = int(np.round(delay/sdt))
delay_count = 0
curr_rem_dur = 0
dur_count = 0
on_delay = False
laser_on = False
for i in range(ntbins):
if prem == 0 and pow_delta[i] < thr_delta and pow_mu[i] < thr_mu:
### could be REM
if th_delta[i] > thr_th_delta1:
### we are potentially entering REM
if (i - past_len) >= 0:
sstart = i - past_len
else:
sstart = 0
# count the percentage of brainstate bins with elevated EMG power
c_mu = np.sum(np.where(pow_mu[sstart:i] > thr_mu)[0]) / past_len
if c_mu < past_mu:
### we are in REM
prem = 1 # turn laser on
rem_idx[i] = 1
curr_rem_dur += 1 #NEW
# We are currently in REM; do we stay there?
if prem == 1:
### REM continues, if theta/delta is larger than soft threshold and if there's
### no EMG activation
if (th_delta[i] > thr_th_delta2) and (pow_mu[i] < thr_mu):
rem_idx[i] = 1
curr_rem_dur += 1
else:
prem = 0 # turn laser off
dur_count += curr_rem_dur #NEW
delay_count = delay #NEW
curr_rem_dur = 0 #NEW
on_delay = True #NEW
# NEW:
if on_delay:
if prem == 0:
delay_count -=1
if delay_count == 0:
laser_on = True
on_delay = False
if laser_on:
if prem == 0:
if dur_count >= 0:
dur_count -= 1
laser_idx[i] = 1
else:
laser_on = False
else:
laser_on = False
# plot the whole stuff:
# (1) spectrogram
# (2) EMG Power
# (3) Delta
# (4) TH_Delta
plt.figure()
t = np.arange(0, sdt*(ntbins-1)+sdt/2.0, sdt)
ax1 = plt.subplot(411)
im = np.where((freq>=0) & (freq<=30))[0]
med = np.median(SE.max(axis=0))
ax1.imshow(np.flipud(SE[im,:]), vmin=0, vmax=med*2)
plt.yticks(list(range(0, 31, 10)), list(range(30, -1, -10)))
plt.ylabel('Freq. (Hz)')
plt.axis('tight')
ax2 = plt.subplot(412)
ax2.plot(t, pow_mu, color='black')
ax2.plot(t, np.ones((len(t),))*thr_mu, color='red')
plt.ylabel('EMG Pow.')
plt.xlim((t[0], t[-1]))
ax3 = plt.subplot(413, sharex=ax2)
ax3.plot(t, pow_delta, color='black')
ax3.plot(t, np.ones((len(t),))*thr_delta, color='red')
plt.ylabel('Delta Pow.')
plt.xlim((t[0], t[-1]))
ax4 = plt.subplot(414, sharex=ax3)
ax4.plot(t, th_delta, color='black')
ax4.plot(t, np.ones((len(t),))*thr_th_delta1, color='red')
ax4.plot(t, np.ones((len(t),))*thr_th_delta2, color='pink')
ax4.plot(t, rem_idx*thr_th_delta1, color='green', label='REM')
ax4.plot(t, laser_idx * thr_th_delta1, color='blue', label='Laser')
plt.ylabel('Theta/Delta')
plt.xlabel('Time (s)')
plt.xlim((t[0], t[-1]))
plt.legend()
plt.show(block=False)
def load_sleep_params(path, param_file):
"""
load parameter file generated by &recursive_sleepstate_rem || &recursive_sleepstate_nrem
@Return:
Dictionary: Parameter --> Value
"""
fid = open(os.path.join(path, param_file), 'r')
lines = fid.readlines()
params = {}
for line in lines:
if re.match('^[\S_]+:', line):
a = re.split('\s+', line)
key = a[0][:-1]
params[key] = a[1:-1]
# transform number strings to floats
for k in params:
vals = params[k]
new_vals = []
for v in vals:
if re.match('^[\d\.]+$', v):
new_vals.append(float(v))
else:
new_vals.append(v)
params[k] = new_vals
return params
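
# Hypothetical sketch (not part of the original module): load_sleep_params() expects
# a plain-text file with one 'KEY: value(s)' entry per line, as written e.g. by
# recursive_sleepstate_rem(). The file name and values below are made up.
def _demo_load_sleep_params(tmpdir='.'):
    fname = 'demo_rem.txt'
    with open(os.path.join(tmpdir, fname), 'w') as fid:
        fid.write('IDF: M1' + os.linesep)
        fid.write('THR_DELTA: 123.45' + os.linesep)
        fid.write('THR_TH_DELTA: 1.50 1.00' + os.linesep)
    params = load_sleep_params(tmpdir, fname)
    # numeric strings come back as floats, e.g. params['THR_DELTA'][0] == 123.45
    return params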
def recursive_sleepstate_nrem(ppath, recordings, sf=0.3, alpha=0.3, std_thdelta = 1.5, sdt=2.5, psave=False, xemg=False):
"""
    predict NREM periods based only on EEG/EMG history; the same algorithm is also used for
closed-loop NREM sleep manipulation.
The algorithm uses for NREM sleep detection thresholds for delta power, EMG power, and theta/delta power.
    For delta I use two thresholds: a hard (larger) threshold and a soft (lower) threshold. Initially,
    delta power has to cross the hard threshold to initiate a NREM period. Then, as long as
    delta power stays above the soft threshold (and EMG power stays low), NREM sleep continues.
The values for hard and soft threshold are fitted using a Gaussian mixture model
:param ppath: base folder
:param recordings: list of recordings
:param sf: smoothing factor for each powerspectrum
    :param alpha: temporal smoothing factor (recursive low-pass filter constant)
:param std_thdelta: factor to set threshold for theta/delta
:param sdt: time step of brain state classification, typically 2.5 s
:param psave: save parameters to text file?
:param xemg: use EEG instead of EMG?
"""
    # to fit Gaussian mixture model to delta power distribution
from sklearn import mixture
idf = re.split('_', recordings[0])[0]
# calculate spectrogram
(SE, SM) = ([],[])
for rec in recordings:
A,B, freq = recursive_spectrogram(ppath, rec, sf=sf, alpha=alpha)
SE.append(A)
SM.append(B)
# fuse lists SE and SM
SE = np.squeeze(reduce(lambda x,y: np.concatenate((x,y)), SE))
if not xemg:
SM = np.squeeze(reduce(lambda x,y: np.concatenate((x,y)), SM))
else:
SM = SE
# EEG, EMG bands
ntbins = SE.shape[1]
r_delta = [0.5, 4]
r_theta = [5,12]
# EMG band
r_mu = [300, 500]
i_delta = np.where((freq >= r_delta[0]) & (freq <= r_delta[1]))[0]
i_theta = np.where((freq >= r_theta[0]) & (freq <= r_theta[1]))[0]
i_mu = np.where((freq >= r_mu[0]) & (freq <= r_mu[1]))[0]
pow_delta = np.sum(SE[i_delta,:], axis=0)
pow_theta = np.sum(SE[i_theta,:], axis=0)
pow_mu = np.sum(SM[i_mu,:], axis=0)
# theta/delta
th_delta = np.divide(pow_theta, pow_delta)
thr_th_delta1 = np.nanmean(th_delta) + std_thdelta*np.nanstd(th_delta)
thr_th_delta2 = np.nanmean(th_delta) + 0.0*np.nanstd(th_delta)
thr_mu = pow_mu.mean() + 0.5*np.nanstd(pow_mu)
med_delta = np.median(pow_delta)
pow_delta_fit = pow_delta[np.where(pow_delta<=3*med_delta)]
# fit Gaussian mixture model to delta power
# see http://www.astroml.org/book_figures/chapter4/fig_GMM_1D.html
gm = mixture.GaussianMixture(n_components=2)
fit = gm.fit(pow_delta_fit.reshape(-1, 1))
means = np.squeeze(fit.means_)
x = np.arange(0, med_delta*3, 100)
plt.figure()
    plt.hist(pow_delta_fit, 100, density=True, histtype='stepfilled', alpha=0.4)
logprob = fit.score_samples(x.reshape(-1,1))
responsibilities = fit.predict_proba(x.reshape((-1,1)))
pdf = np.exp(logprob)
pdf_individual = responsibilities * pdf[:, np.newaxis]
plt.plot(x, pdf, '-k')
plt.plot(x, pdf_individual, '--k')
plt.xlim((0, med_delta*3))
plt.ylabel('p(x)')
plt.xlabel('x = Delta power')
# get point where curves cut each other
if means[0] < means[1]:
idx = np.where((x>= means[0]) & (x<= means[1]))[0]
else:
idx = np.where((x >= means[1]) & (x <= means[0]))[0]
imin = np.argmin(pdf[idx])
xcut = x[idx[0]+imin]
plt.plot(xcut, pdf[idx[0]+imin], 'ro')
ilow = np.argmin(np.abs(x-means[0]))
plt.plot(x[ilow], pdf[ilow], 'bo')
ihigh = np.argmin(np.abs(x-means[1]))
plt.plot(x[ihigh], pdf[ihigh], 'go')
plt.show(block=False)
# set parameters for hard and soft delta thresholds
tmp = np.array([x[ihigh], xcut, x[ilow]])
tmp.sort()
thr_delta1 = tmp[-1] # x[ihigh]; right peak of distribution
thr_delta2 = tmp[1] # trough of distribution
# NREM yes or no according to thresholds
# However, this variable does not directly control whether laser should
# be on or off; whether NREM sleep is really on or off is determined
# by nrem_idx; if pnrem_hidden == 1, then all threshold critera, but not
# sleep history criteria are fulfilled
pnrem_hidden = 0
# if nrem_idx[i] == 1, time point i is NREM
nrem_idx = np.zeros((ntbins,), dtype='int8')
# NREM stays on after thresholds are NOT fulfilled to avoid interruptions by microarousals
grace_period = int(20 / sdt)
# nrem_delay: NREM only starts with some delay
nrem_delay = int(10 / sdt)
grace_count = grace_period
delay_count = nrem_delay
for i in range(ntbins):
if pnrem_hidden == 0:
### Entering NREM:
            # Delta power larger than high threshold
if pow_delta[i] > thr_delta1 and pow_mu[i] < thr_mu and th_delta[i] < thr_th_delta1:
### NOT-NREM -> NREM
pnrem_hidden = 1
nrem_idx[i] = 0
delay_count -= 1
# we are fully in NREM, that's why grace_count is reset:
grace_count = grace_period
else:
### NOT-NREM -> NOT-NREM
if grace_count > 0:
grace_count -= 1
nrem_idx[i] = 1
else:
nrem_idx[i] = 0
else:
### pnrem_hidden == 1
if pow_delta[i] > thr_delta2 and pow_mu[i] < thr_mu and th_delta[i] < thr_th_delta1:
if delay_count > 0:
delay_count -= 1
nrem_idx[i] = 0
else :
nrem_idx[i] = 1
else:
### Exit NREM -> NOT-NREM
                # we are fully out of NREM, so delay_count can be reset:
delay_count = nrem_delay
pnrem_hidden = 0
if grace_count > 0:
grace_count -= 1
nrem_idx[i] = 1
#### figure ##############################################
plt.figure()
t = np.arange(0, sdt * (ntbins - 1) + sdt / 2.0, sdt)
ax1 = plt.subplot(411)
im = np.where((freq >= 0) & (freq <= 30))[0]
med = np.median(SE.max(axis=0))
ax1.imshow(np.flipud(SE[im, :]), vmin=0, vmax=med * 2, cmap='jet')
ax1.pcolorfast(t, freq[im], np.flipud(SE[im, :]), vmin=0, vmax=med * 2, cmap='jet')
plt.yticks(list(range(0, 31, 10)), list(range(30, -1, -10)))
plt.ylabel('Freq. (Hz)')
plt.axis('tight')
ax2 = plt.subplot(412, sharex=ax1)
ax2.plot(t, pow_mu, color='black')
ax2.plot(t, np.ones((len(t),)) * thr_mu, color='red')
plt.ylabel('EMG Pow.')
plt.xlim((t[0], t[-1]))
ax3 = plt.subplot(413, sharex=ax2)
ax3.plot(t, pow_delta, color='black')
ax3.plot(t, np.ones((len(t),)) * thr_delta1, color='red')
ax3.plot(t, np.ones((len(t),)) * thr_delta2, color=[1, 0.6, 0.6])
ax3.plot(t, nrem_idx * thr_delta1, color=[0.6, 0.6, 0.6])
plt.ylabel('Delta Pow.')
plt.xlim((t[0], t[-1]))
ax4 = plt.subplot(414, sharex=ax3)
ax4.plot(t, th_delta, color='black')
ax4.plot(t, np.ones((len(t),)) * thr_th_delta1, color='red')
plt.ylabel('Theta/Delta')
plt.xlabel('Time (s)')
plt.xlim((t[0], t[-1]))
plt.show(block=False)
# Determine which channel is EEG, EMG
ch_alloc = get_infoparam(os.path.join(ppath, recordings[0], 'info.txt'), 'ch_alloc')[0]
# write config file
if psave:
cfile = os.path.join(ppath, idf + '_nrem.txt')
fid = open(cfile, 'w')
fid.write(('IDF: %s' + os.linesep) % idf)
fid.write(('ch_alloc: %s' + os.linesep) % ch_alloc)
fid.write(('THR_DELTA: %.2f %.2f' + os.linesep) % (thr_delta1, thr_delta2))
fid.write(('THR_MU: %.2f' + os.linesep) % thr_mu)
fid.write(('THR_TH_DELTA: %.2f %.2f' + os.linesep) % (thr_th_delta1, thr_th_delta2))
fid.write(('STD_THDELTA: %.2f' + os.linesep) % std_thdelta)
fid.write(('SF: %.2f' + os.linesep) % sf)
fid.write(('ALPHA: %.2f' + os.linesep) % alpha)
if xemg:
fid.write(('XEMG: %d' + os.linesep) % 1)
else:
fid.write(('XEMG: %d' + os.linesep) % 0)
fid.close()
print('wrote file %s' % cfile)
def rem_online_analysis(ppath, recordings, backup='', single_mode=False, fig_file='', overlap=0):
"""
analyze results from closed-loop experiments
:param ppath: base folder
    :param recordings: list of strings, recording names
:param backup: string, potential second backup folder with recordings
:param single_mode: boolean, if True, average across all REM periods (irrespective of mouse)
and plot each single REM period as dot
    :param overlap: float between 0 and 100; specifies the percentage by which the online detected REM period has to
           overlap with the real (annotated) REM period to be further considered for analysis;
if overlap == 0, then any overlap counts, i.e. this parameter has no influence
:return: df, pd.DataFrame, with control and experimental REM durations as data columns
"""
if type(recordings) != list:
recordings = [recordings]
overlap = overlap / 100.0
paths = dict()
for rec in recordings:
if os.path.isdir(os.path.join(ppath, rec)):
paths[rec] = ppath
else:
paths[rec] = backup
mice = dict()
for rec in recordings:
idf = re.split('_', rec)[0]
if not idf in mice:
mice[idf] = 1
mice = list(mice.keys())
if len(mice) == 1:
single_mode=True
dur_exp = {m:[] for m in mice}
dur_ctr = {m:[] for m in mice}
for rec in recordings:
idf = re.split('_', rec)[0]
M,S = load_stateidx(paths[rec], rec)
sr = get_snr(paths[rec], rec)
nbin = int(np.round(sr)*2.5)
dt = (1.0/sr)*nbin
laser = load_laser(paths[rec], rec)
rem_trig = so.loadmat(os.path.join(paths[rec], rec, 'rem_trig_%s.mat'%rec), squeeze_me=True)['rem_trig']
laser = downsample_vec(laser, nbin)
laser[np.where(laser>0)] = 1
rem_trig = downsample_vec(rem_trig, nbin)
rem_trig[np.where(rem_trig>0)] = 1
laser_idx = np.where(laser==1)[0]
rem_idx = np.where(rem_trig==1)[0]
# REM sequences from offline analysis (assumed to be the
# "ground truth"
seq = get_sequences(np.where(M==1)[0])
for s in seq:
# check true REM sequences overlapping with online detected sequences
isect = np.intersect1d(s, rem_idx)
#print(len(isect)/ len(s))
# test if real REM period s overlaps with online detected REM periods and,
# if yes, make sure that the overlap is at least overlap *100 percent
if len(np.intersect1d(s, rem_idx)) > 0 and float(len(isect)) / len(s) >= overlap:
drn = (s[-1]-s[0]+1)*dt
# does the sequence overlap with laser?
if len(np.intersect1d(isect, laser_idx))>0:
dur_exp[idf].append(drn)
else:
dur_ctr[idf].append(drn)
data = {'exp':[], 'ctr':[]}
# if single_mode put all REM periods together,
# otherwise average across REM periods for each mouse
if len(mice) == 1 or single_mode==True:
for m in mice:
data['exp'] += dur_exp[m]
data['ctr'] += dur_ctr[m]
else:
for idf in dur_ctr:
dur_ctr[idf] = np.array(dur_ctr[idf]).mean()
dur_exp[idf] = np.array(dur_exp[idf]).mean()
data['exp'] = np.array(list(dur_exp.values()))
data['ctr'] = np.array(list(dur_ctr.values()))
df = pd.DataFrame({'ctr':pd.Series(data['ctr']), 'exp' : pd.Series(data['exp'])})
# plot everything
if not single_mode:
plt.ion()
plt.figure()
ax = plt.axes([0.2, 0.15, 0.3, 0.7])
df_mean = df.mean()
plt.bar([1], [df_mean['ctr']], color='grey', label='W/o Laser')
plt.bar([2], [df_mean['exp']], color='blue', label='With laser')
plt.xticks([1,2])
box_off(ax)
#ax.set_xticklabels(['ctr', 'exp'], rotation=30)
plt.ylabel('REM duration (s)')
for (a,b) in zip(df['ctr'], df['exp']):
plt.plot([1,2], [a,b], color='black')
plt.legend(bbox_to_anchor=(0., 1.0, 1., .102), loc=3, mode='expand', ncol=1, frameon=False)
else:
plt.figure()
ax = plt.axes([0.2, 0.15, 0.3, 0.7])
df_mean = df.mean()
plt.bar([1], [df_mean['ctr']], color='grey')
plt.bar([2], [df_mean['exp']], color='blue')
plt.xticks([1,2])
box_off(ax)
#ax.set_xticklabels(['ctr', 'exp'], rotation=30)
plt.ylabel('REM duration (s)')
a = df['ctr']
b = df['exp']
plt.plot(np.ones((len(a),)), a, '.', color='black', label='W/o Laser')
plt.plot(2*np.ones((len(b),)), b, '.', color='black', label='With laser')
plt.legend(bbox_to_anchor=(0., 1.0, 1., .102), loc=3, mode='expand', ncol=1, frameon=False)
plt.show()
if len(fig_file) > 0:
save_figure(fig_file)
return df
def online_homeostasis(ppath, recordings, backup='', mode=0, single_mode=False, pplot=True, overlap=0, ma_thr=0):
"""
Further analysis of data obtained from closed loop stimulation
Assume the sleep structure looks like this
R R R R W W N N N N N W W N N N N R R R R R
REM_pre -- inter REM ---- REM_post
REM_pre is the duration of the first REM period, inter-REM is everything between REM_pre and the
next REM period REM_post.
The function calculates the inter REM duration after REM periods with laser and after REM periods w/o laser
:param ppath: base folder
:param recordings: list of recording, or file listing
:param backup: backup folder for $ppath
:param mode: mode == 0, calculate complete inter REM duration
mode == 2, only calculate duration of wake in inter REM periods
mode == 3, only calculate duration of NREM in inter REM periods
:param single_mode: consider each single recording, instead of mice
    :param overlap: percentage (number between 0 and 100). Defines by how much (in percent)
           a true (offline annotated) REM period has to overlap with laser
to be considered as REM sleep with laser.
Of note, REM periods w/o laser have to have 0 overlap with laser.
All remaining REM periods are discarded.
:param pplot: if True, plot figure; errorbars show 95% confidence intervals,
calculated using bootstrapping
    :param ma_thr: if > 0, wake sequences shorter than $ma_thr seconds are counted as microarousals and set to NREM
:return: df, if single_mode == True $df is a pandas DataFrame:
REM iREM laser
mouse - mouse ID
REM - REM duration
iREM - inter REM duration after REM periods with laser
laser - 'y' or 'n'; depending on whether laser was on during REM sleep period (for "REM") or during the
preceding REM sleep period (for "iREM")
if single_mode == False, mouse is the data frame index
"""
if type(recordings) != list:
recordings = [recordings]
if overlap > 0:
overlap = overlap / 100
paths = dict()
for rec in recordings:
if os.path.isdir(os.path.join(ppath, rec)):
paths[rec] = ppath
else:
paths[rec] = backup
mice = dict()
for rec in recordings:
idf = re.split('_', rec)[0]
if not idf in mice:
mice[idf] = 1
mice = list(mice.keys())
if len(mice) == 1:
single_mode=True
remdur_exp = {m:[] for m in mice}
remdur_ctr = {m:[] for m in mice}
itdur_exp = {m:[] for m in mice}
itdur_ctr = {m:[] for m in mice}
for rec in recordings:
idf = re.split('_', rec)[0]
M = load_stateidx(paths[rec], rec)[0]
sr = get_snr(paths[rec], rec)
nbin = int(np.round(sr)*2.5)
dt = (1.0/sr)*nbin
if ma_thr>0:
seq = get_sequences(np.where(M==2)[0])
for s in seq:
if len(s)*dt <= ma_thr:
M[s] = 3
laser = load_laser(paths[rec], rec)
rem_trig = so.loadmat(os.path.join(paths[rec], rec, 'rem_trig_%s.mat' % rec), squeeze_me=True)['rem_trig']
laser = downsample_vec(laser, nbin)
laser[np.where(laser>0)] = 1
rem_trig = downsample_vec(rem_trig, nbin)
rem_trig[np.where(rem_trig>0)] = 1
laser_idx = np.where(laser==1)[0]
rem_idx = np.where(rem_trig==1)[0]
# REM sequences from offline analysis (assumed to be the
# "ground truth"
seq = get_sequences(np.where(M==1)[0])
for (p,q) in zip(seq[0:-1], seq[1:]):
# check if true REM sequences do overlap with online detected sequences
# and only continue working with those:
if len(np.intersect1d(p, rem_idx)) > 0:
drn = (p[-1]-p[0]+1)*dt
it_M = M[p[-1]+1:q[0]]
if mode == 0:
it_drn = len(it_M)*dt
elif mode == 2:
it_drn = len(np.where(it_M==2)[0]) * dt
else:
it_drn = len(np.where(it_M == 3)[0]) * dt
# does the true REM sequence overlap with laser?
# by setting overlap to a value > 0, you can
# set a percentage how much the REM period should overlap with laser
# NEW 08/26/21
if len(np.intersect1d(p, laser_idx)) / len(p) > overlap:
remdur_exp[idf].append(drn)
itdur_exp[idf].append(it_drn)
elif len(np.intersect1d(p, laser_idx)) == 0:
remdur_ctr[idf].append(drn)
itdur_ctr[idf].append(it_drn)
else:
pass
# if single_mode put all REM periods together,
# otherwise average across REM periods for each mouse
if len(mice) == 1 or single_mode==True:
data = {'itexp':[], 'itctr':[], 'remexp':[], 'remctr':[]}
for m in mice:
data['itexp'] += itdur_exp[m]
data['itctr'] += itdur_ctr[m]
data['remexp'] += remdur_exp[m]
data['remctr'] += remdur_ctr[m]
df = pd.DataFrame({'REM': data['remexp']+data['remctr'], 'iREM':data['itexp']+data['itctr'], 'laser': ['y']*len(data['remexp']) + ['n']*len(data['remctr'])})
else:
for idf in mice:
itdur_ctr[idf] = np.array(itdur_ctr[idf]).mean()
itdur_exp[idf] = np.array(itdur_exp[idf]).mean()
remdur_ctr[idf] = np.array(remdur_ctr[idf]).mean()
remdur_exp[idf] = np.array(remdur_exp[idf]).mean()
data = {}
for s in ['itexp', 'itctr', 'remexp', 'remctr']:
data[s] = np.zeros((len(mice),))
i = 0
for m in mice:
data['itexp'][i] = itdur_exp[m]
data['itctr'][i] = itdur_ctr[m]
data['remexp'][i] = remdur_exp[m]
data['remctr'][i] = remdur_ctr[m]
i += 1
df = pd.DataFrame({'REM': np.concatenate((data['remexp'], data['remctr'])),
'iREM': np.concatenate((data['itexp'], data['itctr'])),
'laser': ['y']*len(mice) + ['n']*len(mice),
'mouse': mice+mice})
if pplot and not single_mode:
dfm = pd.melt(df, id_vars=['laser', 'mouse'], var_name='state')
sns.set_style('whitegrid')
plt.ion()
plt.figure()
sns.barplot(data=dfm, hue='laser', x='state', y='value', palette=['blue', 'gray'])
sns.swarmplot(data=dfm, hue='laser', x='state', y='value', dodge=True, color='black')
sns.despine()
plt.ylabel('Duration (s)')
if pplot and single_mode:
dfm = pd.melt(df, id_vars=['laser'], var_name='state')
plt.ion()
plt.figure()
sns.set(style="whitegrid")
#sns.swarmplot(data=df[['itctr', 'itexp']], color='black')
#sns.barplot(data=df[['itctr', 'itexp']], palette=['gray', 'blue'], errcolor='black')
sns.barplot(data=dfm, hue='laser', x='state', y='value', palette=['blue', 'gray'])
sns.swarmplot(data=dfm, hue='laser', x='state', y='value', dodge=True, color='black')
sns.despine()
plt.ylabel('Duration (s)')
return df
### FUNCTIONS USED BY SLEEP_STATE #####################################################
def get_sequences(idx, ibreak=1) :
"""
get_sequences(idx, ibreak=1)
idx - np.vector of indices
@RETURN:
seq - list of np.vectors
"""
diff = idx[1:] - idx[0:-1]
breaks = np.nonzero(diff>ibreak)[0]
breaks = np.append(breaks, len(idx)-1)
seq = []
iold = 0
for i in breaks:
r = list(range(iold, i+1))
seq.append(idx[r])
iold = i+1
return seq
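
# Hypothetical sketch (not part of the original module): get_sequences() splits an
# index vector into runs of (nearly) consecutive indices, allowing gaps up to $ibreak.
def _demo_get_sequences():
    idx = np.array([0, 1, 2, 10, 11, 20])
    seq = get_sequences(idx)
    # -> [array([0, 1, 2]), array([10, 11]), array([20])]
    return seq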
def threshold_crossing(data, th, ilen, ibreak, m):
"""
seq = threshold_crossing(data, th, ilen, ibreak, m)
"""
if m>=0:
idx = np.where(data>=th)[0]
else:
idx = np.where(data<=th)[0]
# gather sequences
j = 0
seq = []
while (j <= len(idx)-1):
s = [idx[j]]
for k in range(j+1,len(idx)):
if (idx[k] - idx[k-1]-1) <= ibreak:
# add j to sequence
s.append(idx[k])
else:
break
if (s[-1] - s[0]+1) >= ilen and not(s[0] in [i[1] for i in seq]):
seq.append((s[0], s[-1]))
if j == len(idx)-1:
break
j=k
return seq
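
# Hypothetical sketch (not part of the original module): threshold_crossing() returns
# (start, end) index pairs of stretches where the signal stays above (m >= 0) or
# below (m < 0) threshold th, tolerating gaps of up to ibreak bins and keeping only
# stretches spanning at least ilen bins.
def _demo_threshold_crossing():
    data = np.array([0, 5, 6, 7, 0, 0, 8, 9, 0, 0])
    seq = threshold_crossing(data, th=4, ilen=2, ibreak=1, m=1)
    # -> [(1, 3), (6, 7)]
    return seq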
def closest_precessor(seq, i):
"""
find the preceding element in seq which is closest to i
helper function for sleep_state
"""
    tmp = seq - i
    d = np.where(tmp < 0)[0]
    if len(d) > 0:
        id = seq[d[-1]]
    else:
        id = 0
return id
def write_remidx(M, K, ppath, name, mode=1) :
"""
rewrite_remidx(idx, states, ppath, name)
replace the indices idx in the remidx file of recording name
with the assignment given in states
"""
if mode == 0 :
outfile = os.path.join(ppath, name, 'remidx_' + name + '.txt')
else :
outfile = os.path.join(ppath, name, 'remidx_' + name + '_corr.txt')
f = open(outfile, 'w')
s = ["%d\t%d\n" % (i,j) for (i,j) in zip(M[0,:],K)]
f.writelines(s)
f.close()
#######################################################################################
### MANIPULATING FIGURES ##############################################################
def set_fontsize(fs):
import matplotlib
matplotlib.rcParams.update({'font.size': fs})
def set_fontarial():
"""
set Arial as default font
"""
import matplotlib
matplotlib.rcParams['font.sans-serif'] = "Arial"
def save_figure(fig_file):
# alternative way of setting nice fonts:
#matplotlib.rcParams['pdf.fonttype'] = 42
#matplotlib.rcParams['ps.fonttype'] = 42
#matplotlib.pylab.savefig(fig_file, dpi=300)
#matplotlib.rcParams['text.usetex'] = False
#matplotlib.rcParams['text.usetex'] = True
plt.savefig(fig_file, bbox_inches="tight", dpi=200)
#matplotlib.rcParams['text.usetex'] = False
def box_off(ax):
"""
similar to Matlab's box off
"""
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
#######################################################################################
def sleep_state(ppath, name, th_delta_std=1, mu_std=0, sf=1, sf_delta=3, pwrite=0,
pplot=True, pemg=True, vmax=2.5, pspec_norm=False, use_idx=[]):
"""
automatic sleep state detection based on
delta, theta, sigma, gamma and EMG power.
New: use also sigma band: that's very helpful to classify pre-REM periods
as NREM; otherwise they tend to be classified as wake.
Gamma peaks nicely pick up during microarousals.
My strategy is the following:
I smooth delta band a lot to avoid strong fragmentation of sleep; but to
still pick up microarousals I use the gamma power.
spectrogram data has to be calculated before using calculate_spectrum
Each bin in the spectrogram gets assigned one of four states:
1-REM
2-Wake
3-NREM
0-undef
:param ppath: base folder
:param name: recording name
:param th_delta_std: threshold for theta/delta band is calculated as mean(theta/delta) + th_delta_std*std(theta/delta)
    :param mu_std: threshold for EMG power is calculated as mean(EMG) + mu_std * std(EMG)
:param sf: smoothing factor for gamma and sigma power
:param sf_delta: smoothing factor for delta power
:param pwrite: if True, save sleep classification to file remidx_$name.txt
:param pplot: if True, plot figures
:param pemg: if True, use EMG as EMG, otherwise use EEG gamma power instead
:param vmax: float, set maximum of color range of EEG heatmap.
    :param pspec_norm: boolean, if True, normalize the EEG spectrogram by dividing each frequency band by its mean; only affects
plotting, no effect on sleep state calculation
:param use_idx: list, if not empty, use only given indices to calculate sleep state
:return:
"""
PRE_WAKE_REM = 30.0
# Minimum Duration and Break in
# high theta/delta, high emg, high delta, high sigma and gamma sequences
#
# duration[i,0] is the minimum duration of sequence of state i
# duration[i,1] is maximal break duration allowed in a sequence of state i
duration = np.zeros((5,2))
# high theta/delta
duration[0,:] = [5,15]
# high emg
duration[1,:] = [0, 5]
# high delta
duration[2,:] = [10, 10]
# high sigma
duration[3,:] = [10, 10]
# gamma
duration[4,:] = [0, 5]
# Frequency Bands/Ranges for delta, theta, and, gamma
r_delta = [0.5, 4]
r_sigma = [12, 20]
r_theta = [5,12]
# EMG band
r_mu = [50, 500]
if not pemg:
r_mu = [250, 500]
# high gamma power
r_gamma = [100, 150]
#load EEG and EMG spectrum, calculated by calculate_spectrum
P = so.loadmat(os.path.join(ppath, name, 'sp_' + name + '.mat'))
if pemg:
Q = so.loadmat(os.path.join(ppath, name, 'msp_' + name + '.mat'))
else:
Q = so.loadmat(os.path.join(ppath, name, 'sp_' + name + '.mat'))
SPEEG = np.squeeze(P['SP'])
if pemg == 1:
SPEMG = np.squeeze(Q['mSP'])
else:
SPEMG = np.squeeze(P['SP'])
if use_idx == []:
use_idx = range(0, SPEEG.shape[1])
freq = np.squeeze(P['freq'])
t = np.squeeze(P['t'])
dt = float(np.squeeze(P['dt']))
N = len(t)
duration = np.divide(duration,dt)
# get indices for frequency bands
i_delta = np.where((freq >= r_delta[0]) & (freq <= r_delta[1]))[0]
i_theta = np.where((freq >= r_theta[0]) & (freq <= r_theta[1]))[0]
i_mu = np.where((freq >= r_mu[0]) & (freq <= r_mu[1]))[0]
i_sigma = np.where((freq >= r_sigma[0]) & (freq <= r_sigma[1]))[0]
i_gamma = np.where((freq >= r_gamma[0]) & (freq <= r_gamma[1]))[0]
p_delta = smooth_data( SPEEG[i_delta,:].mean(axis=0), sf_delta )
p_theta = smooth_data( SPEEG[i_theta,:].mean(axis=0), 0 )
# now filtering for EMG to pick up microarousals
p_mu = smooth_data( SPEMG[i_mu,:].mean(axis=0), sf )
p_sigma = smooth_data( SPEEG[i_sigma,:].mean(axis=0), sf )
p_gamma = smooth_data( SPEEG[i_gamma,:].mean(axis=0), 0 )
th_delta = np.divide(p_theta, p_delta)
#th_delta = smooth_data(th_delta, 2);
seq = {}
seq['high_theta'] = threshold_crossing(th_delta, np.nanmean(th_delta[use_idx])+th_delta_std*np.nanstd(th_delta[use_idx]),
duration[0,1], duration[0,1], 1)
seq['high_emg'] = threshold_crossing(p_mu, np.nanmean(p_mu[use_idx])+mu_std*np.nanstd(p_mu[use_idx]),
duration[1,0], duration[1,1], 1)
seq['high_delta'] = threshold_crossing(p_delta, np.nanmean(p_delta[use_idx]), duration[2,0], duration[2,1], 1)
seq['high_sigma'] = threshold_crossing(p_sigma, np.nanmean(p_sigma[use_idx]), duration[3,0], duration[3,1], 1)
seq['high_gamma'] = threshold_crossing(p_gamma, np.nanmean(p_gamma[use_idx]), duration[4,0], duration[4,1], 1)
# Sleep-State Rules
idx = {}
for k in seq:
tmp = [list(range(i,j+1)) for (i,j) in seq[k]]
        # no idea why this works to flatten a list
# idx[k] = sum(tmp, [])
# alternative that I understand:
if len(tmp) == 0:
idx[k] = np.array([])
else:
idx[k] = np.array(reduce(lambda x,y: x+y, tmp))
idx['low_emg'] = np.setdiff1d(np.arange(0,N), np.array(idx['high_emg']))
idx['low_delta'] = np.setdiff1d(np.arange(0,N), np.array(idx['high_delta']))
idx['low_theta'] = np.setdiff1d(np.arange(0,N), np.array(idx['high_theta']))
#REM Sleep: thdel up, emg down, delta down
a = np.intersect1d(idx['high_theta'], idx['low_delta'])
# non high_emg phases
b = np.setdiff1d(a, idx['high_emg'])
rem = get_sequences(b, duration[0,1])
rem_idx = reduce(lambda x,y: np.concatenate((x,y)), rem)
# SWS Sleep
# delta high, no theta, no emg
a = np.setdiff1d(idx['high_delta'], idx['high_emg']) # no emg activation
b = np.setdiff1d(a, idx['high_theta']) # no theta;
sws = get_sequences(b)
sws_idx = reduce(lambda x,y: np.concatenate((x,y)), sws)
#print a
# Wake
# low delta + high emg and not rem
a = np.unique(np.union1d(idx['low_delta'], idx['high_emg']))
b = np.setdiff1d(a, rem_idx)
wake = get_sequences(b)
wake_idx = reduce(lambda x,y: np.concatenate((x,y)), wake)
# sequences with low delta, high sigma and low emg are NREM
a = np.intersect1d(np.intersect1d(idx['high_sigma'], idx['low_delta']), idx['low_emg'])
a = np.setdiff1d(a, rem_idx)
sws_idx = np.unique(np.union1d(a, sws_idx))
wake_idx = np.setdiff1d(wake_idx, a)
#NREM sequences with high gamma are wake
a = np.intersect1d(sws_idx, idx['high_gamma'])
sws_idx = np.setdiff1d(sws_idx, a)
wake_idx = np.unique(np.union1d(wake_idx,a))
# Wake and Theta
wake_motion_idx = np.intersect1d(wake_idx, idx['high_theta'])
# Wake w/o Theta
wake_nomotion_idx = np.setdiff1d(wake_idx, idx['low_theta'])
# Are there overlapping sequences?
a = np.intersect1d(np.intersect1d(rem_idx, wake_idx), sws_idx)
# Are there undefined sequences?
undef_idx = np.setdiff1d(np.setdiff1d(np.setdiff1d(np.arange(0,N), rem_idx), wake_idx), sws_idx)
# Wake wins over SWS
sws_idx = np.setdiff1d(sws_idx, wake_idx)
# Special rules
# if there's a REM sequence directly following a short wake sequence (PRE_WAKE_REM),
# this wake sequence goes to SWS
    # NREM to REM transitions are sometimes mistaken as quiet wake periods
for rem_seq in rem:
if len(rem_seq) > 0:
irem_start = rem_seq[0]
# is there wake in the preceding bin?
if irem_start-1 in wake_idx:
# get the closest sws bin in the preceding history
isws_end = closest_precessor(sws_idx, irem_start)
if (irem_start - isws_end)*dt < PRE_WAKE_REM:
new_rem = np.arange(isws_end+1,irem_start)
rem_idx = np.union1d(rem_idx, new_rem)
wake_idx = np.setdiff1d(wake_idx, new_rem)
else:
new_wake = rem_seq
wake_idx = np.union1d(wake_idx, new_wake)
rem_idx = np.setdiff1d(rem_idx, new_wake)
# two different representations for the results:
S = {}
S['rem'] = rem_idx
S['nrem'] = sws_idx
S['wake'] = wake_idx
S['awake'] = wake_motion_idx
S['qwake'] = wake_nomotion_idx
M = np.zeros((N,))
if len(rem_idx) > 0:
M[rem_idx] = 1
if len(wake_idx) > 0:
M[wake_idx] = 2
if len(sws_idx) > 0:
M[sws_idx] = 3
if len(undef_idx) > 0:
M[undef_idx] = 0
# write sleep annotation to file
if pwrite:
outfile = os.path.join(ppath, name, 'remidx_' + name + '.txt')
print("writing annotation to %s" % outfile)
f = open(outfile, 'w')
s = ["%d\t%d\n" % (i,j) for (i,j) in zip(M,np.zeros((N,)))]
f.writelines(s)
f.close()
# nice plotting
plt.ion()
if pplot:
plt.figure(figsize=(18,9))
axes1=plt.axes([0.1, 0.9, 0.8, 0.05])
A = np.zeros((1,len(M)))
A[0,:] = M
cmap = plt.cm.jet
my_map = cmap.from_list('ha', [[0,0,0], [0,1,1],[0.5,0,1], [0.8, 0.8, 0.8]], 4)
#tmp = axes1.imshow(A, vmin=0, vmax=3)
tmp = axes1.pcolorfast(t, [0,1], A, vmin=0, vmax=3)
tmp.set_cmap(my_map)
axes1.axis('tight')
tmp.axes.get_xaxis().set_visible(False)
tmp.axes.get_yaxis().set_visible(False)
box_off(axes1)
# show spectrogram
axes2=plt.axes([0.1, 0.75, 0.8, 0.1], sharex=axes1)
ifreq = np.where(freq <= 30)[0]
med = np.median(SPEEG.max(axis=0))
if pspec_norm:
ifreq = np.where(freq <= 80)[0]
filt = np.ones((6, 1))
filt = filt / np.sum(filt)
SPEEG = scipy.signal.convolve2d(SPEEG, filt, mode='same')
spec_mean = SPEEG.mean(axis=1)
SPEEG = np.divide(SPEEG, np.repeat([spec_mean], SPEEG.shape[1], axis=0).T)
med = np.median(SPEEG.max(axis=0))
axes2.pcolorfast(t, freq[ifreq], SPEEG[ifreq, :], vmax = med*vmax, cmap='jet')
else:
axes2.pcolorfast(t, freq[ifreq], SPEEG[ifreq, :], vmax=med * vmax, cmap='jet')
axes2.axis('tight')
plt.ylabel('Freq (Hz)')
box_off(axes2)
# show delta power
axes3=plt.axes([0.1, 0.6, 0.8, 0.1], sharex=axes2)
axes3.plot(t,p_delta, color='gray')
plt.ylabel('Delta (a.u.)')
plt.xlim((t[0], t[-1]))
seq = get_sequences(S['nrem'])
#for s in seq:
# plt.plot(t[s],p_delta[s], color='red')
s = idx['high_delta']
seq = get_sequences(s)
for s in seq:
plt.plot(t[s],p_delta[s], color='red')
box_off(axes3)
axes4=plt.axes([0.1, 0.45, 0.8, 0.1], sharex=axes3)
axes4.plot(t,p_sigma, color='gray')
plt.ylabel('Sigma (a.u.)')
plt.xlim((t[0], t[-1]))
s = idx['high_sigma']
seq = get_sequences(s)
for s in seq:
plt.plot(t[s],p_sigma[s], color='red')
box_off(axes4)
axes5=plt.axes([0.1, 0.31, 0.8, 0.1], sharex=axes4)
axes5.plot(t,th_delta, color='gray')
plt.ylabel('Th/Delta (a.u.)')
plt.xlim((t[0], t[-1]))
s = idx['high_theta']
seq = get_sequences(s)
for s in seq:
plt.plot(t[s],th_delta[s], color='red')
box_off(axes5)
axes6=plt.axes([0.1, 0.17, 0.8, 0.1], sharex=axes5)
axes6.plot(t,p_gamma, color='gray')
plt.ylabel('Gamma (a.u.)')
plt.xlim((t[0], t[-1]))
s = idx['high_gamma']
seq = get_sequences(s)
for s in seq:
plt.plot(t[s],p_gamma[s], color='red')
box_off(axes6)
axes7=plt.axes([0.1, 0.03, 0.8, 0.1], sharex=axes6)
axes7.plot(t,p_mu, color='gray')
plt.xlabel('Time (s)')
plt.ylabel('EMG (a.u.)')
plt.xlim((t[0], t[-1]))
s = idx['high_emg']
seq = get_sequences(s)
for s in seq:
plt.plot(t[s],p_mu[s], color='red')
box_off(axes7)
plt.show()
# 2nd figure showing distribution of different bands
plt.figure(figsize=(20,3))
axes1 = plt.axes([0.05, 0.1, 0.13, 0.8])
plt.hist(p_delta, bins=100)
plt.plot(np.nanmean(p_delta), 10, 'ro')
plt.title('delta')
        plt.ylabel('# Occurrences')
box_off(axes1)
axes1 = plt.axes([0.25, 0.1, 0.13, 0.8])
plt.hist(th_delta, bins=100)
plt.plot(np.nanmean(th_delta)+th_delta_std*np.nanstd(th_delta), 10, 'ro')
plt.title('theta/delta')
box_off(axes1)
axes1 = plt.axes([0.45, 0.1, 0.13, 0.8])
plt.hist(p_sigma, bins=100)
plt.plot(np.nanmean(p_sigma), 10, 'ro')
plt.title('sigma')
box_off(axes1)
axes1 = plt.axes([0.65, 0.1, 0.13, 0.8])
plt.hist(p_gamma, bins=100)
plt.plot(np.nanmean(p_gamma), 10, 'ro')
plt.title('gamma')
box_off(axes1)
axes1 = plt.axes([0.85, 0.1, 0.13, 0.8])
plt.hist(p_mu, bins=100)
plt.plot(np.nanmean(p_mu)+np.nanstd(p_mu), 10, 'ro')
plt.title('EMG')
plt.show(block=False)
box_off(axes1)
plt.show()
return M,S
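
# Hypothetical usage sketch (not part of the original module): typical order of calls
# for automatic scoring of one recording; folder and recording names are placeholders.
# calculate_spectrum() writes the sp_/msp_ .mat files that sleep_state() requires.
def _demo_score_recording(ppath='/path/to/recordings', name='mouse1_rec1'):
    calculate_spectrum(ppath, name, fres=0.5)
    M, S = sleep_state(ppath, name, pwrite=1, pplot=False)
    return M, S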
def plot_hypnograms(ppath, recordings, tbin=0, unit='h', ma_thr=20, title='', tstart=0, tend=-1):
"""
plot all hypnograms specified in @recordings
:param ppath: base folder
:param recordings: list of recordings
:param tbin: tbin for xticks
:param unit: time unit; h - hour, min - minute, s - second
:param ma_thr: float, wake periods shorter than $ma_thr are considered as microarousals and further converted to NREM
:param tstart: float, start time point (in seconds!) of hypnograms
:param tend: float, last shown time point (in seconds!)
:param title: optional title for figure
"""
recordings = recordings[::-1]
sr = get_snr(ppath, recordings[0])
nbin = int(np.round(sr) * 2.5)
dt_sec = (1.0 / sr) * nbin
istart = int(np.round(tstart/dt_sec))
dt = dt_sec
if unit == 'h':
dt /= 3600
elif unit == 'min':
dt /= 60
rec_len = dict()
irec = 0
ny = (1.0-0.2) / len(recordings)
dy = ny * 0.75
cmap = plt.cm.jet
my_map = cmap.from_list('brs', [[0, 0, 0], [0, 1, 1], [0.5, 0, 1], [0.8, 0.8, 0.8]], 4)
plt.ion()
plt.figure(figsize=(9,4))
axes = []
for rec in recordings:
M,K = load_stateidx(ppath, rec)
#kcut = np.where(K<0)[0]
#M = M[kcut]
#M[kcut] = 0
if tend == -1:
iend = len(M)
else:
iend = int(tend/dt_sec)
M = M[istart:iend]
if ma_thr>0:
seq = get_sequences(np.where(M==2)[0])
for s in seq:
if len(s)*dt_sec <= ma_thr:
M[s] = 3
rec_len[rec] = len(M)*dt
t = np.arange(0, len(M))*dt
ax = plt.axes([0.05, ny*irec+0.15, 0.75, dy])
tmp = ax.pcolorfast(t, [0, 1], np.array([M]), vmin=0, vmax=3, cmap=my_map)
box_off(ax)
ax.axis('tight')
tmp.axes.get_yaxis().set_visible(False)
if irec > 0:
tmp.axes.get_xaxis().set_visible(False)
if irec == 0:
plt.xlabel('Time (%s)' % unit)
irec += 1
axes.append(ax)
if len(title) > 0:
plt.title(title)
max_dur = max(rec_len.values())
if tbin > 0:
xtick = np.arange(0, max_dur, tbin)
for (ax, rec) in zip(axes, recordings):
ax.set_xlim([0, max_dur])
if tbin > 0:
ax.set_xticks(xtick)
ax.text(max_dur+max_dur*0.01, 0.5, rec)
plt.show()
def plot_swa(ppath, name, delta_win, alpha, band=[0.5, 4.5], swa_yrange=[]):
"""
plot slow wave (delta) activity during NREM
    The top plot shows the hypnogram.
The middle plot shows the delta power (irrespective of brain state) as line plot
The bottom plot shows for consecutive $delta_win seconds long bins, the
    median delta power (SWA) during NREM, if the ratio of NREM during the
corresponding bin >= $alpha
Example call:
dm=plot_swa(ppath, name, 30, 0.5, swa_yrange=[0, 0.012])
:param ppath, name: basefolder, recording name
:param delta_win: plot median swa value for each consecutive $delta_win seconds long window, if
:param alpha: the ratio of NREM in this window is larger than alpha (value between 0 and 1)
    :param swa_yrange: tuple, minimum and maximum value of yrange for SWA
:return df: pd.DataFrame with SWA time points and corresponding median SWA values
"""
r_delta = band
sr = get_snr(ppath, name)
nbin = int(np.round(2.5*sr))
dt = nbin*(1.0/sr)
M,_ = load_stateidx(ppath, name)
t = np.arange(0, len(M))*dt
P = so.loadmat(os.path.join(ppath, name, 'sp_%s.mat' % name), squeeze_me=True)
SP = P['SP']
freq = P['freq']
df = freq[1]-freq[0]
idelta = np.where((freq>=r_delta[0]) & (freq<=r_delta[1]))[0]
pow_delta = SP[idelta,:].sum(axis=0)*df
# get NREM sequences contributing points for fitting
iwin = int(delta_win/dt)
#seq = get_sequences(nrem_idx, ibreak=int((delta_win/dt)*0.1))
delta_med = []
for j in range(0, len(M)-iwin, iwin):
s = range(j, j+iwin)
sc = j+int(iwin/2)
Mcut = M[s]
if (1.0*len(np.where(Mcut==3)[0])) / len(s) >= alpha:
            i = np.where(Mcut==3)[0]
# This implementation is based on https://github.com/weihua916/powerful-gnns and https://github.com/chrsmrrs/k-gnn/tree/master/examples
# Datasets are implemented based on the description in the corresponding papers (see the paper for references)
import argparse
import numpy as np
import networkx as nx
import time
import random
import matplotlib.pyplot as plt
import torch
from torch import nn
import torch.nn.functional as F
from torch_geometric.data import DataLoader, Data
from torch_geometric.utils import degree
from torch_geometric.utils.convert import from_networkx
from torch_geometric.nn import GINConv, GINEConv, global_add_pool
torch.set_printoptions(profile="full")
# Synthetic datasets
class SymmetrySet:
def __init__(self):
self.hidden_units = 0
self.num_classes = 0
self.num_features = 0
self.num_nodes = 0
def addports(self, data):
data.ports = torch.zeros(data.num_edges, 1)
degs = degree(data.edge_index[0], data.num_nodes, dtype=torch.long) # out degree of all nodes
for n in range(data.num_nodes):
deg = degs[n]
ports = np.random.permutation(int(deg))
for i, neighbor in enumerate(data.edge_index[1][data.edge_index[0]==n]):
nb = int(neighbor)
data.ports[torch.logical_and(data.edge_index[0]==n, data.edge_index[1]==nb), 0] = float(ports[i])
return data
def makefeatures(self, data):
data.x = torch.ones((data.num_nodes, 1))
data.id = torch.tensor(np.random.permutation(np.arange(data.num_nodes))).unsqueeze(1)
return data
def makedata(self):
pass
class LimitsOne(SymmetrySet):
def __init__(self):
super().__init__()
self.hidden_units = 16
self.num_classes = 2
self.num_features = 4
self.num_nodes = 8
self.graph_class = False
def makedata(self):
n_nodes = 16 # There are two connected components, each with 8 nodes
ports = [1,1,2,2] * 8
colors = [0, 1, 2, 3] * 4
y = torch.tensor([0]* 8 + [1] * 8)
edge_index = torch.tensor([[0,1,1,2, 2,3,3,0, 4,5,5,6, 6,7,7,4, 8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,8], [1,0,2,1, 3,2,0,3, 5,4,6,5, 7,6,4,7, 9,8,10,9,11,10,12,11,13,12,14,13,15,14,8,15]], dtype=torch.long)
x = torch.zeros((n_nodes, 4))
x[range(n_nodes), colors] = 1
data = Data(x=x, edge_index=edge_index, y=y)
data.id = torch.tensor(np.random.permutation(np.arange(n_nodes))).unsqueeze(1)
data.ports = torch.tensor(ports).unsqueeze(1)
return [data]
class LimitsTwo(SymmetrySet):
def __init__(self):
super().__init__()
self.hidden_units = 16
self.num_classes = 2
self.num_features = 4
self.num_nodes = 8
self.graph_class = False
def makedata(self):
n_nodes = 16 # There are two connected components, each with 8 nodes
ports = ([1,1,2,2,1,1,2,2] * 2 + [3,3,3,3]) * 2
colors = [0, 1, 2, 3] * 4
y = torch.tensor([0] * 8 + [1] * 8)
edge_index = torch.tensor([[0,1,1,2,2,3,3,0, 4,5,5,6,6,7,7,4, 1,3,5,7, 8,9,9,10,10,11,11,8, 12,13,13,14,14,15,15,12, 9,15,11,13], [1,0,2,1,3,2,0,3, 5,4,6,5,7,6,4,7, 3,1,7,5, 9,8,10,9,11,10,8,11, 13,12,14,13,15,14,12,15, 15,9,13,11]], dtype=torch.long)
x = torch.zeros((n_nodes, 4))
x[range(n_nodes), colors] = 1
data = Data(x=x, edge_index=edge_index, y=y)
data.id = torch.tensor(np.random.permutation(np.arange(n_nodes))).unsqueeze(1)
data.ports = torch.tensor(ports).unsqueeze(1)
return [data]
class Triangles(SymmetrySet):
def __init__(self):
super().__init__()
self.hidden_units = 16
self.num_classes = 2
self.num_features = 1
self.num_nodes = 60
self.graph_class = False
def makedata(self):
size = self.num_nodes
generated = False
while not generated:
nx_g = nx.random_degree_sequence_graph([3] * size)
data = from_networkx(nx_g)
labels = [0] * size
for n in range(size):
for nb1 in data.edge_index[1][data.edge_index[0]==n]:
for nb2 in data.edge_index[1][data.edge_index[0]==n]:
if torch.logical_and(data.edge_index[0]==nb1, data.edge_index[1]==nb2).any():
labels[n] = 1
generated = labels.count(0) >= 20 and labels.count(1) >= 20
data.y = torch.tensor(labels)
data = self.addports(data)
data = self.makefeatures(data)
return [data]
class LCC(SymmetrySet):
def __init__(self):
super().__init__()
self.hidden_units = 16
self.num_classes = 3
self.num_features = 1
self.num_nodes = 10
self.graph_class = False
def makedata(self):
generated = False
while not generated:
graphs = []
labels = []
i = 0
while i < 6:
size = 10
nx_g = nx.random_degree_sequence_graph([3] * size)
if nx.is_connected(nx_g):
i += 1
data = from_networkx(nx_g)
lbls = [0] * size
for n in range(size):
edges = 0
nbs = [int(nb) for nb in data.edge_index[1][data.edge_index[0]==n]]
for nb1 in nbs:
for nb2 in nbs:
if torch.logical_and(data.edge_index[0]==nb1, data.edge_index[1]==nb2).any():
edges += 1
lbls[n] = int(edges/2)
data.y = torch.tensor(lbls)
labels.extend(lbls)
data = self.addports(data)
data = self.makefeatures(data)
graphs.append(data)
generated = labels.count(0) >= 10 and labels.count(1) >= 10 and labels.count(2) >= 10 # Ensure the dataset is somewhat balanced
return graphs
class FourCycles(SymmetrySet):
def __init__(self):
super().__init__()
self.p = 4
self.hidden_units = 16
self.num_classes = 2
self.num_features = 1
self.num_nodes = 4 * self.p
self.graph_class = True
def gen_graph(self, p):
edge_index = None
for i in range(p):
e = torch.tensor([[i, p + i, 2 * p + i, 3 * p + i], [2 * p + i, 3 * p + i, i, p + i]], dtype=torch.long)
if edge_index is None:
edge_index = e
else:
edge_index = torch.cat([edge_index, e], dim=-1)
top = np.zeros((p * p,))
perm = np.random.permutation(range(p))
for i, t in enumerate(perm):
top[i * p + t] = 1
bottom = np.zeros((p * p,))
perm = np.random.permutation(range(p))
for i, t in enumerate(perm):
bottom[i * p + t] = 1
for i, bit in enumerate(top):
if bit:
e = torch.tensor([[i // p, p + i % p], [p + i % p, i // p]], dtype=torch.long)
edge_index = torch.cat([edge_index, e], dim=-1)
for i, bit in enumerate(bottom):
if bit:
e = torch.tensor([[2 * p + i // p, 3 * p + i % p], [3 * p + i % p, 2 * p + i // p]], dtype=torch.long)
edge_index = torch.cat([edge_index, e], dim=-1)
return Data(edge_index=edge_index, num_nodes=self.num_nodes), any(np.logical_and(top, bottom))
def makedata(self):
size = 25
p = self.p
trues = []
falses = []
while len(trues) < size or len(falses) < size:
data, label = self.gen_graph(p)
data = self.makefeatures(data)
data = self.addports(data)
data.y = label
if label and len(trues) < size:
trues.append(data)
elif not label and len(falses) < size:
falses.append(data)
return trues + falses
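
# Hypothetical sketch (not part of the original experiments): in FourCycles, the two
# random matchings encoded in `top` and `bottom` determine the graph label, which is
# True iff they share at least one (i, j) pair, i.e. np.logical_and(top, bottom).any().
def _demo_fourcycles_label():
    ds = FourCycles()
    data, label = ds.gen_graph(ds.p)
    return data.num_nodes, label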
class SkipCircles(SymmetrySet):
def __init__(self):
super().__init__()
self.hidden_units = 32
self.num_classes = 10 # num skips
self.num_features = 1
self.num_nodes = 41
self.graph_class = True
self.makedata()
def makedata(self):
size=self.num_nodes
skips = [2, 3, 4, 5, 6, 9, 11, 12, 13, 16]
graphs = []
for s, skip in enumerate(skips):
edge_index = torch.tensor([[0, size-1], [size-1, 0]], dtype=torch.long)
for i in range(size - 1):
e = torch.tensor([[i, i+1], [i+1, i]], dtype=torch.long)
edge_index = torch.cat([edge_index, e], dim=-1)
for i in range(size):
e = torch.tensor([[i, i], [(i - skip) % size, (i + skip) % size]], dtype=torch.long)
edge_index = torch.cat([edge_index, e], dim=-1)
data = Data(edge_index=edge_index, num_nodes=self.num_nodes)
data = self.makefeatures(data)
data = self.addports(data)
data.y = torch.tensor(s)
graphs.append(data)
return graphs
def main(args, cluster=None):
print(args, flush=True)
if args.dataset == "skipcircles":
dataset = SkipCircles()
elif args.dataset == "triangles":
dataset = Triangles()
elif args.dataset == "lcc":
dataset = LCC()
elif args.dataset == "limitsone":
dataset = LimitsOne()
elif args.dataset == "limitstwo":
dataset = LimitsTwo()
elif args.dataset == "fourcycles":
dataset = FourCycles()
print(dataset.__class__.__name__)
# Set the sampling probability and number of runs/samples for the DropGIN
n = dataset.num_nodes
print(f'Number of nodes: {n}')
gamma = n
p_opt = 2 * 1 /(1+gamma)
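# Note: with gamma = n, p_opt = 2 / (1 + gamma) gives an expected n * p = 2n / (n + 1) ~ 2
# dropped nodes per run, which appears to be the intent behind this default probability.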
if args.prob >= 0:
p = args.prob
else:
p = p_opt
if args.num_runs > 0:
num_runs = args.num_runs
else:
num_runs = gamma
print(f'Number of runs: {num_runs}')
print(f'Sampling probability: {p}')
degs = []
for g in dataset.makedata():
deg = degree(g.edge_index[0], g.num_nodes, dtype=torch.long)
degs.append(deg.max())
print(f'Mean Degree: {torch.stack(degs).float().mean()}')
print(f'Max Degree: {torch.stack(degs).max()}')
print(f'Min Degree: {torch.stack(degs).min()}')
print(f'Number of graphs: {len(dataset.makedata())}')
graph_classification = dataset.graph_class
if graph_classification:
print('Graph Classification Task')
else:
print('Node Classification Task')
num_features = dataset.num_features
Conv = GINConv
if args.augmentation == 'ports':
Conv = GINEConv
elif args.augmentation == 'ids':
num_features += 1
elif args.augmentation == 'random':
num_features += 1
class GIN(nn.Module):
def __init__(self):
super(GIN, self).__init__()
dim = dataset.hidden_units
self.num_layers = args.num_layers
self.convs = nn.ModuleList()
self.bns = nn.ModuleList()
self.fcs = nn.ModuleList()
self.convs.append(Conv(nn.Sequential(nn.Linear(num_features, dim), nn.BatchNorm1d(dim), nn.ReLU(), nn.Linear(dim, dim))))
self.bns.append(nn.BatchNorm1d(dim))
self.fcs.append(nn.Linear(num_features, dataset.num_classes))
self.fcs.append(nn.Linear(dim, dataset.num_classes))
for i in range(self.num_layers-1):
self.convs.append(Conv(nn.Sequential(nn.Linear(dim, dim), nn.BatchNorm1d(dim), nn.ReLU(), nn.Linear(dim, dim))))
self.bns.append(nn.BatchNorm1d(dim))
self.fcs.append(nn.Linear(dim, dataset.num_classes))
def reset_parameters(self):
for m in self.modules():
if isinstance(m, nn.Linear):
m.reset_parameters()
elif isinstance(m, Conv):
m.reset_parameters()
elif isinstance(m, nn.BatchNorm1d):
m.reset_parameters()
def forward(self, data):
x = data.x
edge_index = data.edge_index
batch = data.batch
if args.augmentation == 'ids':
x = torch.cat([x, data.id.float()], dim=1)
elif args.augmentation == 'random':
x = torch.cat([x, torch.randint(0, 100, (x.size(0), 1), device=x.device) / 100.0], dim=1)
outs = [x]
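# Collect the input features and every layer's node embeddings; each entry gets its own
# linear head below and the per-layer predictions are summed (similar in spirit to a jumping-knowledge readout).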
for i in range(self.num_layers):
if args.augmentation == 'ports':
x = self.convs[i](x, edge_index, data.ports.expand(-1, x.size(-1)))
else:
x = self.convs[i](x, edge_index)
x = self.bns[i](x)
x = F.relu(x)
outs.append(x)
out = None
for i, x in enumerate(outs):
if graph_classification:
x = global_add_pool(x, batch)
x = self.fcs[i](x) # No dropout for these experiments
if out is None:
out = x
else:
out += x
return F.log_softmax(out, dim=-1), 0
use_aux_loss = args.use_aux_loss
class DropGIN(nn.Module):
def __init__(self):
super(DropGIN, self).__init__()
dim = dataset.hidden_units
self.num_layers = args.num_layers
self.convs = nn.ModuleList()
self.bns = nn.ModuleList()
self.fcs = nn.ModuleList()
self.convs.append(Conv(nn.Sequential(nn.Linear(num_features, dim), nn.BatchNorm1d(dim), nn.ReLU(), nn.Linear(dim, dim))))
self.bns.append(nn.BatchNorm1d(dim))
self.fcs.append(nn.Linear(num_features, dataset.num_classes))
self.fcs.append(nn.Linear(dim, dataset.num_classes))
for i in range(self.num_layers-1):
self.convs.append(Conv(nn.Sequential(nn.Linear(dim, dim), nn.BatchNorm1d(dim), nn.ReLU(), nn.Linear(dim, dim))))
self.bns.append(nn.BatchNorm1d(dim))
self.fcs.append(nn.Linear(dim, dataset.num_classes))
if use_aux_loss:
self.aux_fcs = nn.ModuleList()
self.aux_fcs.append(nn.Linear(num_features, dataset.num_classes))
for i in range(self.num_layers):
self.aux_fcs.append(nn.Linear(dim, dataset.num_classes))
def reset_parameters(self):
for m in self.modules():
if isinstance(m, nn.Linear):
m.reset_parameters()
elif isinstance(m, Conv):
m.reset_parameters()
elif isinstance(m, nn.BatchNorm1d):
m.reset_parameters()
def forward(self, data):
x = data.x
edge_index = data.edge_index
batch = data.batch
# Do runs in parallel, by repeating the graphs in the batch
x = x.unsqueeze(0).expand(num_runs, -1, -1).clone()
drop = torch.bernoulli(torch.ones([x.size(0), x.size(1)], device=x.device) * p).bool()
x[drop] = 0.0
del drop
outs = [x]
x = x.view(-1, x.size(-1))
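# Lay the runs out as one large disjoint graph: the edges are repeated num_runs times and the
# node indices of run r are shifted by r * (max node index + 1), so a single conv call covers all runs.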
run_edge_index = edge_index.repeat(1, num_runs) + torch.arange(num_runs, device=edge_index.device).repeat_interleave(edge_index.size(1)) * (edge_index.max() + 1)
for i in range(self.num_layers):
x = self.convs[i](x, run_edge_index)
x = self.bns[i](x)
x = F.relu(x)
outs.append(x.view(num_runs, -1, x.size(-1)))
del run_edge_index
out = None
for i, x in enumerate(outs):
x = x.mean(dim=0)
if graph_classification:
x = global_add_pool(x, batch)
x = self.fcs[i](x) # No dropout layer in these experiments
if out is None:
out = x
else:
out += x
if use_aux_loss:
aux_out = torch.zeros(num_runs, out.size(0), out.size(1), device=out.device)
run_batch = batch.repeat(num_runs) + torch.arange(num_runs, device=edge_index.device).repeat_interleave(batch.size(0)) * (batch.max() + 1)
for i, x in enumerate(outs):
if graph_classification:
x = x.view(-1, x.size(-1))
x = global_add_pool(x, run_batch)
x = x.view(num_runs, -1, x.size(-1))
x = self.aux_fcs[i](x) # No dropout layer in these experiments
aux_out += x
return F.log_softmax(out, dim=-1), F.log_softmax(aux_out, dim=-1)
else:
return F.log_softmax(out, dim=-1), 0
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f'Device: {device}')
if args.augmentation == 'dropout':
model = DropGIN().to(device)
else:
model = GIN().to(device)
use_aux_loss = False
def train(epoch, loader, optimizer):
model.train()
loss_all = 0
n = 0
for data in loader:
data = data.to(device)
optimizer.zero_grad()
logs, aux_logs = model(data)
loss = F.nll_loss(logs, data.y)
n += len(data.y)
if use_aux_loss:
aux_loss = F.nll_loss(aux_logs.view(-1, aux_logs.size(-1)), data.y.unsqueeze(0).expand(aux_logs.size(0),-1).clone().view(-1))
loss = 0.75*loss + 0.25*aux_loss
loss.backward()
loss_all += data.num_graphs * loss.item()
optimizer.step()
return loss_all / len(loader.dataset)
def test(loader):
model.eval()
n = 0
with torch.no_grad():
correct = 0
for data in loader:
data = data.to(device)
logs, aux_logs = model(data)
pred = logs.max(1)[1]
n += len(pred)
correct += pred.eq(data.y).sum().item()
return correct / n
def train_and_test(multiple_tests=False, test_over_runs=None):
train_accs = []
test_accs = []
nonlocal num_runs # access the num_runs variable from the enclosing main() scope inside this function
print(model.__class__.__name__)
for seed in range(10):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
model.reset_parameters()
lr = 0.01
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
test_dataset = dataset.makedata()
train_dataset = dataset.makedata()
test_loader = DataLoader(test_dataset, batch_size=len(train_dataset))
train_loader = DataLoader(train_dataset, batch_size=len(train_dataset), shuffle=True)
print('---------------- Seed {} ----------------'.format(seed))
for epoch in range(1, 1001):
if args.verbose:
start = time.time()
train_loss = train(epoch, train_loader, optimizer)
if args.verbose:
print('Epoch: {:03d}, LR: {:7f}, Train Loss: {:.7f}, Time: {:7f}'.format(epoch, lr, train_loss, time.time() - start), flush=True)
train_acc = test(train_loader)
train_accs.append(train_acc)
if not test_over_runs is None:
if multiple_tests:
for i in range(10):
old_num_runs = num_runs
for r in test_over_runs:
num_runs = r
test_acc = test(test_loader)
test_accs.append(test_acc)
num_runs = old_num_runs
else:
old_num_runs = num_runs
for r in test_over_runs:
num_runs = r
test_acc = test(test_loader)
test_accs.append(test_acc)
num_runs = old_num_runs
elif multiple_tests:
for i in range(10):
test_acc = test(test_loader)
test_accs.append(test_acc)
test_acc = torch.tensor(test_accs[-10:]).mean().item()
else:
test_acc = test(test_loader)
test_accs.append(test_acc)
print('Train Acc: {:.7f}, Test Acc: {:7f}'.format(train_acc, test_acc), flush=True)
train_acc = torch.tensor(train_accs)
test_acc = torch.tensor(test_accs)
if not test_over_runs is None:
test_acc = test_acc.view(-1, len(test_over_runs))
print('---------------- Final Result ----------------')
print('Train Mean: {:7f}, Train Std: {:7f}, Test Mean: {}, Test Std: {}'.format(train_acc.mean(), train_acc.std(), test_acc.mean(dim=0), test_acc.std(dim=0)), flush=True)
return test_acc.mean(dim=0), test_acc.std(dim=0)
if args.prob_ablation:
print('Dropout probability ablation')
probs = [0.0, 0.01, 0.02, 0.04, 0.08, 0.16, 0.32, 0.64, 0.95]
means = []
stds = []
for prob in probs:
print(f'Dropout probability {prob}:')
p = prob
mean, std = train_and_test(multiple_tests=True)
means.append(mean.item())
stds.append(std.item())
probs = np.array(probs)
from memory import BasicBuffer
from DQN_Model import ieee2_net,ieee4_net
import torch.nn as nn
from torch.autograd import Variable
from setup import powerGrid_ieee2
import numpy as np
import torch
import os
import matplotlib.pyplot as plt
import copy
import statistics as stat
from torch.utils.tensorboard import SummaryWriter
import pickle
import math
class DQN:
def __init__(self, ieeeBusSystem, lr, memorySize, batchSize, decayRate, numOfEpisodes, stepsPerEpisode, epsilon, annealingConstant, annealAfter, targetUpdateAfter,expandActions=False,ddqnMode=False):
prefix='ddqn' if ddqnMode else 'dqn'
self.env_2bus = powerGrid_ieee2('ddqn' if ddqnMode else 'dqn');
self.ddqnMode=ddqnMode;
if expandActions:
self.actions = ['v_ref:' + str(x) + ';lp_ref:' + str(y) for x in self.env_2bus.deepActionSpace['v_ref_pu']
for y
in self.env_2bus.deepActionSpace['lp_ref']]
else:
self.actions = ['v_ref:' + str(x) + ';lp_ref:' + str(y) for x in self.env_2bus.actionSpace['v_ref_pu'] for y
in self.env_2bus.actionSpace['lp_ref']]
op=len(self.actions)
self.eval_net, self.target_net = ieee2_net(24,op,0.3), ieee2_net(24,op)
USE_CUDA = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
#USE_CUDA = torch.cuda.is_available();
self.learn_step_counter = 0 # for target updating
self.memory_counter = 0 # for storing memory
self.memory_capacity=memorySize; # initialize memory
self.memory=BasicBuffer(self.memory_capacity);
self.learningRate=lr
self.optimizer = torch.optim.Adam(self.eval_net.parameters(), lr=lr)
self.loss_func = nn.MSELoss()
self.batch_size=batchSize
self.numOfEpisodes = numOfEpisodes
self.annealingRate = annealingConstant
self.numOfSteps = stepsPerEpisode
#self.learningRate = learningRate
self.epsilon=epsilon
self.decayRate = decayRate
self.annealAfter = annealAfter
self.target_update_iter=targetUpdateAfter
self.allRewards=[];
self.fileName=prefix+'_lr' + str(lr) +'tua'+str(targetUpdateAfter)+'bs' +str(batchSize)+'ms'+str(memorySize)+'dr' + str(decayRate) + 'noe' + str(
numOfEpisodes) + 'spe' + str(stepsPerEpisode) + 'e' + str(epsilon) + 'ac' + str(
annealingConstant) + 'aa' + str(annealAfter)+'op'+str(op);
self.checkPoint = 'DQN_Checkpoints/'+self.fileName+'.tar';
print(self.checkPoint)
if os.path.isfile(self.checkPoint):
print('loading state values from last saved checkpoint');
checkpoint = torch.load(self.checkPoint);
self.eval_net.load_state_dict(checkpoint['evalNet_state_dict'])
self.target_net.load_state_dict(checkpoint['targetNet_state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
self.epsilon=checkpoint['epsilon']
self.allRewards=checkpoint['allRewards']
self.learn_step_counter=checkpoint['learn_step_counter']
self.memory=checkpoint['memory']
self.memory_counter=checkpoint['memory_counter']
if USE_CUDA:
print('GPU Exists')
self.eval_net.cuda()
self.target_net.cuda()
for state in self.optimizer.state.values():
for k, v in state.items():
if isinstance(v, torch.Tensor):
state[k] = v.cuda()
# self.writer = SummaryWriter(
# 'runs/' + self.fileName);
# dummystate, dummyaction, _, _, _ = self.memory.sample(1);
# self.writer.add_graph(self.eval_net,
# torch.FloatTensor(dummystate).cuda())
# self.writer.close()
def store_transition(self, s, a, r, done,s_):
self.memory.push(s, a, r, s_, done)
self.memory_counter += 1
def getActionFromIndex(self, ind):
actionString=self.actions[ind];
actionStringSplitted=actionString.split(';');
voltage = actionStringSplitted[0].split(':')[1];
loadingPercent = actionStringSplitted[1].split(':')[1];
return((int(loadingPercent),float(voltage) ));
def learn(self):
# target parameter update
if self.learn_step_counter % self.target_update_iter == 0:
self.target_net.load_state_dict(self.eval_net.state_dict())
self.learn_step_counter += 1
# sample batch transitions
b_s,b_a,b_r,b_s_,dones = self.memory.sample(self.batch_size)
b_s=Variable(torch.FloatTensor(b_s).cuda())
b_a = Variable(torch.LongTensor(b_a).cuda())
b_s_ = Variable(torch.FloatTensor(b_s_).cuda())
q_eval = self.eval_net(b_s.unsqueeze(1)).gather(1, b_a.unsqueeze(1)) # shape (batch, 1)
if self.ddqnMode:
q_next_eval = self.eval_net(b_s_.unsqueeze(1)).detach()
q_next_target = self.target_net(b_s_.unsqueeze(1)).detach()
else:
q_next = self.target_net(b_s_.unsqueeze(1)).detach() # detach from graph, don't backpropagate
target = [];
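# Bootstrapped part of the TD target per sample: 0 for terminal transitions, otherwise
# decayRate * max_a' Q_target(s', a') (in DDQN mode, the target-net value of the action
# chosen by the eval net); the immediate reward b_r is added afterwards.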
for i in range(0, self.batch_size):
terminal = dones[i]
if terminal:
target.append(0)
else:
if self.ddqnMode:
values, action = q_next_eval[i].max(0)
target.append(self.decayRate*q_next_target[i][action].item())
else:
target.append(self.decayRate*q_next[i].max(0)[0].item())
q_target = np.add(b_r , target)
q_target = Variable(torch.FloatTensor(q_target).cuda())
loss = self.loss_func(q_eval, q_target.unsqueeze(1))
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self.runningLoss+=loss.item()
self.runningRewards+=sum(b_r)/self.batch_size
if self.learn_step_counter % 200 == 0: # every 200 mini-batches...
# ...log the running loss
self.writer.add_scalar('training loss',
self.runningLoss / 200,
self.learn_step_counter)
self.writer.add_scalar('avg Reward',
self.runningRewards/200,
self.learn_step_counter)
self.runningRewards = 0;
self.runningLoss = 0.0
def train(self):
self.writer = SummaryWriter(
'runs/' + self.fileName);
print('epsilon: ' + str(self.epsilon))
print('Has already been trained for following num of episodes: ' + str(len(self.allRewards)))
noe = self.numOfEpisodes - len(self.allRewards)
self.eval_net.train();
self.env_2bus.setMode('train')
self.runningLoss=0;
self.runningRewards=0;
for i in range(0, noe):
accumulatedReward = 0;
self.env_2bus.reset();
currentState = [];
for j in range(0, 3):
m = self.env_2bus.getCurrentStateForDQN();
m.extend(m)
currentState.append(m);
self.env_2bus.stateIndex += 1;
self.env_2bus.scaleLoadAndPowerValue(self.env_2bus.stateIndex)
self.env_2bus.runEnv(False);
currentState.append(self.env_2bus.getCurrentStateForDQN())
currentState[3].extend(self.env_2bus.getCurrentStateForDQN())
currentState=np.array(currentState)
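# The state fed to the network is a rolling window of the last 4 measurement vectors;
# each step below appends the newest measurements and drops the oldest row.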
for j in range(0, self.numOfSteps):
epsComp = np.random.random();
if epsComp <= self.epsilon:
# Exploration Part
actionIndex = np.random.choice(len(self.actions), 1)[0]
else:
# Greedy Approach
q_value = self.eval_net.forward(Variable(torch.unsqueeze(torch.unsqueeze(torch.FloatTensor(currentState),0),0)).cuda());
actionIndex = torch.max(q_value, 1)[1].data.cpu().numpy()[0] # return the argmax
action = self.getActionFromIndex(actionIndex);
currentMeasurements, reward, done, _ = self.env_2bus.takeAction(action[0], action[1]);
oldState=currentState;
currentState=np.append(currentState, [currentMeasurements],axis=0)
currentState=np.delete(currentState,0,axis=0);
self.store_transition(oldState, actionIndex, reward, done ,currentState)
accumulatedReward += reward;
if self.memory_counter > self.memory_capacity:
self.learn()
if done:
break;
self.allRewards.append(accumulatedReward);
if (i + 1) % self.annealAfter == 0:
print('Episode: ' + str(len(self.allRewards)) + '; reward:' + str(accumulatedReward))
self.epsilon = self.annealingRate * self.epsilon;
print('saving checkpoint data')
torch.save({
'epsilon': self.epsilon,
'allRewards': self.allRewards,
'evalNet_state_dict': self.eval_net.state_dict(),
'targetNet_state_dict': self.target_net.state_dict(),
'optimizer_state_dict': self.optimizer.state_dict(),
'learn_step_counter': self.learn_step_counter,
'memory': self.memory,
'memory_counter': self.memory_counter
}, self.checkPoint)
print('training finished')
def test(self, episodes, numOfStepsPerEpisode, busVoltageIndex,testAllActions):
rewards=[]
regrets=[]
count=0;
ul=self.numOfSteps;
self.eval_net.eval();
voltage=[]
voltage2 = []
self.env_2bus.setMode('test')
if testAllActions:
copyNetwork = copy.deepcopy(self)
for j in range(0,episodes):
self.env_2bus.reset();
#currentState = self.env_2bus.getCurrentStateForDQN();
currentState = [];
for j in range(0, 3):
m = self.env_2bus.getCurrentStateForDQN();
m.extend(m)
currentState.append(m);
self.env_2bus.stateIndex += 1;
self.env_2bus.scaleLoadAndPowerValue(self.env_2bus.stateIndex)
self.env_2bus.runEnv(False);
currentState.append(self.env_2bus.getCurrentStateForDQN())
currentState[3].extend(self.env_2bus.getCurrentStateForDQN())
currentState = np.array(currentState)
rewardForEp=[];
rew_aa_ForEp=[];
for i in range(0,numOfStepsPerEpisode):
q_value = self.eval_net.forward(Variable(torch.unsqueeze(torch.FloatTensor(currentState.flatten()), 0)).cuda());
# print(torch.max(q_value, 1)[1].shape)
actionIndex = torch.max(q_value, 1)[1].data.cpu().numpy()[0] # return the argmax
action = self.getActionFromIndex(actionIndex);
# oldMeasurements = currentMeasurements;
currentMeasurements, reward, done, _ = self.env_2bus.takeAction(action[0], action[1]);
currentState = np.append(currentState, [currentMeasurements], axis=0)
voltage.append(0.7*currentMeasurements[3] + 0.3*currentMeasurements[0])
voltage2.append(currentMeasurements[0])
# currentState.append(currentMeasurements);
currentState = np.delete(currentState, 0, axis=0);
rewardForEp.append(reward);
if testAllActions:
_,_,_,_, rew_aa = copyNetwork.runFACTSallActionsRL(busVoltageIndex)
rew_aa_ForEp.append(rew_aa)
if done == True:
break;
#print(self.env_2bus.net.res_bus.vm_pu)
#print(self.env_2bus.net.res_line)
rewards.append(sum(rewardForEp));
if testAllActions:
regrets.append(sum(rew_aa_ForEp) - sum(rewardForEp))
# PLot reward and regret
if testAllActions:
fig, (ax1, ax2, ax3, ax4) = plt.subplots(1,4)
else:
fig, (ax1, ax2, ax3) = plt.subplots(1, 3)
ax1.scatter(list(range(0, len(rewards))), rewards)
ax1.set_ylabel('Reward')
ax1.set_xlabel('Episode')
#print( voltage)
ax2.plot(list(range(0, len(voltage))), voltage)
ax2.set_ylabel('voltage')
ax2.set_xlabel('Episode')
ax3.plot(list(range(0, len(voltage2))), voltage2)
ax3.set_ylabel('voltage2')
ax3.set_xlabel('Episode')
if testAllActions:
ax4.scatter(list(range(0, len(regrets))), regrets)
ax4.set_ylabel('Regret')
ax4.set_xlabel('Episode')
plt.show()
#print(sum(rewards))
#self.writer.add_graph(self.eval_net, Variable(torch.unsqueeze(torch.FloatTensor(currentState), 0)).cuda())
#plt.scatter(list(range(0, len(rewards))), rewards)
#plt.show();
def lp_ref(self):
return stat.mean(self.env_2bus.net.res_line.loading_percent)
def runFACTSnoRL(self, v_ref, lp_ref, bus_index_shunt, bus_index_voltage, line_index, series_comp_enabl):
# Enable/Disable devices
self.env_2bus.net.switch.at[1, 'closed'] = False if series_comp_enabl else True
self.env_2bus.net.switch.at[0, 'closed'] = True
self.env_2bus.net.controller.in_service[1] = True if series_comp_enabl else False
# Set reference values
self.env_2bus.shuntControl.ref = v_ref;
self.env_2bus.seriesControl.ref = lp_ref;
self.env_2bus.runEnv(runControl=True)
busVoltage = self.env_2bus.net.res_bus.vm_pu[bus_index_voltage]
lp_max = max(self.env_2bus.net.res_line.loading_percent)
lp_std = np.std(self.env_2bus.net.res_line.loading_percent)
return busVoltage, lp_max, lp_std
## Run the environment controlled by greedy RL
def runFACTSgreedyRL(self, busVoltageIndex, currentState,takeLastAction):
#q_value = self.eval_net.forward(Variable(torch.unsqueeze(torch.FloatTensor(currentState), 0)).cuda());
q_value = self.eval_net.forward(Variable(torch.unsqueeze(torch.FloatTensor(currentState.flatten()), 0)).cuda())
actionIndex = torch.max(q_value, 1)[1].data.cpu().numpy()[0] # return the argmax
action = self.getActionFromIndex(actionIndex);
nextStateMeasurements, reward, done, measAfterAction = self.env_2bus.takeAction(action[0], action[1])
busVoltage = measAfterAction[0]
lp_max = measAfterAction[1]
lp_std = measAfterAction[2]
return nextStateMeasurements, busVoltage, lp_max, lp_std, reward
## Run environment and try all actions and choose highest reward
def runFACTSallActionsRL(self, busVoltageIndex):
copyNetwork = copy.deepcopy(self)
reward = 0
bestAction = []
rewArr = []
#Create action space with high resolution:
copyNetwork.actionSpace = {'v_ref_pu': [i/1000 for i in range(900, 1101)], 'lp_ref': [i for i in range(0, 151)]}
copyNetwork.actions = ['v_ref:' + str(x) + ';lp_ref:' + str(y) for x in copyNetwork.actionSpace['v_ref_pu']
for y in copyNetwork.actionSpace['lp_ref']]
# Test all actions
for i in range(0, len(copyNetwork.actions)):
action = copyNetwork.getActionFromIndex(i)
nextStateMeas, rew, done, _ = copyNetwork.env_2bus.takeAction(action[0], action[1] )
copyNetwork.env_2bus.stateIndex -= 1 # increment back as takeAction() increments +1
rewArr.append(rew)
if rew > reward:
bestAction = action # Save best action
reward = rew
# Take best action in actual environment
currentStateMeasurements, reward, done, measAfterAction = self.env_2bus.takeAction(bestAction[0], bestAction[1])
busVoltage = measAfterAction[0]
lp_max = measAfterAction[1]
lp_std = measAfterAction[2]
print(' max-min rewArr: ', max(rewArr), min(rewArr))
return currentStateMeasurements, busVoltage, lp_max, lp_std, reward
# Compare performance wrt reward and voltage stability between RL agent, benchmark and non-RL cases.
# Creates selection of graphs and prints other results.
def comparePerformance(self, steps, oper_upd_interval, bus_index_shunt, bus_index_voltage, line_index,benchmarkFlag):
v_noFACTS = []
lp_max_noFACTS = []
lp_std_noFACTS = []
v_FACTS = []
lp_max_FACTS = []
lp_std_FACTS = []
v_RLFACTS = []
lp_max_RLFACTS = []
lp_std_RLFACTS = []
v_FACTS_noSeries = []
lp_max_FACTS_noSeries = []
lp_std_FACTS_noSeries = []
v_FACTS_eachTS = []
lp_max_FACTS_eachTS = []
lp_std_FACTS_eachTS = []
rewardNoFacts = []
rewardFacts = []
rewardFactsEachTS = []
rewardFactsNoSeries = []
rewardFactsRL = []
v_RLFACTS_AfterLoadChange = []
lp_max_RLFACTS_AfterLoadChange = []
self.env_2bus.setMode('test')
self.env_2bus.reset()
stateIndex = self.env_2bus.stateIndex
loadProfile = self.env_2bus.loadProfile
performance=0
while stateIndex + steps+4 > len(loadProfile):
self.env_2bus.reset() # Reset to get sufficient number of steps left in time series
stateIndex = self.env_2bus.stateIndex
loadProfile = self.env_2bus.loadProfile
# Create copy of network for historic measurements
temp = copy.deepcopy(self)
#temp.eval_net.eval()
#Run for history measurements to get full state repr for RL.
currentState = [];
for j in range(0, 3):
m = temp.env_2bus.getCurrentStateForDQN();
m.extend(m) # creates the 2nd part of the state ("after action"); no action is taken here since RL is disabled in this warm-up part.
currentState.append(m);
temp.env_2bus.stateIndex += 1;
temp.env_2bus.scaleLoadAndPowerValue(temp.env_2bus.stateIndex)
temp.env_2bus.runEnv(False);
currentState.append(temp.env_2bus.getCurrentStateForDQN())
currentState[3].extend(temp.env_2bus.getCurrentStateForDQN())
currentState = np.array(currentState) # Only used for RLFACTS case
# Need a separate copy for each scenario
stateIndex = temp.env_2bus.stateIndex
qObj_env_noFACTS = copy.deepcopy(temp)
qObj_env_FACTS = copy.deepcopy(temp)
qObj_env_RLFACTS = copy.deepcopy(temp)
qObj_env_FACTS_noSeries = copy.deepcopy(temp)
qObj_env_FACTS_eachTS = copy.deepcopy(temp)
# Make sure FACTS devices disabled for noFACTS case and no Series for that case
qObj_env_noFACTS.env_2bus.net.switch.at[0, 'closed'] = False
qObj_env_noFACTS.env_2bus.net.switch.at[1, 'closed'] = True
qObj_env_FACTS_noSeries.env_2bus.net.switch.at[1, 'closed'] = True
# To plot horizontal axis in nose-curve
load_nom_pu = 2 #the nominal IEEE load in pu
print(stateIndex)
print(qObj_env_RLFACTS.env_2bus.stateIndex)
loading_arr = list(load_nom_pu*(loadProfile[stateIndex:stateIndex + steps] / stat.mean(loadProfile)))
loading_arr_afterLoadChange = list(load_nom_pu * (loadProfile[stateIndex+1:stateIndex + steps+1] / stat.mean(loadProfile))) # to get proper sorting for voltage after load change
# Loop through each load
for i in range(0, steps):
# no FACTS
qObj_env_noFACTS.env_2bus.runEnv(runControl=False) #No FACTS, no control
v_noFACTS.append(qObj_env_noFACTS.env_2bus.net.res_bus.vm_pu[bus_index_voltage])
lp_max_noFACTS.append(max(qObj_env_noFACTS.env_2bus.net.res_line.loading_percent))
lp_std_noFACTS.append(np.std(qObj_env_noFACTS.env_2bus.net.res_line.loading_percent))
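# Reward shaping used throughout this comparison: (200 - 20 * exp(10 * |1 - v_pu|) - std(line loading)) / 200,
# i.e. voltage deviation from 1 pu is penalised exponentially and uneven line loading linearly.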
rewardNoFacts.append((200+(math.exp(abs(1 - qObj_env_noFACTS.env_2bus.net.res_bus.vm_pu[bus_index_voltage]) * 10) * -20) - np.std(qObj_env_noFACTS.env_2bus.net.res_line.loading_percent))/200)
# FACTS with both series and shunt
v_ref = 1
if i % oper_upd_interval == 0:
lp_reference = qObj_env_FACTS.lp_ref()
#print('oper', lp_reference)
voltage, lp_max, lp_std = qObj_env_FACTS.runFACTSnoRL(v_ref, lp_reference, bus_index_shunt, bus_index_voltage,
line_index, True) # Series compensation enabled
v_FACTS.append(voltage)
lp_max_FACTS.append(lp_max)
lp_std_FACTS.append(lp_std)
rewFacts=(200+(math.exp(abs(1 - voltage) * 10) * -20) - lp_std)/200 ;
# FACTS no Series compensation
voltage, lp_max, lp_std = qObj_env_FACTS_noSeries.runFACTSnoRL(v_ref, lp_reference, bus_index_shunt, bus_index_voltage,
line_index, False) # Series compensation disabled
v_FACTS_noSeries.append(voltage)
lp_max_FACTS_noSeries.append(lp_max)
lp_std_FACTS_noSeries.append(lp_std)
rewFactsNoSeries=(200+(math.exp(abs(1 - voltage) * 10) * -20) - lp_std)/200 ;
# FACTS with both series and shunt, with system operator update EACH time step
lp_reference_eachTS = qObj_env_FACTS_eachTS.lp_ref()
#print('eachTS', lp_reference_eachTS)
voltage, lp_max, lp_std = qObj_env_FACTS_eachTS.runFACTSnoRL(v_ref, lp_reference_eachTS, bus_index_shunt, bus_index_voltage,
line_index, True) # Series compensation enabled
v_FACTS_eachTS.append(voltage)
lp_max_FACTS_eachTS.append(lp_max)
lp_std_FACTS_eachTS.append(lp_std)
#rewFactsEachTS=(200+(math.exp(abs(1 - voltage) * 10) * -20) - lp_std)/200 ;
# RLFACTS
takeLastAction=False;
qObj_env_RLFACTS.eval_net.eval();
currentMeasurements, voltage, lp_max, lp_std,r = qObj_env_RLFACTS.runFACTSgreedyRL(bus_index_voltage, currentState, takeLastAction) # runpp is done within this function
currentState = np.append(currentState, [currentMeasurements], axis=0)
currentState = np.delete(currentState, 0, axis=0);
v_RLFACTS.append(voltage)
lp_max_RLFACTS.append(lp_max)
lp_std_RLFACTS.append(lp_std)
rewardFactsRL.append(r) # FACTS with both series and shunt
v_RLFACTS_AfterLoadChange.append(qObj_env_RLFACTS.env_2bus.net.res_bus.vm_pu[1])
lp_max_RLFACTS_AfterLoadChange.append(max(qObj_env_RLFACTS.env_2bus.net.res_line.loading_percent))
# Increment state
stateIndex += 1
qObj_env_noFACTS.env_2bus.scaleLoadAndPowerValue(stateIndex) #Only for these, rest are incremented within their respective functions
qObj_env_FACTS.env_2bus.scaleLoadAndPowerValue(stateIndex)
qObj_env_FACTS_noSeries.env_2bus.scaleLoadAndPowerValue(stateIndex)
qObj_env_FACTS_eachTS.env_2bus.scaleLoadAndPowerValue(stateIndex)
rewFacts = 0.7 * rewFacts + 0.3 * (
200 + (math.exp(abs(1 - qObj_env_FACTS.env_2bus.net.res_bus.vm_pu[1]) * 10) * -20) - np.std(
qObj_env_FACTS.env_2bus.net.res_line.loading_percent)) / 200
rewardFacts.append(rewFacts)
rewFactsNoSeries = 0.7 * rewFactsNoSeries + 0.3 * (200 + (
math.exp(abs(1 - qObj_env_FACTS_noSeries.env_2bus.net.res_bus.vm_pu[1]) * 10) * -20) - np.std(
qObj_env_FACTS_noSeries.env_2bus.net.res_line.loading_percent)) / 200
rewardFactsNoSeries.append(rewFactsNoSeries)
rewFactsEachTS = 0.7 * rewFacts + 0.3 * (200 + (
math.exp(abs(1 - qObj_env_FACTS_eachTS.env_2bus.net.res_bus.vm_pu[1]) * 10) * -20) - np.std(
qObj_env_FACTS_eachTS.env_2bus.net.res_line.loading_percent)) / 200
rewardFactsEachTS.append(rewFactsEachTS) # FACTS with both series and shunt
if (rewFacts-r < 0.01) and (rewFactsNoSeries-r < 0.01):
performance += 1;
print('RL better than no RL in % wrt to reward (Upsilon): ', (performance / steps)*100)
print('max reward facts:', np.max(rewardFacts))
print('max reward facts with RL:', np.max(rewardFactsRL))
print('max reward facts no series:', np.max(rewardFactsNoSeries))
print('min reward facts:', np.min(rewardFacts))
print('min reward facts with RL:', np.min(rewardFactsRL))
print('min reward facts no series:', np.min(rewardFactsNoSeries))
print('mean reward facts:', np.mean(rewardFacts))
print('mean reward facts with RL:', np.mean(rewardFactsRL))
print('mean reward facts no series:', np.mean(rewardFactsNoSeries))
print('std reward facts:', np.std(rewardFacts))
import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
from pandas import to_timedelta
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas import (Series, Timedelta, DataFrame, Timestamp, TimedeltaIndex,
timedelta_range, date_range, DatetimeIndex, Int64Index,
_np_version_under1p10, Float64Index, Index, tslib)
from pandas.tests.test_base import Ops
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h')]
# coding: utf-8
from typing import Tuple
import math
import os
from datetime import datetime
from os.path import abspath, join
import re
import numpy as np
from scipy import ndimage
from ncmagics import readnc, japanmap
def d_from_filterd_min(prmsl: np.ndarray, lat: np.ndarray, lon: np.ndarray):
# minimum value filter
filterd_prmsl = np.where(
ndimage.filters.minimum_filter(
prmsl, size=(18, 12), mode=('nearest', 'wrap')
) == prmsl
)
# spherical trigonometry
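# Great-circle distance via the spherical law of cosines:
# cos(d) = sin(y0) * sin(y1) + cos(y0) * cos(y1) * cos(dx),
# later converted to km with an assumed Earth radius of ~6400 km (np.arccos(cos_d) * 6400).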
dx_s = np.array([
np.deg2rad(lon[filterd_prmsl[1]] - lo)
for lo in lon
]) # (141, 14)
y0_s = np.deg2rad(lat) # (81)
y1_s = np.deg2rad(lat[filterd_prmsl[0]]) # (14)
cos_d_part1 = np.array([
np.sin(y0) * np.sin(y1_s)
for y0 in y0_s
])
cos_d_part2_ = np.array([
np.cos(y0) * np.cos(y1_s)
for y0 in y0_s
])
cos_d_part2 = np.array([
cos_d_part2_[i] * np.cos(dx)
for i in range(len(cos_d_part2_))
for dx in dx_s
]).reshape(len(cos_d_part2_), len(dx_s), len(filterd_prmsl[0]))
cos_d = np.array([
cos_d_part1[i] + cos_d_part2[i][j]
for i in range(len(cos_d_part1))
for j in range(len(dx_s))
]).T.reshape(len(filterd_prmsl[0]), len(cos_d_part2_), len(dx_s))
cos_d[cos_d > 1.0] = 1.0
d_from_min = np.arccos(cos_d) * 6400
return d_from_min
def define_cyclone(prmsl: np.ndarray, d_from_min: np.ndarray):
for d in d_from_min:
# print(d)
min_around = np.where(d <= 300)
prmsl_min_around_mean = prmsl[min_around].mean()
prmsl_min = prmsl[min_around].min()
n = np.argmin(prmsl[min_around])
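# A candidate is kept as a cyclone centre when the local minimum is at least 0.5 (presumably hPa)
# below the mean pressure within a 300 km radius.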
if prmsl_min_around_mean - prmsl_min >= 0.5:
print(min_around)
print(prmsl[min_around])
print(prmsl_min_around_mean)
print(prmsl[min_around][n])
print(prmsl_min)
print("-----")
return min_around
def test(prmsl, jp_lat, jp_lon, d):
#min_around = np.where(d <= 300, True, np.nan)
min_around = np.where(d <= 300)
import numpy as np
import scipy
from ._simsig_tools import _check_list,_rand_uniform
from ._generator_base import generator_base
#------------------------------------------------------------------------------------
__all__=['harmonics','Harmonics']
#------------------------------------------------------------------------------------
_SIGNAL_PARAMETER_DEFAULT = {'amp':1, 'f0':1, 'delta_f':0, 'delay':0,'phase0':0,'callback': None}
_SYSTEM_PARAMETER_DEFAULT = {'fs':10, 'length':512}
#------------------------------------------------------------------------------------
def harmonics(amplitude = [1],
f0 = [1],
delta_f = [0],
delay = [0],
phase0 = [0],
callback = [None],
fs=10,
length=512,
snr_db = None):
'''
Harmonic signal generation.
Parameters
------------
* amplitude: 1d ndarray,
amplitude of signals.
* f0: 1d ndarray,
initial frequency (carried frequency).
* delta_f: 1d ndarray,
delta_f frequency (frequency band).
* delay: 1d ndarray,
signal delay.
* phase0: 1d ndarray,
initial phase.
* callback: 1d ndarray,
callback for special operations on signals.
* fs: float,
is the sampling frequency.
* length: int,
is the signal length;
* snr_db: float,
signal-to-noise ratio in dB.
Returns:
-------------
* signal: 1d ndarray (complex),
harmonic signal.
Notes
---------
* Fs and N are the system parameters.
* Simulate harmonic (actually frequency modulated signal)
in the following form:
..math::
s = sum{f_i(a_i*exp[j2pi(f_0_i(t-tau_i)+
Delta_f_i(t-tau_i)^2/(N/fs))+j varphi_0_i])}+noises,
where:
* i = 0,.., are the signals number in superposition
(actually the number of the set initial frequencies(f0));
* a_i is the amplitude;
* f_0_i is the initial frequency;
* tau_i is the signal delay;
* Delta f_i is the frequency band (from f_0 to f_0+Delta_f);
* varphi_0_i is the initial phase
* f_i is the modulation callback;
* t is the time (up to N/fs);
* N is length (size) of signals samples;
* fs is the sampling frequency;
* noises are the gaussian white noises.
Example
-----------
import dsatools.generator
from dsatools.generator import callbacks
import dsatools.utilits as ut
#Example1----------------------------------------
signal = dsatools.generator.harmonics()
ut.probe(signal)
#Example2----------------------------------------
signal = dsatools.generator.harmonics(amplitude=[1],
f0=[1,2,3],
delta_f=[0.3],
delay=[0],
phase0=[0],
callback=[None],
fs=10,
length=512,
snr_db=None,)
ut.probe(signal)
#Example3----------------------------------------
cb1 = callbacks.harmonic_modulataion(amp_am=0.5,freq_am=9.5,phase_shift=0)
cb2 = callbacks.harmonic_modulataion(amp_am=0.7,freq_am=8.2,phase_shift=0)
signal = dsatools.generator.harmonics(amplitude=[1,1,0.4,0.3],
f0=[1,2,3,4],
delta_f=[0.2,1.3,],
delay =[0,0,0,4],
phase0=[0,1.2],
callback=[cb1,None,cb2],
fs=10,
length=512,
snr_db=20,)
ut.probe(signal)
'''
signal = Harmonics(fs, length)
signal.set_signal_parameters(amplitude = amplitude,
f0 = f0,
delta_f = delta_f,
delay = delay,
phase0 = phase0,
callback = callback,)
return signal.get_signal(snr_db = snr_db)
#------------------------------------------------------------------------------------
class Harmonics(generator_base):
'''
Harmonic signal generation.
Attributes
----------------
> system_parameters = {fs, length},
* fs: float,
is the sampling frequency.
* length: int,
is the signal length.
> signal_parameters = list of
{amp,f0,delta_f,delay,phase0,callback},
* amplitude: 1d ndarray,
amplitude of signal components.
* f0: 1d ndarray,
initial components frequency
(carried frequency).
* delta_f: 1d ndarray,
delta_f frequency components band.
* delay: 1d ndarray,
signal components delay.
* phase0: 1d ndarray,
initial phase of components.
* callback: 1d ndarray,
callback for special operations on signals.
Methods
-----------
* set_system_parameters;
* get_system_parameters;
* set_signal_parameters;
* add_signal_parameters;
* print_signal_parameters;
* get_signal.
Notes
---------
* Fs and N are the system parameters.
* Simulate harmonic (actually frequency modulated signal)
in the following form:
..math::
s = sum{f_i(a_i*exp[j2pi(f_0_i(t-tau_i)+
Delta_f_i(t-tau_i)^2/(N/fs))+j varphi_0_i])}+noises,
where:
* i = 0,.., are the signals number in superposition
(actually the number of the set initial frequencies(f0));
* a_i is the amplitude;
* f_0_i is the initial frequency;
* tau_i is the signal delay;
* Delta f_i is the frequency band (from f_0 to f_0+Delta_f);
* varphi_0_i is the initial phase
* f_i is the modulation callback;
* t is the time (up to N/fs);
* N is length (size) of signals samples;
* fs is the sampling frequency;
* noises are the gaussian white noises.
Example
-----------
import dsatools.generator
from dsatools.generator import callbacks
import dsatools.utilits as ut
cb1 = callbacks.harmonic_modulataion(amp_am=0.1,freq_am=0.5,phase_shift=0)
callbacks.probe_modulation(cb1,512)
cb2 = callbacks.pulse_modulataion(200,400)
callbacks.probe_modulation(cb2,512)
signal1 = dsatools.generator.Harmonics()
signal1.get_system_parameters()
signal1.set_signal_parameters(amplitude=[1,0.5],
f0=[1,2,3],
delta_f=[0.4,0.1],
delay=[0],
phase0=[0],
callback=[cb1,cb2],)
sig1 = signal1.get_signal(snr_db = 200)
ut.probe(sig1)
'''
#@override
def __init__(self,
fs = _SYSTEM_PARAMETER_DEFAULT['fs'],
length = _SYSTEM_PARAMETER_DEFAULT['length']
):
self._signal_parameters_dict_default = _SIGNAL_PARAMETER_DEFAULT.copy()
self._system_parameters_dict_default = _SYSTEM_PARAMETER_DEFAULT.copy()
self.set_system_parameters(fs, length)
self.set_signal_parameters_dict_default()
#------------------------------------------------------------------------------------
#@override
def set_system_parameters(self,
fs=_SYSTEM_PARAMETER_DEFAULT['fs'],
length = _SYSTEM_PARAMETER_DEFAULT['fs']):
'''
Set system parameters.
Parameters
-------------
* fs: float,
is the sampling frequency.
* length: int,
is the length of signal.
'''
self._system_parameters['fs'] = fs
self._system_parameters['length'] = length
#------------------------------------------------------------------------------------
#@override
def make_signal_parameters_dict(self,
amplitude = _SIGNAL_PARAMETER_DEFAULT['amp'],
f0 = _SIGNAL_PARAMETER_DEFAULT['f0'],
delta_f = _SIGNAL_PARAMETER_DEFAULT['delta_f'],
delay = _SIGNAL_PARAMETER_DEFAULT['delay'],
phase0 = _SIGNAL_PARAMETER_DEFAULT['phase0'],
callback = _SIGNAL_PARAMETER_DEFAULT['callback']):
'''
Make the signal parameters dictionary.
Parameters
------------
* amplitude: 1d ndarray,
amplitude of signal components.
* f0: 1d ndarray,
initial components frequency
(carried frequency).
* delta_f: 1d ndarray,
delta_f frequency components band.
* delay: 1d ndarray,
signal components delay.
* phase0: 1d ndarray,
initial phase of components.
* callback: 1d ndarray,
callback for special operations on signals.
Returns
----------
* signal_parameters_dict: dict,
signal parameters dictionary.
'''
signal_parameters_dict = self.get_signal_parameters_dict_default()
signal_parameters_dict['amp'] = amplitude
signal_parameters_dict['f0'] = f0
signal_parameters_dict['delta_f'] = delta_f
signal_parameters_dict['delay'] = delay
signal_parameters_dict['phase0'] = phase0
signal_parameters_dict['callback'] = callback
return signal_parameters_dict
#------------------------------------------------------------------------------------
#@override
def add_signal_parameters(self,
amplitude = [_SIGNAL_PARAMETER_DEFAULT['amp']],
f0 = [_SIGNAL_PARAMETER_DEFAULT['f0']],
delta_f = [_SIGNAL_PARAMETER_DEFAULT['delta_f']],
delay = [_SIGNAL_PARAMETER_DEFAULT['delay']],
phase0 = [_SIGNAL_PARAMETER_DEFAULT['phase0']],
callback = [_SIGNAL_PARAMETER_DEFAULT['callback']]):
'''
Add signal parameters.
Parameters
------------
* amplitude: 1d ndarray,
amplitude of signal components.
* f0: 1d ndarray,
initial components frequency
(carried frequency).
* delta_f: 1d ndarray,
delta_f frequency components band.
* delay: 1d ndarray,
signal components delay.
* phase0: 1d ndarray,
initial phase of components.
* callback: 1d ndarray,
callback for special operations on signals.
Notes
----------
* formats of the input: float, list, tuple.
* in the case of different length of array,
all will be resized to f0_s length.
'''
# main array - f0
f0 = _check_list(f0,-1)
len_list = len(f0) #required length for all other arrays
amplitude = _check_list(amplitude, len_list, 'last')
delta_f = _check_list(delta_f, len_list, 0)
delay = _check_list(delay, len_list, 0)
phase0 = _check_list(phase0, len_list, 0)
callback = _check_list(callback, len_list, 'None')
dict2add = []
for (amplitude_,
f0_,
delta_f_,
delay_,
phase0_,
callback_) in \
zip(amplitude,
f0,
delta_f,
delay,
phase0,
callback):
dict2add += [self.make_signal_parameters_dict(amplitude_,
f0_,
delta_f_,
delay_,
phase0_,
callback_)]
self.add_signal_parameters_dicts(dict2add)
#------------------------------------------------------------------------------------
#@override
def set_signal_parameters(self,
amplitude = [_SIGNAL_PARAMETER_DEFAULT['amp']],
f0 = [_SIGNAL_PARAMETER_DEFAULT['f0']],
delta_f = [_SIGNAL_PARAMETER_DEFAULT['delta_f']],
delay = [_SIGNAL_PARAMETER_DEFAULT['delay']],
phase0 = [_SIGNAL_PARAMETER_DEFAULT['phase0']],
callback = [_SIGNAL_PARAMETER_DEFAULT['callback']]):
'''
Set signal parameters.
Parameters
------------
* amplitude: 1d ndarray,
amplitude of signal components.
* f0: 1d ndarray,
initial components frequency
(carried frequency).
* delta_f: 1d ndarray,
delta_f frequency components band.
* delay: 1d ndarray,
signal components delay.
* phase0: 1d ndarray,
initial phase of components.
* callback: 1d ndarray,
callback for special operations on signals.
Notes
----------
* formats of the input: float, list, tuple.
* in the case of different length of array,
all will be resized to f0_s length.
'''
self.clear_signal_parameters()
self.add_signal_parameters(amplitude,
f0,
delta_f,
delay,
phase0,
callback)
#------------------------------------------------------------------------------------
#@override
def add_random_signal_parameters(self,
n_of_params = 1,
amplitude_range = [0,_SIGNAL_PARAMETER_DEFAULT['amp']],
f0_range = [0,_SIGNAL_PARAMETER_DEFAULT['f0']],
delta_f_range = [0,_SIGNAL_PARAMETER_DEFAULT['delta_f']],
delay_range = [0,_SIGNAL_PARAMETER_DEFAULT['delay']],
phase0_range = [0,_SIGNAL_PARAMETER_DEFAULT['phase0']]):
'''
Add random uniformly distributed signal_parameters.
Parameters
-------------
* n_of_params: int,
number of parameters.
* amplitude_range: [float,float],
ranges of amplitudes.
* f0_range: [float,float],
ranges of the initial frequencies
(carried frequencies).
* delta_f_range: [float,float],
ranges of the delta_f frequencies
(frequency bands).
* delay_range: [float,float],
ranges of the signal delays.
* phase0_range: [float,float],
ranges of the initial phases.
Notes
-------
* Callbacks are not applied in this function.
'''
scale_float = _SCALE_TO_FLOAT_
amplitude = _rand_uniform(amplitude_range, n_of_params, scale_float)
f0 = _rand_uniform(f0_range, n_of_params, scale_float)
delta_f = _rand_uniform(delta_f_range, n_of_params, scale_float)
delay = _rand_uniform(delay_range, n_of_params, scale_float)
phase0 = _rand_uniform(phase0_range, n_of_params, scale_float)
self.add_signal_parameters(amplitude,
f0,
delta_f,
delay,
phase0,
callback = n_of_params * [None])
#------------------------------------------------------------------------------------
#@override
def _sim_one_sig(self, sig_param):
'''
Simulate one harmonic (actually frequency modulated signal).
Parameters
-----------
* sig_param: dict,
dictionary of signal parameters, which include
(a,f_0,\Delta f,\tau,phi0,callback).
Returns
-----------
* sig: 1d ndarray (complex),
simulated signal.
Notes
---------
* Fs and N are system parameters.
* In harmonic signal \tau and \varphi_0/2/pi
play the same role.
* If callback is not None: s = callback(s)
(format of callback = f(x)),
if callback is None, it is not applied.
* Signal in form:
..math::
s = f(a*exp[j2pi(f_0(t-tau)+Delta_f(t-tau)^2/(N/fs))+j varphi_0]),
where:
* a is the amplitude;
* f_0 is the initial frequency;
* tau is the signal delay;
* Delta_f is the frequency band
(from f_0 to f_0+\Delta f);
* N is length (size) of signals samples;
* fs is the sampling frequency;
* t is the time (up to N/fs);
* varphi_0 is the initial phase
* f modulation callback.
'''
fs = self._system_parameters['fs']
N = self._system_parameters['length']
f0 = sig_param['f0']
incF = sig_param['delta_f']
tau = sig_param['delay']
phi0 = sig_param['phase0']
A = sig_param['amp']
callback = sig_param['callback']
t = np.arange(N)/fs - tau
Tm = N/fs
sig = A*np.exp(2j*np.pi*( f0*t + incF*np.square(t)/2/Tm )+ phi0*1j )
sig = np.asarray(sig, dtype=complex)  # builtin complex; the deprecated np.complex alias is avoided
# ---------------------------------
# Prepare the data
# ----------------------------------
import numpy as np
import pandas as pd
# train_x is the training data, train_y is the target variable, test_x is the test data
# They are held as pandas DataFrame / Series (sometimes as numpy arrays)
train = pd.read_csv('../input/sample-data/train_preprocessed.csv')
train_x = train.drop(['target'], axis=1)
train_y = train['target']
test_x = pd.read_csv('../input/sample-data/test_preprocessed.csv')
# Save the original state of the training and test data for re-use in the examples below
train_x_saved = train_x.copy()
test_x_saved = test_x.copy()
# Function that returns the training and test data
def load_data():
train_x, test_x = train_x_saved.copy(), test_x_saved.copy()
return train_x, test_x
# Store the numerical variables to be transformed in a list
num_cols = ['age', 'height', 'weight', 'amount',
'medical_info_a1', 'medical_info_a2', 'medical_info_a3', 'medical_info_b1']
# -----------------------------------
# Standardization
# -----------------------------------
# Load the data
train_x, test_x = load_data()
# -----------------------------------
from sklearn.preprocessing import StandardScaler
# Fit the standardization of multiple columns on the training data
scaler = StandardScaler()
scaler.fit(train_x[num_cols])
# Replace each column with the transformed data
train_x[num_cols] = scaler.transform(train_x[num_cols])
test_x[num_cols] = scaler.transform(test_x[num_cols])
# -----------------------------------
# Load the data
train_x, test_x = load_data()
# -----------------------------------
from sklearn.preprocessing import StandardScaler
# Fit the standardization of multiple columns on the concatenation of the training and test data
scaler = StandardScaler()
scaler.fit(pd.concat([train_x[num_cols], test_x[num_cols]]))
# Replace each column with the transformed data
train_x[num_cols] = scaler.transform(train_x[num_cols])
test_x[num_cols] = scaler.transform(test_x[num_cols])
# -----------------------------------
# Load the data
train_x, test_x = load_data()
# -----------------------------------
from sklearn.preprocessing import StandardScaler
# Standardize the training and test data separately (bad example)
scaler_train = StandardScaler()
scaler_train.fit(train_x[num_cols])
train_x[num_cols] = scaler_train.transform(train_x[num_cols])
scaler_test = StandardScaler()
scaler_test.fit(test_x[num_cols])
test_x[num_cols] = scaler_test.transform(test_x[num_cols])
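# Note: fitting separate scalers on train and test gives each set its own mean/std, so the same
# raw value maps to different standardized values in the two sets; hence the "bad example".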
# -----------------------------------
# Min-Max scaling
# -----------------------------------
# Load the data
train_x, test_x = load_data()
# -----------------------------------
from sklearn.preprocessing import MinMaxScaler
# Fit the Min-Max scaling of multiple columns on the training data
scaler = MinMaxScaler()
scaler.fit(train_x[num_cols])
# Replace each column with the transformed data
train_x[num_cols] = scaler.transform(train_x[num_cols])
test_x[num_cols] = scaler.transform(test_x[num_cols])
# -----------------------------------
# Log transform
# -----------------------------------
x = np.array([1.0, 10.0, 100.0, 1000.0, 10000.0])
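# Assumed continuation (not part of the original excerpt): typical ways the log transform is applied;
# x1, x2 and x3 are illustrative names only.
x1 = np.log(x)                        # plain logarithm
x2 = np.log1p(x)                      # log(1 + x), robust when values can be 0
x3 = np.sign(x) * np.log(np.abs(x))   # keeps the sign, handles negative values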
import numpy as np
import cv2
import argparse
from common import load_models, imshow
from face_common import get_landmarks, get_landmarks_points
def parse_args():
parser = argparse.ArgumentParser(
description="given two images of faces F1 and F2, swap the faces in those images"
)
parser.add_argument(
'-f1', '--face1',
required=True,
help='input face 1 image file'
)
parser.add_argument(
'-f2', '--face2',
required=True,
help='input face 2 image file'
)
parser.add_argument(
'-m', '--landmarks_model',
default="models/shape_predictor_68_face_landmarks.dat",
help='dlib landmarks shape predictor model path'
)
parser.add_argument(
'-o1', '--output1',
required=False,
help='file output for image1 result'
)
parser.add_argument(
'-o2', '--output2',
required=False,
help='file output for image2 result'
)
parser.add_argument(
'-s', '--show',
default=True,
help='shows the results on screen and waits a key'
)
args = parser.parse_args()
return args
# def draw_triangles(image, triangles):
#
# ret = image.copy()
# _, contours = cv2.findContours(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# for t in triangles:
# pt1 = (t[0], t[1])
# pt2 = (t[2], t[3])
# pt3 = (t[4], t[5])
# cv2.line(ret, pt1, pt2, color=(255, 0, 0), thickness=1)
# cv2.line(ret, pt1, pt3, color=(0, 255, 0), thickness=1)
# cv2.line(ret, pt2, pt3, color=(0, 0, 255), thickness=1)
#
# # cnt = triangles.reshape(-1, 6, 2).astype(np.int32)
# # #contours = np.array(contours).reshape((-1, 1, 2)).astype(np.int32)
# # cv2.drawContours(ret, cnt, -1, (0, 255, 127), 1)
# # cv2.imshow("img", ret)
# # cv2.waitKey()
#
# return ret
def draw_triangles(image, triplets, points):
ret = image.copy()
_, contours = cv2.findContours(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for triplet in triplets:
pt1 = points[triplet[0]]
pt2 = points[triplet[1]]
pt3 = points[triplet[2]]
cv2.line(ret, pt1, pt2, color=(255, 0, 0), thickness=1)
cv2.line(ret, pt1, pt3, color=(0, 255, 0), thickness=1)
cv2.line(ret, pt2, pt3, color=(0, 0, 255), thickness=1)
# cnt = triangles.reshape(-1, 6, 2).astype(np.int32)
# #contours = np.array(contours).reshape((-1, 1, 2)).astype(np.int32)
# cv2.drawContours(ret, cnt, -1, (0, 255, 127), 1)
# cv2.imshow("img", ret)
# cv2.waitKey()
return ret
def delunay(landmarks_points, fast=False):
np_points = np.array(landmarks_points, np.int32)
# creates a convex hull that encloses all the landmarks
# creates a box that encloses the convex hull
#rect1 = cv2.boundingRect(convexhull)
rect = cv2.boundingRect(np_points)
#x1, y1, w, h = rect
# ts = []
# create an empty image
# mask = np.zeros((image.shape[1], image.shape[0]), dtype='uint8')
# cv2.polylines(image, [convexhull], True, (255, 0, 0), 1)
# cv2.rectangle(image, (x1, y1), (x1 + w, y1 + h), (0, 255, 0), 1)
# cv2.imshow("image", image)
# cv2.waitKey()
subdiv = cv2.Subdiv2D(rect)
if fast:
convexhull = cv2.convexHull(np_points)
cvhp = list(convexhull)
cvhp.append(landmarks_points[33])
cvhp.append(landmarks_points[36])
cvhp.append(landmarks_points[45])
cvhp.append(landmarks_points[48])
cvhp.append(landmarks_points[54])
cvhp.append(landmarks_points[31])
cvhp.append(landmarks_points[35])
cvhp.append(landmarks_points[39])
cvhp.append(landmarks_points[42])
subdiv.insert(cvhp)
else:
subdiv.insert(landmarks_points)
triangles = subdiv.getTriangleList()
triangle_indices = []
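# Map each triangle's vertex coordinates back to landmark indices, so the same triangulation
# (expressed by index) can be reused on the landmarks of the other face.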
for t in triangles:
pt1 = (t[0], t[1])
pt2 = (t[2], t[3])
pt3 = (t[4], t[5])
idx1 = np.where((np_points == pt1).all(axis=1))[0][0]
idx2 = np.where((np_points == pt2).all(axis=1))[0][0]
idx3 = np.where((np_points == pt3).all(axis=1))[0][0]
# idx1 = np.argwhere(np_points == pt1)[0][0]
# idx2 = np.argwhere(np_points == pt2)[0][0]
# idx3 = np.argwhere(np_points == pt3)[0][0]
index = tuple(sorted([idx1, idx2, idx3]))
#index = (idx1, idx2, idx3)
triangle_indices.append(index)
return triangles, triangle_indices
#landmarks_points = [(pt.x, pt.y)]
# mask =
# points = np.array(landmarks_points, np.int32)
# convexhull = cv2.convexHull(points)
# #cv2.polylines(img, [convexhull], True, (255, 0, 0), 3)
# cv2.fillConvexPoly(mask, convexhull, 255)
# face_image_1 = cv2.bitwise_and(img, img, mask=mask)
# # Delaunay triangulation
# rect = cv2.boundingRect(convexhull)
# subdiv = cv2.Subdiv2D(rect)
# subdiv.insert(landmarks_points)
# triangles = subdiv.getTriangleList()
# triangles = np.array(triangles, dtype=np.int32)
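# Illustrative usage sketch for delunay()/draw_triangles() above (not part of the
# original script; the signatures of get_landmarks/get_landmarks_points from
# face_common are assumptions, as is the already-loaded BGR image `img`):
# landmarks = get_landmarks(img, args.landmarks_model)   # assumed signature
# points = get_landmarks_points(landmarks)               # list of (x, y) tuples
# triangles, triplets = delunay(points)
# debug = draw_triangles(img, triplets, points)
# cv2.imshow("triangulation", debug)
# cv2.waitKey()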
def warp_triangle(image1, image2, result1, result2, triplet, lp1, lp2, two_ways=True):
tr1 = [lp1[triplet[0]], lp1[triplet[1]], lp1[triplet[2]]]
tr2 = [lp2[triplet[0]], lp2[triplet[1]], lp2[triplet[2]]]
tr1 = np.array(tr1)
tr2 = np.array(tr2)
rect1 = cv2.boundingRect(tr1)
rect2 = cv2.boundingRect(tr2)
x1, y1, w1, h1 = rect1
x2, y2, w2, h2 = rect2
tl1 = (x1, y1)
tl2 = (x2, y2)
# removes offset to triangles, by subtracting its upper-left bounding box coordinate
centered_tr1 = tr1 - tl1
centered_tr2 = tr2 - tl2
ctr1 = np.float32(centered_tr1)
ctr2 = np.float32(centered_tr2)
T21 = cv2.getAffineTransform(ctr2, ctr1)
cropped_src_2 = image2[y2: y2 + h2, x2: x2 + w2]
cropped_tgt_1 = result1[y1: y1 + h1, x1: x1 + w1]
warped_2_1 = cv2.warpAffine(cropped_src_2, T21, dsize=(w1, h1), borderMode=cv2.BORDER_REFLECT101)
#ht2 = cv2.convertPointsToHomogeneous(centered_tr2)
#warped_2_1 = np.round(warped_2_1).astype(dtype='uint8')
#tt1 = np.round(np.dot(T21, centered_tr2))
# tt1 = cv2.transform(ht2, T21).reshape(-1, 2)
# diff = np.linalg.norm(tt1 - ctr1)
# if diff != 0:
# print("!differs!", diff)
mask1 = np.zeros((h1, w1, 3), np.uint8)
cv2.fillConvexPoly(mask1, centered_tr1, (255, 255, 255))
patch1 = ((1 - mask1/255.0) * cropped_tgt_1 + mask1/255.0 * warped_2_1).astype(dtype='uint8')
result1[y1: y1 + h1, x1: x1 + w1] = patch1
if two_ways:
T12 = cv2.getAffineTransform(ctr1, ctr2)
cropped_src_1 = image1[y1: y1 + h1, x1: x1 + w1]
warped_1_2 = cv2.warpAffine(cropped_src_1, T12, dsize=(w2, h2), borderMode=cv2.BORDER_REFLECT101)
cropped_tgt_2 = result2[y2: y2 + h2, x2: x2 + w2]
mask2 = | np.zeros((h2, w2, 3), np.uint8) | numpy.zeros |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function
# Importing manupulation packages
from astropy.io import fits
import numpy as np
import glob
from numpy.polynomial import chebyshev
from scipy import ndimage
from scipy.signal import medfilt2d
from astropy.convolution import Gaussian1DKernel, Gaussian2DKernel, convolve
import sys
import argparse
import os
# Plotting
import matplotlib; matplotlib.use('TkAgg')
import matplotlib.pyplot as pl
import seaborn; seaborn.set_style('ticks')
import copy
from util import *
class XSHcomb:
"""
Class to contain XSH spectroscopy combinations.
"""
def __init__(self, list_of_files, base_name, sky, synth_sky, sky2d=None):
"""
Instantiate fitsfiles. Input list of file-names to be combined.
"""
if len(list_of_files) == 0:
raise ValueError("Input file list empty")
self.list_of_files = list_of_files
self.list_of_skyfiles = sky
fitsfile, header = {}, {}
flux, error, bpmap = {}, {}, {}
seeing = {}
for ii, kk in enumerate(self.list_of_files):
fitsfile[ii] = fits.open(kk)
header[ii] = fitsfile[ii][0].header
flux[ii] = fitsfile[ii][0].data
error[ii] = fitsfile[ii][1].data
bpmap[ii] = fitsfile[ii][2].data
seeing[ii] = np.mean([header[ii]["HIERARCH ESO TEL AMBI FWHM START"], header[ii]["HIERARCH ESO TEL AMBI FWHM END"]])
if sky2d is not None:
flux[ii] += sky2d[ii]
self.FWHM = np.median(list(seeing.values()))
em_sky = []
for ii, kk in enumerate(self.list_of_skyfiles):
# fitsfile[ii] = fits.open(kk)
em_sky.append(np.median(fits.open(kk)[0].data, axis = 0))
self.fitsfile = fitsfile
self.header = header
self.flux = flux
self.error = error
self.bpmap = bpmap
# Constructs WCS
self.haxis = convert_air_to_vacuum(10.*(((np.arange(self.header[0]['NAXIS1'])) + 1 - self.header[0]['CRPIX1'])*self.header[0]['CDELT1']+self.header[0]['CRVAL1']))
self.vaxis = (np.arange(self.header[0]['NAXIS2']) - self.header[0]['CRPIX2'])*self.header[0]['CDELT2']+self.header[0]['CRVAL2']
if len(em_sky) == 0:
print("No sky-frame given ... Using science image collapsed in the spatial direction ...")
try:
# list() is needed around dict.values() so np.array can stack the frames (Python 3)
em_sky = np.sum(np.array(list(flux.values())), axis = 1)
except:
em_sky = None
self.em_sky = em_sky
self.base_name = base_name
self.synth_sky = synth_sky
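# Illustrative usage sketch (file patterns and synth_sky=None are assumptions,
# not taken from this module):
# files = glob.glob("reduced/*SCI_SLIT_FLUX_MERGE2D*.fits")
# skyfiles = glob.glob("reduced/*SKY_SLIT_MERGE2D*.fits")
# comb = XSHcomb(files, "OB1_UVB", sky=skyfiles, synth_sky=None)
# comb.combine_imgs(NOD=True, repeats=1)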
def combine_imgs(self, NOD=False, same=False, repeats=1):
"""
Combines X-shooter images.
Combines multiple X-shooter exposures using inverse-variance weighting. Tailored for combining STARE-mode reduced images taken in an ABBA nodding sequence.
Args:
NOD (bool): True to form nodding pairs before combination.
Returns:
fitsfile: fitsfile containing the combined flux, error and bad-pixel maps in consecutive extensions.
"""
print("Combining "+str(len(self.list_of_files))+" files,\n"+str(self.list_of_files)+"\nto file:\n"+self.base_name+".fits....")
img_nr = len(self.fitsfile)
img_nr_list = np.arange(img_nr)
pix_offsetx, pix_offsety = np.ones_like(img_nr_list), np.ones_like(img_nr_list)
naxis1, naxis2 = np.ones_like(img_nr_list), np.ones_like(img_nr_list)
exptimes = np.ones_like(img_nr_list)
ra, dec = [0]*len(img_nr_list), [0]*len(img_nr_list)
full_edge_mask = self.bpmap.copy()
for ii, kk in enumerate(self.fitsfile):
# Arrays to contain axis indices
naxis1[ii] = self.header[ii]['NAXIS1']
naxis2[ii] = self.header[ii]['NAXIS2']
exptimes[ii] = self.header[ii]['EXPTIME']
ra[ii], dec[ii] = float(self.header[ii]['RA']), float(self.header[ii]['DEC'])
ref_ra, ref_dec = ra[0], dec[0]
for ii, kk in enumerate(self.fitsfile):
try:
pix_offsetx[ii] = int(round(self.header[ii]['HIERARCH ESO SEQ CUMOFF X'] / self.header[ii]['CDELT1']))
pix_offsety[ii] = int(round(self.header[ii]['HIERARCH ESO SEQ CUMOFF Y'] / self.header[ii]['CDELT2']))
except KeyError:
try:
# Offset mode along slit for older observations
from astropy.coordinates import SkyCoord, SkyOffsetFrame
import astropy.units as u
point_ra = self.header[ii]['RA']*u.deg
point_dec = self.header[ii]['DEC']*u.deg
offset_ra = self.header[ii]['HIERARCH ESO SEQ CUMOFF RA']*u.arcsec
offset_dec = self.header[ii]['HIERARCH ESO SEQ CUMOFF DEC']*u.arcsec
center = SkyCoord(ra=point_ra, dec=point_dec, frame = self.header[ii]['RADECSYS'].lower())
off_ra = point_ra + offset_ra
off_dec = point_dec + offset_dec
other = SkyCoord(ra=off_ra, dec=off_dec, frame = self.header[ii]['RADECSYS'].lower())
offset = center.separation(other).arcsecond
# Assume offset is along slit axis
pix_offsetx[ii] = int(round(0 / self.header[ii]['CDELT1']))
pix_offsety[ii] = int(round(offset / self.header[ii]['CDELT2']))
except KeyError:
print("No header keyword: HIERARCH ESO SEQ CUMOFF X or HIERARCH ESO SEQ CUMOFF Y")
pix_offsetx[ii] = 0
pix_offsety[ii] = 0
if same:
# Wavelength step in velocity
midwl = (max(self.haxis) - min(self.haxis))/2
dv = 3e5*10*self.header[ii]['CDELT1']/midwl
pix_offsetx[ii] = int(round((self.header[ii]['HIERARCH ESO QC VRAD BARYCOR'] + (self.header[ii]['WAVECORR']-1)*3e5) / dv))
# # Assume object is centered
pix_offsety[ii] = int(round((max(naxis2)/2 - naxis2[ii]/2)))
# Pixel numbers in x- and y-direction
xs = np.arange(naxis1[ii]) + 1
ys = np.arange(naxis2[ii]) + 1
# Mask the spatial edge rows (edge_len pixels) of each frame.
edge_len = 2
if NOD:
edge_len = 0
edge_mask = (ys > max(ys) - edge_len) | (ys < min(ys) + edge_len)
full_edge_mask[ii] = np.tile(edge_mask , (len(xs), 1)).T
pix_offsetxmax = abs(max(pix_offsetx) - min(pix_offsetx))
pix_offsetymax = abs(max(pix_offsety) - min(pix_offsety))
# Defining size of out-array
v_size = max(naxis1) + pix_offsetxmax
h_size = max(naxis2) + pix_offsetymax
# Data storage
flux_cube = np.zeros((h_size, v_size, img_nr))
error_cube = np.zeros((h_size, v_size, img_nr))
bpmap_cube = np.ones((h_size, v_size, img_nr))
# Manually mask bad region in VIS arm
if self.header[ii]['HIERARCH ESO SEQ ARM'] == "VIS":
for ii, kk in enumerate(img_nr_list):
for xx, pp in enumerate(np.arange(11220, 11340, 1)):
self.bpmap[ii][int(round(26 - 0.2 * xx)):int(round(33 - 0.2 * xx)), pp] = 543
for ii, kk in enumerate(img_nr_list):
self.bpmap[ii] = self.bpmap[ii] + full_edge_mask[ii].astype("bool")*100
# Defining positional offset between the frames.
pos_v, pos_h = pix_offsety[kk], pix_offsetx[kk] # offset
# Finding the indices of the container in which to put image.
offv = pix_offsety[kk] - min(pix_offsety)
offh = pix_offsetx[kk] - min(pix_offsetx)
# Define slices where to put image
v_range1 = slice(offv, naxis2[ii] + offv)
h_range1 = slice(offh, naxis1[ii] + offh)
# b1 is the full-size (zero-filled) container and b2 is the input image; the edge mask and bad-pixel map are handled in the bpmap container below.
b1 = np.zeros((h_size, v_size))
b2 = self.flux[ii]
# Insert smaller (b3, input image) frame into larger frame (container)
b1[v_range1, h_range1] = b2
# Append to list containing flux images
flux_cube[:, :, ii] = b1
# Repeat for error extension
b3 = np.zeros((h_size, v_size))
b4 = self.error[ii]
b3[v_range1, h_range1] = b4
error_cube[:, :, ii] = b3
# Repeat for bad pixel map
b5 = np.ones((h_size, v_size))
b6 = self.bpmap[ii]
# Grow bad pixel regions !! Deprecated after update to pipeline version 2.8.3
# b6 = np.rint(convolve(np.array(self.bpmap[ii]), Gaussian2DKernel(0.3)))
b5[v_range1, h_range1] = b6
bpmap_cube[:, :, ii] = b5
# Mask 3-sigma outliers in the direction of the stack
m, s = np.ma.median(np.ma.array(flux_cube, mask=bpmap_cube), axis = 2).data, np.std(np.ma.array(flux_cube, mask=bpmap_cube), axis = 2).data
if self.header[ii]['HIERARCH ESO SEQ ARM'] == "NIR":
sigma_mask = 5
else:
sigma_mask = 3
l, h = np.tile((m - sigma_mask*s).T, (img_nr, 1, 1)).T, np.tile((m + sigma_mask*s).T, (img_nr, 1, 1)).T
bpmap_cube[(flux_cube < l) | (flux_cube > h)] = 666
# Form nodding pairs
if NOD:
if not repeats == 1:
# Smaller container
flux_cube_tmp = np.zeros((h_size, v_size, int(np.ceil(img_nr / repeats))))
error_cube_tmp = np.zeros((h_size, v_size, int(np.ceil(img_nr / repeats))))
bpmap_cube_tmp = np.zeros((h_size, v_size, int(np.ceil(img_nr / repeats))))
# Collapse in repeats
for ii, kk in enumerate(np.arange(int(np.ceil(img_nr / repeats)))):
# Make the lower and upper indices of the files to average over. If not all NOD positions have the same number of repeats, assume the last position is cut.
low, up = ii*repeats, min(img_nr, (ii+1)*repeats)
# Slice structure
subset = slice(low, up)
# Average over subset
flux_cube_tmp[:, :, ii], error_cube_tmp[:, :, ii], bpmap_cube_tmp[:, :, ii] = avg(flux_cube[:, :, subset], error_cube[:, :, subset], bpmap_cube[:, :, subset].astype("bool"), axis=2)
# Update number holders
img_nr_list = np.arange(int(np.ceil(img_nr / repeats)))
pix_offsety = pix_offsety[::repeats]
flux_cube, error_cube, bpmap_cube = flux_cube_tmp, error_cube_tmp, bpmap_cube_tmp
# Form the pairs [(A1-B1) - shifted(B1-A1)] and [(B2-A2) - shifted(A2-B2)] at positions 0, 2. Sets the other images to np.nan.
flux_cube, error_cube, bpmap_cube = form_nodding_pairs(flux_cube, error_cube, bpmap_cube, max(naxis2), pix_offsety)
# Introduce filter based on smoothed image
flux_filt, error_filt, bp_filt = | np.zeros_like(error_cube) | numpy.zeros_like |
####Please do not remove lines below####
from lmfit import Parameters
import numpy as np
import sys
import os
import math
sys.path.append(os.path.abspath('.'))
sys.path.append(os.path.abspath('./Functions'))
sys.path.append(os.path.abspath('./Fortran_routines/'))
from functools import lru_cache
####Please do not remove lines above####
####Import your modules below if needed####
# from xr_ref import parratt_numba
from numba import njit, prange
@njit(parallel=True,cache=True)
def parratt_numba(q,lam,d,rho,beta):
ref=np.ones_like(q)
refc=np.ones_like(q)*complex(1.0,0.0)
f1=16.0*np.pi*2.818e-5
f2=-32.0*np.pi**2/lam**2
Nl=len(d)
for j in range(len(q)):
r=complex(0.0,0.0)
for it in range(1,Nl):
i=Nl-it
qc1=f1*(rho[i-1]-rho[0])
qc2=f1*(rho[i]-rho[0])
k1=np.sqrt(complex(q[j]**2-qc1,f2*beta[i-1]))
k2=np.sqrt(complex(q[j]**2-qc2,f2*beta[i]))
X=(k1-k2)/(k1+k2)
fact1=complex(np.cos(k2.real*d[i]),np.sin(k2.real*d[i]))
fact2=np.exp(-k2.imag*d[i])
fact=fact1*fact2
r=(X+r*fact)/(1.0+X*r*fact)
ref[j]=np.abs(r)**2
refc[j]=r
return ref,r
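# The loop above implements the Parratt recursion r = (X + r*fact) / (1 + X*r*fact),
# with X = (k1 - k2) / (k1 + k2) and fact the complex phase factor across layer i.
# Minimal sanity-check sketch (assumed numbers, matching the default mpar below:
# a bare interface with rho = [0.0, 0.333] el/Angs^3 and no absorption):
# q = np.linspace(0.02, 0.5, 200)          # wave-vector transfer in 1/Angs
# lam = 12.3984 / 10.0                      # wavelength in Angs at E = 10 keV
# ref, r_last = parratt_numba(q, lam, np.array([0.0, 1.0]),
#                             np.array([0.0, 0.333]), np.array([0.0, 0.0]))
# ref holds |r|^2 at each q; r_last is the complex amplitude at the last q value.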
class XLayers_Triphasic: #Please put the class name same as the function name
def __init__(self,x=0.1,E=10.0,mpar={'Phase1':{'Layers':['top','bottom'],'d':[0.0,1.0],'rho':[0.0,0.333],'mu':[0.0,0.0],'sig':[0.0,3.0]},
'Phase2':{'Layers':['top','bottom'],'d':[0.0,1.0],'rho':[0.0,0.333],'mu':[0.0,0.0],'sig':[0.0,3.0]},
'Phase3':{'Layers':['top','bottom'],'d':[0.0,1.0],'rho':[0.0,0.333],'mu':[0.0,0.0],'sig':[0.0,3.0]}},
dz=0.5, rrf=True, fix_sig=False, qoff=0.0, yscale=1,cov1=0.33, cov2=0.33, bkg=0.0, coherrent=False, aveed=True):
"""
Calculates X-ray reflectivity from a system of multiple layers using Parratt formalism
x : array of wave-vector transfer along z-direction
E : Energy of x-rays in inverse units of x
dz : The thickness (Angstrom) of each layer for applying Parratt formalism
rrf : True for Fresnel-normalized reflectivity and False for just reflectivity
qoff : q-offset to correct the zero q of the instrument
cov1 : The coverage of Phase1; the value should be between 0 and 1
cov2 : The coverage of Phase2; the value should be between 0 and 1
yscale : a scale factor for R or R/Rf
bkg : Incoherent background
coherrent: True or False for coherent or incoherent addition of reflectivities from different phases
fix_sig : True or False for constraining (or not) all the roughness parameters to the bare-interface roughness
mpar : Dictionary of Phases where,
Layers: Layer description,
d: thickness of each layer in Angs,
rho: Electron density of each layer in el/Angs^3,
mu: Absorption length of each layer in 1/cm,
sig: roughness of interface separating each layer in Angs.
The upper and lower thickness should be always fixed. The roughness of the topmost layer should be always kept 0.
"""
if type(x)==list:
self.x=np.array(x)
else:
self.x=x
self.E=E
self.__mpar__=mpar
self.dz=dz
self.rrf=rrf
self.fix_sig=fix_sig
self.qoff=qoff
self.bkg=bkg
self.yscale=yscale
self.coherrent=coherrent
self.aveed=aveed
self.cov1=cov1
self.cov2=cov2
self.choices={'rrf':[True,False],'fix_sig': [True,False],'coherrent':[True,False],'aveed':[True,False]}
self.__d__={}
self.__rho__={}
self.__mu__={}
self.__sig__={}
self.__fit__=False
self.__mkeys__ = list(self.__mpar__.keys())
self.output_params = {'scaler_parameters': {}}
self.init_params()
def init_params(self):
"""
Define all the fitting parameters like
self.param.add('sig',value=0,vary=0)
"""
self.params=Parameters()
self.params.add('qoff', self.qoff, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
self.params.add('yscale', self.yscale, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
self.params.add('cov1', self.cov1, vary=0, min=0, max=1, expr=None, brute_step=0.1)
self.params.add('cov2', self.cov2, vary=0, min=0, max=1, expr=None, brute_step=0.1)
self.params.add('bkg', self.bkg, vary=0, min=0, max=np.inf, expr=None, brute_step=0.1)
for mkey in self.__mpar__.keys():
for key in self.__mpar__[mkey].keys():
if key!='Layers':
for i in range(len(self.__mpar__[mkey][key])):
self.params.add('__%s_%s_%03d' % (mkey,key, i), value=self.__mpar__[mkey][key][i], vary=0, min=0, max=np.inf, expr=None, brute_step=0.05)
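# Note: the loop above registers one lmfit Parameter per layer quantity using the
# name pattern '__<Phase>_<key>_<index>'; for example '__Phase1_rho_001' is the
# electron density of the second layer of Phase1 (example name for illustration).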
@lru_cache(maxsize=10)
def calcProfile(self,d,rho,mu,sig,phase,dz,zmin=None,zmax=None):
"""
Calculates the electron and absorption density profiles
"""
d = np.array(d)
rho = np.array(rho)
mu = np.array(mu)
sig = np.array(sig)
if self.fix_sig:
for i in range(1,len(sig)):
sig[i]=sig[1]
n=len(d)
if zmin is None and zmax is None:
maxsig=max(np.abs(np.max(sig[1:])),3)
Nlayers=int((np.sum(d[:-1])+10*maxsig)/dz)
halfstep=( | np.sum(d[:-1]) | numpy.sum |