| prompt (string, 19–879k chars) | completion (string, 3–53.8k chars) | api (string, 8–59 chars) |
|---|---|---|
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/master/LICENSE
from __future__ import absolute_import
import sys
import os
import json
import pytest
import numpy
import awkward1
def test_unknown():
a = awkward1.from_json("[[], [], []]", highlevel=False)
assert awkward1.to_list(a) == [[], [], []]
assert str(awkward1.type(a)) == "var * unknown"
assert awkward1.type(a) == awkward1.types.ListType(awkward1.types.UnknownType())
assert not awkward1.type(a) == awkward1.types.PrimitiveType("float64")
a = awkward1.from_json("[[], [[], []], [[], [], []]]", highlevel=False)
assert awkward1.to_list(a) == [[], [[], []], [[], [], []]]
assert str(awkward1.type(a)) == "var * var * unknown"
assert awkward1.type(a) == awkward1.types.ListType(awkward1.types.ListType(awkward1.types.UnknownType()))
a = awkward1.layout.ArrayBuilder()
a.beginlist()
a.endlist()
a.beginlist()
a.endlist()
a.beginlist()
a.endlist()
assert awkward1.to_list(a) == [[], [], []]
assert str(awkward1.type(a)) == "var * unknown"
assert awkward1.type(a) == awkward1.types.ListType(awkward1.types.UnknownType())
assert not awkward1.type(a) == awkward1.types.PrimitiveType("float64")
a = a.snapshot()
assert awkward1.to_list(a) == [[], [], []]
assert str(awkward1.type(a)) == "var * unknown"
assert awkward1.type(a) == awkward1.types.ListType(awkward1.types.UnknownType())
assert not awkward1.type(a) == awkward1.types.PrimitiveType("float64")
def test_getitem():
a = awkward1.from_json("[]")
a = awkward1.from_json("[[], [[], []], [[], [], []]]")
assert awkward1.to_list(a[2]) == [[], [], []]
assert awkward1.to_list(a[2, 1]) == []
with pytest.raises(ValueError) as excinfo:
a[2, 1, 0]
assert str(excinfo.value).endswith(" attempting to get 0, index out of range")
assert awkward1.to_list(a[2, 1][()]) == []
with pytest.raises(ValueError) as excinfo:
a[2, 1][0]
assert str(excinfo.value).endswith(" attempting to get 0, index out of range")
assert awkward1.to_list(a[2, 1][100:200]) == []
assert awkward1.to_list(a[2, 1, 100:200]) == []
assert awkward1.to_list(a[2, 1][ | numpy.array([], dtype=int) | numpy.array |
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
'''
@Usage   : generate the training, validation, and test datasets
@File    : diff_model.py
@Time    : 2020/11/25 16:18:22
@Author  : <NAME>
@Version : 1.0
@Contact : <EMAIL>
'''
from torch.utils.data import Dataset
import h5py
import torch
from torchvision import transforms
from torch.nn.functional import interpolate
import numpy as np
import gc
class Generate_data(Dataset):
def __init__(self, paths, channels=31, fis=63, nums=10, down_size=[16,16], up_size=[63,63]):
super(Generate_data, self).__init__()
torch.manual_seed(0)
torch.cuda.manual_seed(0)
self.HR = torch.zeros([len(paths), nums, channels, fis, fis])
#Traverse all data
for i in range(len(paths)):
img = h5py.File(paths[i], 'r')['rad']
img = | np.array(img) | numpy.array |
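# NOTE: a minimal, self-contained sketch of the low-/high-resolution pairing that the
# Generate_data class above appears to build from its down_size/up_size arguments (the
# snippet is truncated). The bicubic mode and the exact tensor shapes are assumptions
# for illustration, not the original pipeline.
import torch
from torch.nn.functional import interpolate

hr = torch.rand(1, 31, 63, 63)                                                # one 31-band 63x63 HR patch
lr = interpolate(hr, size=(16, 16), mode="bicubic", align_corners=False)     # downsample to 16x16
lr_up = interpolate(lr, size=(63, 63), mode="bicubic", align_corners=False)  # upsample back to the HR grid
print(hr.shape, lr.shape, lr_up.shape)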
import os
from glob import glob
from datetime import datetime
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from itertools import product
from random import randint, seed
from ops import *
from math import floor
from skimage import measure
def read_args_file(file_path):
args = {}
with open(file_path, 'r') as f:
while True:
line = f.readline()
if not line:
break
arg, arg_value = line.rstrip('\n').split(': ', 1)
args[arg] = arg_value
return args
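# NOTE: a small usage sketch for read_args_file, assuming an args.txt laid out as one
# "key: value" pair per line (the layout is inferred from the parser above; the keys are
# illustrative only).
if __name__ == "__main__":
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
        tmp.write("num_param: 2\nresolution_x: 96\nresolution_y: 128\n")
    print(read_args_file(tmp.name))  # {'num_param': '2', 'resolution_x': '96', 'resolution_y': '128'}
    os.remove(tmp.name)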
def copy_dataset_info(config):
from shutil import copyfile
copyfile( os.path.join(config.data_path, 'args.txt'), os.path.join(config.model_dir, 'data_args.txt') )
for i_d, data_type in enumerate(config.data_type):
copyfile( os.path.join(config.data_path, data_type[0]+'_range.txt'), os.path.join(config.model_dir, data_type[0]+'_range.txt') )
class TileConfig(object):
def __init__(self, tile_size, data_dim):
self.tile_size = tile_size
self.data_dim = data_dim
self.x_start = 0
self.x_end = 0
self.x_dim = tile_size[0]
self.y_start = 0
self.y_end = 0
self.y_dim = tile_size[1]
self.z_start = 0
self.z_end = 0
self.z_dim = tile_size[2]
self.cur_idx = 0
# returns the number of tiles along each axis (x, y, z)
def tile_count(self, stride=None):
if stride is None:
stride = self.tile_size
return [
1 + (self.data_dim[0]-self.tile_size[0])//stride[0],
1 + (self.data_dim[1]-self.tile_size[1])//stride[1],
1 + (self.data_dim[2]-self.tile_size[2])//stride[2]
]
def tile_count_linear(self, stride=None):
counts = self.tile_count(stride)
return counts[0] * counts[1] * counts[2]
def from_idx(self, idx, stride=None):
if stride is None:
stride = self.tile_size
x, y, z = self.tile_count(stride)
return [
(idx % x)*stride[0],
(idx//x % y)*stride[1],
(idx//(x*y))*stride[2]
]
def to_idx(self, pos, stride=None):
if stride is None:
stride = self.tile_size
x, y, z = self.tile_count(stride)
return int(pos[2])//stride[2] * x * y + int(pos[1])//stride[1] * x + int(pos[0])//stride[0]
# NOTE: renamed from cut_tile so it is not shadowed by the no-argument cut_tile defined below
def cut_tile_at(self, data, pos):
return data[...,
int(pos[2]):int(pos[2])+self.tile_size[2],
int(pos[1]):int(pos[1])+self.tile_size[1],
int(pos[0]):int(pos[0])+self.tile_size[0], :]
def cut_tile(self, data):
if (self.x_start < 0 or self.x_end > self.data_dim[0]) or (self.y_start < 0 or self.y_end > self.data_dim[1]):
res = data[..., max(self.z_start, 0):min(self.z_end, self.data_dim[2]), max(self.y_start, 0):min(self.y_end, self.data_dim[1]), max(self.x_start, 0):min(self.x_end, self.data_dim[0]), :]
pad = []
# append zero padding for batch dim and similar (e.g. time)
for d in range(data.ndim - 4):
pad.append([0,0])
pad.append([ abs(self.z_start) if self.z_start < 0 else 0,
self.z_end - self.data_dim[2] if self.z_end > self.data_dim[2] else 0]) # zpad
pad.append([ abs(self.y_start) if self.y_start < 0 else 0,
self.y_end - self.data_dim[1] if self.y_end > self.data_dim[1] else 0]) # ypad
pad.append([ abs(self.x_start) if self.x_start < 0 else 0,
self.x_end - self.data_dim[0] if self.x_end > self.data_dim[0] else 0]) # xpad
pad.append([0,0])
res = np.pad(res, pad, mode="constant", constant_values=float("inf"))
res_tmp = np.where(np.isinf(res[...,0:3]),0,res[...,0:3])
res = np.concatenate([res_tmp, np.where(np.isinf(res[...,3:]),-1,res[...,3:])], axis=-1)
else:
res = data[...,self.z_start:self.z_end, self.y_start:self.y_end, self.x_start:self.x_end, :]
return res
def cut_tile_2d(self, data):
if (self.x_start < 0 or self.x_end > self.data_dim[0]) or (self.y_start < 0 or self.y_end > self.data_dim[1]):
res = data[..., max(self.y_start, 0):min(self.y_end, self.data_dim[1]), max(self.x_start, 0):min(self.x_end, self.data_dim[0]), :]
pad = []
# append zero padding for batch dim and similar (e.g. time)
for d in range(data.ndim - 3):
pad.append([0,0])
pad.append([ abs(self.y_start) if self.y_start < 0 else 0,
self.y_end - self.data_dim[1] if self.y_end > self.data_dim[1] else 0]) # ypad
pad.append([ abs(self.x_start) if self.x_start < 0 else 0,
self.x_end - self.data_dim[0] if self.x_end > self.data_dim[0] else 0]) # xpad
pad.append([0,0])
res = np.pad(res, pad, mode="constant", constant_values=float("inf"))
res_tmp = np.where(np.isinf(res[...,0:2]),0,res[...,0:2])
res_tmp2 = np.where(np.isinf(res[...,2:]),-1,res[...,2:])
res = np.concatenate([res_tmp, res_tmp2], axis=-1)
else:
res = data[..., self.y_start:self.y_end, self.x_start:self.x_end, :]
return res
def set_constant(self, data, data_dim_slice, constant): # call with e.g. slice(0,1,1)
data[..., max(self.z_start, 0):min(self.z_end, self.data_dim[2]), max(self.y_start, 0):min(self.y_end, self.data_dim[1]), max(self.x_start, 0):min(self.x_end, self.data_dim[0]), data_dim_slice] = constant
return data
def set_constant_2d(self, data, data_dim_slice, constant):
data[..., max(self.y_start, 0):min(self.y_end, self.data_dim[1]), max(self.x_start, 0):min(self.x_end, self.data_dim[0]), data_dim_slice] = constant
return data
# picks a random tile and stores its bounds on the instance
def generateRandomTile(self, out_of_bounds_fac=0):
self.x_start = randint(
-int(self.tile_size[0]/out_of_bounds_fac) if out_of_bounds_fac > 0 else 0,
self.data_dim[0] - int(self.tile_size[0]/out_of_bounds_fac) if out_of_bounds_fac > 0 else self.data_dim[0]-self.tile_size[0])
self.y_start = randint(
-int(self.tile_size[1]/out_of_bounds_fac) if out_of_bounds_fac > 0 else 0,
self.data_dim[1] - int(self.tile_size[1]/out_of_bounds_fac) if out_of_bounds_fac > 0 else self.data_dim[1]-self.tile_size[1])
self.z_start = randint(
-int(self.tile_size[2]/out_of_bounds_fac) if out_of_bounds_fac > 0 else 0,
self.data_dim[2] - int(self.tile_size[2]/out_of_bounds_fac) if out_of_bounds_fac > 0 else self.data_dim[2]-self.tile_size[2])
self.x_end = self.x_start + self.x_dim
self.y_end = self.y_start + self.y_dim
self.z_end = self.z_start + self.z_dim
# advances to the next tile in multiples of tile_size; returns False once all tiles were visited
def getNextTile(self):
if self.cur_idx < self.tile_count_linear(self.tile_size):
pos = self.from_idx(self.cur_idx)
self.x_start = pos[0]
self.x_end = self.x_start + self.x_dim
self.y_start = pos[1]
self.y_end = self.y_start + self.y_dim
self.z_start = pos[2]
self.z_end = self.z_start + self.z_dim
self.cur_idx += 1
return True
else:
self.cur_idx = 0
return False
def print(self):
print("({}:{}, {}:{}, {}:{})".format(self.x_start, self.x_end, self.y_start, self.y_end, self.z_start, self.z_end))
### ============ Class ==============
class BatchManager(object):
def __init__(self, config, sequence_length, prediction_window, data_args_path=None):
self.rng = np.random.RandomState(config.random_seed)
np.random.seed(config.random_seed)
self.root = config.data_path
self.config = config
seed(config.random_seed)
if data_args_path:
self.args = read_args_file(data_args_path)
else:
self.args = read_args_file(os.path.join(self.root, 'args.txt'))
self.is_3d = config.is_3d
self.c_num = int(self.args['num_param'])
self.paths = [[] for _ in range(len(config.data_type))]
assert self.c_num >= 2, ("At least 'num_scenes' and 'num_frames' must be given")
num_frames = int(self.args['num_frames'])
for i_d, data_type in enumerate(config.data_type):
self.paths[i_d] = sorted(glob("{}/{}/*".format(self.root, data_type[0])),
key=lambda path: int(os.path.basename(path).split('_')[0])*num_frames+\
int(os.path.basename(path).split('_')[1].split('.')[0]))
# make tuple out of paths (e.g. [(v0, d0), (v1, d1)])
self.paths = list(zip(*self.paths))
self.num_samples = len(self.paths)
#assert(self.num_samples > 0)
self.dataset_valid = self.num_samples > 0
# when empty dataset should be read (e.g. for model creation)
self.num_samples = max(self.num_samples, 1)
self.batch_size = config.batch_size
self.epochs_per_step = self.batch_size / float(self.num_samples) # per epoch
self.random_indices = np.arange(self.num_samples)
np.random.shuffle(self.random_indices)
self.data_type = config.data_type
depth = []
for data_type in config.data_type:
if data_type == 'velocity':
if self.is_3d: depth.append(3)
else: depth.append(2)
else:
depth.append(1)
self.data_res_x = int(self.args["resolution_x"])
self.data_res_y = int(self.args["resolution_y"])
self.data_res_z = int(self.args["resolution_z"])
self.depth = depth
self.sequence_length = sequence_length
self.w_num = prediction_window
self.z_num = config.z_num
self.time_step = float(self.args["time_step"])
self.use_tiles = self.data_res_x != self.config.res_x or self.data_res_y != self.config.res_y or self.data_res_z != self.config.res_z
self.tile_generator = None
try:
self.tiles_per_sample = self.config.tiles_per_sample
self.tiles_use_global = self.config.tiles_use_global
except AttributeError:
self.tiles_per_sample = 4
self.tiles_use_global = False
try:
self.tile_scale = self.config.tile_scale
except AttributeError:
self.tile_scale = 1
try:
self.tile_multitile_border = self.config.tile_multitile_border
except AttributeError:
self.tile_multitile_border = 0
if self.use_tiles:
print("WARNING: use_tiles is activated since network resolution is different from dataset resolution ({},{},{}) <-> ({},{},{})".format(self.config.res_x, self.config.res_y, self.config.res_z, self.data_res_x, self.data_res_y, self.data_res_z))
self.tile_generator = TileConfig([self.config.res_x*self.tile_scale, self.config.res_y*self.tile_scale, self.config.res_z*self.tile_scale if self.is_3d else self.config.res_z], [self.data_res_x, self.data_res_y, self.data_res_z])
concat_depth = 0
for depth_ in self.depth:
concat_depth += depth_
if self.is_3d:
self.feature_dim = [self.w_num, self.config.res_z, self.config.res_y, self.config.res_x, concat_depth]
else:
self.feature_dim = [self.w_num, self.config.res_y, self.config.res_x, concat_depth]
self.x_range = []
self.data_type_normalization = {}
for i_d, data_type in enumerate(self.data_type):
r = np.loadtxt(os.path.join(os.path.dirname(data_args_path) if data_args_path else self.root, data_type[0]+'_range.txt'))
self.x_range.append(max(abs(r[0]), abs(r[1])))
self.data_type_normalization[data_type] = max(abs(r[0]), abs(r[1]))
self.y_range = []
self.y_num = []
for i in range(self.c_num):
p_name = self.args['p%d' % i]
p_min = float(self.args['min_{}'.format(p_name)])
p_max = float(self.args['max_{}'.format(p_name)])
p_num = int(self.args['num_{}'.format(p_name)])
self.y_range.append([p_min, p_max])
self.y_num.append(p_num)
# support for old scenes that do not explicitly state the position as control param
if len(self.y_range) <= 2 and self.args.get("min_src_pos"):
p_min = float(self.args['min_src_pos'])
p_max = float(self.args['max_src_pos'])
self.y_range.append([p_min, p_max])
self.y_num.append(self.y_num[-1])
vr = np.loadtxt(os.path.join(os.path.dirname(data_args_path) if data_args_path else self.root, 'v_range.txt'))
self.v_range = max(abs(vr[0]), abs(vr[1]))
self.to_v_ratio = []
for i_d, data_type in enumerate(self.data_type):
self.to_v_ratio.append(self.x_range[i_d] / self.v_range)
self.supervised_param_count = len(self.y_range) - 2
print("Dataset x_range: {}".format(self.x_range))
print("Dataset y_range: {}".format(self.y_range))
#--------------------------------------------
@property
def num_scenes(self):
return self.y_num[0]
#--------------------------------------------
@property
def num_frames(self):
return self.y_num[1]
#--------------------------------------------
def validation_start_index(self, validation_split=0.1, file_based=True):
if file_based:
return floor(self.num_samples * (1.0 - validation_split))
else:
val_scene_count = max(1.0, floor(self.num_scenes * validation_split))
return int(self.num_samples - self.num_frames * val_scene_count)
#------------------------------------------------------------------------------------------------
def steps_per_epoch(self, batch_size, validation_split=0.1, validation=False):
""" number of batches to train on. can be used in fit_generator """
assert self.dataset_valid, "Dataset was created with no samples..."
scene_count = self.y_num[0]
frame_count = self.y_num[1]
num_draws = scene_count * ( frame_count - self.sequence_length + 1)
num_draws = floor(num_draws * validation_split) if validation else floor(num_draws * (1.0 - validation_split))
if self.use_tiles:
num_draws *= self.tiles_per_sample
return int(num_draws / batch_size)
#------------------------------------------------------------------------------------------------
def generator_ae(self, batch_size, validation_split=0.1, validation=False, multitile=False):
""" generator for use with keras __fit_generator__ function. runs in its own thread """
assert self.dataset_valid, "Dataset was created with no samples..."
start_index = lambda: self.validation_start_index(validation_split) if validation else 0
index_cond = lambda idx: idx < self.num_samples if validation else idx < floor(self.num_samples * (1.0 - validation_split))
index = start_index()
while True:
x = []
y = []
while len(x) < batch_size:
if not index_cond(index):
index = start_index()
random_idx = self.random_indices[index]
if not self.sample_is_valid_for_timewindow(random_idx):
index += 1
continue
file_name = []
dir_name = []
cur_paths = self.paths[random_idx]
for i_d, data_type in enumerate(self.data_type):
file_name.append(cur_paths[i_d])
dir_name.append(os.path.dirname(file_name[-1]))
idx = os.path.basename(file_name[-1]).split('.')[0].split('_')
def getSequenceData(self, file_name, dir_name, idx):
t = int(idx[1])
x__ = []
y__ = []
for i in range(self.sequence_length):
t_ = t+i
x_ = None
y_ = None
for i_d, data_type in enumerate(self.data_type):
file_name = os.path.join(dir_name[i_d], idx[0] + '_%d.npz' % t_)
x_t, y_t = preprocess(file_name, data_type, self.x_range[i_d], self.y_range, den_inflow="density" in self.data_type)
if x_ is None:
x_ = x_t
else:
x_ = np.concatenate((x_,x_t), axis=-1)
if y_ is None:
y_ = y_t
# The following is not necessary, since it only contains the supervised part (-> it is equal for all data types)
#else:
# y_ = np.concatenate((y_,y_t), axis=-1)
x__.append(x_)
y__.append(y_)
return x__, y__
if self.use_tiles:
x__, y__ = getSequenceData(self, file_name, dir_name, idx)
x__ = np.array(x__, dtype=np.float32)
tile_count = 0
while tile_count < self.tiles_per_sample:
# get also tiles with empty parts on the borders
self.tile_generator.generateRandomTile(out_of_bounds_fac=3)
if x__[0].ndim == 4:
x_tile = self.tile_generator.cut_tile(x__)
else:
x_tile = self.tile_generator.cut_tile_2d(x__)
# check if something is happening in the tile
tile_dim = self.tile_generator.x_dim * self.tile_generator.y_dim * self.tile_generator.z_dim
if np.sum(x_tile[int(self.sequence_length / 2), ..., -1]) / tile_dim < -0.99:
continue
# Append global information
if self.tiles_use_global:
x_mult = int(self.tile_generator.data_dim[0] / self.tile_generator.tile_size[0])
y_mult = int(self.tile_generator.data_dim[1] / self.tile_generator.tile_size[1])
z_mult = int(self.tile_generator.data_dim[2] / self.tile_generator.tile_size[2])
tile_flag_shape = list(x__.shape)
tile_flag_shape[-1] = 1
if x__[0].ndim == 4:
tile_flag = np.zeros(tile_flag_shape)
tile_flag = self.tile_generator.set_constant(tile_flag, slice(0,None,1), 1)
# 3:4 -> capture only density part
x__downscale = measure.block_reduce(x__[...,3:4], (1, z_mult, y_mult, x_mult, 1), np.mean)
tile_flag_downscale = measure.block_reduce(tile_flag, (1, z_mult, y_mult, x_mult, 1), np.mean)
else:
tile_flag = np.zeros(tile_flag_shape)
tile_flag = self.tile_generator.set_constant_2d(tile_flag, slice(0,None,1), 1)
# 2:3 -> capture only density part
x__downscale = measure.block_reduce(x__[...,2:3], (1, y_mult, x_mult, 1), np.mean)
tile_flag_downscale = measure.block_reduce(tile_flag, (1, y_mult, x_mult, 1), np.mean)
x_tile = np.append(x_tile, x__downscale, axis=-1)
x_tile = np.append(x_tile, tile_flag_downscale, axis=-1)
x.append(x_tile)
y.append(y__)
tile_count += 1
else:
x__, y__ = getSequenceData(self, file_name, dir_name, idx)
x.append(x__)
y.append(y__)
index += 1
x = np.array(x, dtype=np.float32)
if x.shape[1] == 1:
x = np.squeeze(x, axis=1)
y = np.array(y, dtype=np.float32)
if y.shape[1] == 1:
y = np.squeeze(y, axis=1)
# AE: y = x
if self.use_tiles:
global_tiles_endmarker = -2 if self.tiles_use_global else None
if multitile and self.tile_multitile_border > 0:
if self.is_3d:
assert False, "Not implemented!"
else:
border_region_start = self.tile_generator.tile_size[0] // 2 - 1 - self.tile_multitile_border
border_region_end = self.tile_generator.tile_size[0] //2 + self.tile_multitile_border
x_border = x[:batch_size, :, border_region_start:border_region_end, :global_tiles_endmarker]
border_region_start = self.tile_generator.tile_size[1] // 2 - 1 - self.tile_multitile_border
border_region_end = self.tile_generator.tile_size[1] //2 + self.tile_multitile_border
y_border = x[:batch_size, border_region_start:border_region_end, :, :global_tiles_endmarker]
yield x[:batch_size], [x[:batch_size,...,:global_tiles_endmarker], y[:batch_size], y_border, x_border]
else:
yield x[:batch_size], [x[:batch_size,...,:global_tiles_endmarker], y[:batch_size]]
else:
yield x[:batch_size], [x[:batch_size], y[:batch_size]]
x = x[batch_size:]
y = y[batch_size:]
#------------------------------------------------------------------------------------------------
def generator_ae_tile_sequence(self, batch_size, validation_split=0.1, validation=False, ls_split_loss=False, advection_loss=False):
""" generator for use with keras __fit_generator__ function. runs in its own thread """
assert self.dataset_valid, "Dataset was created with no samples..."
pred_shape = [batch_size] + self.feature_dim[1:]
pred_dummy = np.zeros(pred_shape, dtype=np.float32)
if ls_split_loss:
pred_dummy_ls_split = np.zeros((batch_size, self.z_num), dtype=np.float32)
gen_ae = self.generator_ae(batch_size, validation_split=validation_split, validation=validation)
while True:
input_list = []
output_list = []
input_array, [_, _] = next(gen_ae)
# x = np.random.rand(80, 4, 128, 96, 2); (b,s,y,x,c)
# y = [np.random.rand(80, 128, 96, 2), np.random.rand(80, 2, 1), np.random.rand(80, 4, 32)]
# transform to tile based indexing (b,s,t,y/3,x/3,c)
tile_dim_x = int(self.tile_generator.tile_size[0] / 3)
tile_dim_y = int(self.tile_generator.tile_size[1] / 3)
input_tiles = None
for y_i in range(0,3):
for x_i in range(0,3):
if input_tiles is None:
input_tiles = np.expand_dims(input_array[:, :, y_i*tile_dim_y:(y_i+1)*tile_dim_y, x_i*tile_dim_x:(x_i+1)*tile_dim_x], axis=2)
else:
input_tiles = np.concatenate([input_tiles, np.expand_dims(input_array[:, :, y_i*tile_dim_y:(y_i+1)*tile_dim_y, x_i*tile_dim_x:(x_i+1)*tile_dim_x], axis=2)], axis=2)
# with w = 1 -> t0, t1, t2 is needed
input_v_d_t0_t1 = input_tiles[:, :self.w_num+1]
input_list.append(input_v_d_t0_t1)
v_d_t1_gt = input_tiles[:, self.w_num, 4]
output_list.append(v_d_t1_gt)
if advection_loss:
d_t2_gt = input_tiles[:, self.w_num+1, 4, ..., 3 if self.is_3d else 2]
d_t2_gt = d_t2_gt[...,np.newaxis]
output_list.append(d_t2_gt)
yield input_list, output_list
#------------------------------------------------------------------------------------------------
def generator_ae_sequence(self, batch_size, validation_split=0.1, validation=False, decode_predictions=False, ls_prediction_loss=False, ls_split_loss=False, train_prediction_only=False, advection_loss=False):
""" generator for use with keras __fit_generator__ function. runs in its own thread """
assert self.dataset_valid, "Dataset was created with no samples..."
if decode_predictions:
pred_shape = [batch_size] + self.feature_dim[1:]
pred_dummy = np.zeros(pred_shape, dtype=np.float32)
else:
pred_dummy = np.zeros((batch_size, (self.sequence_length-self.w_num) * 2, self.z_num), dtype=np.float32)
if ls_prediction_loss:
pred_dummy_ls = np.zeros((batch_size, (self.sequence_length-self.w_num) * 2, self.z_num), dtype=np.float32)
if ls_split_loss:
pred_dummy_ls_split = np.zeros((batch_size, self.z_num), dtype=np.float32)
gen_ae = self.generator_ae(batch_size, validation_split=validation_split, validation=validation)
while True:
input_array, [_, p] = next(gen_ae)
# x = np.random.rand(80, 4, 128, 96, 2)
# y = [np.random.rand(80, 128, 96, 2), np.random.rand(80, 2, 1), np.random.rand(80, 4, 32)]
#yield [input_array, np.zeros((batch_size, 512)), np.zeros((batch_size, 512)), np.zeros((batch_size, 512)), np.zeros((batch_size, 512))], [input_array[:,0], p[:,0], pred_dummy]
input_array_w_passive = input_array[..., :min(4 if self.is_3d else 3, input_array.shape[-1])]
if "inflow" in self.data_type:
input_array_inflow = input_array[..., -1:]
if train_prediction_only:
output_array = []
else:
output_array = [input_array_w_passive[:,0]]
if decode_predictions:
if self.config.only_last_prediction:
output_array.append(input_array_w_passive[:,-1:])
else:
output_array.append(input_array_w_passive[:,-(self.sequence_length-self.w_num):])
else:
output_array.append(pred_dummy)
if not train_prediction_only:
output_array.append(p[:,0])
if ls_prediction_loss:
output_array.append(pred_dummy_ls)
if ls_split_loss and not train_prediction_only:
output_array.append(pred_dummy_ls_split)
output_array.append(pred_dummy_ls_split)
if advection_loss:
# ranges from w_num+1 to rec_pred -> current gt + 1
# extract only GT values of passive quantity
if self.config.only_last_prediction:
output_array.append(input_array_w_passive[:,-1:, ..., -1:])
else:
output_array.append(input_array_w_passive[:, -(self.sequence_length - (self.w_num+1)):, ..., -1:])
if "inflow" in self.data_type:
input_array_inflow = self.denorm(input_array_inflow, "inflow")
yield [input_array_w_passive, p, input_array_inflow], output_array
else:
yield [input_array, p], output_array
#------------------------------------------------------------------------------------------------
def generator_ae_sequence_clean(self, batch_size, validation_split=0.1, validation=False, decode_predictions=False, ls_prediction_loss=False):
""" generator for use with keras __fit_generator__ function. runs in its own thread """
assert self.dataset_valid, "Dataset was created with no samples..."
if decode_predictions:
pred_shape = [batch_size] + self.feature_dim[1:]
pred_dummy = np.zeros(pred_shape, dtype=np.float32)
else:
pred_dummy = np.zeros((batch_size, (self.sequence_length-self.w_num) * 2, self.z_num), dtype=np.float32)
if ls_prediction_loss:
pred_dummy_ls = np.zeros((batch_size, (self.sequence_length-self.w_num) * 2, self.z_num), dtype=np.float32)
gen_ae = self.generator_ae(batch_size, validation_split=validation_split, validation=validation)
while True:
input_array, [_, p] = next(gen_ae)
output_array = [input_array[:,0]]
output_array.append(pred_dummy)
output_array.append(p[:,0])
output_array.append(p[:,0])
yield [input_array, p], output_array
#------------------------------------------------------------------------------------------------
def generator_ae_split(self, batch_size, validation_split=0.1, validation=False):
""" generator for use with keras __fit_generator__ function. runs in its own thread """
assert self.dataset_valid, "Dataset was created with no samples..."
pred_dummy = np.zeros((batch_size, self.z_num), dtype=np.float32)
gen_ae = self.generator_ae(batch_size, validation_split=validation_split, validation=validation)
while True:
input_array, [_, p] = next(gen_ae)
output_array = [input_array, p, pred_dummy, pred_dummy]
yield input_array, output_array
#------------------------------------------------------------------------------------------------
def generator_ae_crossmodal(self, batch_size, validation_split=0.1, validation=False):
""" generator for use with keras __fit_generator__ function. runs in its own thread """
assert self.dataset_valid, "Dataset was created with no samples..."
pred_dummy = np.zeros((batch_size, self.z_num), dtype=np.float32)
gen_ae = self.generator_ae(batch_size, validation_split=validation_split, validation=validation)
while True:
input_array, [_, p] = next(gen_ae)
output_array = [input_array, p, p, input_array]
yield input_array, output_array
#------------------------------------------------------------------------------------------------
def sample_is_valid_for_timewindow(self, id, dt=0):
file_name = self.paths[id][dt]
filename = os.path.basename(file_name).split('.')[0]
idx = filename.split('_')
t = int(idx[1])
max_frame = self.y_range[1][1]
if t <= max_frame - self.sequence_length + 1:
return True
return False
#------------------------------------------------------------------------------------------------
def sample(self, num, validation_split=0.1, validation=False, file_based=True):
val_start_idx = self.validation_start_index(validation_split, file_based=file_based)
max_idx = len(self.paths)
choice_range = max_idx - val_start_idx if validation else val_start_idx
idx = self.rng.choice(choice_range, num).tolist()
offset = val_start_idx if validation else 0
return [self.paths[i+offset] for i in idx]
#------------------------------------------------------------------------------------------------
def to_vel(self, x, dt=0):
assert dt == 0, ("Check this dt value in to_vel function")
return x*self.to_v_ratio[dt]
#------------------------------------------------------------------------------------------------
def denorm_vel(self, x):
x *= self.v_range
return x
#------------------------------------------------------------------------------------------------
def norm(self, x, data_type, as_layer=False):
assert data_type in self.config.data_type, ("data_type {} not found in config.data_type {}".format(data_type, self.config.data_type))
def _norm_f(x, data_type, fac):
if data_type == "density":
x = (x * 2.0) - 1.0
else:
x = x / fac
return x
if as_layer:
from keras.layers import Lambda
x = Lambda(_norm_f, arguments={'data_type': data_type, 'fac': self.data_type_normalization[data_type]})(x)
else:
x = _norm_f(x, data_type, self.data_type_normalization[data_type])
return x
#------------------------------------------------------------------------------------------------
def denorm(self, x, data_type, as_layer=False):
assert data_type in self.config.data_type, ("data_type {} not found in config.data_type {}".format(data_type, self.config.data_type))
def _denorm_f(x, data_type, fac):
if data_type == "density":
x = (x + 1.0) * 0.5
else:
x = x * fac
return x
if as_layer:
from keras.layers import Lambda
x = Lambda(_denorm_f, arguments={'data_type': data_type, 'fac': self.data_type_normalization[data_type]})(x)
else:
x = _denorm_f(x, data_type, self.data_type_normalization[data_type])
return x
#------------------------------------------------------------------------------------------------
def batch_with_name(self, b_num, validation_split=0.1, validation=False, randomized=True, file_based=True, adjust_to_batch=False, data_types=["velocity", "density", "levelset", "inflow"], use_tiles=False):
if adjust_to_batch:
paths = self.paths[:int(len(self.paths) / b_num) * b_num]
else:
paths = self.paths
assert len(paths) % b_num == 0, "Length: {}; Batch Size: {}".format(len(paths), b_num)
x_batch = []
y_batch = []
sup_params_batch = []
val_start_idx = self.validation_start_index(validation_split, file_based=file_based) if validation else 0
while True:
for i, filepath in enumerate( self.sample(b_num, validation_split=validation_split, file_based=file_based, validation=validation) if randomized else paths[val_start_idx:] ):
x = None
sup_params = None
for i_d, data_type in enumerate(self.data_type):
if data_type not in data_types:
continue
x_, sup_params = preprocess(filepath[i_d], data_type, self.x_range[i_d], self.y_range)
if x is None:
x = x_
else:
x = np.concatenate((x,x_), axis=-1)
if use_tiles and self.tile_generator is not None:
self.tile_generator.generateRandomTile()
if x.ndim == 4:
x_tile = x[self.tile_generator.z_start:self.tile_generator.z_end, self.tile_generator.y_start:self.tile_generator.y_end, self.tile_generator.x_start:self.tile_generator.x_end, :]
else:
x_tile = x[self.tile_generator.y_start:self.tile_generator.y_end, self.tile_generator.x_start:self.tile_generator.x_end, :]
# Append global information
if self.tiles_use_global:
x_mult = int(self.tile_generator.data_dim[0] / self.tile_generator.tile_size[0])
y_mult = int(self.tile_generator.data_dim[1] / self.tile_generator.tile_size[1])
z_mult = int(self.tile_generator.data_dim[2] / self.tile_generator.tile_size[2])
tile_flag_shape = list(x.shape)
tile_flag_shape[-1] = 1
if x.ndim == 4:
tile_flag = np.zeros(tile_flag_shape)
tile_flag[self.tile_generator.z_start:self.tile_generator.z_end, self.tile_generator.y_start:self.tile_generator.y_end, self.tile_generator.x_start:self.tile_generator.x_end, :] = 1
# 3:4 -> capture only density part
x_downscale = measure.block_reduce(x[...,3:4], (z_mult, y_mult, x_mult, 1), np.mean)
tile_flag_downscale = measure.block_reduce(tile_flag, (z_mult, y_mult, x_mult, 1), np.mean)
else:
tile_flag = np.zeros(tile_flag_shape)
tile_flag[self.tile_generator.y_start:self.tile_generator.y_end, self.tile_generator.x_start:self.tile_generator.x_end, :] = 1
# 2:3 -> capture only density part
x_downscale = measure.block_reduce(x[...,2:3], (y_mult, x_mult, 1), np.mean)
tile_flag_downscale = measure.block_reduce(tile_flag, (y_mult, x_mult, 1), np.mean)
x_tile = | np.append(x_tile, x_downscale, axis=-1) | numpy.append |
import copy
import numpy as np
import random
import pdb
OBSTACLE_REWARD = -0.1
GOAL_REWARD = 1.0
DIST_REWARD = 0.1
DRONE_POSITION = 0.1
class Env(object):
def __init__(self, args):
self.mode = args.mode
self.action_size = args.action_dim
self.grid_size = args.grid_size
self.num_obst = args.num_obst
self.state_dim = args.state_dim
# initialize objs_info
# objs_info = {
# "obst_list": list of nparray,
# "goal": nparray,
# "drone_pos": nparray
# }
self.reset(self.num_obst)
def reset(self, num_obst):
'''Reset environment including:
obstacle positions, goal position and drone position
'''
self.num_obst = num_obst
self.objs_info = self._reset_objs()
def copy(self, env):
self.num_obst = env.num_obst
self.state_dim = env.state_dim
self.objs_info = env.objs_info.copy()
def get_state(self):
""" Return the state that encoded by current |objs_info|.
"""
state = None
if self.mode == "linear":
state = self._encoding_2_state_linear()
elif self.mode == "conv":
state = self._encoding_2_state_conv()
assert state is not None
return state
def step(self, action):
""" Move drone by action.
==========
Parameters:
action : int
action index for moving drone.
Return:
reward : float
calculated reward for action.
is_done : bool
if current episode is finished.
reward_info : dict
if trajectory ends at goal position.
{
'is_goal': False,
'is_obst': False,
'reward': None
}
"""
drone_pos_curt = self.objs_info['drone_pos'].copy()
if self.action_size == 26:
drone_pos_next, outbound = self._move_26(drone_pos_curt, action)
elif self.action_size == 6:
drone_pos_next, outbound = self._move_6(drone_pos_curt, action)
self.objs_info['drone_pos'] = drone_pos_next
reward_info = self._calculate_reward(
drone_pos_curt, drone_pos_next, outbound)
assert reward_info['reward'] is not None
if reward_info['is_goal']:# or reward_info['is_obst'] or outbound:
return reward_info['reward'], True, reward_info
else:
return reward_info['reward'], False, reward_info
def _calculate_reward(self, drone_pos_curt, drone_pos_next, outbound):
reward_info = {
'is_goal': False,
'is_obst': False,
'reward': None
}
if outbound:
reward_info['is_obst'] = True
reward_info['reward'] = OBSTACLE_REWARD
return reward_info
if np.array_equal(self.objs_info['goal'], drone_pos_next):
reward_info['is_goal'] = True
reward_info['reward'] = GOAL_REWARD
return reward_info
for temp_obst in self.objs_info['obst_list']:
if np.array_equal(temp_obst, drone_pos_next):
reward_info['is_obst'] = True
reward_info['reward'] = OBSTACLE_REWARD
return reward_info
temp_reward = self._calculate_projection_reward(drone_pos_curt, drone_pos_next)
# + self._calculate_distance_reward(drone_pos_curt, drone_pos_next)
# temp_reward = self._calculate_distance_reward(drone_pos_curt, drone_pos_next)
reward_info['reward'] = temp_reward
return reward_info
def _calculate_projection_reward(self, drone_pos_curt, drone_pos_next):
drone_pos_curt = drone_pos_curt.astype(np.float32)
drone_pos_next = drone_pos_next.astype(np.float32)
if np.array_equal(drone_pos_curt, drone_pos_next):
return 0.0
goal_direction = self.objs_info['goal'] - drone_pos_curt
goal_direction_normalize = goal_direction / np.linalg.norm(goal_direction)
move_direction = drone_pos_next - drone_pos_curt
move_direction_normalize = move_direction / np.linalg.norm(move_direction)
projection_normalized = np.dot(goal_direction_normalize, move_direction_normalize)
assert projection_normalized >= -1 and projection_normalized <= 1
return projection_normalized * DIST_REWARD
def _calculate_distance_reward(self, drone_pos_curt, drone_pos_next):
drone_pos_curt = drone_pos_curt.astype(np.float32)
drone_pos_next = drone_pos_next.astype(np.float32)
dist_curt = ((drone_pos_curt[0] - self.objs_info['goal'][0])**2 + \
(drone_pos_curt[1] - self.objs_info['goal'][1])**2 + \
(drone_pos_curt[2] - self.objs_info['goal'][2])**2)**0.5
dist_next = ((drone_pos_next[0] - self.objs_info['goal'][0])**2 + \
(drone_pos_next[1] - self.objs_info['goal'][1])**2 + \
(drone_pos_next[2] - self.objs_info['goal'][2])**2)**0.5
# dist_reward = float(dist_next < dist_curt) * DIST_REWARD
dist_ori = ((self.objs_info['drone_pos_start'][0] - self.objs_info['goal'][0])**2 + \
(self.objs_info['drone_pos_start'][1] - self.objs_info['goal'][1])**2 + \
(self.objs_info['drone_pos_start'][2] - self.objs_info['goal'][2])**2)**0.5
dist_reward = (dist_curt - dist_next) / dist_ori * DIST_REWARD
return dist_reward
def _encoding_2_state_linear(self):
state_placeholder = np.zeros(self.state_dim).astype(np.float32)
writer_idx = 0
for temp_obst in self.objs_info['obst_list']:
temp_dist_normalize = \
(temp_obst - self.objs_info['drone_pos']) \
/ float(self.grid_size)
state_placeholder[writer_idx:writer_idx+3] = temp_dist_normalize
state_placeholder[writer_idx+3] = OBSTACLE_REWARD
writer_idx += 4
assert writer_idx + 4 <= self.state_dim, \
"Not enough space left for state_placeholder."
goal_dist_normalized = \
(self.objs_info['goal'] - self.objs_info['drone_pos']) / float(self.grid_size)
drone_pos_normalized = self.objs_info['drone_pos'] / float(self.grid_size)
# state_placeholder[-3:] = drone_pos_normalized
# state_placeholder[-7:-4] = goal_dist_normalized
# state_placeholder[-4] = GOAL_REWARD
state_placeholder[-4:-1] = goal_dist_normalized
state_placeholder[-1] = GOAL_REWARD
return state_placeholder
def _encoding_2_state_conv(self):
state_placeholder = np.zeros(
(self.grid_size, self.grid_size, self.grid_size)).astype(np.float32)
for temp_obst in self.objs_info['obst_list']:
state_placeholder[
int(temp_obst[0]), int(temp_obst[1]), int(temp_obst[2])] = OBSTACLE_REWARD
state_placeholder[
int(self.objs_info['goal'][0]),
int(self.objs_info['goal'][1]),
int(self.objs_info['goal'][2])] = GOAL_REWARD
state_placeholder[
int(self.objs_info['drone_pos'][0]),
int(self.objs_info['drone_pos'][1]),
int(self.objs_info['drone_pos'][2])] = DRONE_POSITION
return (state_placeholder, self.objs_info['drone_pos'] / float(self.grid_size))
def _reset_objs(self):
obst_list = []
while len(obst_list) < self.num_obst:
temp_obs = np.random.randint(self.grid_size, size=3)
if not self._array_in_list(obst_list, temp_obs):
obst_list.append(temp_obs.astype(np.float32))
goal = None
while goal is None:
temp_goal = np.random.randint(self.grid_size, size=3)
if not self._array_in_list(obst_list, temp_goal):
goal = temp_goal
drone_pos = None
while drone_pos is None:
temp_drone_pos = np.random.randint(self.grid_size, size=3)
if (not self._array_in_list(obst_list, temp_drone_pos)) \
and (not np.array_equal(temp_drone_pos, goal)):
drone_pos = temp_drone_pos
objs_info = {
"obst_list": tuple(obst_list),
"goal": goal.astype(np.float32),
"drone_pos": drone_pos.astype(np.float32),
"drone_pos_start": drone_pos.astype(np.float32)
}
return objs_info
def _array_in_list(self, input_list, input_array):
for temp_array in input_list:
if np.array_equal(temp_array, input_array):
return True
return False
def _move_26(self, drone_pos_curt, action):
assert action < 26, "Action index must be < 26 for the 26-neighbour move map."
act_vec = None
if action == 0:
act_vec = np.array([-1, -1, 1])
elif action == 1:
act_vec = np.array([-1, 0, 1])
elif action == 2:
act_vec = np.array([-1, 1, 1])
elif action == 3:
act_vec = np.array([0, -1, 1])
elif action == 4:
act_vec = np.array([0, 0, 1])
elif action == 5:
act_vec = np.array([0, 1, 1])
elif action == 6:
act_vec = np.array([1, -1, 1])
elif action == 7:
act_vec = np.array([1, 0, 1])
elif action == 8:
act_vec = np.array([1, 1, 1])
elif action == 9:
act_vec = np.array([-1, -1, -1])
elif action == 10:
act_vec = np.array([-1, 0, -1])
elif action == 11:
act_vec = np.array([-1, 1, -1])
elif action == 12:
act_vec = np.array([0, -1, -1])
elif action == 13:
act_vec = np.array([0, 0, -1])
elif action == 14:
act_vec = np.array([0, 1, -1])
elif action == 15:
act_vec = np.array([1, -1, -1])
elif action == 16:
act_vec = np.array([1, 0, -1])
elif action == 17:
act_vec = np.array([1, 1, -1])
elif action == 18:
act_vec = np.array([-1, -1, 0])
elif action == 19:
act_vec = np.array([-1, 0, 0])
elif action == 20:
act_vec = | np.array([-1, 1, 0]) | numpy.array |
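# NOTE: a compact sketch of how the 26-neighbour action set enumerated by Env._move_26
# above can be generated programmatically. The ordering below does NOT match the
# hand-written table in the class; it only illustrates that the action space is every
# non-zero offset in {-1, 0, 1}^3.
import numpy as np
from itertools import product

ACTION_VECTORS = [np.array(v) for v in product((-1, 0, 1), repeat=3) if v != (0, 0, 0)]
assert len(ACTION_VECTORS) == 26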
import os
import sys
import math
import laspy
import scipy
import numpy as np
import pandas as ps
import scipy.linalg
import multiprocessing
import matplotlib.pyplot as plt
from numpy import linalg as LA
from scipy import spatial,optimize
from sklearn.decomposition import PCA
filename = str(sys.argv[1])
class featurecalculation:
def features(self,filename):
"""
INPUT :- LAS file name
OUTPUT :- A numpy array of size (no. of points, 22) consisting of predefined features
"""
pool = multiprocessing.Pool(processes=multiprocessing.cpu_count()-1) # create a multiprocessing pool with one worker per CPU core, minus one
logger.info("calculating neighbours")
result=pool.map(self.calc, range(division),chunksize=1) # process data_inputs iterable with pool
for divo in range(division):
if divo == (division - 1):
full_training_data[divo *maximum_points:] = result[divo][:][:]
else :
full_training_data[divo *maximum_points:(divo +1)*maximum_points] = result[divo][:][:]
logger.info(divo)
np.save('./data/interim/'+filename[:-4]+'_features' , full_training_data)
return
def calc(self,div):
# Calculating Feature for small point cloud with (maximum_points) no. of points
small_xyz = xyz[div*maximum_points:(div+1)*maximum_points]
small_data = data[div*maximum_points:(div+1)*maximum_points]
tree = spatial.KDTree(small_xyz)
_, idx = tree.query(small_xyz[:,:], k=10)
logger.info("Starting new Worker Process:%s",div)
medoid = []
for i in small_xyz[idx]:
d = scipy.spatial.distance.pdist(i)
d = scipy.spatial.distance.squareform(d)
medoid.append(np.argmin(d.sum(axis=0)))
covariance = []
for i in small_xyz[idx]:
covariance.append(np.cov(np.array(i).T))
covariance = np.array(covariance)
# Calculating Eigen Vectors and Eigen Values for each point
# w: eigen values , v: eigen vectors
w,v = LA.eigh(covariance)
w = [i/np.sum(i) for i in w]
w = np.array(w)
training_data = np.zeros((len(small_xyz),21))
# Calculating Geometric features for each point
training_data[:,0] = np.power(np.multiply(np.multiply(w[:,0], w[:,1]), w[:,2]), 1/3) #omnivariance
training_data[:,1] = -np.multiply(w[:,0], np.log(w[:,0]))-np.multiply(w[:,1], np.log(w[:,1]))-np.multiply(w[:,2], np.log(w[:,2])) #eigenentropy
training_data[:,2] = np.divide(w[:,2]-w[:,0], w[:,2]) #anisotropy
training_data[:,3] = np.divide(w[:,1]-w[:,0], w[:,2]) #planarity
training_data[:,4] = np.divide(w[:,2]-w[:,1], w[:,2]) #linearity
training_data[:,5] = w[:,0] #surface variation
training_data[:,6] = np.divide(w[:,0], w[:,2]) #scatter
training_data[:,7] = 1-abs(v[:,0,2]) #verticality
temp = []
for i in range(len(small_xyz)):
temp.append(np.subtract(small_xyz[idx[i]],small_xyz[idx[medoid[i]]]))
# Calculating Central Moments and height feature for each point
moment11 = [] #moment 1st order 1st axis
moment12 = [] #moment 1st order 2nd axis
moment21 = [] #moment 2nd order 1st axis
moment22 = [] #moment 2nd order 2nd axis
vertical_range = [] #vertical range
height_below = [] #height below
for i in range(len(small_xyz)):
moment11.append(np.sum( | np.dot(temp[i], v[i][2]) | numpy.dot |
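# NOTE: a standalone toy illustration of the eigenvalue-based descriptors computed in
# featurecalculation.calc above (omnivariance, planarity, linearity), evaluated on a
# synthetic, nearly planar neighbourhood; the numbers are illustrative only.
import numpy as np
from numpy import linalg as LA

rng = np.random.default_rng(0)
neigh = rng.normal(size=(50, 3)) * np.array([1.0, 1.0, 0.01])  # a flat patch of 50 points
w, v = LA.eigh(np.cov(neigh.T))     # eigenvalues in ascending order, as in the code above
w = w / np.sum(w)                   # normalise the eigenvalues
planarity = (w[1] - w[0]) / w[2]
linearity = (w[2] - w[1]) / w[2]
omnivariance = np.power(w[0] * w[1] * w[2], 1.0 / 3.0)
print(planarity, linearity, omnivariance)  # planarity dominates for this flat patch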
"""
test_comparison_with_reference
==============================
Module with test comparing new simulations with reference data.
"""
import subprocess
import os
import inspect
import tempfile
import h5py
import numpy as np
import math
def test_comparison():
compare_spectra()
def compare_spectra(script_file="scripts/run_Ni_NiO_Xbath.sh",
script_argument=50,
reference_file="referenceOutput/Ni_NiO_50bath/spectra.h5"):
print("Start comparison of spectra...")
# Create a temporary directory using the context manager
with tempfile.TemporaryDirectory() as tmpdirname:
print('Created temporary directory', tmpdirname)
os.chdir(tmpdirname)
print("Current working dir:", os.getcwd())
path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
cmd = os.path.join(path[:-19], script_file)
print("Run command:", cmd)
print("Use command argument:", script_argument)
subprocess.call([cmd, str(script_argument)])
files_and_dirs = os.listdir()
print("Files and folders in temporary folder:", files_and_dirs)
# Open spectra file and the reference spectra file
file_handle = h5py.File("spectra.h5", "r")
ref_file_handle = h5py.File(os.path.join(path, reference_file), "r")
# Compare file contents
for key in ref_file_handle:
print("Compare dataset:", key)
x = file_handle[key][()]
x_ref = ref_file_handle[key][()]
abs_diff = np.abs(x - x_ref)
i = np.argmax(abs_diff)
print("Max abs diff:", | np.ravel(abs_diff) | numpy.ravel |
from time import strftime
from datetime import datetime
from urllib.request import urlopen
from urllib.error import URLError
from .skullstrip import skullstrip
from .reorient import orient, reorient
from .pad import pad_image
import os
import sys
import argparse
import numpy as np
import nibabel as nib
from sklearn.utils import shuffle
from skimage import measure
from skimage import morphology
from subprocess import Popen, PIPE
from tqdm import tqdm
import random
import copy
import csv
def save_args_to_csv(args_obj, out_dir):
'''
Saves arguments to a csv for future reference
args_obj: argparse object, collected after running parse_args
out_dir: string, path where to save the csv file
'''
if not os.path.exists(out_dir):
os.makedirs(out_dir)
out_file = os.path.join(out_dir, "script_arguments.csv")
with open(out_file, 'w') as f:
writer = csv.writer(f, delimiter=',')
writer.writerow(["Argument", "Value"])
for arg in vars(args_obj):
writer.writerow([arg, getattr(args_obj, arg)])
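# NOTE: a brief usage sketch for save_args_to_csv using a hand-built argparse.Namespace;
# the argument names and the output directory are illustrative only.
if __name__ == "__main__":
    from argparse import Namespace
    demo_args = Namespace(plane="axial", batch_size=256, loss="cdc")
    save_args_to_csv(demo_args, os.path.join("results", "demo_experiment"))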
def preprocess(filename, src_dir, preprocess_root_dir, skullstrip_script_path, n4_script_path,
verbose=0):
'''
Preprocesses an image:
1. skullstrip
2. N4 bias correction
3. resample
4. reorient to RAI
Params: TODO
Returns: TODO, the directory location of the final processed image
'''
########## Directory Setup ##########
SKULLSTRIP_DIR = os.path.join(preprocess_root_dir, "skullstripped")
N4_DIR = os.path.join(preprocess_root_dir, "n4_bias_corrected")
RESAMPLE_DIR = os.path.join(preprocess_root_dir, "resampled")
RAI_DIR = os.path.join(preprocess_root_dir, "RAI")
for d in [SKULLSTRIP_DIR, N4_DIR, RESAMPLE_DIR, RAI_DIR]:
if not os.path.exists(d):
os.makedirs(d)
if "CT" in filename:
skullstrip(filename, src_dir, SKULLSTRIP_DIR,
skullstrip_script_path, verbose)
#n4biascorrect(filename, SKULLSTRIP_DIR, N4_DIR, n4_script_path, verbose)
#resample(filename, N4_DIR, RESAMPLE_DIR, verbose)
#orient(filename, SKULLSTRIP_DIR, RAI_DIR, verbose)
'''
elif "mask" in filename or "multiatlas" in filename:
#resample(filename, src_dir, RESAMPLE_DIR, verbose)
#orient(filename, SKULLSTRIP_DIR, RAI_DIR, verbose)
'''
final_preprocess_dir = SKULLSTRIP_DIR
return final_preprocess_dir
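# NOTE: a hedged usage sketch for preprocess above; every path and file name here is an
# assumption for illustration, since the real data layout and helper scripts are not shown.
if __name__ == "__main__":
    final_dir = preprocess(filename="subject01_CT.nii.gz",
                           src_dir="data/raw",
                           preprocess_root_dir="data/preprocessed",
                           skullstrip_script_path="scripts/skullstrip.sh",
                           n4_script_path="scripts/n4_bias_correct.sh",
                           verbose=1)
    print("Preprocessed files written to:", final_dir)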
def parse_args(session):
'''
Parse command line arguments.
Params:
- session: string, one of "train", "validate", or "test"
Returns:
- parse_args: object, accessible representation of arguments
'''
parser = argparse.ArgumentParser(
description="Arguments for Training and Testing")
if session == "train":
parser.add_argument('--datadir', required=True, action='store', dest='SRC_DIR',
help='Where the initial unprocessed data is. See readme for\
further information')
parser.add_argument('--plane', required=False, action='store', dest='plane',
default='axial', type=str,
help='Which plane to train the model on. Default is axial. \
Other options are only "sagittal" or "coronal".')
parser.add_argument('--psize', required=True, action='store', dest='patch_size',
help='Patch size, eg: 45x45. Patch sizes are separated by x\
and in voxels')
parser.add_argument('--batch_size', required=False, action='store', dest='batch_size',
default=256, type=int,
help='Batch size for training.')
parser.add_argument('--loss', required=False, action='store', dest='loss',
default='cdc', type=str,
help='Loss for the model to optimize over. Options are: \
bce, dice_coef, tpr, cdc, tpw_cdc, bce_tp')
parser.add_argument('--model', required=False, action='store', dest='model',
default=None,
help='If provided, picks up training from this model.')
parser.add_argument('--experiment_details', required=False, action='store',
dest='experiment_details', default='experiment_details', type=str,
help='Description of experiment, used to create folder to save\
weights.')
parser.add_argument('--num_patches', required=False, action='store', dest='num_patches',
default=1500000, type=int,
help='Maximum allowed number of patches. Default is all possible.')
elif session == "test":
parser.add_argument('--infile', required=True, action='store', dest='INFILE',
help='Image to segment')
parser.add_argument('--inmask', required=False, action='store', dest='INMASK',
help='Manual mask')
parser.add_argument('--model', required=True, action='store', dest='model',
help='Model Architecture (.json) file')
parser.add_argument('--weights', required=True, action='store', dest='weights',
help='Learnt weights (.hdf5) file')
parser.add_argument('--segdir', required=True, action='store', dest='segdir',
help='Directory in which to place segmentations')
elif session == "validate":
parser.add_argument('--datadir', required=True, action='store', dest='VAL_DIR',
help='Where the initial unprocessed data is')
parser.add_argument('--weights', required=True, action='store',
dest='weights',
help='Learnt weights on axial plane (.hdf5) file')
parser.add_argument('--threshold', required=False, action='store', dest='threshold',
type=float, default=0.5,
help='Scalar in [0,1] to use as binarizing threshold.')
elif session == "multiseg":
parser.add_argument('--datadir', required=True, action='store', dest='DATA_DIR',
help='Where the initial unprocessed data is')
parser.add_argument('--weights', required=True, action='store',
dest='weights',
help='Learnt weights on axial plane (.hdf5) file')
elif session == "calc_dice":
parser.add_argument('--gt_dir', required=True, action='store', dest='GT_DATA_DIR',
help='Where the manual masks are')
parser.add_argument('--indata', required=True, action='store', dest='IN_DATA',
help='Predicted data, either a file or directory')
else:
print("Invalid session. Must be one of \"train\", \"validate\", or \"test\"")
sys.exit()
parser.add_argument('--num_channels', required=False, type=int, action='store',
dest='num_channels', default=1,
help='Number of channels to include. First is CT, second is atlas,\
third is unskullstripped CT')
parser.add_argument('--gpuid', required=False, action='store', type=int, dest='GPUID',
help='For a multi-GPU system, the training can be run on different GPUs.\
Use a GPU id (single number), eg: 1 or 2 to run on that particular GPU.\
0 indicates first GPU. Optional argument. Default is the first GPU.\
-1 for all GPUs.')
return parser.parse_args()
def now():
'''
Formats time for use in the log file
Pulls time from internet in UTC to sync properly
'''
try:
res = urlopen('http://just-the-time.appspot.com/')
result = res.read().strip()
result_str = result.decode('utf-8')
result_str = result_str.split()
result_str = '_'.join(result_str)
except URLError as err:
result_str = strftime("%Y-%m-%d_%H:%M:%S", datetime.now())
return result_str
def write_log(log_file, host_id, acc, val_acc, loss):
update_log_file = False
new_log_file = False
with open(log_file, 'r') as f:
logfile_data = [x.split() for x in f.readlines()]
if (len(logfile_data) >= 1 and logfile_data[-1][1] != host_id)\
or len(logfile_data) == 0:
update_log_file = True
if len(logfile_data) == 0:
new_log_file = True
if update_log_file:
with open(log_file, 'a') as f:
if new_log_file:
f.write("{:<30}\t{:<10}\t{:<10}\t{:<10}\t{:<10}\n".format(
"timestamp",
"host_id",
"train_acc",
"val_acc",
"loss",))
f.write("{:<30}\t{:<10}\t{:<10.4f}\t{:<10.4f}\t{:<10.4f}\n".format(
now(),
host_id,
acc,
val_acc,
loss,))
def remove_ext(filename):
if ".nii" in filename:
return filename[:filename.find(".nii")]
else:
return filename
def get_root_filename(filename):
if "CT" in filename:
return filename[:filename.find("_CT")]
elif "mask" in filename:
return filename[:filename.find("_mask")]
else:
return filename
def get_dice(img1, img2):
'''
Returns the Dice score as a voxel-wise comparison of two volumes.
Params:
- img1: ndarray, tensor of first .nii.gz file
- img2: ndarray, tensor of second .nii.gz file
Returns:
- volume_dice: float, Dice score over the whole volume
- slices_dice: list of float, Dice score for each slice along the third axis
'''
img_data_1 = img1.astype(bool)
img_data_2 = img2.astype(bool)
if img_data_1.shape != img_data_2.shape:
print("Pred shape", img_data_1.shape)
print("GT shape", img_data_2.shape)
raise ValueError("Shape mismatch between files")
volume_dice = dice_metric(img_data_1.flatten(), img_data_2.flatten())
slices_dice = []
for slice_idx in range(img_data_1.shape[2]):
slices_dice.append(dice_metric(img_data_1[:, :, slice_idx],
img_data_2[:, :, slice_idx]))
return volume_dice, slices_dice
def dice_metric(A, B):
'''
Dice calculation over two BOOLEAN numpy tensors
'''
union = A.sum() + B.sum()
intersection = np.logical_and(A, B).sum()
if union == 0:
return 1.0
return 2.0 * intersection / union
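# NOTE: a tiny worked example for dice_metric above on boolean masks; the values are chosen
# so the result is easy to verify by hand (intersection 2, sums 3 + 3, Dice = 2*2/6).
if __name__ == "__main__":
    a = np.array([[1, 1, 0], [0, 1, 0]], dtype=bool)
    b = np.array([[1, 0, 0], [0, 1, 1]], dtype=bool)
    print(dice_metric(a, b))  # 0.666...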
def write_stats(filename, nii_obj, nii_obj_gt, stats_file, threshold=0.5):
'''
Writes probability volumes and thresholded volumes to a csv file.
Params:
- filename: string, name of the subject/file which was segmented
- nii_obj: nifti object, segmented CT
- nii_obj_gt: nifti object, ground truth segmentation
- stats_file: string, path and filename of .csv file to hold statistics
'''
SEVERE_HEMATOMA = 25000 # in mm^3
# get ground truth severity
img_data_gt = nii_obj_gt.get_data()
# pad ground truth
img_data_gt = pad_image(img_data_gt)
zooms_gt = nii_obj_gt.header.get_zooms()
scaling_factor_gt = zooms_gt[0] * zooms_gt[1] * zooms_gt[2]
# get volumes
probability_vol_gt = np.sum(img_data_gt)
prob_thresh_vol_gt = np.sum(
img_data_gt[np.where(img_data_gt >= threshold)])
thresh_data_gt = img_data_gt.copy()
thresh_data_gt[np.where(thresh_data_gt < threshold)] = 0
thresh_data_gt[np.where(thresh_data_gt >= threshold)] = 1
thresholded_vol_gt = np.sum(thresh_data_gt)
thresholded_vol_mm_gt = scaling_factor_gt * thresholded_vol_gt
# classify severity of largest hematoma in ground truth
label_gt = measure.label(img_data_gt)
props_gt = measure.regionprops(label_gt)
if len(props_gt) > 0:
areas = [x.area for x in props_gt]
areas.sort()
largest_contig_hematoma_vol_mm_gt = areas[-1] * scaling_factor_gt
else:
largest_contig_hematoma_vol_mm_gt = 0
if largest_contig_hematoma_vol_mm_gt > SEVERE_HEMATOMA:
severe_gt = 1
else:
severe_gt = 0
##### SEGMENTATION DATA #####
# load object tensor for calculations
img_data = nii_obj.get_data()[:, :, :]
img_data = pad_image(img_data)
zooms = nii_obj.header.get_zooms()
scaling_factor = zooms[0] * zooms[1] * zooms[2]
# get volumes
probability_vol = | np.sum(img_data) | numpy.sum |
import sys
import datetime
import reportconfig
import projectmetrics
import os
import numpy as np
import matplotlib
import matplotlib.dates as mdates
# check for headless executions
if "DISPLAY" not in os.environ:
if os.system('python -c "import matplotlib.pyplot as plt; plt.figure()"') != 0:
print("INFO: Lack of display should generate an expected ImportError. Changing MatPlotLib backend.")
matplotlib.use('Agg')
import matplotlib.pyplot as plt
else:
import matplotlib.pyplot as plt
# import constants for the days of the week
from matplotlib.dates import MO, TU, WE, TH, FR, SA
plt.style.use('ggplot')
# plt.style.use('fivethirtyeight')
# plt.style.use('classic')
# plt.style.use('seaborn')
YEARS = mdates.YearLocator() # every year
MONTHS = mdates.MonthLocator() # every month
WEEKDAYS = mdates.WeekdayLocator(byweekday=(MO, TU, WE, TH, FR)) # every weekday
WEEKFINISH = mdates.WeekdayLocator(byweekday=SA) # every week start
YEARS_FORMAT = mdates.DateFormatter('%Y')
MONTHS_FORMAT = mdates.DateFormatter('%b %Y')
# DEFAULT_CMAP = "Set2"
# DEFAULT_CMAP = "Set3"
# DEFAULT_CMAP = "prism"
DEFAULT_CMAP = "tab10"
SECONDARY_CMAP = "gist_ncar"
DEFAULT_TREND_RANGE = [60, 30, 14, 7]
DEFAULT_SLOC_TYPES = ["HAND", "AC", "XML"]
DEFAULT_COMP_TYPES = ["Channels", "Commands", "Events", "Parameters", "Total Ports"]
DEFAULT_ISSUE_LABELS = ["Bug", "Req. Change", "Enhancement", "Process", "Issue"]
DEFAULT_BAR_WIDTH = 0.8
I = 'issues'
S = 'sloc'
C = 'comp'
class GitHubMetricsReport:
def __init__(self, args):
if "--config" in args:
config_file = args[args.index("--config") + 1]
else:
config_file = args[2]
self.config_opts = reportconfig.ReportConfiguration(config_file)
if "--username" in args:
self.config_opts.username = args[args.index("--username") + 1]
if "--git-api-key" in args:
self.config_opts.git_api_key = args[args.index("--git-api-key") + 1]
if "--zen-api-key" in args:
self.config_opts.zen_api_key = args[args.index("--zen-api-key") + 1]
if "--show" in args:
self.show = True
else:
self.show = False
self.metrics = projectmetrics.ProjectMetrics(None, config_opts=self.config_opts)
def create_graph_colors_list(data_types):
if len(data_types) > 20:
cmap = plt.get_cmap(SECONDARY_CMAP)
colors_list = cmap(np.linspace(0., 1., len(data_types)))
else:
cmap = plt.get_cmap(DEFAULT_CMAP)
colors_list = cmap(np.arange(len(data_types)))
return colors_list
def format_label_chart(fig, axs, x_data):
try:
for ax in axs:
ax.legend()
ax.set_xticks(np.array(list(range(len(x_data)))))
ax.set_xticklabels(x_data, rotation=90)
x_lim = ax.get_xlim()
ax.set_xlim(-1, len(x_data))
y_lim = ax.get_ylim()
ax.set_ylim(y_lim[0], 1.05 * y_lim[1])
except TypeError:
axs.legend()
axs.set_xticks(np.array(list(range(len(x_data)))))
axs.set_xticklabels(x_data, rotation=90)
x_lim = axs.get_xlim()
axs.set_xlim(-1, len(x_data))
y_lim = axs.get_ylim()
axs.set_ylim(y_lim[0], 1.05*y_lim[1])
fig.tight_layout()
return fig
def format_date_chart(fig, axs, x_data):
try:
for ax in axs:
ax.xaxis_date()
ax.legend()
ax.xaxis.set_major_locator(MONTHS)
ax.xaxis.set_major_formatter(MONTHS_FORMAT)
y_lim = ax.get_ylim()
ax.set_ylim(y_lim[0], 1.05 * y_lim[1])
# if len(data_x) <= 120:
# ax.xaxis.set_minor_locator(WEEKDAYS)
# else:
# ax.xaxis.set_minor_locator(WEEKFINISH)
except TypeError:
axs.xaxis_date()
axs.legend()
axs.xaxis.set_major_locator(MONTHS)
axs.xaxis.set_major_formatter(MONTHS_FORMAT)
y_lim = axs.get_ylim()
axs.set_ylim(y_lim[0], 1.05*y_lim[1])
# if len(data_x) <= 120:
# axs.xaxis.set_minor_locator(WEEKDAYS)
# else:
# axs.xaxis.set_minor_locator(WEEKFINISH)
fig.autofmt_xdate()
fig.tight_layout()
return fig
def finalize_figure(fig, title, directory=None, show=False):
if show:
plt.show()
plt.close(fig)
return
if directory is not None:
output_file = directory + title + ".png"
output_file = output_file.replace(" ", "_")
plt.savefig(output_file)
plt.close(fig)
return output_file
def generate_table(table_columns, data, title="", directory=None, show=False):
fig, ax = plt.subplots(1, 1, figsize=(10, (len(data) + 2) / 4 + 1))
# fig.patch.set_visible(False)
ax.axis('off')
table = ax.table(cellText=data, colLabels=table_columns, loc='center')
for index, header in enumerate(table_columns):
table.auto_set_column_width(index)
table.auto_set_font_size(True)
ax.set_title(title)
fig.tight_layout()
output_file = finalize_figure(fig, title, directory, show)
return output_file
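# Hypothetical usage sketch (names and values are illustrative only):
# generate_table(["Metric", "Current Value"],
#                [["Open issues", 12], ["HAND SLOC", 3400]],
#                title="Example Summary", directory="./report/")
# would render the rows as a matplotlib table and write ./report/Example_Summary.png.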
def generate_line_plot(x_data, y_data, filled=None, data_labels=None, title="", directory=None, show=False,
date_plot=False, stacked=False):
if data_labels is None:
data_labels = list(y_data.keys())
if date_plot:
x_index = x_data
else:
x_index = np.array(list(range(len(x_data))))
y_offset = np.zeros((len(x_index),))
colors = create_graph_colors_list(data_labels)
fig, ax = plt.subplots(1, 1, figsize=(10, 10))
for index, label in enumerate(data_labels):
if isinstance(x_data, dict):
if stacked:
raise ValueError("Stacked line charts require shared x_data basis.")
x = x_data[label]
y_offset = np.zeros((len(x),))
else:
x = x_index
y = y_data[label]
if stacked:
            y = y + y_offset  # cumulative height; avoid mutating the caller's y_data in place
if date_plot:
ax.plot_date(x, y, '-', color=colors[index], label=label)
else:
ax.plot(x, y, '-', color=colors[index], label=label)
if filled and label in filled:
ax.fill_between(x, y, y_offset, color=colors[index], alpha=0.4)
if stacked:
            y_offset = y  # the next series stacks on top of this cumulative curve
ax.set_title(title)
# format the ticks
if date_plot:
format_date_chart(fig, ax, x_data)
else:
format_label_chart(fig, ax, x_data)
# handles, labels = _sort_legend(ax)
# ax.legend(handles, labels)
output_file = finalize_figure(fig, title, directory, show)
return output_file
def _generate_complicated_bar_plot(x_data, y_data, data_labels=None, title="", directory=None, show=False,
date_plot=False, split=False, adjacent=False, stacked=False):
if data_labels is None:
data_labels = list(y_data.keys())
bar_width = DEFAULT_BAR_WIDTH
colors = create_graph_colors_list(data_labels)
if date_plot:
# TODO consider re-enabling; expand chart when range > 60 days
# sorted_x_data = sorted(x_data)
# fig_x = max(10., ((sorted_x_data[-1] - sorted_x_data[0]).days + 1) / 6.)
fig_x = 10
else:
# expand chart when components > 25
fig_x = max(10., len(x_data) / 2.5)
if split and len(data_labels) > 1:
fig, axs = plt.subplots(len(data_labels), 1, figsize=(fig_x, 5 * len(data_labels)))
ax = axs[0]
else:
axs = []
fig, ax = plt.subplots(1, 1, figsize=(fig_x, 10))
if date_plot:
x = x_data
else:
x = np.array(list(range(len(x_data))))
if adjacent:
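        # Side-by-side series: each bar is narrowed to width/n and the group is
        # started at x - (n - 1) * width / 2 so the n bars stay centred on the
        # tick; the loop below then advances x by one bar width per series.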
bar_width /= len(data_labels)
x = x - (len(data_labels) - 1) * bar_width / 2
y_offset = np.zeros((len(x),))
for index, label in enumerate(data_labels):
if isinstance(x_data, dict):
if stacked:
raise ValueError("Stacked line charts require shared x_data basis.")
x = x_data[label]
y_offset = np.zeros((len(x),))
if split and len(data_labels) > 1:
ax = axs[index]
y = y_data[label]
bars = ax.bar(x, y, width=bar_width, bottom=y_offset, color=colors[index], label=label)
if not date_plot:
if adjacent:
x = x + bar_width
for position, bar in enumerate(bars):
height = bar.get_height()
if height != 0:
ax.text(bar.get_x() + bar.get_width() / 2., height + y_offset[position], " {} ".format(height),
ha='center', va='bottom')
# ha='center', va='bottom', rotation=90)
if stacked:
y_offset = y_offset + y_data[label]
if index == 0:
ax.set_title(title)
if split:
ax = axs
if date_plot:
format_date_chart(fig, ax, x_data)
else:
format_label_chart(fig, ax, x_data)
output_file = finalize_figure(fig, title, directory, show)
return output_file
def generate_stacked_bar_plot(x_data, y_data, data_labels=None, title="", directory=None, show=False, date_plot=False):
return _generate_complicated_bar_plot(x_data, y_data, data_labels, title, directory, show, date_plot, stacked=True)
def generate_split_bar_plot(x_data, y_data, data_labels=None, title="", directory=None, show=False, date_plot=False):
return _generate_complicated_bar_plot(x_data, y_data, data_labels, title, directory, show, date_plot, split=True)
def generate_adjacent_bar_plot(x_data, y_data, data_labels=None, title="", directory=None, show=False, date_plot=False):
return _generate_complicated_bar_plot(x_data, y_data, data_labels, title, directory, show, date_plot, adjacent=True)
def generate_pie_plot():
raise NotImplementedError()
def generate_stacked_pie_plot():
raise NotImplementedError()
def table_project_summary(reporter, categories=None, period=None, show=False, directory=None, title="Project Summary"):
metrics = reporter.metrics
table_columns = [""] + ["Current Value"]
table_data = []
# TODO evaluate issue label filter approach
# issue label counts, starting with overall
if categories[I]:
total = metrics.issue_totals[metrics.OV][metrics.NEW][-1] - metrics.issue_totals[metrics.OV][metrics.DONE][-1]
table_data.append([metrics.OV + " issues", total])
for key in categories[I]:
if key == metrics.OV:
continue
total = metrics.issue_totals[key][metrics.NEW][-1] - metrics.issue_totals[key][metrics.DONE][-1]
table_data.append([key + " issues", total])
# sloc
for category in categories[S]:
total = 0
for key in (list(metrics.sloc_data.keys())):
total += metrics.sloc_data[key].get(category) \
if metrics.sloc_data[key].get(category) is not None else 0
table_data.append([category, total])
# component counts
for comp in categories[C]:
total = 0
for key in list(metrics.comp_data.keys()):
total += metrics.comp_data[key].get(comp) \
if metrics.comp_data[key].get(comp) is not None else 0
table_data.append([comp, total])
output_file = generate_table(table_columns, table_data, title, directory, show)
return output_file
def table_issue_label_summary(reporter, categories=None, period=None, show=False, directory=None, title="Issue Label Summary"):
categories = {I: categories[I], S: [], C: []}
return table_project_summary(reporter, categories=categories, period=period, show=show, directory=directory, title=title)
def table_sloc_summary(reporter, categories=None, period=None, show=False, directory=None, title="Component SLOC Summary"):
categories = {I: [], S: categories[S], C: []}
return table_project_summary(reporter, categories=categories, period=period, show=show, directory=directory, title=title)
def table_comp_summary(reporter, categories=None, period=None, show=False, directory=None, title="Component Structure Summary"):
categories = {I: [], S: [], C: categories[C]}
return table_project_summary(reporter, categories=categories, period=period, show=show, directory=directory, title=title)
def table_project_diffs(reporter, categories=None, period=None, show=False, directory=None, title="Project Changes"):
metrics = reporter.metrics
table_columns = [""] + ["%d Day Change" % x for x in period]
table_data = []
# issue label diffs, starting with overall
if categories[I]:
# TODO evaluate issue label filter approach
label_totals = metrics.issue_totals[metrics.OV]
table_data += [[metrics.OV] + ["+" + str(label_totals[metrics.NEW][-1] - label_totals[metrics.NEW][-x]) +
" / -" + str(label_totals[metrics.DONE][-1] - label_totals[metrics.DONE][-x])
if x <= len(metrics.issue_dates) else "" for x in period]]
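        # Each cell reads like "+12 / -8", i.e. 12 issues opened and 8 closed
        # within that look-back window (hypothetical numbers for illustration).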
for key in categories[I]:
if key == metrics.OV:
continue
label_totals = metrics.issue_totals[key]
row = [key] + ["+" + str(label_totals[metrics.NEW][-1] - label_totals[metrics.NEW][-x]) +
" / -" + str(label_totals[metrics.DONE][-1] - label_totals[metrics.DONE][-x])
if x <= len(metrics.issue_dates) else "" for x in period]
table_data.append(row)
# manual sloc diffs
if categories[S]:
dates = metrics.sloc_totals[metrics.DATE]
for key in categories[S]:
if key == metrics.DATE:
continue
label_totals = metrics.sloc_totals.get(key)
if label_totals is None:
continue
row = [key] + [str(label_totals[-1] - label_totals[-x])
if x <= len(dates) else "" for x in period]
for index, value in enumerate(row):
if index == 0:
continue
if value and int(value) >= 0:
row[index] = '+' + value
table_data.append(row)
# component counts
if categories[C]:
dates = metrics.comp_totals[metrics.DATE]
for key in categories[C]:
if key == metrics.DATE:
continue
label_totals = metrics.comp_totals.get(key)
if label_totals is None:
continue
row = [key] + [str(label_totals[-1] - label_totals[-x])
if x <= len(dates) else "" for x in period]
for index, value in enumerate(row):
if index == 0:
continue
if value and int(value) >= 0:
row[index] = '+' + value
table_data.append(row)
output_file = generate_table(table_columns, table_data, title, directory, show)
return output_file
def table_issue_label_diffs(reporter, categories=None, period=None, show=False, directory=None, title="Issue Label Changes"):
categories = {I: categories[I], S: [], C: []}
return table_project_diffs(reporter, categories=categories, period=period, show=show, directory=directory, title=title)
def table_sloc_diffs(reporter, categories=None, period=None, show=False, directory=None, title="Component SLOC Changes"):
categories = {I: [], S: categories[S], C: []}
return table_project_diffs(reporter, categories=categories, period=period, show=show, directory=directory, title=title)
def table_comp_diffs(reporter, categories=None, period=None, show=False, directory=None, title="Component Structure Changes"):
categories = {I: [], S: [], C: categories[C]}
return table_project_diffs(reporter, categories=categories, period=period, show=show, directory=directory, title=title)
def table_task_list(reporter, categories=None, period=None, show=False, directory=None, title="Planned Task List"):
metrics = reporter.metrics
table_columns = metrics.task_items_header
table_data = [[task] + [metrics.plan_dict[task][header] for header in metrics.task_items_header[1:]]
for task in metrics.plan_task_list]
#
# table_data = [metrics.task_items[task] for task in metrics.plan_task_list]
output_file = generate_table(table_columns, table_data, title, directory, show)
return output_file
def line_plot_trend(reporter, categories=None, period=None, show=False, directory=None, title=""):
metrics = reporter.metrics
period_dates = []
data_source = []
data_categories = []
if categories[I]:
period_dates = np.array(metrics.issue_dates)
data_categories = [metrics.NEW, metrics.DONE, metrics.OPEN]
data_source = metrics.issue_totals[metrics.OV]
elif categories[S]:
period_dates = np.array(metrics.sloc_totals[metrics.DATE])
data_categories = categories[S]
data_source = metrics.sloc_totals
elif categories[C]:
period_dates = np.array(metrics.comp_totals[metrics.DATE])
data_categories = categories[C]
data_source = metrics.comp_totals
output_file = generate_line_plot(period_dates, data_source, data_labels=data_categories,
title=title, directory=directory, show=show, date_plot=True)
return output_file
def line_plot_issue_labels_trend(reporter, categories=None, period=None, show=False, directory=None, title="Issue Label Trendline"):
categories = {I: categories[I], S: [], C: []}
return line_plot_trend(reporter, categories=categories, period=period, show=show, directory=directory, title=title)
def line_plot_comp_trend(reporter, categories=None, period=None, show=False, directory=None, title="Component Structure Trendline"):
categories = {I: [], S: [], C: categories[C]}
return line_plot_trend(reporter, categories=categories, period=period, show=show, directory=directory, title=title)
def line_plot_sloc_trend(reporter, categories=None, period=None, show=False, directory=None, title="Component SLOC Trendline"):
categories = {I: [], S: categories[S], C: []}
return line_plot_trend(reporter, categories=categories, period=period, show=show, directory=directory, title=title)
def stacked_bar_plot_trend(reporter, categories=None, period=None, show=False, directory=None, title=""):
metrics = reporter.metrics
data = {}
data_categories = []
# TODO evaluate issue label filter approach
if categories[I]:
period = min(len(metrics.issue_dates), max(period))
period_dates = | np.array(metrics.issue_dates[-period:]) | numpy.array |
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn import metrics
# normalization
def normalize(x):
return (x - np.min(x)) / (np.max(x) - np.min(x))
def get_data():
X, y = datasets.load_breast_cancer(return_X_y=True)
#X, y = datasets.load_iris(return_X_y=True)
X = normalize(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=0)
rng = np.random.RandomState(42)
random_unlabeled_points = rng.rand(len(X_train)) < 0.1
y_train[random_unlabeled_points] = -1
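    # scikit-learn's semi-supervised estimators (e.g. LabelPropagation) treat the
    # label -1 as "unlabeled", so roughly 10% of the training targets are hidden here.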
#
index, = | np.where(y_train != -1) | numpy.where |
import subprocess
import importlib
lib_list = ['numpy','csv','seaborn','matplotlib']
for lib_name in lib_list:
    try:
        # import the module named by the loop variable; a literal
        # "import lib_name" would look for a module actually called lib_name
        importlib.import_module(lib_name)
    except ImportError:
        if lib_name == 'csv':
            # csv ships with the standard library, so this branch should not trigger
            print(lib_name,' Module not installed')
            subprocess.run(['pip','install','python-csv'])
        else:
            print(lib_name,' Module not installed')
            subprocess.run(['pip','install','%s'%lib_name])
import numpy as np
import csv
import os
import seaborn as sns
import matplotlib.pyplot as plt
################################################################################
###### Fetch the result data and plot out the PDF and mean+/-STD figures #######
################################################################################
# Function for reading the lumen-area CSV output of a single UQ instance
def LumenRead(path,numfile):
resultlist = np.empty((0,39), float)
for i in range(numfile):
filename = os.path.join(path,('lumen_area_000'+str("{0:0=3d}".format(i))+'.csv'))
reader = csv.reader(open(filename, "r"), delimiter='\t')
x = list(reader)
result = np.array(x[0][:-1]).astype("float")
resultlist = np.append(resultlist, np.expand_dims(result,axis=0), axis=0)
# print(resultlist.shape)
return resultlist
# Set Directory and number of instance in the UQ campaign
data_root = './UQtest/A/'
time_step = 361
# Read the all subdirectory of UQ instances in a list
data_list = [os.path.join(data_root, item) for item in sorted(os.listdir(data_root))]
# Create an empty array and fetch the data in a loop
lumen_list = np.empty((0,time_step,39), float)
for item in data_list:
print('Processing:',item)
Data = LumenRead(item,time_step)
lumen_list = np.append(lumen_list,np.expand_dims(Data,axis=0),axis=0)
print(lumen_list.shape)
np.save('LumenData',lumen_list)
# Calculate the lumen volume from (lumen_area_of_each_slice*depth_of_slice)
lumen_list = np.load('LumenData.npy')
lumen_vol = np.sum(lumen_list[:,:,:],axis=2) * 0.03125
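# Worked example of the estimate above: if the 39 slices of one instance sum to
# ~19.5 mm^2 of lumen area at a given time step, the assumed 0.03125 mm slice
# depth gives a lumen volume of ~0.61 mm^3 (illustrative numbers only).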
fig = plt.figure()
# plt.plot(np.ones(128)*3.56055,np.linspace(0.0,2.5,128),label='0 days',c='k')
sns.kdeplot(lumen_vol[:,72],label='3 days')
sns.kdeplot(lumen_vol[:,144],label='6 days')
sns.kdeplot(lumen_vol[:,216],label='9 days')
sns.kdeplot(lumen_vol[:,288],label='12 days')
sns.kdeplot(lumen_vol[:,360],label='15 days')
plt.legend(fontsize=10,loc=2)
plt.xlabel('Lumen volume of blood vessel ($mm^3$)',fontsize=12)
plt.ylabel('Probability density function',fontsize=12)
plt.savefig('./'+'pdf.png')
plt.clf()
# plot mean+/-STD
days = np.arange(time_step) / 24.0  # hourly output steps expressed in days
mean_vol = np.mean(lumen_vol, axis=0)
plt.plot(days, mean_vol)
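# A minimal, assumed completion of the "mean +/- STD" figure promised by the
# comment above: shade one standard deviation around the mean curve. The axis
# labels and the output filename 'mean_std.png' are illustrative assumptions.
std_vol = np.std(lumen_vol, axis=0)
plt.fill_between(days, mean_vol - std_vol, mean_vol + std_vol, alpha=0.3)
plt.xlabel('Time (days)', fontsize=12)
plt.ylabel('Lumen volume of blood vessel ($mm^3$)', fontsize=12)
plt.savefig('./' + 'mean_std.png')
plt.clf()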
################################################################################
# The Neural Network (NN) based Speech Synthesis System
# https://svn.ecdf.ed.ac.uk/repo/inf/dnn_tts/
#
# Centre for Speech Technology Research
# University of Edinburgh, UK
# Copyright (c) 2014-2015
# All Rights Reserved.
#
# The system as a whole and most of the files in it are distributed
# under the following copyright and conditions
#
# Permission is hereby granted, free of charge, to use and distribute
# this software and its documentation without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of this work, and to
# permit persons to whom this work is furnished to do so, subject to
# the following conditions:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# - The authors' names may not be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THE UNIVERSITY OF EDINBURGH AND THE CONTRIBUTORS TO THIS WORK
# DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT
# SHALL THE UNIVERSITY OF EDINBURGH NOR THE CONTRIBUTORS BE LIABLE
# FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
# AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
# ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
# THIS SOFTWARE.
################################################################################
import sys, numpy, os
sys.path.append('/home/work_nfs2/jcong/workspace/merlin/src')
import io_funcs
from io_funcs.binary_io import BinaryIOCollection
import logging
from scipy.stats.stats import pearsonr
class DistortionComputation(object):
def __init__(self):
self.total_frame_number = 0
self.distortion = 0.0
self.bap_distortion = 0.0
self.f0_distortion = 0.0
self.vuv_error = 0.0
self.mgc_dim = 60
self.bap_dim = 5
self.lf0_dim = 1
def compute_distortion(self, file_id_list, reference_dir, generation_dir):
total_voiced_frame_number = 0
file_cnt = 0
for file_id in open(file_id_list,'r').readlines():
file_id =file_id.strip()
file_cnt += 1
mgc_file_name0 = reference_dir + '/' + 'mgc' + '/' + file_id + '.mgc'
bap_file_name0 = reference_dir + '/' + 'bap' + '/' + file_id + '.bap'
lf0_file_name0 = reference_dir + '/' + 'lf0' + '/' + file_id + '.lf0'
mgc_file_name = generation_dir + '/' + 'mgc' + '/' + file_id + '.mgc'
bap_file_name = generation_dir + '/' + 'bap' + '/' + file_id + '.bap'
lf0_file_name = generation_dir + '/' + 'lf0' + '/' + file_id + '.lf0'
generation_mgc, mgc_frame_number = self.load_binary_file(mgc_file_name, self.mgc_dim)
generation_lf0, lf0_frame_number = self.load_binary_file(lf0_file_name, self.lf0_dim)
generation_bap, bap_frame_number = self.load_binary_file(bap_file_name, self.bap_dim)
reference_mgc, mgc_frame_number0 = self.load_binary_file(mgc_file_name0, self.mgc_dim)
reference_lf0, lf0_frame_number0 = self.load_binary_file(lf0_file_name0, self.lf0_dim)
reference_bap, bap_frame_number0 = self.load_binary_file(bap_file_name0, self.bap_dim)
if mgc_frame_number0 != mgc_frame_number:
print("The number of mgc frames is not the same: %d vs %d. Error in compute_distortion.py" %(mgc_frame_number0, mgc_frame_number))
if mgc_frame_number0 < mgc_frame_number:
generation_mgc = generation_mgc[0:mgc_frame_number0]
mgc_frame_number = mgc_frame_number0
else:
reference_mgc = reference_mgc[0:mgc_frame_number]
mgc_frame_number0 = mgc_frame_number
print('Ignore the redundant frames')
if lf0_frame_number0 != lf0_frame_number:
print("The number of lf0 frames is not the same: %d vs %d. Error in compute_distortion.py" %(lf0_frame_number0, lf0_frame_number))
if lf0_frame_number0 < lf0_frame_number:
generation_lf0 = generation_lf0[0:lf0_frame_number0]
lf0_frame_number = lf0_frame_number0
else:
reference_lf0 = reference_lf0[0:lf0_frame_number]
lf0_frame_number0 = lf0_frame_number
print('Ignore the redundant frames')
temp_distortion = self.compute_mse(reference_mgc[:, 1:self.mgc_dim], generation_mgc[:, 1:self.mgc_dim])
self.distortion += temp_distortion * (10 /numpy.log(10)) * numpy.sqrt(2.0)
temp_bap_distortion = self.compute_mse(reference_bap, generation_bap)
self.bap_distortion += temp_bap_distortion * (10 /numpy.log(10)) * numpy.sqrt(2.0)
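            # The factor (10 / ln 10) * sqrt(2) applied above converts the
            # accumulated cepstral distance into dB, assuming compute_mse
            # (defined elsewhere in this class) returns the per-frame Euclidean
            # distance summed over frames, i.e. the usual definition
            #   MCD = (10 / ln 10) * sqrt(2 * sum_d (c_d_ref - c_d_gen)^2),
            # with the 0th (energy) coefficient excluded for the MGC stream;
            # the running totals are averaged over frames later in this method.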
temp_f0_distortion, temp_vuv_error, voiced_frame_number = self.compute_f0_mse(reference_lf0, generation_lf0)
self.f0_distortion += temp_f0_distortion
self.vuv_error += temp_vuv_error
self.total_frame_number += mgc_frame_number0
total_voiced_frame_number += voiced_frame_number
self.distortion /= float(self.total_frame_number)
self.bap_distortion /= float(self.total_frame_number)
self.f0_distortion /= total_voiced_frame_number
self.f0_distortion = numpy.sqrt(self.f0_distortion)
self.vuv_error /= float(self.total_frame_number)
print('---------------------------------------------------------------------')
print('Total file number is: %d' % (file_cnt))
print('MCD (MGC Distortion): %.3f dB' %(self.distortion))
print('BAPD (BAP Distortion): %.3f dB' %(self.bap_distortion))
        print('RMSE (RMSE in F0): %.3f Hz' %(self.f0_distortion))
print('VUV (V/UV Error Rate): %.3f%%' % (self.vuv_error*100.))
return self.distortion, self.bap_distortion, self.f0_distortion, self.vuv_error
def compute_f0_mse(self, ref_data, gen_data):
ref_vuv_vector = numpy.zeros((ref_data.size, 1))
gen_vuv_vector = numpy.zeros((ref_data.size, 1))
ref_vuv_vector[ref_data > 0.0] = 1.0
gen_vuv_vector[gen_data > 0.0] = 1.0
sum_ref_gen_vector = ref_vuv_vector + gen_vuv_vector
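        # Mask semantics used below: 2.0 -> voiced in both reference and
        # generation (contributes to the F0 error), 1.0 -> voicing mismatch
        # (counted as a V/UV error), 0.0 -> unvoiced in both (ignored).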
voiced_ref_data = ref_data[sum_ref_gen_vector == 2.0]
voiced_gen_data = gen_data[sum_ref_gen_vector == 2.0]
voiced_frame_number = voiced_gen_data.size
f0_mse = numpy.sum(((numpy.exp(voiced_ref_data) - numpy.exp(voiced_gen_data)) ** 2))
# f0_mse = numpy.sum((((voiced_ref_data) - (voiced_gen_data)) ** 2))
vuv_error_vector = sum_ref_gen_vector[sum_ref_gen_vector == 0.0]
vuv_error = | numpy.sum(sum_ref_gen_vector[sum_ref_gen_vector == 1.0]) | numpy.sum |
from __future__ import print_function
import errno
import os
import shutil
import tempfile
import unittest
import numpy as np
import math
from distutils.version import LooseVersion
from numpy.testing import assert_array_almost_equal, assert_almost_equal
import scipy
try:
from scipy.sparse import load_npz
except ImportError:
load_npz = None
from openmdao.api import Problem, IndepVarComp, ExecComp, DirectSolver,\
ExplicitComponent, LinearRunOnce, ScipyOptimizeDriver, ParallelGroup, Group, \
SqliteRecorder, CaseReader
from openmdao.utils.assert_utils import assert_rel_error, assert_warning
from openmdao.utils.general_utils import set_pyoptsparse_opt
from openmdao.utils.coloring import get_simul_meta, _solves_info
from openmdao.utils.mpi import MPI
from openmdao.test_suite.tot_jac_builder import TotJacBuilder
import openmdao.test_suite
try:
from openmdao.vectors.petsc_vector import PETScVector
except ImportError:
PETScVector = None
# check that pyoptsparse is installed
OPT, OPTIMIZER = set_pyoptsparse_opt('SNOPT')
if OPTIMIZER:
from openmdao.drivers.pyoptsparse_driver import pyOptSparseDriver
class CounterGroup(Group):
def __init__(self, *args, **kwargs):
self._solve_count = 0
super(CounterGroup, self).__init__(*args, **kwargs)
def _solve_linear(self, *args, **kwargs):
super(CounterGroup, self)._solve_linear(*args, **kwargs)
self._solve_count += 1
# note: size must be an even number
SIZE = 10
def run_opt(driver_class, mode, assemble_type=None, color_info=None, sparsity=None, derivs=True,
recorder=None, **options):
p = Problem(model=CounterGroup())
if assemble_type is not None:
p.model.linear_solver = DirectSolver(assemble_jac=True)
p.model.options['assembled_jac_type'] = assemble_type
indeps = p.model.add_subsystem('indeps', IndepVarComp(), promotes_outputs=['*'])
# the following were randomly generated using np.random.random(10)*2-1 to randomly
# disperse them within a unit circle centered at the origin.
indeps.add_output('x', np.array([ 0.55994437, -0.95923447, 0.21798656, -0.02158783, 0.62183717,
0.04007379, 0.46044942, -0.10129622, 0.27720413, -0.37107886]))
indeps.add_output('y', np.array([ 0.52577864, 0.30894559, 0.8420792 , 0.35039912, -0.67290778,
-0.86236787, -0.97500023, 0.47739414, 0.51174103, 0.10052582]))
indeps.add_output('r', .7)
p.model.add_subsystem('arctan_yox', ExecComp('g=arctan(y/x)', vectorize=True,
g=np.ones(SIZE), x=np.ones(SIZE), y=np.ones(SIZE)))
p.model.add_subsystem('circle', ExecComp('area=pi*r**2'))
p.model.add_subsystem('r_con', ExecComp('g=x**2 + y**2 - r', vectorize=True,
g=np.ones(SIZE), x=np.ones(SIZE), y=np.ones(SIZE)))
thetas = np.linspace(0, np.pi/4, SIZE)
p.model.add_subsystem('theta_con', ExecComp('g = x - theta', vectorize=True,
g=np.ones(SIZE), x=np.ones(SIZE),
theta=thetas))
p.model.add_subsystem('delta_theta_con', ExecComp('g = even - odd', vectorize=True,
g=np.ones(SIZE//2), even=np.ones(SIZE//2),
odd=np.ones(SIZE//2)))
p.model.add_subsystem('l_conx', ExecComp('g=x-1', vectorize=True, g=np.ones(SIZE), x=np.ones(SIZE)))
IND = np.arange(SIZE, dtype=int)
ODD_IND = IND[1::2] # all odd indices
EVEN_IND = IND[0::2] # all even indices
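    # e.g. with SIZE = 10: EVEN_IND = [0, 2, 4, 6, 8] and ODD_IND = [1, 3, 5, 7, 9]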
p.model.connect('r', ('circle.r', 'r_con.r'))
p.model.connect('x', ['r_con.x', 'arctan_yox.x', 'l_conx.x'])
p.model.connect('y', ['r_con.y', 'arctan_yox.y'])
p.model.connect('arctan_yox.g', 'theta_con.x')
p.model.connect('arctan_yox.g', 'delta_theta_con.even', src_indices=EVEN_IND)
p.model.connect('arctan_yox.g', 'delta_theta_con.odd', src_indices=ODD_IND)
p.driver = driver_class()
p.driver.options.update(options)
p.model.add_design_var('x')
p.model.add_design_var('y')
p.model.add_design_var('r', lower=.5, upper=10)
# nonlinear constraints
p.model.add_constraint('r_con.g', equals=0)
p.model.add_constraint('theta_con.g', lower=-1e-5, upper=1e-5, indices=EVEN_IND)
p.model.add_constraint('delta_theta_con.g', lower=-1e-5, upper=1e-5)
# this constrains x[0] to be 1 (see definition of l_conx)
p.model.add_constraint('l_conx.g', equals=0, linear=False, indices=[0,])
# linear constraint
p.model.add_constraint('y', equals=0, indices=[0,], linear=True)
p.model.add_objective('circle.area', ref=-1)
# # setup coloring
if color_info is not None:
p.driver.set_simul_deriv_color(color_info)
elif sparsity is not None:
p.driver.set_total_jac_sparsity(sparsity)
if recorder:
p.driver.add_recorder(recorder)
p.setup(mode=mode, derivatives=derivs)
p.run_driver()
return p
class SimulColoringPyoptSparseTestCase(unittest.TestCase):
@unittest.skipUnless(OPTIMIZER == 'SNOPT', "This test requires SNOPT.")
def test_simul_coloring_snopt_fwd(self):
# first, run w/o coloring
p = run_opt(pyOptSparseDriver, 'fwd', optimizer='SNOPT', print_results=False)
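        # Layout of color_info below (as suggested by its inline comments): under
        # "fwd", the first nested list groups Jacobian columns by color -- the
        # leading entry holds columns solved individually, while columns sharing
        # a color touch disjoint sets of rows, so one combined forward solve
        # recovers all of them. The second nested list gives, for each column,
        # the nonzero row indices used to scatter that combined solve back into
        # the total Jacobian (None for the individually solved columns).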
color_info = {"fwd": [[
[20], # uncolored columns
[0, 2, 4, 6, 8], # color 1
[1, 3, 5, 7, 9], # color 2
[10, 12, 14, 16, 18], # color 3
[11, 13, 15, 17, 19] # color 4
],
[
[1, 11, 12, 17], # column 0
[2, 17], # column 1
[3, 13, 18], # column 2
[4, 18], # column 3
[5, 14, 19], # column 4
[6, 19], # column 5
[7, 15, 20], # column 6
[8, 20], # column 7
[9, 16, 21], # column 8
[10, 21], # column 9
[1, 12, 17], # column 10
[2, 17], # column 11
[3, 13, 18], # column 12
[4, 18], # column 13
[5, 14, 19], # column 14
[6, 19], # column 15
[7, 15, 20], # column 16
[8, 20], # column 17
[9, 16, 21], # column 18
[10, 21], # column 19
None # column 20
]],
"sparsity": {
"circle.area": {
"indeps.x": [[], [], [1, 10]],
"indeps.y": [[], [], [1, 10]],
"indeps.r": [[0], [0], [1, 1]]
},
"r_con.g": {
"indeps.x": [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [10, 10]],
"indeps.y": [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [10, 10]],
"indeps.r": [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [10, 1]]
},
"l_conx.g": {
"indeps.x": [[0], [0], [1, 10]],
"indeps.y": [[], [], [1, 10]],
"indeps.r": [[], [], [1, 1]]
},
"theta_con.g": {
"indeps.x": [[0, 1, 2, 3, 4], [0, 2, 4, 6, 8], [5, 10]],
"indeps.y": [[0, 1, 2, 3, 4], [0, 2, 4, 6, 8], [5, 10]],
"indeps.r": [[], [], [5, 1]]
},
"delta_theta_con.g": {
"indeps.x": [[0, 0, 1, 1, 2, 2, 3, 3, 4, 4], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [5, 10]],
"indeps.y": [[0, 0, 1, 1, 2, 2, 3, 3, 4, 4], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [5, 10]],
"indeps.r": [[], [], [5, 1]]
}
}}
p_color = run_opt(pyOptSparseDriver, 'fwd', color_info=color_info, optimizer='SNOPT', print_results=False)
assert_almost_equal(p['circle.area'], np.pi, decimal=7)
assert_almost_equal(p_color['circle.area'], np.pi, decimal=7)
# - coloring saves 16 solves per driver iter (5 vs 21)
# - initial solve for linear constraints takes 21 in both cases (only done once)
# - (total_solves - 21) / (solves_per_iter) should be equal between the two cases
self.assertEqual((p.model._solve_count - 21) / 21,
(p_color.model._solve_count - 21) / 5)
@unittest.skipUnless(OPTIMIZER == 'SNOPT', "This test requires SNOPT.")
def test_dynamic_simul_coloring_snopt_auto(self):
# first, run w/o coloring
p = run_opt(pyOptSparseDriver, 'auto', optimizer='SNOPT', print_results=False)
p_color = run_opt(pyOptSparseDriver, 'auto', optimizer='SNOPT', print_results=False,
dynamic_simul_derivs=True)
assert_almost_equal(p['circle.area'], np.pi, decimal=7)
assert_almost_equal(p_color['circle.area'], np.pi, decimal=7)
# - coloring saves 16 solves per driver iter (5 vs 21)
# - initial solve for linear constraints takes 21 in both cases (only done once)
# - dynamic case does 3 full compute_totals to compute coloring, which adds 21 * 3 solves
# - (total_solves - N) / (solves_per_iter) should be equal between the two cases,
# - where N is 21 for the uncolored case and 21 * 4 for the dynamic colored case.
self.assertEqual((p.model._solve_count - 21) / 21,
(p_color.model._solve_count - 21 * 4) / 5)
@unittest.skipUnless(OPTIMIZER == 'SNOPT', "This test requires SNOPT.")
def test_dynamic_simul_coloring_snopt_auto_assembled(self):
# first, run w/o coloring
p = run_opt(pyOptSparseDriver, 'auto', assemble_type='dense', optimizer='SNOPT', print_results=False)
p_color = run_opt(pyOptSparseDriver, 'auto', assemble_type='dense', optimizer='SNOPT', print_results=False,
dynamic_simul_derivs=True)
assert_almost_equal(p['circle.area'], np.pi, decimal=7)
assert_almost_equal(p_color['circle.area'], np.pi, decimal=7)
# - coloring saves 16 solves per driver iter (5 vs 21)
# - initial solve for linear constraints takes 21 in both cases (only done once)
# - dynamic case does 3 full compute_totals to compute coloring, which adds 21 * 3 solves
# - (total_solves - N) / (solves_per_iter) should be equal between the two cases,
# - where N is 21 for the uncolored case and 21 * 4 for the dynamic colored case.
self.assertEqual((p.model._solve_count - 21) / 21,
(p_color.model._solve_count - 21 * 4) / 5)
def test_simul_coloring_pyoptsparse_slsqp_fwd(self):
try:
from pyoptsparse import OPT
except ImportError:
raise unittest.SkipTest("This test requires pyoptsparse.")
try:
OPT('SLSQP')
except:
raise unittest.SkipTest("This test requires pyoptsparse SLSQP.")
color_info = {"fwd": [[
[20], # uncolored columns
[0, 2, 4, 6, 8], # color 1
[1, 3, 5, 7, 9], # color 2
[10, 12, 14, 16, 18], # color 3
[11, 13, 15, 17, 19] # color 4
],
[
[1, 11, 12, 17], # column 0
[2, 17], # column 1
[3, 13, 18], # column 2
[4, 18], # column 3
[5, 14, 19], # column 4
[6, 19], # column 5
[7, 15, 20], # column 6
[8, 20], # column 7
[9, 16, 21], # column 8
[10, 21], # column 9
[1, 12, 17], # column 10
[2, 17], # column 11
[3, 13, 18], # column 12
[4, 18], # column 13
[5, 14, 19], # column 14
[6, 19], # column 15
[7, 15, 20], # column 16
[8, 20], # column 17
[9, 16, 21], # column 18
[10, 21], # column 19
None # column 20
]],
"sparsity": {
"circle.area": {
"indeps.x": [[], [], [1, 10]],
"indeps.y": [[], [], [1, 10]],
"indeps.r": [[0], [0], [1, 1]]
},
"r_con.g": {
"indeps.x": [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [10, 10]],
"indeps.y": [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [10, 10]],
"indeps.r": [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [10, 1]]
},
"l_conx.g": {
"indeps.x": [[0], [0], [1, 10]],
"indeps.y": [[], [], [1, 10]],
"indeps.r": [[], [], [1, 1]]
},
"theta_con.g": {
"indeps.x": [[0, 1, 2, 3, 4], [0, 2, 4, 6, 8], [5, 10]],
"indeps.y": [[0, 1, 2, 3, 4], [0, 2, 4, 6, 8], [5, 10]],
"indeps.r": [[], [], [5, 1]]
},
"delta_theta_con.g": {
"indeps.x": [[0, 0, 1, 1, 2, 2, 3, 3, 4, 4], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [5, 10]],
"indeps.y": [[0, 0, 1, 1, 2, 2, 3, 3, 4, 4], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [5, 10]],
"indeps.r": [[], [], [5, 1]]
}
}}
p_color = run_opt(pyOptSparseDriver, 'fwd', color_info=color_info, optimizer='SLSQP', print_results=False)
assert_almost_equal(p_color['circle.area'], np.pi, decimal=7)
# run w/o coloring
p = run_opt(pyOptSparseDriver, 'fwd', optimizer='SLSQP', print_results=False)
assert_almost_equal(p['circle.area'], np.pi, decimal=7)
# - coloring saves 16 solves per driver iter (5 vs 21)
# - initial solve for linear constraints takes 21 in both cases (only done once)
# - (total_solves - 21) / (solves_per_iter) should be equal between the two cases
self.assertEqual((p.model._solve_count - 21) / 21,
(p_color.model._solve_count - 21) / 5)
def test_dynamic_simul_coloring_pyoptsparse_slsqp_auto(self):
try:
from pyoptsparse import OPT
except ImportError:
raise unittest.SkipTest("This test requires pyoptsparse.")
try:
OPT('SLSQP')
except:
raise unittest.SkipTest("This test requires pyoptsparse SLSQP.")
p_color = run_opt(pyOptSparseDriver, 'auto', optimizer='SLSQP', print_results=False,
dynamic_simul_derivs=True)
assert_almost_equal(p_color['circle.area'], np.pi, decimal=7)
# run w/o coloring
p = run_opt(pyOptSparseDriver, 'auto', optimizer='SLSQP', print_results=False)
assert_almost_equal(p['circle.area'], np.pi, decimal=7)
# - coloring saves 16 solves per driver iter (5 vs 21)
# - initial solve for linear constraints takes 21 in both cases (only done once)
# - dynamic case does 3 full compute_totals to compute coloring, which adds 21 * 3 solves
# - (total_solves - N) / (solves_per_iter) should be equal between the two cases,
# - where N is 21 for the uncolored case and 21 * 4 for the dynamic colored case.
self.assertEqual((p.model._solve_count - 21) / 21,
(p_color.model._solve_count - 21 * 4) / 5)
@unittest.skipUnless(OPTIMIZER == 'SNOPT', "This test requires SNOPT.")
class SimulColoringRecordingTestCase(unittest.TestCase):
def setUp(self):
from tempfile import mkdtemp
self.dir = mkdtemp()
self.original_path = os.getcwd()
os.chdir(self.dir)
def tearDown(self):
os.chdir(self.original_path)
try:
shutil.rmtree(self.dir)
except OSError as e:
# If directory already deleted, keep going
if e.errno not in (errno.ENOENT, errno.EACCES, errno.EPERM):
raise e
def test_recording(self):
# coloring involves an underlying call to run_model (and final_setup),
# this verifies that it is handled properly by the recording setup logic
recorder = SqliteRecorder('cases.sql')
p = run_opt(pyOptSparseDriver, 'auto', assemble_type='csc', optimizer='SNOPT',
dynamic_simul_derivs=True, print_results=False, recorder=recorder)
cr = CaseReader('cases.sql')
self.assertEqual(cr.list_cases(), ['rank0:SNOPT|%d' % i for i in range(p.driver.iter_count)])
class SimulColoringPyoptSparseRevTestCase(unittest.TestCase):
"""Reverse coloring tests for pyoptsparse."""
@unittest.skipUnless(OPTIMIZER == 'SNOPT', "This test requires SNOPT.")
def test_simul_coloring_snopt(self):
# first, run w/o coloring
p = run_opt(pyOptSparseDriver, 'rev', optimizer='SNOPT', print_results=False)
color_info = {"rev": [[
[4, 5, 6, 7, 8, 9, 10], # uncolored rows
[1, 18, 19, 20, 21], # color 1
[0, 17, 13, 14, 15, 16], # color 2
[2, 11], # color 3
[3, 12] # color 4
],
[
[20], # row 0
[0, 10, 20], # row 1
[1, 11, 20], # row 2
[2, 12, 20], # row 3
None, # row 4
None, # row 5
None, # row 6
None, # row 7
None, # row 8
None, # row 9
None, # row 10
[0], # row 11
[0, 10], # row 12
[2, 12], # row 13
[4, 14], # row 14
[6, 16], # row 15
[8, 18], # row 16
[0, 1, 10, 11], # row 17
[2, 3, 12, 13], # row 18
[4, 5, 14, 15], # row 19
[6, 7, 16, 17], # row 20
[8, 9, 18, 19] # row 21
]],
"sparsity": {
"circle.area": {
"indeps.x": [[], [], [1, 10]],
"indeps.y": [[], [], [1, 10]],
"indeps.r": [[0], [0], [1, 1]]
},
"r_con.g": {
"indeps.x": [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [10, 10]],
"indeps.y": [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [10, 10]],
"indeps.r": [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [10, 1]]
},
"theta_con.g": {
"indeps.x": [[0, 1, 2, 3, 4], [0, 2, 4, 6, 8], [5, 10]],
"indeps.y": [[0, 1, 2, 3, 4], [0, 2, 4, 6, 8], [5, 10]],
"indeps.r": [[], [], [5, 1]]
},
"delta_theta_con.g": {
"indeps.x": [[0, 0, 1, 1, 2, 2, 3, 3, 4, 4], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [5, 10]],
"indeps.y": [[0, 0, 1, 1, 2, 2, 3, 3, 4, 4], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [5, 10]],
"indeps.r": [[], [], [5, 1]]
},
"l_conx.g": {
"indeps.x": [[0], [0], [1, 10]],
"indeps.y": [[], [], [1, 10]],
"indeps.r": [[], [], [1, 1]]
}
}}
p_color = run_opt(pyOptSparseDriver, 'rev', color_info=color_info, optimizer='SNOPT', print_results=False)
assert_almost_equal(p['circle.area'], np.pi, decimal=7)
assert_almost_equal(p_color['circle.area'], np.pi, decimal=7)
# - coloring saves 11 solves per driver iter (11 vs 22)
# - initial solve for linear constraints takes 1 in both cases (only done once)
# - (total_solves - 1) / (solves_per_iter) should be equal between the two cases
self.assertEqual((p.model._solve_count - 1) / 22,
(p_color.model._solve_count - 1) / 11)
@unittest.skipUnless(OPTIMIZER == 'SNOPT', "This test requires SNOPT.")
def test_dynamic_rev_simul_coloring_snopt(self):
# first, run w/o coloring
p = run_opt(pyOptSparseDriver, 'rev', optimizer='SNOPT', print_results=False)
p_color = run_opt(pyOptSparseDriver, 'rev', optimizer='SNOPT', print_results=False,
dynamic_simul_derivs=True)
assert_almost_equal(p['circle.area'], np.pi, decimal=7)
assert_almost_equal(p_color['circle.area'], np.pi, decimal=7)
# - bidirectional coloring saves 11 solves per driver iter (11 vs 22)
# - initial solve for linear constraints takes 1 in both cases (only done once)
# - dynamic case does 3 full compute_totals to compute coloring, which adds 22 * 3 solves
# - (total_solves - N) / (solves_per_iter) should be equal between the two cases,
# - where N is 1 for the uncolored case and 22 * 3 + 1 for the dynamic colored case.
self.assertEqual((p.model._solve_count - 1) / 22,
(p_color.model._solve_count - 1 - 22 * 3) / 11)
def test_simul_coloring_pyoptsparse_slsqp(self):
try:
from pyoptsparse import OPT
except ImportError:
raise unittest.SkipTest("This test requires pyoptsparse.")
try:
OPT('SLSQP')
except:
raise unittest.SkipTest("This test requires pyoptsparse SLSQP.")
color_info = {"rev": [[
[1, 4, 5, 6, 7, 8, 9, 10],
[3, 17],
[0, 11, 13, 14, 15, 16],
[2, 12, 18, 19, 20, 21]
],
[
[20],
None,
[1, 11, 20],
[2, 12, 20],
None,
None,
None,
None,
None,
None,
None,
[0],
[0, 10],
[2, 12],
[4, 14],
[6, 16],
[8, 18],
[0, 1, 10, 11],
[2, 3, 12, 13],
[4, 5, 14, 15],
[6, 7, 16, 17],
[8, 9, 18, 19]
]],
"sparsity": {
"circle.area": {
"indeps.x": [[], [], [1, 10]],
"indeps.y": [[], [], [1, 10]],
"indeps.r": [[0], [0], [1, 1]]
},
"r_con.g": {
"indeps.x": [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [10, 10]],
"indeps.y": [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [10, 10]],
"indeps.r": [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [10, 1]]
},
"l_conx.g": {
"indeps.x": [[0], [0], [1, 10]],
"indeps.y": [[], [], [1, 10]],
"indeps.r": [[], [], [1, 1]]
},
"theta_con.g": {
"indeps.x": [[0, 1, 2, 3, 4], [0, 2, 4, 6, 8], [5, 10]],
"indeps.y": [[0, 1, 2, 3, 4], [0, 2, 4, 6, 8], [5, 10]],
"indeps.r": [[], [], [5, 1]]
},
"delta_theta_con.g": {
"indeps.x": [[0, 0, 1, 1, 2, 2, 3, 3, 4, 4], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [5, 10]],
"indeps.y": [[0, 0, 1, 1, 2, 2, 3, 3, 4, 4], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [5, 10]],
"indeps.r": [[], [], [5, 1]]
}
}}
p_color = run_opt(pyOptSparseDriver, 'rev', color_info=color_info, optimizer='SLSQP', print_results=False)
assert_almost_equal(p_color['circle.area'], np.pi, decimal=7)
# run w/o coloring
p = run_opt(pyOptSparseDriver, 'rev', optimizer='SLSQP', print_results=False)
assert_almost_equal(p['circle.area'], np.pi, decimal=7)
# - coloring saves 11 solves per driver iter (11 vs 22)
# - initial solve for linear constraints takes 1 in both cases (only done once)
# - (total_solves - 1) / (solves_per_iter) should be equal between the two cases
self.assertEqual((p.model._solve_count - 1) / 22,
(p_color.model._solve_count - 1) / 11)
def test_dynamic_rev_simul_coloring_pyoptsparse_slsqp(self):
try:
from pyoptsparse import OPT
except ImportError:
raise unittest.SkipTest("This test requires pyoptsparse.")
try:
OPT('SLSQP')
except:
raise unittest.SkipTest("This test requires pyoptsparse SLSQP.")
p_color = run_opt(pyOptSparseDriver, 'rev', optimizer='SLSQP', print_results=False,
dynamic_simul_derivs=True)
assert_almost_equal(p_color['circle.area'], np.pi, decimal=7)
# run w/o coloring
p = run_opt(pyOptSparseDriver, 'rev', optimizer='SLSQP', print_results=False)
assert_almost_equal(p['circle.area'], np.pi, decimal=7)
# - coloring saves 11 solves per driver iter (11 vs 22)
# - initial solve for linear constraints takes 1 in both cases (only done once)
# - dynamic case does 3 full compute_totals to compute coloring, which adds 22 * 3 solves
# - (total_solves - N) / (solves_per_iter) should be equal between the two cases,
# - where N is 1 for the uncolored case and 22 * 3 + 1 for the dynamic colored case.
self.assertEqual((p.model._solve_count - 1) / 22,
(p_color.model._solve_count - 1 - 22 * 3) / 11)
class SimulColoringScipyTestCase(unittest.TestCase):
def setUp(self):
self.color_info = {"fwd": [[
[20], # uncolored columns
[0, 2, 4, 6, 8], # color 1
[1, 3, 5, 7, 9], # color 2
[10, 12, 14, 16, 18], # color 3
[11, 13, 15, 17, 19] # color 4
],
[
[1, 11, 16, 21], # column 0
[2, 16], # column 1
[3, 12, 17], # column 2
[4, 17], # column 3
[5, 13, 18], # column 4
[6, 18], # column 5
[7, 14, 19], # column 6
[8, 19], # column 7
[9, 15, 20], # column 8
[10, 20], # column 9
[1, 11, 16], # column 10
[2, 16], # column 11
[3, 12, 17], # column 12
[4, 17], # column 13
[5, 13, 18], # column 14
[6, 18], # column 15
[7, 14, 19], # column 16
[8, 19], # column 17
[9, 15, 20], # column 18
[10, 20], # column 19
None # column 20
]],
"sparsity": None
}
def test_simul_coloring_fwd(self):
# first, run w/o coloring
p = run_opt(ScipyOptimizeDriver, 'fwd', optimizer='SLSQP', disp=False)
p_color = run_opt(ScipyOptimizeDriver, 'fwd', color_info=self.color_info, optimizer='SLSQP', disp=False)
assert_almost_equal(p['circle.area'], np.pi, decimal=7)
assert_almost_equal(p_color['circle.area'], np.pi, decimal=7)
# - coloring saves 16 solves per driver iter (5 vs 21)
# - initial solve for linear constraints takes 21 in both cases (only done once)
# - (total_solves - 21) / (solves_per_iter) should be equal between the two cases
self.assertEqual((p.model._solve_count - 21) / 21,
(p_color.model._solve_count - 21) / 5)
# check for proper handling if someone calls compute_totals on Problem with different set or different order
# of desvars/responses than were used to define the coloring. Behavior should be that coloring is turned off
# and a warning is issued.
msg = "compute_totals called using a different list of design vars and/or responses than those used " \
"to define coloring, so coloring will be turned off.\ncoloring design vars: " \
"['indeps.x', 'indeps.y', 'indeps.r'], current design vars: ['indeps.x', 'indeps.y', 'indeps.r']\n" \
"coloring responses: ['circle.area', 'r_con.g', 'theta_con.g', 'delta_theta_con.g', 'l_conx.g'], " \
"current responses: ['delta_theta_con.g', 'circle.area', 'r_con.g', 'theta_con.g', 'l_conx.g']."
with assert_warning(UserWarning, msg):
p_color.compute_totals(of=['delta_theta_con.g', 'circle.area', 'r_con.g', 'theta_con.g', 'l_conx.g'],
wrt=['x', 'y', 'r'])
def test_bad_mode(self):
with self.assertRaises(Exception) as context:
p_color = run_opt(ScipyOptimizeDriver, 'rev', color_info=self.color_info, optimizer='SLSQP', disp=False)
self.assertEqual(str(context.exception),
"Simultaneous coloring does forward solves but mode has been set to 'rev'")
def test_dynamic_simul_coloring_auto(self):
# first, run w/o coloring
p = run_opt(ScipyOptimizeDriver, 'auto', optimizer='SLSQP', disp=False)
p_color = run_opt(ScipyOptimizeDriver, 'auto', optimizer='SLSQP', disp=False, dynamic_simul_derivs=True)
assert_almost_equal(p['circle.area'], np.pi, decimal=7)
assert_almost_equal(p_color['circle.area'], np.pi, decimal=7)
# - coloring saves 16 solves per driver iter (5 vs 21)
# - initial solve for linear constraints takes 21 in both cases (only done once)
# - dynamic case does 3 full compute_totals to compute coloring, which adds 21 * 3 solves
# - (total_solves - N) / (solves_per_iter) should be equal between the two cases,
# - where N is 21 for the uncolored case and 21 * 4 for the dynamic colored case.
self.assertEqual((p.model._solve_count - 21) / 21,
(p_color.model._solve_count - 21 * 4) / 5)
def test_simul_coloring_example(self):
from openmdao.api import Problem, IndepVarComp, ExecComp, ScipyOptimizeDriver
import numpy as np
p = Problem()
indeps = p.model.add_subsystem('indeps', IndepVarComp(), promotes_outputs=['*'])
# the following were randomly generated using np.random.random(10)*2-1 to randomly
# disperse them within a unit circle centered at the origin.
indeps.add_output('x', np.array([ 0.55994437, -0.95923447, 0.21798656, -0.02158783, 0.62183717,
0.04007379, 0.46044942, -0.10129622, 0.27720413, -0.37107886]))
indeps.add_output('y', np.array([ 0.52577864, 0.30894559, 0.8420792 , 0.35039912, -0.67290778,
-0.86236787, -0.97500023, 0.47739414, 0.51174103, 0.10052582]))
indeps.add_output('r', .7)
p.model.add_subsystem('arctan_yox', ExecComp('g=arctan(y/x)', vectorize=True,
g=np.ones(SIZE), x=np.ones(SIZE), y=np.ones(SIZE)))
p.model.add_subsystem('circle', ExecComp('area=pi*r**2'))
p.model.add_subsystem('r_con', ExecComp('g=x**2 + y**2 - r', vectorize=True,
g=np.ones(SIZE), x=np.ones(SIZE), y=np.ones(SIZE)))
thetas = np.linspace(0, np.pi/4, SIZE)
p.model.add_subsystem('theta_con', ExecComp('g = x - theta', vectorize=True,
g=np.ones(SIZE), x=np.ones(SIZE),
theta=thetas))
p.model.add_subsystem('delta_theta_con', ExecComp('g = even - odd', vectorize=True,
g=np.ones(SIZE//2), even=np.ones(SIZE//2),
odd=np.ones(SIZE//2)))
p.model.add_subsystem('l_conx', ExecComp('g=x-1', vectorize=True, g=np.ones(SIZE), x=np.ones(SIZE)))
IND = np.arange(SIZE, dtype=int)
ODD_IND = IND[1::2] # all odd indices
EVEN_IND = IND[0::2] # all even indices
p.model.connect('r', ('circle.r', 'r_con.r'))
p.model.connect('x', ['r_con.x', 'arctan_yox.x', 'l_conx.x'])
p.model.connect('y', ['r_con.y', 'arctan_yox.y'])
p.model.connect('arctan_yox.g', 'theta_con.x')
p.model.connect('arctan_yox.g', 'delta_theta_con.even', src_indices=EVEN_IND)
p.model.connect('arctan_yox.g', 'delta_theta_con.odd', src_indices=ODD_IND)
p.driver = ScipyOptimizeDriver()
p.driver.options['optimizer'] = 'SLSQP'
p.driver.options['disp'] = False
p.model.add_design_var('x')
p.model.add_design_var('y')
p.model.add_design_var('r', lower=.5, upper=10)
# nonlinear constraints
p.model.add_constraint('r_con.g', equals=0)
p.model.add_constraint('theta_con.g', lower=-1e-5, upper=1e-5, indices=EVEN_IND)
p.model.add_constraint('delta_theta_con.g', lower=-1e-5, upper=1e-5)
# this constrains x[0] to be 1 (see definition of l_conx)
p.model.add_constraint('l_conx.g', equals=0, linear=False, indices=[0,])
# linear constraint
p.model.add_constraint('y', equals=0, indices=[0,], linear=True)
p.model.add_objective('circle.area', ref=-1)
# setup coloring
color_info = {"fwd": [[
[20], # uncolored column list
[0, 2, 4, 6, 8], # color 1
[1, 3, 5, 7, 9], # color 2
[10, 12, 14, 16, 18], # color 3
[11, 13, 15, 17, 19], # color 4
],
[
[1, 11, 16, 21], # column 0
[2, 16], # column 1
[3, 12, 17], # column 2
[4, 17], # column 3
[5, 13, 18], # column 4
[6, 18], # column 5
[7, 14, 19], # column 6
[8, 19], # column 7
[9, 15, 20], # column 8
[10, 20], # column 9
[1, 11, 16], # column 10
[2, 16], # column 11
[3, 12, 17], # column 12
[4, 17], # column 13
[5, 13, 18], # column 14
[6, 18], # column 15
[7, 14, 19], # column 16
[8, 19], # column 17
[9, 15, 20], # column 18
[10, 20], # column 19
None, # column 20
]],
"sparsity": None
}
p.driver.set_simul_deriv_color(color_info)
p.setup(mode='fwd')
p.run_driver()
assert_almost_equal(p['circle.area'], np.pi, decimal=7)
class SimulColoringRevScipyTestCase(unittest.TestCase):
"""Rev mode coloring tests."""
def setUp(self):
self.color_info = {"rev": [[
[4, 5, 6, 7, 8, 9, 10], # uncolored rows
[2, 21], # color 1
[3, 16], # color 2
[1, 17, 18, 19, 20], # color 3
[0, 11, 12, 13, 14, 15] # color 4
],
[
[20], # row 0
[0, 10, 20], # row 1
[1, 11, 20], # row 2
[2, 12, 20], # row 3
None, # row 4
None, # row 5
None, # row 6
None, # row 7
None, # row 8
None, # row 9
None, # row 10
[0, 10], # row 11
[2, 12], # row 12
[4, 14], # row 13
[6, 16], # row 14
[8, 18], # row 15
[0, 1, 10, 11], # row 16
[2, 3, 12, 13], # row 17
[4, 5, 14, 15], # row 18
[6, 7, 16, 17], # row 19
[8, 9, 18, 19], # row 20
[0] # row 21
]],
"sparsity": None}
def test_simul_coloring(self):
color_info = self.color_info
p = run_opt(ScipyOptimizeDriver, 'rev', optimizer='SLSQP', disp=False)
p_color = run_opt(ScipyOptimizeDriver, 'rev', color_info=color_info, optimizer='SLSQP', disp=False)
assert_almost_equal(p['circle.area'], np.pi, decimal=7)
assert_almost_equal(p_color['circle.area'], np.pi, decimal=7)
# - coloring saves 11 solves per driver iter (11 vs 22)
# - initial solve for linear constraints takes 1 in both cases (only done once)
# - (total_solves - 1) / (solves_per_iter) should be equal between the two cases
self.assertEqual((p.model._solve_count - 1) / 22,
(p_color.model._solve_count - 1) / 11)
def test_bad_mode(self):
with self.assertRaises(Exception) as context:
p_color = run_opt(ScipyOptimizeDriver, 'fwd', color_info=self.color_info, optimizer='SLSQP', disp=False)
self.assertEqual(str(context.exception),
"Simultaneous coloring does reverse solves but mode has been set to 'fwd'")
def test_dynamic_simul_coloring(self):
p_color = run_opt(ScipyOptimizeDriver, 'rev', optimizer='SLSQP', disp=False, dynamic_simul_derivs=True)
p = run_opt(ScipyOptimizeDriver, 'rev', optimizer='SLSQP', disp=False)
assert_almost_equal(p['circle.area'], np.pi, decimal=7)
assert_almost_equal(p_color['circle.area'], np.pi, decimal=7)
# - bidirectional coloring saves 11 solves per driver iter (11 vs 22)
# - initial solve for linear constraints takes 1 in both cases (only done once)
# - dynamic case does 3 full compute_totals to compute coloring, which adds 22 * 3 solves
# - (total_solves - N) / (solves_per_iter) should be equal between the two cases,
# - where N is 1 for the uncolored case and 22 * 3 + 1 for the dynamic colored case.
self.assertEqual((p.model._solve_count - 1) / 22,
(p_color.model._solve_count - 1 - 22 * 3) / 11)
def test_dynamic_simul_coloring_no_derivs(self):
with self.assertRaises(Exception) as context:
p_color = run_opt(ScipyOptimizeDriver, 'rev', optimizer='SLSQP', disp=False,
dynamic_simul_derivs=True, derivs=False)
self.assertEqual(str(context.exception),
"Derivative support has been turned off but compute_totals was called.")
class SparsityTestCase(unittest.TestCase):
def setUp(self):
self.startdir = os.getcwd()
self.tempdir = tempfile.mkdtemp(prefix='SparsityTestCase-')
os.chdir(self.tempdir)
self.sparsity = {
"circle.area": {
"indeps.x": [[], [], [1, 10]],
"indeps.y": [[], [], [1, 10]],
"indeps.r": [[0], [0], [1, 1]]
},
"r_con.g": {
"indeps.x": [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [10, 10]],
"indeps.y": [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [10, 10]],
"indeps.r": [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [10, 1]]
},
"theta_con.g": {
"indeps.x": [[0, 1, 2, 3, 4], [0, 2, 4, 6, 8], [5, 10]],
"indeps.y": [[0, 1, 2, 3, 4], [0, 2, 4, 6, 8], [5, 10]],
"indeps.r": [[], [], [5, 1]]
},
"delta_theta_con.g": {
"indeps.x": [[0, 0, 1, 1, 2, 2, 3, 3, 4, 4], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [5, 10]],
"indeps.y": [[0, 0, 1, 1, 2, 2, 3, 3, 4, 4], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [5, 10]],
"indeps.r": [[], [], [5, 1]]
},
"l_conx.g": {
"indeps.x": [[0], [0], [1, 10]],
"indeps.y": [[], [], [1, 10]],
"indeps.r": [[], [], [1, 1]]
}
}
def tearDown(self):
os.chdir(self.startdir)
try:
shutil.rmtree(self.tempdir)
except OSError:
pass
@unittest.skipUnless(OPTIMIZER == 'SNOPT', "This test requires SNOPT.")
def test_sparsity_snopt(self):
# first, run without sparsity
p = run_opt(pyOptSparseDriver, 'fwd', optimizer='SNOPT', print_results=False)
# run with dynamic sparsity
p_dynamic = run_opt(pyOptSparseDriver, 'fwd', dynamic_derivs_sparsity=True,
optimizer='SNOPT', print_results=False)
# run with provided sparsity
p_sparsity = run_opt(pyOptSparseDriver, 'fwd', sparsity=self.sparsity,
optimizer='SNOPT', print_results=False)
assert_almost_equal(p['circle.area'], np.pi, decimal=7)
assert_almost_equal(p_dynamic['circle.area'], np.pi, decimal=7)
assert_almost_equal(p_sparsity['circle.area'], np.pi, decimal=7)
def test_sparsity_pyoptsparse_slsqp(self):
try:
from pyoptsparse import OPT
except ImportError:
raise unittest.SkipTest("This test requires pyoptsparse.")
try:
OPT('SLSQP')
except:
raise unittest.SkipTest("This test requires pyoptsparse SLSQP.")
# first, run without sparsity
p = run_opt(pyOptSparseDriver, 'fwd', optimizer='SLSQP', print_results=False)
# run with dynamic sparsity
p_dynamic = run_opt(pyOptSparseDriver, 'fwd', dynamic_derivs_sparsity=True,
optimizer='SLSQP', print_results=False)
# run with provided sparsity
p_sparsity = run_opt(pyOptSparseDriver, 'fwd', sparsity=self.sparsity,
optimizer='SLSQP', print_results=False)
assert_almost_equal(p['circle.area'], np.pi, decimal=7)
        assert_almost_equal(p_dynamic['circle.area'], np.pi, decimal=7)
        assert_almost_equal(p_sparsity['circle.area'], np.pi, decimal=7)
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2019 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for NeighbourSelection class"""
import unittest
import cartopy.crs as ccrs
import iris
import numpy as np
import scipy
from iris.tests import IrisTest
from improver.spotdata.neighbour_finding import NeighbourSelection
from improver.utilities.cube_metadata import create_coordinate_hash
from improver.utilities.warnings_handler import ManageWarnings
class Test_NeighbourSelection(IrisTest):
"""Test class for the NeighbourSelection tests, setting up inputs."""
def setUp(self):
"""Set up cubes and sitelists for use in testing NeighbourSelection"""
# Set up orography and land mask data
land_data = np.zeros((9, 9))
land_data[0:2, 4] = 1
land_data[4, 4] = 1
orography_data = np.zeros((9, 9))
orography_data[0, 4] = 1
orography_data[1, 4] = 5
# Global coordinates and cubes
projection = iris.coord_systems.GeogCS(6371229.0)
xcoord = iris.coords.DimCoord(
np.linspace(-160, 160, 9), standard_name='longitude',
units='degrees', coord_system=projection,
circular=True)
xcoord.guess_bounds()
ycoord = iris.coords.DimCoord(
np.linspace(-80, 80, 9), standard_name='latitude',
units='degrees', coord_system=projection,
circular=False)
ycoord.guess_bounds()
global_land_mask = iris.cube.Cube(
land_data, standard_name="land_binary_mask", units=1,
dim_coords_and_dims=[(ycoord, 1), (xcoord, 0)])
global_orography = iris.cube.Cube(
orography_data, standard_name="surface_altitude", units='m',
dim_coords_and_dims=[(ycoord, 1), (xcoord, 0)])
# Regional grid coordinates and cubes
projection = iris.coord_systems.LambertAzimuthalEqualArea(
ellipsoid=iris.coord_systems.GeogCS(
semi_major_axis=6378137.0, semi_minor_axis=6356752.314140356))
xcoord = iris.coords.DimCoord(
np.linspace(-1E5, 1E5, 9), standard_name='projection_x_coordinate',
units='m', coord_system=projection)
xcoord.guess_bounds()
ycoord = iris.coords.DimCoord(
np.linspace(-5E4, 5E4, 9), standard_name='projection_y_coordinate',
units='degrees', coord_system=projection)
ycoord.guess_bounds()
region_land_mask = iris.cube.Cube(
land_data, standard_name="land_binary_mask", units=1,
dim_coords_and_dims=[(ycoord, 1), (xcoord, 0)])
region_orography = iris.cube.Cube(
orography_data, standard_name="surface_altitude", units='m',
dim_coords_and_dims=[(ycoord, 1), (xcoord, 0)])
# Create site lists
self.global_sites = [
{'altitude': 2.0, 'latitude': 0.0, 'longitude': -64.0,
'wmo_id': 1}]
self.region_sites = [
{'altitude': 2.0, 'projection_x_coordinate': -4.0E4,
'projection_y_coordinate': 0.0, 'wmo_id': 1}]
self.global_land_mask = global_land_mask
self.global_orography = global_orography
self.region_land_mask = region_land_mask
self.region_orography = region_orography
self.region_projection = projection
class Test__repr__(IrisTest):
"""Tests the class __repr__ function."""
def test_basic(self):
"""Test that the __repr__ returns the expected string with defaults."""
plugin = NeighbourSelection()
result = str(plugin)
msg = ("<NeighbourSelection: land_constraint: False, minimum_dz: False"
", search_radius: 10000.0, site_coordinate_system: <class "
"'cartopy.crs.PlateCarree'>, site_x_coordinate:longitude, "
"site_y_coordinate: latitude, node_limit: 36>")
self.assertEqual(result, msg)
def test_non_default(self):
"""Test that the __repr__ returns the expected string with defaults."""
plugin = NeighbourSelection(land_constraint=True, minimum_dz=True,
search_radius=1000,
site_coordinate_system=ccrs.Mercator(),
site_x_coordinate='x_axis',
site_y_coordinate='y_axis',
node_limit=100)
result = str(plugin)
msg = ("<NeighbourSelection: land_constraint: True, minimum_dz: True,"
" search_radius: 1000, site_coordinate_system: <class "
"'cartopy.crs.Mercator'>, site_x_coordinate:x_axis, "
"site_y_coordinate: y_axis, node_limit: 100>")
self.assertEqual(result, msg)
class Test_neighbour_finding_method_name(IrisTest):
"""Test the function for generating the name that describes the neighbour
finding method."""
def test_nearest(self):
"""Test name generated when using the default nearest neighbour
method."""
plugin = NeighbourSelection()
expected = 'nearest'
result = plugin.neighbour_finding_method_name()
self.assertEqual(result, expected)
def test_nearest_land(self):
"""Test name generated when using the nearest land neighbour
method."""
plugin = NeighbourSelection(land_constraint=True)
expected = 'nearest_land'
result = plugin.neighbour_finding_method_name()
self.assertEqual(result, expected)
def test_nearest_land_minimum_dz(self):
"""Test name generated when using the nearest land neighbour
        with smallest vertical displacement method."""
plugin = NeighbourSelection(land_constraint=True, minimum_dz=True)
expected = 'nearest_land_minimum_dz'
result = plugin.neighbour_finding_method_name()
self.assertEqual(result, expected)
def test_nearest_minimum_dz(self):
"""Test name generated when using the nearest neighbour with the
        smallest vertical displacement method."""
plugin = NeighbourSelection(minimum_dz=True)
expected = 'nearest_minimum_dz'
result = plugin.neighbour_finding_method_name()
self.assertEqual(result, expected)
class Test__transform_sites_coordinate_system(Test_NeighbourSelection):
"""Test the function for converting arrays of site coordinates into the
correct coordinate system for the model/grid cube."""
def test_global_to_region(self):
"""Test coordinates generated when transforming from a global to
regional coordinate system, in this case PlateCarree to Lambert
Azimuthal Equal Areas."""
plugin = NeighbourSelection()
x_points = np.array([0, 10, 20])
y_points = np.array([0, 0, 10])
expected = [[0., 0.], [1111782.53516264, 0.],
[2189747.33076441, 1121357.32401753]]
result = plugin._transform_sites_coordinate_system(
x_points, y_points, self.region_orography)
self.assertArrayAlmostEqual(result, expected)
def test_region_to_global(self):
"""Test coordinates generated when transforming from a regional to
global coordinate system, in this case Lambert Azimuthal Equal Areas
to PlateCarree."""
plugin = NeighbourSelection(
site_coordinate_system=self.region_projection.as_cartopy_crs())
x_points = np.array([0, 1, 2])
y_points = np.array([0, 0, 1])
expected = [[0., 0.], [8.98315284e-06, 0.],
[1.79663057e-05, 9.04369476e-06]]
result = plugin._transform_sites_coordinate_system(
x_points, y_points, self.global_orography)
self.assertArrayAlmostEqual(result, expected)
def test_global_to_global(self):
"""Test coordinates generated when the input and output coordinate
systems are the same, in this case Plate-Carree."""
plugin = NeighbourSelection()
x_points = np.array([0, 10, 20])
y_points = np.array([0, 0, 10])
expected = np.stack((x_points, y_points), axis=1)
result = plugin._transform_sites_coordinate_system(
x_points, y_points, self.global_orography)
self.assertArrayAlmostEqual(result, expected)
def test_region_to_region(self):
"""Test coordinates generated when the input and output coordinate
systems are the same, in this case Lambert Azimuthal Equal Areas."""
plugin = NeighbourSelection(
site_coordinate_system=self.region_projection.as_cartopy_crs())
x_points = np.array([0, 1, 2])
        y_points = np.array([0, 0, 1])
########################################################################
# Project Name: Decentralised Deep Learning without Forgetting
# Creators: <NAME> (<EMAIL>)
# <NAME> (<EMAIL>)
# <NAME> (<EMAIL>)
# <NAME> (<EMAIL>)
# Project Owners: <NAME> (<EMAIL>),
# <NAME> (<EMAIL>)
# December 2019
#########################################################################
import matplotlib.pyplot as plt
import sklearn as sk
import sklearn.linear_model
import scipy.io as sio
from PLN_Class import PLN
from Admm import optimize_admm
from LoadDataFromMat import importData
import numpy as np
from LwF_based_ADMM import LwF_based_ADMM_LS_Diff
import copy
# Compute the W_ls by solving a Least Squares Regularization problem
def compute_Wls(X,T,lam):
    # X is a p*n matrix (each column is one p-dimensional sample); T is a Q*n matrix (each column is the corresponding target vector)
inv_matrix = np.linalg.inv(np.dot(X, X.T)+lam*np.eye(X.shape[0]))
W_ls = np.dot(np.dot(T, X.T), inv_matrix).astype(np.float32)
return W_ls
def compute_ol(Y,T,mu, max_iterations):
# Computes the Output matrix by calling the ADMM Algorithm function with given parameters
ol = optimize_admm(T, Y, mu, max_iterations)
return ol
def compute_accuracy(predicted_lbl, true_lbl):
    # Computes the classification accuracy between predicted and true labels
acc = 100.*np.mean(np.argmax(predicted_lbl,axis=0)==np.argmax(true_lbl,axis=0))
return acc
def compute_test_outputs(PLN_object_array, W_ls, num_layers, X_test):
# Computes the network output for the first layer
PLN_1 = PLN_object_array[0]
W_initial_top = np.dot(np.dot(PLN_1.V_Q, W_ls), X_test)
W_initial_bottom = PLN_1.normalization(np.dot(PLN_1.R_l, X_test))
Z = np.concatenate((W_initial_top, W_initial_bottom), axis=0)
y = PLN_1.activation_function(Z)
# Computes the network output for each layer after the first layer
for i in range(1, num_layers):
PLN_l = PLN_object_array[i]
W_top = np.dot(np.dot(PLN_l.V_Q, PLN_object_array[i-1].O_l), y)
W_bottom = PLN_l.normalization(np.dot(PLN_l.R_l, y))
Z = np.concatenate((W_top, W_bottom), axis=0)
y = PLN_l.activation_function(Z)
# Returns network output for the last layer
return np.dot(PLN_object_array[num_layers - 1].O_l, y)
def compute_NME(predicted_lbl, actual_lbl):
# This function computes the Normalized Mean Error (NME) in dB scale
num = np.linalg.norm(actual_lbl - predicted_lbl, ord='fro') # Frobenius Norm of the difference between Predicted and True Label
den = np.linalg.norm(actual_lbl, ord='fro') # Frobenius Norm of the True Label
NME = 20 * np.log10(num / den)
return NME
########################################################################################
# This function returns training, testing accuracies and NME test values for Least Squares
########################################################################################
def compute_LS_test_accuracy(Wls, X_train, Y_train, X_test, Y_test):
# Task prediction on the joint dataset
predict_train_total = np.dot(Wls, X_train)
predict_test_total = np.dot(Wls, X_test)
# Compute metrics
acc_train = compute_accuracy(predict_train_total, Y_train)
acc_test = compute_accuracy(predict_test_total, Y_test)
nme_test = compute_NME(predict_test_total, Y_test)
return acc_train, acc_test, nme_test
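#########################################################################################
# Illustrative usage sketch (not part of the original pipeline): exercises compute_Wls and
# compute_LS_test_accuracy on random synthetic data. The sizes (100 features, 10 classes,
# 500 train / 200 test samples) and lam=1e2 are assumptions chosen only for this demo;
# columns are samples, matching the convention used by compute_Wls above.
#########################################################################################
def _demo_least_squares_baseline():
    np.random.seed(0)
    p, Q, n_train, n_test = 100, 10, 500, 200
    X_train = np.random.randn(p, n_train)
    X_test = np.random.randn(p, n_test)
    Y_train = np.eye(Q)[:, np.random.randint(0, Q, n_train)]  # one-hot target columns
    Y_test = np.eye(Q)[:, np.random.randint(0, Q, n_test)]
    Wls = compute_Wls(X_train, Y_train, 1e2)
    acc_train, acc_test, nme_test = compute_LS_test_accuracy(
        Wls, X_train, Y_train, X_test, Y_test)
    print("train acc: %.2f%%, test acc: %.2f%%, test NME: %.2f dB"
          % (acc_train, acc_test, nme_test))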
#########################################################################################
# This function is used to compute the inputs and targets required for joint training
#########################################################################################
def compute_joint_datasets(X1_train, X2_train, Y1_train, Y2_train):
# Row and Column wise appending for the Targets
Y1_train_padded = np.concatenate((Y1_train, np.zeros((Y1_train.shape[0], Y2_train.shape[1]))),axis=1)
Y2_train_padded = np.concatenate((np.zeros((Y2_train.shape[0], Y1_train.shape[1])), Y2_train),axis=1)
Y_joint_train = np.concatenate((Y1_train_padded, Y2_train_padded), axis=0)
# Row wise appending of zeros for the Inputs, so that the row dimension is same for both datasets
if X1_train.shape[0] < X2_train.shape[0]:
padding = np.zeros((int(X2_train.shape[0] - X1_train.shape[0]), X1_train.shape[1]))
X1_train_padded = np.concatenate((X1_train, padding),axis=0)
        X_joint_train = np.concatenate((X1_train_padded, X2_train), axis=1)
# dataloader of augmented original t-less dataset
import torch.utils.data as data
from PIL import Image
import os
import os.path
import torch
import numpy as np
import torchvision.transforms as transforms
import argparse
import time
import random
import numpy.ma as ma
import scipy.misc
import scipy.io as scio
import yaml
import json
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import open3d as o3d
proj_dir = os.getcwd()+'/'
# proj_dir = '/home/lthpc/yifeis/pose/StablePose/'
# proj_dir = '/home/dell/yifeis/pose/pose_est_tless_3d/'
class PoseDataset(data.Dataset):
def __init__(self, mode, num_pt, add_noise, root, noise_trans, refine):
if mode == 'train':
self.mode = 'train'
self.path = proj_dir + 'datasets/tless/dataset_config/bop_train_list.txt'
elif mode == 'test':
self.mode = 'test'
self.path = proj_dir + 'datasets/tless/dataset_config/bop_final_test_list.txt'
self.num_pt = num_pt
self.root = root
self.add_noise = add_noise
self.noise_trans = noise_trans
self.model_root = root
self.list = []
# self.real = []
self.syn = []
input_file = open(self.path) # open data folder(include png,depth,label mat)
while 1:
input_line = input_file.readline()
if not input_line:
break
if input_line[-1:] == '\n':
input_line = input_line[:-1]
# if input_line[:5] == 'train_primesense/':
# self.real.append(input_line)
# else:
# self.syn.append(input_line)
self.list.append(input_line)
input_file.close()
self.length = len(self.list)
# self.len_real = len(self.real)
self.len_syn = len(self.syn)
model_info_file = open('{0}/models_reconst/models_info.json'.format(self.model_root), 'r', encoding='utf-8')
self.model_info = json.load(model_info_file)
self.cld = {}
for class_id in range(1, 31):
self.cld[class_id] = []
mesh = o3d.io.read_triangle_mesh('{0}/models_reconst/obj_{1}.ply'.format(self.model_root, str(class_id).zfill(6)))
pcd = mesh.sample_points_uniformly(number_of_points=10000)
pcd = np.asarray(pcd.points)
# displayPoint(pcd, pcd,k)
self.cld[class_id] = pcd
if self.mode == 'train':
self.xmap = np.array([[j for i in range(400)] for j in range(400)])
self.ymap = np.array([[i for i in range(400)] for j in range(400)])
self.rt_list = []
self.patch_num_list = []
self.crop_size_list = []
self.gt_list = []
self.info_list = []
self.cam_list = []
N = 31
for i in range(1, N):
datadir = 'train_primesense/' + str(i).zfill(6)
info_file = open('{0}/{1}/scene_gt_info.json'.format(self.root, datadir), 'r', encoding='utf-8')
gt_file = open('{0}/{1}/scene_gt.json'.format(self.root, datadir), 'r', encoding='utf-8')
cam_file = open('{0}/{1}/scene_camera.json'.format(self.root, datadir), 'r', encoding='utf-8')
info = json.load(info_file)
gt = json.load(gt_file)
cam = json.load(cam_file)
self.info_list.append(info)
self.gt_list.append(gt)
self.cam_list.append(cam)
                print('loading training ' + str(i) + ' json files')
else:
self.xmap = np.array([[j for i in range(720)] for j in range(540)])
self.ymap = np.array([[i for i in range(720)] for j in range(540)])
self.gt_list = []
self.info_list = []
self.cam_list = []
self.patch_num_list = []
############# load json
for i in range(1, 21):
datadir = 'test_primesense/' + str(i).zfill(6)
info_file = open('{0}/{1}/scene_gt_info.json'.format(self.root, datadir), 'r', encoding='utf-8')
gt_file = open('{0}/{1}/scene_gt.json'.format(self.root, datadir), 'r', encoding='utf-8')
cam_file = open('{0}/{1}/scene_camera.json'.format(self.root, datadir), 'r', encoding='utf-8')
info = json.load(info_file)
gt = json.load(gt_file)
cam = json.load(cam_file)
self.info_list.append(info)
self.gt_list.append(gt)
self.cam_list.append(cam)
                print('loading testing ' + str(i) + ' json files')
self.trancolor = transforms.ColorJitter(0.2, 0.2, 0.2, 0.05)
self.noise_img_loc = 0.0
self.noise_img_scale = 7.0
self.minimum_num_pt = 100
self.trans = transforms.ToTensor()
self.norm1 = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
self.norm2 = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
self.symmetry_obj_idx = [1-1, 2-1, 3-1, 4-1, 13-1, 14-1, 15-1, 16-1, 17-1,
24-1, 30-1,5-1, 6-1, 7-1, 8-1, 9-1, 10-1, 11-1,
12-1, 19-1, 20-1, 23-1, 25-1, 26-1, 27-1, 28-1, 29-1]
# self.rot_index = [1, 2, 3, 4, 13, 14, 15, 16, 17, 24, 30]
# self.ref_index = [5, 6, 7, 8, 9, 10, 11, 12, 19, 20, 23, 25, 26, 27, 28, 29]
# self.nosym_obj_idx = [18, 21, 22]
self.rot_obj_idx = [1-1, 2-1, 3-1, 4-1, 13-1, 14-1, 15-1, 16-1, 17-1, 24-1, 30-1]
self.ref_obj_idx = [5-1, 6-1, 7-1, 8-1, 9-1, 10-1, 11-1, 12-1, 19-1, 20-1,
23-1, 25-1, 26-1, 27-1, 28-1, 29-1]
self.nosym_obj_idx = [18-1, 21-1, 22-1]
self.num_pt_mesh_small = 5000
self.num_pt_mesh_large = 1000
self.refine = refine
self.front_num = 2
self.img_noise = True
self.t_noise = True
# print(len(self.list))
def __getitem__(self, index):
if self.mode == 'train':
data_dir = self.list[index][:-7]
dir_num = self.list[index][-13:-7]
data_num = self.list[index][-6:]
info = self.info_list[int(dir_num) - 1]
gt = self.gt_list[int(dir_num) - 1]
cam = self.cam_list[int(dir_num) - 1]
choose_file = '{0}/{1}/{2}/{3}_choose.list'.format(self.root, data_dir, 'segmentation', data_num)
label = Image.open(
'{0}/{1}/{2}/{3}_{4}.png'.format(self.root, data_dir, 'mask_visib_occ', data_num, str(0).zfill(6)))
img = Image.open('{0}/{1}/{2}/{3}.png'.format(self.root, data_dir, 'rgb_occ', data_num))
depth = Image.open('{0}/{1}/{2}/{3}.png'.format(self.root, data_dir, 'depth', data_num))
patch_file = Image.open('{0}/{1}/{2}/{3}.png'.format(self.root, data_dir, 'segmentation', data_num))
normal_file = Image.open('{0}/{1}/{2}/{3}.png'.format(self.root, data_dir, 'normal', data_num))
choose_file = '{0}/{1}/{2}/{3}_choose.list'.format(self.root, data_dir, 'segmentation', data_num)
choose_ls = []
stable_ls = []
try:
with open(choose_file) as f:
data = f.readlines()
if len(data) > 1:
for ids in data:
choose_id = ids[:-1].split(',')[:-1]
stable = float(ids[:-1].split(',')[-1])
choose_ls.append([int(x) for x in choose_id])
stable_ls.append(stable)
else:
if data[0] != '0':
choose_id = data[0].split(',')[:-1]
stable = float(data[0].split(',')[-1])
choose_ls.append([int(x) for x in choose_id])
stable_ls.append(stable)
else:
stable_ls.append(0)
except(OSError):
print('choose_list file not exist')
stable_ls.append(0)
choose_ls = []
data = ['0']
# if self.img_noise:
# img = self.trancolor(img)
patch_label = np.array(patch_file)
depth = np.array(depth)
mask_occ = np.array(label)
normal = np.array(normal_file)
cam_k = cam[str(int(data_num))]['cam_K']
depth_scale = cam[str(int(data_num))]['depth_scale']
cam_k = np.array(cam_k).reshape(3, 3)
obj_bb = info[str(int(data_num))][0]['bbox_visib']
obj_id = gt[str(int(data_num))][0]['obj_id']
model_info = self.model_info[str(obj_id)]
depth_mask = ma.getmaskarray(ma.masked_not_equal(depth, 0))
mask_label = ma.getmaskarray(ma.masked_not_equal(mask_occ, 0))
mask = mask_label * depth_mask
# mask_of_normal = mask.reshape(400, 400, 1).repeat(3, 2)
# normal_masked = normal*mask_of_normal
# mask_depth = mask * depth
# mask_patch = mask * patch_label
target_r = gt[str(int(data_num))][0]['cam_R_m2c']
target_r = np.array(target_r).reshape(3, 3).T
target_t = np.array(gt[str(int(data_num))][0]['cam_t_m2c'])
target_t = target_t / 1000
rt = np.append(target_r, target_t).reshape(1, 12)
add = np.array([[0, 0, 0, 1]])
target_trans = np.append(target_r.T, target_t.reshape(3, 1), axis=1)
target_trans = np.append(target_trans, add, axis=0)
rmin, rmax, cmin, cmax = get_bbox(mask_label)
img_masked = self.trans(img)[:, rmin:rmax, cmin:cmax]
img_masked = torch.zeros(img_masked.shape)
# img_masked1 = self.norm1(img_masked[:, rmin:rmax, cmin:cmax])
# img_masked2 = self.norm1(img_masked[:, rmin:rmax, cmin:cmax])
# img = np.array(img)
# img_masked_ = np.transpose(img[:, :, :3], (2, 0, 1))[:, rmin:rmax, cmin:cmax]
choose = mask[rmin:rmax, cmin:cmax].flatten().nonzero()[0]
if len(choose) > self.num_pt:
c_mask = np.zeros(len(choose), dtype=int)
c_mask[:self.num_pt] = 1
np.random.shuffle(c_mask)
choose = choose[c_mask.nonzero()]
else:
if len(choose) == 0:
print(0)
choose = np.pad(choose, (0, self.num_pt - len(choose)), 'wrap')
normal_maskd = normal[rmin:rmax, cmin:cmax].reshape(-1,3)[choose][:,:, np.newaxis].astype(np.float32)
patch_masked = patch_label[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
depth_masked = depth[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
xmap_masked = self.xmap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
ymap_masked = self.ymap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
choose = np.array([choose])
cam_cx = cam_k[0, 2]
cam_cy = cam_k[1, 2]
cam_fx = cam_k[0, 0]
cam_fy = cam_k[1, 1]
pt3 = patch_masked
pt2 =depth_masked*depth_scale / 1000
pt0 = (ymap_masked - cam_cx) * pt2 / cam_fx
pt1 = (xmap_masked - cam_cy) * pt2 / cam_fy
cloud = np.concatenate((pt0, pt1, pt2, pt3), axis=1)
nx = normal_maskd[:, 0] / 255.0 * 2 - 1
ny = normal_maskd[:, 1] / 255.0 * 2 - 1
nz = normal_maskd[:, 2] / 255.0 * 2 - 1
normals = np.concatenate((nx, ny, nz), axis=1)
dellist = [j for j in range(0, len(self.cld[obj_id]))]
if self.refine:
dellist = random.sample(dellist, len(self.cld[obj_id]) - self.num_pt_mesh_large)
else:
dellist = random.sample(dellist, len(self.cld[obj_id]) - self.num_pt_mesh_small)
model_points = np.delete(self.cld[obj_id], dellist, axis=0)
model_points = model_points / 1000
target = np.dot(model_points, target_r)
target = np.add(target, target_t)
# displayPoint(cloud, target, index)
# patch_occ = patch_label[rmin:rmax, cmin:cmax]
# num_patch = np.max(patch_occ)
# num_list = []
# for n in range(1, num_patch + 1):
# num = str(patch_occ.tolist()).count(str(n))
# num_list.append(num)
#
# num_list_new = []
# patch_id_list = []
# for m in num_list:
# if m > 100:
# num_list_new.append(m)
# patch_id_list.append(num_list.index(m) + 1)
im_id = 0
scene_id = 0
if self.mode == 'test':
data_dir = self.list[index][:23]
dir_num = self.list[index][16:22]
data_num = self.list[index][23:29]
obj_order = self.list[index][30:]
idx = int(obj_order)
im_id = int(data_num)
scene_id = int(dir_num)
info = self.info_list[int(dir_num) - 1]
gt = self.gt_list[int(dir_num) - 1]
cam = self.cam_list[int(dir_num) - 1]
obj_num = len(gt[str(int(data_num))])
obj_id = gt[str(int(data_num))][idx]['obj_id']
model_info = self.model_info[str(obj_id)]
depth = np.array(Image.open('{0}/{1}/{2}/{3}.png'.format(self.root, data_dir, 'depth', data_num)))
label = np.array(Image.open(
'{0}/{1}/{2}/{3}_{4}.png'.format(self.root, data_dir, 'mask_visib', data_num, str(idx).zfill(6))))
patch_label = np.array(Image.open(
'{0}/{1}/{2}/{3}_{4}.png'.format(self.root, data_dir, 'segmentation', data_num, str(idx).zfill(6))))
img = Image.open('{0}/{1}/{2}/{3}.png'.format(self.root, data_dir, 'rgb', data_num.zfill(6)))
normal = np.array(Image.open(
'{0}/{1}/{2}/{3}.png'.format(self.root, data_dir, 'normal', data_num, str(idx).zfill(6))))
choose_file = '{0}/{1}/{2}/{3}_{4}_choose.list'.format(self.root, data_dir, 'segmentation', data_num, str(idx).zfill(6))
with open(choose_file) as f:
data = f.readlines()
choose_ls = []
stable_ls = []
if len(data) > 1:
for ids in data:
choose_id = ids[:-1].split(',')[:-1]
stable = float(ids[:-1].split(',')[-1])
choose_ls.append([int(x) for x in choose_id])
stable_ls.append(stable)
else:
if data[0] != '0':
choose_id = data[0].split(',')[:-1]
stable = float(data[0].split(',')[-1])
choose_ls.append([int(x) for x in choose_id])
stable_ls.append(stable)
else:
stable_ls.append(0)
depth_scale = cam[str(int(data_num))]['depth_scale']
cam_k = cam[str(int(data_num))]['cam_K']
cam_k = np.array(cam_k).reshape(3, 3)
cam_cx = cam_k[0, 2]
cam_cy = cam_k[1, 2]
cam_fx = cam_k[0, 0]
cam_fy = cam_k[1, 1]
obj_bb = info[str(int(data_num))][idx]['bbox_visib']
cmin = obj_bb[0]
cmax = cmin + obj_bb[2]
rmin = obj_bb[1]
rmax = rmin + obj_bb[3]
img_masked = self.trans(img)[:, rmin:rmax, cmin:cmax]
img_masked = torch.zeros(img_masked.shape)
# img_masked = np.transpose(np.array(img)[:, :, :3], (2, 0, 1))[:, rmin:rmax, cmin:cmax]
mask_depth = ma.getmaskarray(ma.masked_not_equal(depth, 0))
mask_label = ma.getmaskarray(ma.masked_equal(label, 255))
mask = mask_label * mask_depth
mask_patch = mask * patch_label
mask_num = len(mask.flatten().nonzero()[0])
target_r = gt[str(int(data_num))][idx]['cam_R_m2c']
target_r = np.array(target_r).reshape(3, 3).T
target_t = np.array(gt[str(int(data_num))][idx]['cam_t_m2c'])
target_t = target_t / 1000
rt = np.append(target_r.reshape(-1), target_t.reshape(-1)).reshape(1, 12)
add = np.array([[0,0,0,1]])
target_trans = np.append(target_r.T,target_t.reshape(3,1),axis=1)
target_trans = np.append(target_trans,add,axis=0)
choose = mask[rmin:rmax, cmin:cmax].flatten().nonzero()[0]
if len(choose) > self.num_pt:
c_mask = np.zeros(len(choose), dtype=int)
c_mask[:self.num_pt] = 1
np.random.shuffle(c_mask)
choose = choose[c_mask.nonzero()]
else:
if len(choose) == 0:
print(0)
choose = np.pad(choose, (0, self.num_pt - len(choose)), 'wrap')
normal_maskd = normal[rmin:rmax, cmin:cmax].reshape(-1, 3)[choose][:, :, np.newaxis].astype(np.float32)
patch_masked = patch_label[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
depth_masked = depth[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
xmap_masked = self.xmap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
ymap_masked = self.ymap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
pt3 = patch_masked
pt2 = depth_masked*depth_scale / 1000
pt0 = (ymap_masked - cam_cx) * pt2 / cam_fx
pt1 = (xmap_masked - cam_cy) * pt2 / cam_fy
cloud = np.concatenate((pt0, pt1, pt2, pt3), axis=1)
nx = normal_maskd[:, 0] / 255.0 * 2 - 1
ny = normal_maskd[:, 1] / 255.0 * 2 - 1
nz = normal_maskd[:, 2] / 255.0 * 2 - 1
normals = np.concatenate((nx, ny, nz), axis=1)
dellist = [j for j in range(0, len(self.cld[obj_id]))]
if self.refine:
dellist = random.sample(dellist, len(self.cld[obj_id]) - self.num_pt_mesh_large)
else:
dellist = random.sample(dellist, len(self.cld[obj_id]) - self.num_pt_mesh_small)
model_points = np.delete(self.cld[obj_id], dellist, axis=0)
model_points = model_points / 1000
target = np.dot(model_points, target_r)
target = np.add(target, target_t)
# displayPoint(cloud,target,index)
patches = pt3.astype(int)
num_patch = np.max(patches)
num_list = []
patch_list = patches.reshape(-1).tolist()
for n in range(1, num_patch + 1): # ordered num of point in each patch(from patch_1 to patch_n)
num = str(patch_list).count(str(n))
num_list.append(num)
num_list_new = []
patch_id_list_all = []
for m in num_list: # select patchs that num of points > 100
if m > 100:
num_list_new.append(m)
patch_id_list_all.append(num_list.index(m) + 1)
# stable_id_order = []
# stable_score_order = sorted(stable_ls)
# for i in stable_score_order:
# stable_id_order.append(stable_score_order.index(i))
choose_patch = []
all_list = [i for i in range(0, 2000)]
if data[0] != '0':
patch_id_list = choose_ls[stable_ls.index(max(stable_ls))]
for k in patch_id_list:
patch_idx = []
for m in range(cloud.shape[0]):
if cloud[m, 3] == k:
patch_idx.append(m)
if len(patch_idx) >= 128:
choose_patch.append(np.array(patch_idx))
else:
choose_patch.append(np.array(all_list))
# patch_id_ls = [] # combine all choosed patch idx
# for patch_id in choose_patch:
# patch_id_ls = patch_id_ls + patch_id.tolist()
# patch_add = list(set(all_list) - set(patch_id_ls))
                # choose_patch.append(np.array(patch_add))  # add the remaining, unselected points as one extra patch
else:
choose_patch.append(np.array(all_list))
if not choose_patch:
choose_patch.append(np.array(all_list))
cloud = cloud[:, :-1]
model_axis = np.array([0.0,0.0,1.0])
return torch.from_numpy(cloud.astype(np.float32)), \
torch.LongTensor(choose.astype(np.int32)), \
img_masked, \
torch.from_numpy(rt.astype(np.float32)), \
torch.from_numpy(target_trans.astype(np.float32)), \
torch.LongTensor([obj_id - 1]), \
choose_patch,\
torch.from_numpy(target.astype(np.float32)),\
torch.from_numpy(model_points.astype(np.float32)),\
torch.from_numpy(normals.astype(np.float32)),\
model_info,\
torch.from_numpy(model_axis.astype(np.float32)),\
scene_id,\
im_id
def __len__(self):
return self.length
def get_sym_list(self):
return self.symmetry_obj_idx
def get_nosym_list(self):
return self.nosym_obj_idx
def get_rot_list(self):
return self.rot_obj_idx
def get_ref_list(self):
return self.ref_obj_idx
def get_num_points_mesh(self):
if self.refine:
return self.num_pt_mesh_large
else:
return self.num_pt_mesh_small
# border_list = [-1, 40, 80, 120, 160, 200, 240, 280, 320, 360, 400, 440, 480, 520, 560, 600, 640, 680]
border_list = [-1, 40, 80, 120, 160, 200, 240, 280, 320, 360, 400]
img_width = 400
img_length = 400
def get_bbox(label):
rows = np.any(label, axis=1)
cols = np.any(label, axis=0)
rmin, rmax = np.where(rows)[0][[0, -1]]
    cmin, cmax = np.where(cols)[0][[0, -1]]
"""Sun position management.
All relevant functionality is contained in the SunData class.
"""
import numpy as np
import time
import pandas as pd
import requests
import json
import geopy
import datetime as dt
import matplotlib.pyplot as plt
import io
from scipy.interpolate import interp1d
from scipy.constants import pi
from multiprocessing import Pool
from ..helper import req
from pvlib.solarposition import spa_python
from functools import lru_cache
from typing import List
class SunData():
"""Class to generage and manage data associated with sun positions.
Attributes:
dates (pd.date_range): Datetimes solar postion is calculated on.
data (pd.DataFrame): Dataset of time and corresponding sun properties.
"""
def __init__(self, location: List[float], horizon=None,
start_year: int = 2006, end_year: int = 2007,
timestep: int = 600):
"""Initialize.
Args:
location (List[float,float]): Location on the ground
horizon (np.ndarray): Horizon at location. Defaults to None.
            start_year (int): Initial year of the calculation.
Defaults to '2006'.
end_year (int): End year of the calculation.
Defaults to '2007'.
timestep (int): Timestep of the simulation. Defaults to 6e2.
"""
self.location = location
self.timestep = timestep
self.dates = pd.date_range(str(start_year),
str(end_year),
freq="{}s".format(int(timestep)))
if not(isinstance(horizon, interp1d)):
self.horizon = interp1d(
horizon[:, 0], horizon[:, 1], bounds_error=False)
else:
self.horizon = horizon
suns = spa_python(self.dates, *self.location)
suns.azimuth = suns.azimuth.map(
lambda azimuth: azimuth + (azimuth < 0)*360-180)
up = suns.elevation > self.horizon(suns.azimuth)
suns.insert(column="is_up", value=up, loc=0)
self.start_year = int(start_year)
self.end_year = (end_year)
self.data = suns
self._timestamp_begin = self.dates[0].timestamp()
self._timestamp_end = self.dates[-1].timestamp()
self._len_timeframe = self._timestamp_end - self._timestamp_begin
self._columns = list(self.data.columns)
self._index = self.data.index.values.astype(np.int64)*1e-9
self._values = self.data.values
def plot(self):
"""Plot horizon and sun positions over the course of a year."""
        plt.plot(self.data.azimuth[self.data.is_up],
                 self.data.elevation[self.data.is_up], 'o', alpha=.01)
plt.plot(self.horizon.x, self.horizon.y)
def __getitem__(self, index):
"""Get item either by index or datetime."""
if isinstance(index, dt.datetime):
timestamp_date = index.replace(year=self.start_year).timestamp()
index = int((timestamp_date - self._timestamp_begin)/self.timestep)
if not(timestamp_date == self._index[index]):
raise IndexError(
"Calculated index {} does not match real date {}".format(
index, timestamp_date)
)
return dict(zip(self._columns, self._values[index]))
else:
return self.data.values[index]
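# Illustrative sketch (not part of the original module): one way SunData might be built with a
# flat synthetic 5-degree horizon. The location (48.1 N, 11.6 E), the 2006 calendar year and
# the 1-hour timestep are arbitrary assumptions made purely for this example.
def _demo_sundata():
    horizon = np.column_stack([np.linspace(-180, 180, 73), np.full(73, 5.0)])
    sun_data = SunData(location=[48.1, 11.6], horizon=horizon,
                       start_year=2006, end_year=2007, timestep=3600)
    print("columns:", list(sun_data.data.columns))
    print("row at index 12:", sun_data[12])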
class Sun():
"""LEGACY SUN CLASS. NOT IN USE.
Class to get solar information at a
date in datetime
loc in array(2)
with horizon as array(azimuth, elevation)
"""
def __init__(self, date: dt.datetime, loc: List[float],
horizon: np.ndarray = None):
"""Initialize."""
self.date = date
self.loc = loc
self.altitude = None
self.azimuth = None
self.sun_angle()
if isinstance(horizon, np.ndarray):
self.horizon = horizon
self.horizon_f = interp1d(self.horizon[:, 0], self.horizon[:, 1])
elif isinstance(horizon, interp1d):
self.horizon_f = horizon
else:
raise ValueError('No valid horizon data')
self.up = self.is_up()
@property
def az_r(self):
"""Azimuth in radians."""
return self.azimuth*pi/180
@property
def al_r(self):
"""Altitude in radians."""
return self.altitude*pi/180
def __call__(self):
"""Return azimuth and altitude."""
return self.azimuth, self.altitude
def bpy(self, az_delta=-44):
"""Return sun rotation for blender.
az_delta: angle between north and x axis blender
"""
return (0, (-90+self.altitude)*pi/180, -(az_delta-self.azimuth) *
pi/180)
@property
def az_al(self):
"""Return azimuth and altitude."""
return self.azimuth, self.altitude
@property
def vec(self):
"""Return solar angle as normalized vector."""
x = np.cos(pi-self.al_r)
y = np.sin(self.az_r)
z = np.sin(self.al_r)
        return np.asarray([x, y, z])
import numpy as np
from scipy.linalg import eigh
import h5py
class _CCABase(object):
def __init__(self, numCV = None, reg = None, regs = None, numCC = None, numCCs = None, kernelcca = True, ktype = None, verbose = False, select = 0.2, cutoff = 1e-15, gausigma = 1.0, degree = 2):
self.numCV = numCV
self.reg = reg
self.regs = regs
self.numCC = numCC
self.numCCs = numCCs
self.kernelcca = kernelcca
self.ktype = ktype
self.cutoff = cutoff
self.select = select
self.gausigma = gausigma
self.degree = degree
if self.kernelcca and self.ktype == None:
self.ktype = "linear"
self.verbose = verbose
def train(self, data):
nT = data[0].shape[0]
if self.verbose:
if self.kernelcca:
print("Training CCA, %s kernel, regularization = %0.4f, %d components" % (self.ktype, self.reg, self.numCC))
else:
print("Training CCA, regularization = %0.4f, %d components" % (self.reg, self.numCC))
comps = kcca(data, self.reg, self.numCC, kernelcca = self.kernelcca, ktype = self.ktype, gausigma = self.gausigma, degree = self.degree)
self.cancorrs, self.ws, self.comps = recon(data, comps, kernelcca = self.kernelcca)
if len(data) == 2:
self.cancorrs = self.cancorrs[np.nonzero(self.cancorrs)]
return self
def validate(self, vdata):
vdata = [np.nan_to_num(_zscore(d)) for d in vdata]
if not hasattr(self, 'ws'):
raise NameError("Algorithm needs to be trained!")
self.preds, self.corrs = predict(vdata, self.ws, self.cutoff)
return self.corrs
def compute_ev(self, vdata):
nD = len(vdata)
nT = vdata[0].shape[0]
nC = self.ws[0].shape[1]
nF = [d.shape[1] for d in vdata]
self.ev = [np.zeros((nC, f)) for f in nF]
for cc in range(nC):
ccs = cc+1
if self.verbose:
print("Computing explained variance for component #%d" % ccs)
preds, corrs = predict(vdata, [w[:, ccs-1:ccs] for w in self.ws], self.cutoff)
resids = [abs(d[0]-d[1]) for d in zip(vdata, preds)]
for s in range(nD):
ev = abs(vdata[s].var(0) - resids[s].var(0))/vdata[s].var(0)
                ev[np.isnan(ev)] = 0.
import serial
import argparse
import time
import json
import numpy as np
import os
import datetime
parser = argparse.ArgumentParser()
parser.add_argument("--serial", default="COM7", help="Serial port to connect to for instance COM3 on windows or /dev/ttyUSB0 on Linux")
parser.add_argument("-b", "--baud", help="Baud rate for reading serial port", default="9600")
parser.add_argument("-o", "--output", help="Where to save output data", default=".")
args = parser.parse_args()
def GetIRData(serialport, baud, output_folder):
ser = serial.Serial(serialport, baud)
while True:
line = ser.readline()
data = eval(str(line).split("JSON")[1])
message_number = str(line).split('Message')[1].split('JSON')[0]
print(f"Message : {message_number}")
retrieved_data = ParseJsonData(data)
np.save(os.path.join(output_folder, f"{message_number.replace(' ', '')}-{str(datetime.datetime.now().timestamp())}.npy"),retrieved_data)
print("Getting new data ... ")
time.sleep(1)
print("Done change position")
def ParseJsonData(json_data):
"""Parse Json from arduino
Args:
json_data (str): look like {'IR':[{'A':3, 'B':0, 'C':0, 'D':0, 'E':1, 'F':4, 'G':0, 'H':0, ...},...]}
Returns:
[np.array]: 3D array of containing slices with shape (n,len_diode,len_diode)
with len_diode corresponding to number of diode defined in json
"""
    data_names = [chr(letter) for letter in range(65,65+len(json_data["IR"][0]))] # expect data to be A,B,C ...
one_led_array = np.array([])
    one_slice_array = np.array([])
import numpy as np
from scipy.stats import linregress
import matplotlib.pyplot as plt
import bisect
def find_ge(a, x):
"""Find leftmost item greater than or equal to x"""
i = bisect.bisect_left(a, x)
if i != len(a):
return i
raise ValueError
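# Small usage sketch (not in the original file): find_ge returns the index of the first element
# greater than or equal to x in a sorted sequence, and raises ValueError if no such element exists.
def _demo_find_ge():
    sizes = [1, 2, 4, 8, 16]
    assert find_ge(sizes, 3) == 2    # first element >= 3 is 4, at index 2
    assert find_ge(sizes, 16) == 4
    try:
        find_ge(sizes, 17)
    except ValueError:
        print("no element >= 17 in", sizes)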
def mean_clone_size_fit(times, rlam):
"""For the single progenitor models (for progenitor cells only)"""
return 1+rlam*times
def surviving_clones_fit(times, rlam, start_clones):
"""For the single progenitor models (for progenitor cells only)"""
return start_clones*(1/(1+rlam*times))
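# Hedged sketch (not part of the original analysis): recovers rlam from synthetic mean clone
# sizes with scipy.optimize.curve_fit. The true rlam of 0.3 and the weekly time points are
# arbitrary values chosen only for illustration.
def _demo_fit_mean_clone_size():
    from scipy.optimize import curve_fit
    times = np.arange(0, 52, dtype=float)              # e.g. weeks post-labelling
    observed = mean_clone_size_fit(times, 0.3)         # noiseless synthetic "data"
    popt, _ = curve_fit(mean_clone_size_fit, times, observed, p0=[0.1])
    print("fitted rlam:", popt[0])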
def mean_clone_size(clone_size_dist):
# Mean of surviving clones from a clone size frequency array
"""Gets the mean of clones > 1 cell. For dists that start at 0 cell clones"""
return sum([(i) * clone_size_dist[i] for i in range(1, len(clone_size_dist))]) / clone_size_dist[1:].sum()
# Incomplete moment functions
def incomplete_moment(clone_size_dist):
# Assuming clone_size_dist starts from zero
if clone_size_dist[1:].sum() == 0:
return None
mcs = mean_clone_size(clone_size_dist)
total_living_clones = clone_size_dist[1:].sum()
proportions = clone_size_dist / total_living_clones
sum_terms = proportions * np.arange(len(proportions))
moments = np.cumsum(sum_terms[::-1])[::-1]
return moments / mcs
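# Illustrative sketch (not in the original module): for a geometric-like clone size distribution
# the incomplete moment decays roughly exponentially. The counts below are arbitrary demo values;
# index 0 holds extinct clones and is ignored by the calculation.
def _demo_incomplete_moment():
    sizes = np.arange(50)
    clone_size_dist = np.zeros(50)
    clone_size_dist[1:] = 1000.0 * np.exp(-sizes[1:] / 8.0)   # clones counted per size
    print("mean surviving clone size:", mean_clone_size(clone_size_dist))
    print("incomplete moment (first 5 sizes):", incomplete_moment(clone_size_dist)[:5])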
def incomplete_moment_sem(clone_size_dist):
sems = []
s1 = (np.arange(len(clone_size_dist))*clone_size_dist).sum()
s2 = 0
s3 = 0
for i, v in zip(reversed(range(len(clone_size_dist))), reversed(clone_size_dist)):
s2 += v*i
s3 += v*i**2
sem = ((s1-s2)/s1**2)*np.sqrt(s3)
sems.append(sem)
return sems[::-1]
def incomplete_moment_vaf_fixed_intervals(vafs, interval):
vafs = np.flip(np.sort(vafs), axis=0)
mean_clone_size = vafs.mean()
x = np.arange(int(vafs.min() / interval) * interval, round(vafs.max() / interval) * interval,
interval)
if len(x) == 0: # No variation in clone sizes.
return np.array([]), np.array([])
x_idx = -1
last_x = x[x_idx]
y = []
incom = 0
for v in vafs:
while v < last_x:
y.append(incom)
x_idx -= 1
last_x = x[x_idx]
incom += v
y.append(incom)
return x, np.flip(np.array(y), axis=0) / mean_clone_size / len(vafs)
def fit_straight_line_to_incomplete_moment(incom, fix_intercept=True):
"""The intercept we refer to here is when x=min_clone_size since this is the point we want to fix
We therefore will shift over the values by one to fit, then shift back to plot
incom will already be from clone size min_clone_size as the first entry
"""
    log_incom = np.log(incom)
from __future__ import print_function, division, absolute_import
import functools
import sys
import warnings
# unittest gained self.subTest() only in Python 3.4
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
from imgaug import dtypes as iadt
from imgaug.testutils import (array_equal_lists, keypoints_equal, reseed,
runtest_pickleable_uint8_img)
import imgaug.augmenters.arithmetic as arithmetic_lib
import imgaug.augmenters.contrast as contrast_lib
class TestAdd(unittest.TestCase):
def setUp(self):
reseed()
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.Add(value="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.Add(value=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_add_zero(self):
        # no add, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_add_one(self):
# add > 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
def test_minus_one(self):
# add < 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=-1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
def test_uint8_every_possible_value(self):
# uint8, every possible addition for base value 127
for value_type in [float, int]:
for per_channel in [False, True]:
for value in np.arange(-255, 255+1):
aug = iaa.Add(value=value_type(value), per_channel=per_channel)
expected = np.clip(127 + value_type(value), 0, 255)
img = np.full((1, 1), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == expected
img = np.full((1, 1, 3), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert np.all(img_aug == expected)
def test_add_floats(self):
# specific tests with floats
aug = iaa.Add(value=0.75)
img = np.full((1, 1), 1, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 2
img = np.full((1, 1), 1, dtype=np.uint16)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 2
aug = iaa.Add(value=0.45)
img = np.full((1, 1), 1, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 1
img = np.full((1, 1), 1, dtype=np.uint16)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 1
def test_stochastic_parameters_as_value(self):
# test other parameters
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Add(value=iap.DiscreteUniform(1, 10))
observed = aug.augment_images(images)
assert 100 + 1 <= np.average(observed) <= 100 + 10
aug = iaa.Add(value=iap.Uniform(1, 10))
observed = aug.augment_images(images)
assert 100 + 1 <= np.average(observed) <= 100 + 10
aug = iaa.Add(value=iap.Clip(iap.Normal(1, 1), -3, 3))
observed = aug.augment_images(images)
assert 100 - 3 <= np.average(observed) <= 100 + 3
aug = iaa.Add(value=iap.Discretize(iap.Clip(iap.Normal(1, 1), -3, 3)))
observed = aug.augment_images(images)
assert 100 - 3 <= np.average(observed) <= 100 + 3
def test_keypoints_dont_change(self):
        # keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Add(value=1)
aug_det = iaa.Add(value=1).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_value(self):
# varying values
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Add(value=(0, 10))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
                if not np.array_equal(observed_aug_det, last_aug_det):
                    nb_changed_aug_det += 1
"""Functions to create and plot outlier scores (or other) in a fixed bounded range. Intended to use to
show the results of an outlier algorithm in a user friendly UI"""
import numpy as np
def make_linear_part(max_score, min_score):
"""
:param max_score: the maximum score seen on train
:param min_score: the minimum score seen on train
:return: the linear part of the ui score mapping
"""
slope = 1 / (max_score - min_score)
def linear_part(x):
return x * slope + 1 - slope * min_score
return linear_part
def make_top_part(base, max_score, min_score):
"""
The base has to be between 0 and 1, strictly.
The function will be of the form -base ** (-x + t) + C, where t and C
are the two constants to solve for. The constraints are continuity and
smoothness at max_score when pieced with the linear part
"""
slope = 1 / (max_score - min_score)
t = np.log(slope / np.log(base)) / np.log(base) + max_score
# at the limit when x->inf, the function will approach c
c = 2 + base ** (-max_score + t)
def top_part(x):
return -(base ** (-x + t)) + c
return top_part, c
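# Illustrative sketch (not in the original module): pieces the linear part together with the
# top "sigmoid" and checks continuity at max_score. min_score=0 and max_score=10 are arbitrary
# demo values; base=2.0 is used here so that slope / log(base) stays positive and the logarithm
# inside make_top_part is real-valued.
def _demo_score_mapping():
    min_score, max_score, base = 0.0, 10.0, 2.0
    linear_part = make_linear_part(max_score, min_score)
    top_part, ceiling = make_top_part(base, max_score, min_score)
    print("linear part at min/max score:", linear_part(min_score), linear_part(max_score))
    print("top part at max score:", top_part(max_score))
    print("asymptotic ceiling of the top part:", ceiling)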
def make_bottom_part(base, max_score, min_score):
"""
The base has to be between 0 and 1, strictly.
The function will be of the form -base ** (-x + t) + C, where t and C
are the two constants to solve for. The constraints are continuity and
    smoothness at min_score when pieced with the linear part
"""
slope = 1 / (max_score - min_score)
    t = np.log(slope / np.log(base)) / np.log(base) + min_score  # reconstructed by analogy with make_top_part (truncated in the source)
#!/usr/bin/env python
import rospy
import numpy as np
import cv2
import matplotlib.pyplot as plt
from tf.transformations import euler_from_quaternion
from geometry_msgs.msg import PoseStamped , Twist
from nav_msgs.msg import Odometry
from Astarcodes import astar , reconstract_path , world2maplocation , Save_A_star_Solution , valid_point , save_configuration_as_img
from get_map import get_occupancy_data , build_A_star_required_map , reconstract_map
from get_destination_from_rviz import get_goal
def publish_vel(front , rad , pub):
T = Twist()
T.linear.x = front
T.linear.y = 0
T.linear.z = 0
T.angular.z = rad
T.angular.y = 0
T.angular.x = 0
for _ in range(5): pub.publish(T)
def callback(msg):
global pub , cond , map_building_cond , a , b , path , ros_path , idx , res , obs , orig , x_start , y_start , old_d
pos = msg.pose.pose.position
q = msg.pose.pose.orientation
xr , yr = pos.x , pos.y
q_l = [q.x,q.y,q.z,q.w]
(_,_,thr) = euler_from_quaternion(q_l)
if cond:
idx = 0
if map_building_cond:
x_start = -2.0
y_start = -0.5
map_building_cond = False
            print('Building obstacles data from occupancy grid msg...')
m , res = get_occupancy_data()
orig = np.array([x_start , y_start , 0])
newmap = reconstract_map(m)
obs = build_A_star_required_map(m ,a , b , res , theta_inc = np.pi/18)
save_configuration_as_img(obs[:,:,0] , name = 'Configuration 0.png')
save_configuration_as_img(obs[:,:,3] , name = 'Configuration 30.png')
save_configuration_as_img(obs[:,:,6] , name = 'Configuration 60.png')
            print('Finished generating obstacles matrix data')
else:
m , _ = get_occupancy_data()
newmap = reconstract_map(m)
while True:
print('Please insert a goal using rviz')
x,y,th = get_goal()
print(round(x,3),round(y,3),round(th,3))
            point = np.array([x,y,th])
import io
import itertools
from dataclasses import dataclass, field
from pathlib import Path
from typing import Generator, List, Optional
from matplotlib import figure
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from matplotlib.animation import FFMpegFileWriter
from matplotlib import collections as mc
import matplotlib.patches as patches
from matplotlib.patches import Circle, Wedge
import scipy.stats as stats
import geopandas as gpd
from shapely.geometry import Polygon
import copy
BACKGROUND_COLOUR = "#000000FF"
FRAME_RATE = 24
@dataclass
class Scene:
"""
Base class for animated scene layers
"""
start_frame: int
end_frame: int
zorder: float
render_frame: Generator[Image.Image, None, None]
def convert_plot_to_image(figure: figure.Figure) -> Image.Image:
"""
Converts the specified matplotlib Figure into a PIL Image
:param figure: Figure to convert
:return: PIL Image
"""
buf = io.BytesIO()
figure.savefig(buf, format="png", facecolor="None")
buf.seek(0)
im = Image.open(buf)
return im
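# Small usage sketch (not part of the original module): renders a trivial figure and converts it
# with convert_plot_to_image; the figure size and the plotted points are arbitrary demo values.
def _demo_convert_plot_to_image():
    fig = plt.figure(figsize=(4, 3))
    ax = fig.add_subplot(111)
    ax.plot([0, 1, 2], [0, 1, 0], color="white")
    image = convert_plot_to_image(fig)
    plt.close(fig)
    print("image size:", image.size, "mode:", image.mode)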
def draw_eye(
axes_dims: List[float], persist_frames: int, fade_out_frames: int
) -> Generator[Image.Image, None, None]:
"""
Generator method for rendering a stylized eye
:param axes_dims: offset and dimensions of the axes to render to
:param persist_frames: Number of frames to persist the frames to
:param fade_out_frames: Number of frames to fade out the eye
:return: Generator for producing PIL Image frames
"""
interval_count = 361
angle = np.linspace(0, np.pi * 2.0, interval_count)
radius = np.array([num % 2 for num in range(0, interval_count)]) * 2.5 + 1.5
x = radius * np.cos(angle)
y = radius * np.sin(angle)
iris = np.vstack([x.reshape(1, -1), y.reshape(1, -1)])
intervals = np.linspace(-7.05, 7.05, interval_count)
positive_curve = 0.075 * intervals ** 2 - 3.75
negative_curve = -0.075 * (intervals ** 2) + 3.75
im: Image.Image = Image.fromarray(np.zeros((1, 1, 4), dtype=np.uint8))
figure = plt.figure(figsize=(19.2, 10.8))
for i in range(1, interval_count + 3, 3):
figure.clear()
# Draw Iris
ax = figure.add_axes(axes_dims)
ax.fill_between(
intervals[interval_count - i :],
positive_curve[interval_count - i :],
negative_curve[interval_count - i :],
color="white",
zorder=1,
)
ax.plot(iris[0, 0:i], iris[1, 0:i], linewidth=5, color="steelblue", zorder=3)
ax.fill_between(
intervals,
np.ones(interval_count) * 5,
negative_curve,
color="black",
alpha=1.0,
zorder=4,
)
ax.fill_between(
intervals,
-np.ones(interval_count) * 5,
positive_curve,
color="black",
alpha=1.0,
zorder=4,
)
ax.set_xlim(-9.6, 9.6)
ax.set_ylim(-4.32, 4.32)
ax.axis("off")
patch = patches.Circle((0, 0), radius=4.02, color="black", zorder=2)
ax.add_patch(patch)
im = convert_plot_to_image(figure)
yield im
# Keep the image for this many frames
for i in range(persist_frames):
yield im
# Fade out the image over this many frames
fade_out_alpha = np.power(np.linspace(1, 0, fade_out_frames), 2)
for alpha in fade_out_alpha:
pixels = np.array(im)
alpha_layer = pixels[:, :, 3]
alpha_layer[alpha_layer > 0] = int(255 * alpha)
yield Image.fromarray(pixels)
# Stay black for the remainder
black_screen = np.array(im)
black_screen[:, :, :] = 0
im = Image.fromarray(black_screen)
while True:
yield im
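# Illustrative sketch (not in the original module): the frame generators in this file never
# terminate, so a consumer takes a fixed number of frames with itertools.islice. Three frames,
# the axes placement and the fade settings below are arbitrary demo values; rendering at
# 1920x1080 is slow, so this is intended only for quick inspection.
def _demo_draw_eye_frames():
    frames = list(itertools.islice(
        draw_eye([0.3, 0.3, 0.4, 0.4], persist_frames=0, fade_out_frames=1), 3))
    print("rendered", len(frames), "frames of size", frames[0].size)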
def draw_text(
sentence: str,
text_pos_list: List[int],
alpha_transitions: int,
persist_frames: int,
fade_out_frames: int,
font_size: int,
left_offset: float,
bottom_offset: float,
) -> Generator[Image.Image, None, None]:
"""
Renders a sentence with configurable phrase boundaries
:param sentence: Full text to render
:param text_pos_list: Character offsets in the sentence to fade in
:param alpha_transitions: Number of alpha increments to fade in each phrase
:param persist_frames: Number of frames to persist the sentence once drawn
:param fade_out_frames: Number of frames to fade out the sentence
:param font_size: Size of the font to render the sentence with
:param left_offset: axes offset for the text from the left boundary
:param bottom_offset: axes offset for the text from the bottom boundary
:return: Generator for producing PIL Image frames
"""
im: Image.Image = Image.fromarray(np.zeros((1, 1, 4), dtype=np.uint8))
figure = plt.figure(figsize=(19.2, 10.8))
alpha_array = np.power(np.linspace(0, 1, alpha_transitions), 2)
for idx, text_pos in enumerate(text_pos_list):
for alpha in alpha_array:
figure.clear()
text_axes = figure.add_axes([0.0, 0.0, 1.0, 1.0])
text_axes.axis("off")
if idx > 0:
text_axes.text(
left_offset,
bottom_offset,
s=sentence[: text_pos_list[idx - 1]],
fontsize=font_size,
style="oblique",
ha="left",
va="bottom",
color="white",
alpha=1.0,
)
text_axes.text(
left_offset,
bottom_offset,
s=sentence[:text_pos],
fontsize=font_size,
style="oblique",
ha="left",
va="bottom",
color="white",
alpha=alpha,
)
im = convert_plot_to_image(figure)
yield im
# Keep the image for this many frames
for i in range(persist_frames):
yield im
# Fade out the image over this many frames
fade_out_alpha = np.power(np.linspace(1, 0, fade_out_frames), 2)
for alpha in fade_out_alpha:
figure.clear()
text_axes = figure.add_axes([0.0, 0.0, 1.0, 1.0])
text_axes.axis("off")
text_axes.text(
left_offset,
bottom_offset,
s=sentence,
fontsize=font_size,
style="oblique",
ha="left",
va="bottom",
color="white",
alpha=alpha,
)
im = convert_plot_to_image(figure)
yield im
# Stay black for the remainder
black_screen = np.array(im)
black_screen[:, :, :] = 0
im = Image.fromarray(black_screen)
while True:
yield im
@dataclass
class FireAutomata:
"""
Class for representing and animating a fire effect automta
"""
height: int
width: int
decay: float
spawn_points: int
heatmap: np.ndarray = field(init=False)
spawn_indices: np.ndarray = field(init=False)
non_spawn_indices: np.ndarray = field(init=False)
flame_base: np.ndarray = field(init=False)
height_max_index: int = field(init=False)
width_max_index: int = field(init=False)
def __post_init__(self):
"""
Initialize the calculated properties
"""
self.heatmap = np.zeros((self.height, self.width))
indices = np.arange(self.width)
        self.spawn_indices = np.random.choice(indices, self.spawn_points, replace=False)
        self.non_spawn_indices = np.delete(indices, self.spawn_indices)
self.flame_base = np.zeros(self.width)
self.height_max_index = self.height - 1
self.width_max_index = self.width - 1
def update_heatmap(self):
"""
Update the fire automata by one frame
"""
swap_spawn = np.random.randint(len(self.spawn_indices))
swap_non_spawn = np.random.randint(len(self.non_spawn_indices))
self.spawn_indices[swap_spawn], self.non_spawn_indices[swap_non_spawn] = (
self.non_spawn_indices[swap_non_spawn],
self.spawn_indices[swap_spawn],
)
self.flame_base *= 0
self.flame_base[self.spawn_indices] = 1
self.heatmap[self.height_max_index, :] = self.flame_base
delta = np.random.random((self.height_max_index, self.width, 3))
delta[:, self.width_max_index, 0] = 0
delta[:, 0, 2] = 0
scaled_delta = delta / delta.sum(axis=2)[:, :, np.newaxis]
heatmap_source_part = np.zeros((self.height_max_index, self.width, 3))
heatmap_source_part[:, : self.width_max_index, 0] = self.heatmap[
1 : self.height, 1 : self.width
]
heatmap_source_part[:, :, 1] = self.heatmap[1 : self.height, :]
heatmap_source_part[:, 1 : self.width, 2] = self.heatmap[
1 : self.height, : self.width_max_index
]
self.heatmap[: self.height_max_index, :] = (
heatmap_source_part * scaled_delta
).sum(axis=2) * self.decay
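# Illustrative sketch (not in the original module): steps a small FireAutomata a few times and
# reports the resulting heat range. The grid size, decay and number of steps are arbitrary
# demo values chosen only to show the update loop.
def _demo_fire_automata():
    automata = FireAutomata(height=16, width=32, decay=0.95, spawn_points=8)
    for _ in range(10):
        automata.update_heatmap()
    print("heatmap max after 10 steps:", automata.heatmap.max())
    print("heatmap mean after 10 steps:", automata.heatmap.mean())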
def draw_fire_automata(
axes_dims: List[float],
fade_in_frames: int,
update_frames: int,
fade_out_frames: int,
) -> Generator[Image.Image, None, None]:
"""
Generator method for rendering the fire automata
:param axes_dims: Offset and dimensions of the axes
:param fade_in_frames: Number of frames to fade in the graphic
:param update_frames: Number of frames to update the graphic
:param fade_out_frames: Number of frames to fade out the graphic
:return: Generator for producing PIL Image frames
"""
im: Image.Image = Image.fromarray(np.zeros((1, 1, 4), dtype=np.uint8))
fire_automata = FireAutomata(height=65, width=64, decay=0.95, spawn_points=20)
figure = plt.figure(figsize=(19.2, 10.8))
fade_in_alpha = np.power(np.linspace(0, 1, fade_in_frames), 2)
for alpha in fade_in_alpha:
figure.clear()
render_axes = figure.add_axes(axes_dims)
fire_automata.update_heatmap()
render_axes.imshow(
fire_automata.heatmap[:-1, :],
cmap="hot",
interpolation="nearest",
alpha=alpha,
)
render_axes.axis("off")
im = convert_plot_to_image(figure)
yield im
for frame_number in range(update_frames):
figure.clear()
render_axes = figure.add_axes(axes_dims)
fire_automata.update_heatmap()
render_axes.imshow(
fire_automata.heatmap[:-1, :], cmap="hot", interpolation="nearest"
)
render_axes.axis("off")
im = convert_plot_to_image(figure)
yield im
fade_out_alpha = np.power(np.linspace(1, 0, fade_out_frames), 2)
for alpha in fade_out_alpha:
figure.clear()
render_axes = figure.add_axes(axes_dims)
fire_automata.update_heatmap()
render_axes.imshow(
fire_automata.heatmap[:-1, :],
cmap="hot",
interpolation="nearest",
alpha=alpha,
)
render_axes.axis("off")
im = convert_plot_to_image(figure)
yield im
# Stay black for the remainder
black_screen = np.array(im)
black_screen[:, :, :] = 0
im = Image.fromarray(black_screen)
while True:
yield im
def draw_gaussian(
axes_dims: List[float],
fade_in_frames: int,
update_frames: int,
persist_frames: int,
fade_out_frames: int,
) -> Generator[Image.Image, None, None]:
"""
    Generator method for drawing an animated Gaussian curve
:param axes_dims: Offset and dimensions of the plot axes
:param fade_in_frames: Number of frames to fade in the graphic
:param update_frames: Number of frames to update the graphic
:param persist_frames: Number of frames to persist the graphic
:param fade_out_frames: Number of frames to fade out the graphic
:return: generator for producing the PIL Image frames
"""
figure = plt.figure(figsize=(19.2, 10.8))
with plt.style.context("dark_background"):
ax = figure.add_axes(axes_dims)
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["left"].set_linewidth(2)
ax.spines["bottom"].set_linewidth(2)
ax.set_xlim((-8.8, 8.8))
ax.set_ylim((-0.02, 0.42))
im = convert_plot_to_image(figure)
# [0.05, 0.1, 0.9, 0.25]
# Fade in the axes over this many frames
fade_in_alpha = np.power(np.linspace(0, 1, fade_in_frames), 2)
for alpha in fade_in_alpha:
pixels = np.array(im)
alpha_layer = pixels[:, :, 3]
alpha_layer[alpha_layer > 0] = int(255 * alpha)
yield Image.fromarray(pixels)
    # Animate the Gaussian
mu = 0
variance = 1
sigma = | np.sqrt(variance) | numpy.sqrt |
import astropy.units as u
from astropy.time import Time
import numpy as np
from aspros import simulate_lc, inject_transits, bls_peakfinder
n_trials = 1000000
detected = []
not_detected = []
for i in range(n_trials):
period = (9 * np.random.rand() + 3) * u.hour
epoch = Time('2020-04-01') + np.random.rand() * u.day
radius = (500 + 2500 * np.random.rand()) * u.km
inc = 90 * u.deg
periods = np.linspace(2, 12, 1500) * u.hour
transit_duration = 2 * u.min
seed = None # 42
clean_lc = simulate_lc(24*u.hour, efficiency=0.6, seed=seed)
transit_lc = inject_transits(clean_lc, period, epoch, radius, inc)
results, bests, stats = transit_lc.bls(periods=periods,
duration=transit_duration)
best_period, best_duration, best_epoch = bests
top_powers, significance = bls_peakfinder(results)
# Compute S/N
phases = (((transit_lc.times.jd - best_epoch.jd) %
best_period.to(u.day).value) / best_period.to(u.day).value)
phases[phases > 0.5] -= 1
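    # Phases now lie in roughly [-0.5, 0.5), centred on the candidate mid-transit
    # epoch, so points near phase zero correspond to the candidate in-transit samples.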
intransit = | np.abs(phases) | numpy.abs |
"""
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import pytest
from astropy.utils.misc import NumpyRNGContext
from ..mean_los_velocity_vs_rp import mean_los_velocity_vs_rp
from ...tests.cf_helpers import generate_locus_of_3d_points
__all__ = ('test_mean_los_velocity_vs_rp_correctness1', 'test_mean_los_velocity_vs_rp_correctness2',
'test_mean_los_velocity_vs_rp_correctness3', 'test_mean_los_velocity_vs_rp_correctness4',
'test_mean_los_velocity_vs_rp_parallel', 'test_mean_los_velocity_vs_rp_auto_consistency',
'test_mean_los_velocity_vs_rp_cross_consistency')
fixed_seed = 43
def pure_python_mean_los_velocity_vs_rp(
sample1, velocities1, sample2, velocities2, rp_min, rp_max, pi_max, Lbox=None):
""" Brute force pure python function calculating mean los velocities
in a single bin of separation.
"""
if Lbox is None:
xperiod, yperiod, zperiod = np.inf, np.inf, np.inf
else:
xperiod, yperiod, zperiod = Lbox, Lbox, Lbox
npts1, npts2 = len(sample1), len(sample2)
running_tally = []
for i in range(npts1):
for j in range(npts2):
dx = sample1[i, 0] - sample2[j, 0]
dy = sample1[i, 1] - sample2[j, 1]
dz = sample1[i, 2] - sample2[j, 2]
dvz = velocities1[i, 2] - velocities2[j, 2]
if dx > xperiod/2.:
dx = xperiod - dx
elif dx < -xperiod/2.:
dx = -(xperiod + dx)
if dy > yperiod/2.:
dy = yperiod - dy
elif dy < -yperiod/2.:
dy = -(yperiod + dy)
if dz > zperiod/2.:
dz = zperiod - dz
zsign_flip = -1
elif dz < -zperiod/2.:
dz = -(zperiod + dz)
zsign_flip = -1
else:
zsign_flip = 1
d_rp = np.sqrt(dx*dx + dy*dy)
if (d_rp > rp_min) & (d_rp < rp_max) & (abs(dz) < pi_max):
if abs(dz) > 0:
vlos = dvz*dz*zsign_flip/abs(dz)
else:
vlos = dvz
running_tally.append(vlos)
if len(running_tally) > 0:
return np.mean(running_tally)
else:
return 0.
def test_mean_los_velocity_vs_rp_vs_brute_force_pure_python():
    """ This function tests that the
    `~halotools.mock_observables.mean_los_velocity_vs_rp` function returns
results that agree with a brute force pure python implementation
for a random distribution of points, both with and without PBCs.
"""
npts = 99
with NumpyRNGContext(fixed_seed):
sample1 = np.random.random((npts, 3))
sample2 = np.random.random((npts, 3))
velocities1 = np.random.uniform(-10, 10, npts*3).reshape((npts, 3))
velocities2 = np.random.uniform(-10, 10, npts*3).reshape((npts, 3))
rp_bins, pi_max = np.array([0, 0.1, 0.2, 0.3]), 0.1
############################################
# Run the test with PBCs turned off
s1s2 = mean_los_velocity_vs_rp(sample1, velocities1, rp_bins, pi_max,
sample2=sample2, velocities2=velocities2, do_auto=False)
rmin, rmax = rp_bins[0], rp_bins[1]
pure_python_s1s2 = pure_python_mean_los_velocity_vs_rp(
sample1, velocities1, sample2, velocities2, rmin, rmax, pi_max)
assert np.allclose(s1s2[0], pure_python_s1s2, rtol=0.01)
rmin, rmax = rp_bins[1], rp_bins[2]
pure_python_s1s2 = pure_python_mean_los_velocity_vs_rp(
sample1, velocities1, sample2, velocities2, rmin, rmax, pi_max)
assert np.allclose(s1s2[1], pure_python_s1s2, rtol=0.01)
rmin, rmax = rp_bins[2], rp_bins[3]
pure_python_s1s2 = pure_python_mean_los_velocity_vs_rp(
sample1, velocities1, sample2, velocities2, rmin, rmax, pi_max)
assert np.allclose(s1s2[2], pure_python_s1s2, rtol=0.01)
# ############################################
# # Run the test with PBCs operative
s1s2 = mean_los_velocity_vs_rp(sample1, velocities1, rp_bins, pi_max,
sample2=sample2, velocities2=velocities2, do_auto=False, period=1)
rmin, rmax = rp_bins[0], rp_bins[1]
pure_python_s1s2 = pure_python_mean_los_velocity_vs_rp(
sample1, velocities1, sample2, velocities2, rmin, rmax, pi_max, Lbox=1)
assert np.allclose(s1s2[0], pure_python_s1s2, rtol=0.01)
rmin, rmax = rp_bins[1], rp_bins[2]
pure_python_s1s2 = pure_python_mean_los_velocity_vs_rp(
sample1, velocities1, sample2, velocities2, rmin, rmax, pi_max, Lbox=1)
assert np.allclose(s1s2[1], pure_python_s1s2, rtol=0.01)
rmin, rmax = rp_bins[2], rp_bins[3]
pure_python_s1s2 = pure_python_mean_los_velocity_vs_rp(
sample1, velocities1, sample2, velocities2, rmin, rmax, pi_max, Lbox=1)
assert np.allclose(s1s2[2], pure_python_s1s2, rtol=0.01)
@pytest.mark.slow
def test_mean_los_velocity_vs_rp_correctness1():
""" This function tests that the
`~halotools.mock_observables.mean_los_velocity_vs_rp` function returns correct
    results for a controlled distribution of points whose mean line-of-sight velocity
is analytically calculable.
For this test, the configuration is two tight localizations of points,
the first at (1, 0, 0.1), the second at (1, 0.2, 0.25).
The first set of points is moving at +50 in the z-direction;
the second set of points is at rest.
PBCs are set to infinity in this test.
So in this configuration, the two sets of points are moving towards each other,
and so the relative z-velocity should be -50 for cross-correlations
in separation bins containing the pair of points. For any separation bin containing only
one set or the other, the auto-correlations should be 0 because each set of
points moves coherently.
The tests will be run with the two point configurations passed in as
separate ``sample1`` and ``sample2`` distributions, as well as bundled
together into the same distribution.
"""
correct_relative_velocity = -50
npts = 100
xc1, yc1, zc1 = 1, 0, 0.1
xc2, yc2, zc2 = 1, 0.2, 0.25
sample1 = generate_locus_of_3d_points(npts, xc=xc1, yc=yc1, zc=zc1, seed=fixed_seed)
sample2 = generate_locus_of_3d_points(npts, xc=xc2, yc=yc2, zc=zc2, seed=fixed_seed)
velocities1 = np.zeros(npts*3).reshape(npts, 3)
velocities2 = np.zeros(npts*3).reshape(npts, 3)
velocities1[:, 2] = 50.
rp_bins, pi_max = np.array([0, 0.1, 0.15, 0.21, 0.25]), 0.2
s1s1, s1s2, s2s2 = mean_los_velocity_vs_rp(sample1, velocities1, rp_bins, pi_max,
sample2=sample2, velocities2=velocities2)
assert np.allclose(s1s1[0:2], 0, rtol=0.01)
assert np.allclose(s1s2[0:2], 0, rtol=0.01)
assert np.allclose(s2s2[0:2], 0, rtol=0.01)
assert np.allclose(s1s1[2], 0, rtol=0.01)
assert np.allclose(s1s2[2], correct_relative_velocity, rtol=0.01)
assert np.allclose(s2s2[2], 0, rtol=0.01)
assert np.allclose(s1s1[3], 0, rtol=0.01)
assert np.allclose(s1s2[3], 0, rtol=0.01)
assert np.allclose(s2s2[3], 0, rtol=0.01)
# Now bundle sample2 and sample1 together and only pass in the concatenated sample
sample = np.concatenate((sample1, sample2))
velocities = np.concatenate((velocities1, velocities2))
s1s1 = mean_los_velocity_vs_rp(sample, velocities, rp_bins, pi_max)
assert np.allclose(s1s1[0:2], 0, rtol=0.01)
assert np.allclose(s1s1[2], correct_relative_velocity, rtol=0.01)
assert np.allclose(s1s1[3], 0, rtol=0.01)
@pytest.mark.slow
def test_mean_los_velocity_vs_rp_correctness2():
""" This function tests that the
`~halotools.mock_observables.mean_los_velocity_vs_rp` function returns correct
    results for a controlled distribution of points whose mean line-of-sight velocity
is analytically calculable.
For this test, the configuration is two tight localizations of points,
    the first at (0.5, 0.5, 0.1), the second at (0.5, 0.35, 0.25).
The first set of points is moving at -50 in the z-direction;
the second set of points is at rest.
PBCs are set to infinity in this test.
So in this configuration, the two sets of points are moving away from each other,
and so the relative z-velocity should be +50 for cross-correlations
in separation bins containing the pair of points. For any separation bin containing only
one set or the other, the auto-correlations should be 0 because each set of
points moves coherently.
The tests will be run with the two point configurations passed in as
separate ``sample1`` and ``sample2`` distributions, as well as bundled
together into the same distribution.
"""
correct_relative_velocity = +50
npts = 100
xc1, yc1, zc1 = 0.5, 0.5, 0.1
xc2, yc2, zc2 = 0.5, 0.35, 0.25
sample1 = generate_locus_of_3d_points(npts, xc=xc1, yc=yc1, zc=zc1, seed=fixed_seed)
sample2 = generate_locus_of_3d_points(npts, xc=xc2, yc=yc2, zc=zc2, seed=fixed_seed)
velocities1 = np.zeros(npts*3).reshape(npts, 3)
velocities2 = np.zeros(npts*3).reshape(npts, 3)
velocities1[:, 2] = -50.
rp_bins, pi_max = np.array([0, 0.1, 0.3]), 0.2
s1s1, s1s2, s2s2 = mean_los_velocity_vs_rp(sample1, velocities1, rp_bins, pi_max,
sample2=sample2, velocities2=velocities2)
assert np.allclose(s1s1[0], 0, rtol=0.01)
assert np.allclose(s1s2[0], 0, rtol=0.01)
assert np.allclose(s2s2[0], 0, rtol=0.01)
assert np.allclose(s1s1[1], 0, rtol=0.01)
assert np.allclose(s1s2[1], correct_relative_velocity, rtol=0.01)
assert np.allclose(s2s2[1], 0, rtol=0.01)
# Now bundle sample2 and sample1 together and only pass in the concatenated sample
sample = np.concatenate((sample1, sample2))
velocities = np.concatenate((velocities1, velocities2))
s1s1 = mean_los_velocity_vs_rp(sample, velocities, rp_bins, pi_max)
assert np.allclose(s1s1[0], 0, rtol=0.01)
assert np.allclose(s1s1[1], correct_relative_velocity, rtol=0.01)
@pytest.mark.slow
def test_mean_los_velocity_vs_rp_correctness3():
""" This function tests that the
`~halotools.mock_observables.mean_los_velocity_vs_rp` function returns correct
    results for a controlled distribution of points whose mean line-of-sight velocity
is analytically calculable.
For this test, the configuration is two tight localizations of points,
the first at (0.5, 0.55, 0.1), the second at (0.5, 0.4, 0.95).
The first set of points is moving at (-50, -10, +20),
the second set of points is moving at (+25, +10, +40).
So in this configuration, the second set of points is "gaining ground" on
    the first set in the z-direction, and so the relative z-velocity
should be -20 for cross-correlations in separation bins containing the pair of points.
For any separation bin containing only
one set or the other, the auto-correlations should be 0 because each set of
points moves coherently.
The tests will be run with the two point configurations passed in as
separate ``sample1`` and ``sample2`` distributions, as well as bundled
together into the same distribution.
"""
correct_relative_velocity = -20
npts = 100
xc1, yc1, zc1 = 0.5, 0.55, 0.1
xc2, yc2, zc2 = 0.5, 0.4, 0.95
sample1 = generate_locus_of_3d_points(npts, xc=xc1, yc=yc1, zc=zc1, seed=fixed_seed)
sample2 = generate_locus_of_3d_points(npts, xc=xc2, yc=yc2, zc=zc2, seed=fixed_seed)
velocities1 = np.zeros(npts*3).reshape(npts, 3)
velocities1[:, 0] = -50.
velocities1[:, 1] = -10.
velocities1[:, 2] = +20.
velocities2 = np.zeros(npts*3).reshape(npts, 3)
velocities2[:, 0] = +25.
velocities2[:, 1] = +10.
velocities2[:, 2] = +40.
rp_bins, pi_max = np.array([0, 0.1, 0.3]), 0.2
s1s1, s1s2, s2s2 = mean_los_velocity_vs_rp(sample1, velocities1, rp_bins, pi_max,
sample2=sample2, velocities2=velocities2, period=1)
assert np.allclose(s1s1[0], 0, rtol=0.01)
assert np.allclose(s1s2[0], 0, rtol=0.01)
assert np.allclose(s2s2[0], 0, rtol=0.01)
assert np.allclose(s1s1[1], 0, rtol=0.01)
assert np.allclose(s1s2[1], correct_relative_velocity, rtol=0.01)
assert np.allclose(s2s2[1], 0, rtol=0.01)
# Now bundle sample2 and sample1 together and only pass in the concatenated sample
sample = np.concatenate((sample1, sample2))
velocities = np.concatenate((velocities1, velocities2))
s1s1 = mean_los_velocity_vs_rp(sample, velocities, rp_bins, pi_max, period=1)
assert np.allclose(s1s1[0], 0, rtol=0.01)
assert np.allclose(s1s1[1], correct_relative_velocity, rtol=0.01)
@pytest.mark.slow
def test_mean_los_velocity_vs_rp_correctness4():
""" This function tests that the
`~halotools.mock_observables.mean_los_velocity_vs_rp` function returns correct
    results for a controlled distribution of points whose mean line-of-sight velocity
is analytically calculable.
For this test, the configuration is two tight localizations of points,
the first at (0.05, 0.05, 0.3), the second at (0.95, 0.95, 0.4).
The first set of points is moving at (-50, -10, +20),
the second set of points is moving at (+25, +10, +40).
So in this configuration, the first set of points is "losing ground" on
the second set in the z-direction, and so the relative z-velocity
should be +20 for cross-correlations in separation bins containing the pair of points.
For any separation bin containing only one set or the other,
the auto-correlations should be 0 because each set of
points moves coherently.
Note that in this test, PBCs operate in both x & y directions
to identify pairs of points, but PBCs are irrelevant in the z-direction.
The tests will be run with the two point configurations passed in as
separate ``sample1`` and ``sample2`` distributions, as well as bundled
together into the same distribution.
"""
correct_relative_velocity = +20
npts = 100
xc1, yc1, zc1 = 0.05, 0.05, 0.3
xc2, yc2, zc2 = 0.95, 0.95, 0.4
sample1 = generate_locus_of_3d_points(npts, xc=xc1, yc=yc1, zc=zc1, seed=fixed_seed)
sample2 = generate_locus_of_3d_points(npts, xc=xc2, yc=yc2, zc=zc2, seed=fixed_seed)
velocities1 = np.zeros(npts*3).reshape(npts, 3)
velocities1[:, 0] = -50.
velocities1[:, 1] = -10.
velocities1[:, 2] = +20.
velocities2 = np.zeros(npts*3).reshape(npts, 3)
velocities2[:, 0] = +25.
velocities2[:, 1] = +10.
velocities2[:, 2] = +40.
rp_bins, pi_max = np.array([0, 0.1, 0.3]), 0.2
s1s1, s1s2, s2s2 = mean_los_velocity_vs_rp(sample1, velocities1, rp_bins, pi_max,
sample2=sample2, velocities2=velocities2, period=1)
assert np.allclose(s1s1[0], 0, rtol=0.01)
assert np.allclose(s1s2[0], 0, rtol=0.01)
assert np.allclose(s2s2[0], 0, rtol=0.01)
assert np.allclose(s1s1[1], 0, rtol=0.01)
assert np.allclose(s1s2[1], correct_relative_velocity, rtol=0.01)
assert np.allclose(s2s2[1], 0, rtol=0.01)
# Now bundle sample2 and sample1 together and only pass in the concatenated sample
sample = np.concatenate((sample1, sample2))
velocities = np.concatenate((velocities1, velocities2))
s1s1 = mean_los_velocity_vs_rp(sample, velocities, rp_bins, pi_max, period=1)
assert np.allclose(s1s1[0], 0, rtol=0.01)
assert np.allclose(s1s1[1], correct_relative_velocity, rtol=0.01)
@pytest.mark.slow
def test_mean_los_velocity_vs_rp_parallel():
"""
Verify that the `~halotools.mock_observables.mean_los_velocity_vs_rp` function
returns identical results for a random distribution of points whether the function
runs in parallel or serial.
"""
npts = 101
with NumpyRNGContext(fixed_seed):
sample1 = np.random.random((npts, 3))
velocities1 = np.random.normal(loc=0, scale=100, size=npts*3).reshape((npts, 3))
sample2 = np.random.random((npts, 3))
velocities2 = np.random.normal(loc=0, scale=100, size=npts*3).reshape((npts, 3))
rp_bins, pi_max = np.array([0, 0.1, 0.3]), 0.2
s1s1_parallel, s1s2_parallel, s2s2_parallel = mean_los_velocity_vs_rp(
sample1, velocities1, rp_bins, pi_max,
sample2=sample2, velocities2=velocities2, num_threads=2, period=1)
s1s1_serial, s1s2_serial, s2s2_serial = mean_los_velocity_vs_rp(
sample1, velocities1, rp_bins, pi_max,
sample2=sample2, velocities2=velocities2, num_threads=1, period=1)
assert np.allclose(s1s1_serial, s1s1_parallel, rtol=0.001)
assert np.allclose(s1s2_serial, s1s2_parallel, rtol=0.001)
assert np.allclose(s2s2_serial, s2s2_parallel, rtol=0.001)
@pytest.mark.slow
def test_mean_los_velocity_vs_rp_auto_consistency():
""" Verify that the `~halotools.mock_observables.mean_los_velocity_vs_rp` function
returns self-consistent auto-correlation results
regardless of whether we ask for cross-correlations.
"""
npts = 101
with NumpyRNGContext(fixed_seed):
sample1 = np.random.random((npts, 3))
velocities1 = | np.random.normal(loc=0, scale=100, size=npts*3) | numpy.random.normal |
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import ticker
import matplotx
def test_plot():
x = np.linspace(-1.0, 1.0, 10)
y = np.linspace(-1.0, 1.0, 11)
X, Y = np.meshgrid(x, y)
z = X + 1j * Y
vals = np.angle(z)
# import time
# t = time.time()
# plt.contour(X, Y, vals, levels=[0.5], linestyles=":")
# print("T", time.time() - t)
# # plt.show()
# t = time.time()
# matplotx.contour(X, Y, vals, levels=[0.5], linestyles="-")
# print("t", time.time() - t)
matplotx.contour(X, Y, vals, levels=[0.5], max_jump=5.0)
matplotx.discontour(X, Y, vals, min_jump=5.0, linestyle=":", color="r")
plt.gca().set_aspect("equal")
plt.show()
plt.close()
def test_paths():
x = np.linspace(-1.0, 1.0, 10)
y = np.linspace(-1.0, 1.0, 11)
X, Y = np.meshgrid(x, y)
z = X + 1j * Y
vals = np.angle(z).T
paths = matplotx._contour._get_xy_paths(
x, y, vals, level=0.5, max_jump=5.0, min_jump=None
)
assert len(paths) == 1
assert paths[0].shape == (2, 9)
def test_closed_path():
delta = 0.5
x = np.arange(-2.0, 2.0, delta)
y = np.arange(-2.0, 2.0, delta)
X, Y = np.meshgrid(x, y)
Z = np.exp(-(X**2) - Y**2)
# plt.contour(X, Y, Z, levels=[0.75])
matplotx.contour(X, Y, Z, levels=[0.75])
plt.gca().set_aspect("equal")
plt.show()
plt.close()
def test_separate_paths():
delta = 0.05
x = np.arange(-2.0, 2.0, delta)
y = np.arange(-2.0, 2.0, delta)
X, Y = | np.meshgrid(x, y) | numpy.meshgrid |
#!/usr/bin/env python
# Author: <NAME> <<EMAIL>>
# PTC5892 Processamento de Imagens Medicas
# POLI - University of Sao Paulo
# Implementation of Pratt's Figure of Merit (FOM) for evaluating edge detection quality
# References:
# [1] <NAME>, Digital Image Processing. New York: Wiley, 1977
# [2] <NAME> and <NAME>, Speckle Reducing Anisotropic Diffusion.
# IEEE Transactions on Image Processing, Vol. 11, No. 11, 2002
import numpy as np
from canny import canny
from scipy.ndimage import distance_transform_edt
DEFAULT_ALPHA = 1.0 / 9
def fom(img, img_gold_std, alpha = DEFAULT_ALPHA):
"""
Computes Pratt's Figure of Merit for the given image img, using a gold
standard image as source of the ideal edge pixels.
"""
# To avoid oversmoothing, we apply canny edge detection with very low
# standard deviation of the Gaussian kernel (sigma = 0.1).
edges_img = canny(img, 0.1, 20, 50)
edges_gold = canny(img_gold_std, 0.1, 20, 50)
# Compute the distance transform for the gold standard image.
dist = distance_transform_edt( | np.invert(edges_gold) | numpy.invert |
#
# Authors: <NAME>, <NAME>
#
###############################
## Default Parameters ##
###############################
# Discretization values
ds = 1/64  # Spatial step length
tolerance = 1E-20 * ds**3 # Error tolerance for iterative solver
alpha = 0.0 # Damping factor (coefficient of D_t n)
# Boundary conditions, either 'P' for "Periodic", 'N' for "Neumann", or 'D' for "Dirichlet"
# Note that Neumann only works when K1 = K2 due to some unimplemented boundary behavior (namely, we need div(n) = 0 on
# the boundary for Neumann to properly work out)
boundary_behavior = 'P'
# Spatial boundary values
min_x, max_x = -0.5, 0.5
min_y, max_y = -0.5, 0.5
min_z, max_z = -0, 0
final_time = 1.0 # Simulation time
# Frank elastic constants
K1 = 0.5  # Splay
K2 = 1.0  # Twist
K3 = 0.5  # Bend
# Whether or not to allow central derivatives in certain expressions (terms in F* and energy)
a_term_central = True
c_term_central = True
###############################
## Imports and Logging ##
###############################
# Import NumPy for all batch computing operations
import numpy as np
# Import os.path for reading and writing pre-computed frame data
import os.path
# Import tqdm for nice command line progress bars, in this file for computing frame data
from tqdm import trange
# Import sys, types, and argparse for processing command line arguments
import sys
import types
import argparse
verbose = False # Verbose mode prints more computation data as the simulation is running.
###############################
## Indexing Helpers ##
###############################
# Returns a multi-index in which all but one axis is free. For example, axial_index(3, 54) = (:,:,:,54).
def axial_index(axis, index):
return (np.s_[:],) * axis + (index,)
# See axial_index.
def axial_array(array, axis, index):
return array[axial_index(axis, index)]
# Given a potentially out-of-bounds array of indices, returns a new in-bounds array of indices. The behavior of this
# function depends on the boundary conditions. Periodic conditions wrap out-of-bounds indices back around to smaller
# index values, as if indices were on a circle, whereas Neumann conditions clamp out-of-bounds indices to the bounds.
def boundary_handler(index, array_length):
if boundary_behavior == 'P':
# If periodic, wrap around
return index % array_length
else:
# If Neumann or other, clamp to range
def clamp(x):
return min(max(x, 0), array_length - 1)
return np.vectorize(clamp)(index)
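# For example, with array_length = 4 an index of 5 maps to 1 under periodic
# conditions (5 % 4 == 1) and is clamped to 3 under Neumann/Dirichlet conditions.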
###############################
## OldNewPair Class ##
###############################
# The OldNewPair class represents a pair of two NumPy arrays of the same shape, one representing an older value,
# and the other representing a newer value.
class OldNewPair:
# Initializes a pair to be all zeros, unless pair is not None, in which case the pair is initialized to the
# given value.
def __init__(self, shape, pair=None):
if pair is None:
self.pair = np.zeros(shape + (2,))
self.__pair_axis = len(shape)
else:
self.pair = pair
self.__pair_axis = len(np.shape(pair)) - 1
self.__old_idx = axial_index(self.__pair_axis, 0)
self.__new_idx = axial_index(self.__pair_axis, 1)
# Returns a pair of the i'th component of old and new.
def component(self, i):
axis = self.__pair_axis - 1 # Dimension index should always come right before pairing index
return axial_array(self.pair, axis, i)
# Sets "old" to "new", and "new" to "newer".
def update(self, newer):
self.old = self.new
self.new = newer
# Returns the arithmetic mean of the old and new values.
def mid(self):
return (self.old + self.new) / 2
@property
def old(self):
return self.__old
@old.getter
def old(self):
return self.pair[self.__old_idx]
@old.setter
def old(self, old):
self.pair[self.__old_idx] = old
@property
def new(self):
return self.__new
@new.getter
def new(self):
return self.pair[self.__new_idx]
@new.setter
def new(self, new):
self.pair[self.__new_idx] = new
# Returns an OldNewPair for which the old and new values are the same.
def make_constant_pair(array):
pair = OldNewPair(np.shape(array))
pair.old = array.copy()
pair.new = array.copy()
return pair
# Returns the i'th component of a normal NumPy array (instead of an OldNewPair).
def component(array, i):
axis = len(np.shape(array)) - 1 # Dimension index should be the last index
return axial_array(array, axis, i)
# Returns the arithmetic mean of a pair of values which haven't been wrapped into an OldNewPair instance
def mid(pair):
return OldNewPair(None, pair).mid()
###############################
## Domain Initialization ##
###############################
dim = 3 # Simulation is always run in 3 dimensions, even if z-axis is a singleton
def init_computation_domain(input_dt = None):
global dt, C1, C2, C3, t_axis, num_t, time_indices, x_axis, y_axis, z_axis, space_axes, space_indices, space_sizes
if input_dt is None:
dt = 0.1 * ds
else:
dt = input_dt
# Constants from the paper
C1 = K1 - K2
C2 = K2
C3 = K3 - K2
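    # C1, C2 and C3 multiply the (div n)^2, |grad n|^2 and bend-like terms used in
    # f_star() and energy() below; when K1 = K2 = K3 only the C2 term survives.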
# Discrete time axis
t_axis = np.arange(0, final_time + dt, dt)
num_t, = np.shape(t_axis)
time_indices = np.arange(num_t)
# Discrete space axes
x_axis = np.arange(min_x, max_x + ds, ds)
y_axis = np.arange(min_y, max_y + ds, ds)
z_axis = np.arange(min_z, max_z + ds, ds)
# Axes, their associated indices, and the number of elements in each axis
space_axes = (x_axis, y_axis, z_axis)
space_indices = (np.arange(len(x_axis)), np.arange(len(y_axis)), np.arange(len(z_axis)))
space_sizes = (len(x_axis), len(y_axis), len(z_axis))
###############################
## Discrete Operators ##
###############################
# Treating 'array' as a function of its index domain, returns the partial derivative of 'array' along 'axis'.
# Possible values for 'kind' are 'C' (central derivative), '+' (forwards derivative), and '-' (backwards derivative).
def diff(axis, array, kind='C'):
denom_mult = 2 if kind == 'C' else 1
left = axial_array(array, axis, boundary_handler(space_indices[axis] + (0 if kind == '-' else 1), space_sizes[axis]))
right = axial_array(array, axis, boundary_handler(space_indices[axis] - (0 if kind == '+' else 1), space_sizes[axis]))
return (left - right) / (denom_mult * ds)
# Returns the discrete Jacobian matrix (with specified direction; central, forwards, or backwards) of a vector field
def grad(field, kind='C'):
result = np.zeros(space_sizes + (dim, dim))
for i in range(dim):
for j in range(dim):
result[:,:,:,i,j] = diff(i, component(field, j), kind)
return result
# Given an OldNewPair of vector fields, returns the value F^*(n) from the paper.
def f_star(nfield_pair):
result = np.zeros(space_sizes + (dim,))
for i in range(dim):
sum = np.zeros(space_sizes)
for j in range(dim):
for k in range(dim):
ni = nfield_pair.component(i)
nj = nfield_pair.component(j)
nk = nfield_pair.component(k)
if a_term_central:
term_a = diff(i, diff(j, mid(nj)))
else:
term_a = diff(i, diff(j, mid(nj), '-'), '+')
term_b = diff(j, diff(j, mid(ni), '-'), '+')
if c_term_central:
term_c = diff(j, mid(nj) * mid(nk * diff(k, ni)))
term_c -= mid(nj * diff(j, nk)) * diff(i, mid(nk))
else:
term_c = diff(j, mid(nj) * mid(nk * diff(k, ni, '-')), '+')
term_c -= mid(nj * diff(j, nk, '-')) * diff(i, mid(nk), '-')
sum += C1 * term_a + C2 * term_b + C3 * term_c
result[:,:,:,i] = sum
return result
# Given a vector field, returns the corresponding Frank-Oseen energy density field.
def energy(nfield, wfield):
term_a, term_b, term_c = 0, 0, 0
for i in range(dim):
for j in range(dim):
ni = component(nfield, i)
nj = component(nfield, j)
term_b += diff(j, ni, '-')**2
if c_term_central:
term_c += nj * diff(j, ni)
else:
term_c += nj * diff(j, ni, '-')
if a_term_central:
term_a += diff(i, ni)
else:
term_a += diff(i, ni, '-')
term_a = term_a**2
term_c = term_c**2
energy_field = 0.5 * (C1 * term_a + C2 * term_b + C3 * term_c)
n_energy = np.einsum("xyz->", energy_field)
return n_energy + 0.5 * np.einsum("xyzi,xyzi->", wfield, wfield)
###############################
## Solvers ##
###############################
# wfield_pair is (w^m, w^{m,s})
# nfield_old is n^m
# returns n^{m,s+1}
def n_solver(nfield_old, wfield_pair):
def cross_matrix(w):
return np.array([[np.zeros(space_sizes), component(w, 2), -component(w, 1)],
[-component(w, 2), np.zeros(space_sizes), component(w, 0)],
[component(w, 1), -component(w, 0), np.zeros(space_sizes)]])
def v_matrix(w):
a = (dt / 2)**2 * np.einsum("xyzi,xyzi->xyz", w, w) # dt^2/4 * |w|^2
term1 = np.einsum("xyz,ij->xyzij", 1 - a, np.eye(dim, dim)) # (1 - a) * I
term2 = (dt**2 / 2) * np.einsum('xyzi,xyzj->xyzij', w, w) # (dt^2 / 2) * (w \otimes w)
term3 = np.einsum(",ijxyz->xyzij", dt, cross_matrix(w)) # dt * Q(w)
return np.einsum("xyz,xyzij->xyzij", 1/(1 + a), term1 + term2 + term3) # 1/(1 + a) * sum
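    # V(w) is a Cayley-type rotation matrix built from w (the Cayley transform of
    # (dt/2) Q(w)), so applying it to n^m rotates the director field without
    # changing its length at any grid point.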
# n^{m,s+1} = V((w^{m,s} + w^m)/2) * n^m
return np.einsum("xyzij,xyzj->xyzi", v_matrix(wfield_pair.mid()), nfield_old)
# wfield_old is w^m
# nfield_pair is (n^m, n^{m,s+1})
# returns w^{m,s+1}
def w_solver(wfield_old, nfield_pair):
c = 1/(1/dt + alpha/2)
# w^{m,s+1} = w^m + c F*(n^m, n^{m,s+1}) x (n^m + n^{m,s+1})/2
return wfield_old + c * np.cross(f_star(nfield_pair), nfield_pair.mid())
# nfield_pair is n^m
# wfield_pair is w^m
# return n^{m+1} and w^{m+1}
def iterative_solver(nfield_old, wfield_old):
# Error from the paper
def error(w_difference, n_difference):
gradient = grad(n_difference)
return np.einsum("xyzi,xyzi->", w_difference, w_difference) + np.einsum("xyzij,xyzij->", gradient, gradient)
iterations = 0
# Initial values (w^{m,0} = w^m and n^{m,0} = n^m)
nfield_pair = make_constant_pair(nfield_old)
wfield_pair = make_constant_pair(wfield_old)
while True:
nfield_s = nfield_pair.new.copy()
wfield_s = wfield_pair.new.copy()
nfield_pair.new = n_solver(nfield_pair.old, wfield_pair) # Update (n^m, n^{m,s}) to (n^m, n^{m,s+1})
wfield_pair.new = w_solver(wfield_pair.old, nfield_pair) # Update (w^m, w^{m,s}) to (w^m, w^{m,s+1})
err = error(wfield_pair.new - wfield_s, nfield_pair.new - nfield_s)
# If too many iterations occur, there's a problem; notify the user that the simulation is failing
if iterations >= 400:
if verbose:
print("Too many iterations; no convergence. Final error: " + str(err))
break
        # Require a few iterations before accepting convergence, then stop once the error is small enough
if err <= tolerance and (not iterations <= 2):
break
iterations += 1
return iterations, nfield_pair.new, wfield_pair.new
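# Each call advances a single time step by fixed-point iteration: n^{m,s+1} and
# w^{m,s+1} are recomputed alternately until the combined change in w and in
# grad(n) between successive iterates drops below the tolerance.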
###############################
## Simulation and Output ##
###############################
def compute_simulation_frames(output_vfd_filepath, initial_field=None):
# Initialize director and angular momentum fields
field_shape = space_sizes + (dim,) # Shape of all relevant vector fields
nfield_initial = | np.zeros(field_shape) | numpy.zeros |
"""
New generic implementation of multiple regression analysis under noisy
measurements.
"""
import numpy as np
_NITER = 2
TINY = float(np.finfo(np.double).tiny)
def nonzero(x):
"""
Force strictly positive values.
"""
return np.maximum(x, TINY)
def em(Y, VY, X, C=None, niter=_NITER, log_likelihood=False):
"""
Maximum likelihood regression in a mixed-effect linear model using
the EM algorithm.
Parameters
----------
Y : array
Array of observations.
    VY : array
        Array of variances associated with the observations (same shape as Y).
    X : array
        Design matrix, with one row per observation and one column per regressor.
    C : array, optional
        Contrast matrix. Conventionally, C is p x q, where p
        is the number of regressors.
OUTPUT: beta, s2
beta -- array of parameter estimates
s2 -- array of squared scale parameters.
REFERENCE:
<NAME> Roche, ISBI 2008.
"""
# Number of observations, regressors and points
nobs = X.shape[0]
nreg = X.shape[1]
npts = np.prod(Y.shape[1:])
# Reshape input array
y = np.reshape(Y, (nobs, npts))
vy = np.reshape(VY, (nobs, npts))
# Compute the projected pseudo-inverse matrix
    if C is None:
PpX = | np.linalg.pinv(X) | numpy.linalg.pinv |
import unittest
import warnings
import numpy as np
from numpy.testing import assert_almost_equal
import openmdao.api as om
from openmdao.test_suite.components.sellar_feature import SellarIDF
from openmdao.utils.assert_utils import assert_near_equal, assert_check_partials
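# These tests exercise om.EQConstraintComp: as used below, its output 'y' behaves
# like mult * lhs - rhs (mult is applied only when use_mult=True), optionally
# normalized, so constraining y == 0 enforces the equality lhs == rhs.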
class TestEQConstraintComp(unittest.TestCase):
def test_sellar_idf(self):
prob = om.Problem(SellarIDF())
prob.driver = om.ScipyOptimizeDriver(optimizer='SLSQP', disp=False)
prob.setup()
# check derivatives
prob['y1'] = 100
prob['equal.rhs:y1'] = 1
prob.run_model()
cpd = prob.check_partials(out_stream=None)
assert_check_partials(cpd, atol=1e-5, rtol=1e-5)
# check results
prob.run_driver()
assert_near_equal(prob['x'], 0., 1e-5)
assert_near_equal(prob['z'], [1.977639, 0.], 1e-5)
assert_near_equal(prob['obj_cmp.obj'], 3.18339395045, 1e-5)
assert_almost_equal(prob['y1'], 3.16)
assert_almost_equal(prob['d1.y1'], 3.16)
assert_almost_equal(prob['y2'], 3.7552778)
assert_almost_equal(prob['d2.y2'], 3.7552778)
assert_almost_equal(prob['equal.y1'], 0.0)
assert_almost_equal(prob['equal.y2'], 0.0)
def test_create_on_init(self):
prob = om.Problem()
model = prob.model
# find intersection of two non-parallel lines
model.add_subsystem('indep', om.IndepVarComp('x', val=0.))
model.add_subsystem('f', om.ExecComp('y=3*x-3', x=0.))
model.add_subsystem('g', om.ExecComp('y=2.3*x+4', x=0.))
model.add_subsystem('equal', om.EQConstraintComp('y', val=11.))
model.connect('indep.x', 'f.x')
model.connect('indep.x', 'g.x')
model.connect('f.y', 'equal.lhs:y')
model.connect('g.y', 'equal.rhs:y')
model.add_design_var('indep.x', lower=0., upper=20.)
model.add_objective('f.y')
prob.setup(mode='fwd')
# verify that the output variable has been initialized
self.assertEqual(prob['equal.y'], 11.)
# verify that the constraint has not been added
self.assertFalse('equal.y' in model.get_constraints())
# manually add the constraint
model.add_constraint('equal.y', equals=0.)
prob.setup(mode='fwd')
prob.driver = om.ScipyOptimizeDriver(disp=False)
prob.run_driver()
assert_almost_equal(prob['equal.y'], 0.)
assert_almost_equal(prob['indep.x'], 10.)
assert_almost_equal(prob['f.y'], 27.)
assert_almost_equal(prob['g.y'], 27.)
cpd = prob.check_partials(out_stream=None)
assert_check_partials(cpd, atol=1e-5, rtol=1e-5)
def test_create_on_init_add_constraint(self):
prob = om.Problem()
model = prob.model
# find intersection of two non-parallel lines
model.add_subsystem('indep', om.IndepVarComp('x', val=0.))
model.add_subsystem('f', om.ExecComp('y=3*x-3', x=0.))
model.add_subsystem('g', om.ExecComp('y=2.3*x+4', x=0.))
model.add_subsystem('equal', om.EQConstraintComp('y', add_constraint=True))
model.connect('indep.x', 'f.x')
model.connect('indep.x', 'g.x')
model.connect('f.y', 'equal.lhs:y')
model.connect('g.y', 'equal.rhs:y')
model.add_design_var('indep.x', lower=0., upper=20.)
model.add_objective('f.y')
prob.setup(mode='fwd')
# verify that the constraint has been added as requested
self.assertTrue('equal.y' in model.get_constraints())
prob.driver = om.ScipyOptimizeDriver(disp=False)
prob.run_driver()
assert_almost_equal(prob['equal.y'], 0.)
assert_almost_equal(prob['indep.x'], 10.)
assert_almost_equal(prob['f.y'], 27.)
assert_almost_equal(prob['g.y'], 27.)
cpd = prob.check_partials(out_stream=None)
assert_check_partials(cpd, atol=1e-5, rtol=1e-5)
def test_create_on_init_add_constraint_no_normalization(self):
prob = om.Problem()
model = prob.model
# find intersection of two non-parallel lines
model.add_subsystem('indep', om.IndepVarComp('x', val=-2.0))
model.add_subsystem('f', om.ExecComp('y=3*x-3', x=0.))
model.add_subsystem('g', om.ExecComp('y=2.3*x+4', x=0.))
model.add_subsystem('equal', om.EQConstraintComp('y', add_constraint=True, normalize=False,
ref0=0, ref=100.0))
model.connect('indep.x', 'f.x')
model.connect('indep.x', 'g.x')
model.connect('f.y', 'equal.lhs:y')
model.connect('g.y', 'equal.rhs:y')
model.add_design_var('indep.x', lower=0., upper=20.)
model.add_objective('f.y')
prob.setup(mode='fwd')
# verify that the constraint has been added as requested
self.assertTrue('equal.y' in model.get_constraints())
# verify that the output is not being normalized
prob.run_model()
lhs = prob['f.y']
rhs = prob['g.y']
diff = lhs - rhs
assert_near_equal(prob['equal.y'], diff)
prob.driver = om.ScipyOptimizeDriver(disp=False)
prob.run_driver()
assert_almost_equal(prob['equal.y'], 0.)
assert_almost_equal(prob['indep.x'], 10.)
assert_almost_equal(prob['f.y'], 27.)
assert_almost_equal(prob['g.y'], 27.)
cpd = prob.check_partials(out_stream=None)
assert_check_partials(cpd, atol=1e-5, rtol=1e-5)
def test_vectorized(self):
prob = om.Problem()
model = prob.model
n = 100
# find intersection of two non-parallel lines, vectorized
model.add_subsystem('indep', om.IndepVarComp('x', val=np.ones(n)))
model.add_subsystem('f', om.ExecComp('y=3*x-3', x=np.ones(n), y=np.ones(n)))
model.add_subsystem('g', om.ExecComp('y=2.3*x+4', x=np.ones(n), y=np.ones(n)))
model.add_subsystem('equal', om.EQConstraintComp('y', val=np.ones(n), add_constraint=True))
model.add_subsystem('obj_cmp', om.ExecComp('obj=sum(y)', y=np.zeros(n)))
model.connect('indep.x', 'f.x')
model.connect('indep.x', 'g.x')
model.connect('f.y', 'equal.lhs:y')
model.connect('g.y', 'equal.rhs:y')
model.connect('f.y', 'obj_cmp.y')
model.add_design_var('indep.x', lower=np.zeros(n), upper=20.*np.ones(n))
model.add_objective('obj_cmp.obj')
prob.setup(mode='fwd')
prob.driver = om.ScipyOptimizeDriver(disp=False)
prob.run_driver()
assert_almost_equal(prob['equal.y'], np.zeros(n))
assert_almost_equal(prob['indep.x'], np.ones(n)*10.)
assert_almost_equal(prob['f.y'], np.ones(n)*27.)
assert_almost_equal(prob['g.y'], np.ones(n)*27.)
cpd = prob.check_partials(out_stream=None)
assert_check_partials(cpd, atol=1e-5, rtol=1e-5)
def test_set_shape(self):
prob = om.Problem()
model = prob.model
n = 100
# find intersection of two non-parallel lines, vectorized
model.add_subsystem('indep', om.IndepVarComp('x', val=np.ones(n)))
model.add_subsystem('f', om.ExecComp('y=3*x-3', x=np.ones(n), y=np.ones(n)))
model.add_subsystem('g', om.ExecComp('y=2.3*x+4', x=np.ones(n), y=np.ones(n)))
model.add_subsystem('equal', om.EQConstraintComp('y', shape=(n,), add_constraint=True))
model.add_subsystem('obj_cmp', om.ExecComp('obj=sum(y)', y=np.zeros(n)))
model.connect('indep.x', 'f.x')
model.connect('indep.x', 'g.x')
model.connect('f.y', 'equal.lhs:y')
model.connect('g.y', 'equal.rhs:y')
model.connect('f.y', 'obj_cmp.y')
model.add_design_var('indep.x', lower=np.zeros(n), upper=20.*np.ones(n))
model.add_objective('obj_cmp.obj')
prob.setup(mode='fwd')
prob.driver = om.ScipyOptimizeDriver(disp=False)
prob.run_driver()
assert_almost_equal(prob['equal.y'], np.zeros(n))
assert_almost_equal(prob['indep.x'], np.ones(n)*10.)
assert_almost_equal(prob['f.y'], np.ones(n)*27.)
assert_almost_equal(prob['g.y'], np.ones(n)*27.)
cpd = prob.check_partials(out_stream=None)
assert_check_partials(cpd, atol=1e-5, rtol=1e-5)
def test_vectorized_no_normalization(self):
prob = om.Problem()
model = prob.model
n = 100
# find intersection of two non-parallel lines, vectorized
model.add_subsystem('indep', om.IndepVarComp('x', val=-2.0*np.ones(n)))
model.add_subsystem('f', om.ExecComp('y=3*x-3', x=np.ones(n), y=np.ones(n)))
model.add_subsystem('g', om.ExecComp('y=2.3*x+4', x=np.ones(n), y=np.ones(n)))
model.add_subsystem('equal', om.EQConstraintComp('y', val=np.ones(n), add_constraint=True,
normalize=False))
model.add_subsystem('obj_cmp', om.ExecComp('obj=sum(y)', y=np.zeros(n)))
model.connect('indep.x', 'f.x')
model.connect('indep.x', 'g.x')
model.connect('f.y', 'equal.lhs:y')
model.connect('g.y', 'equal.rhs:y')
model.connect('f.y', 'obj_cmp.y')
model.add_design_var('indep.x', lower=np.zeros(n), upper=20.*np.ones(n))
model.add_objective('obj_cmp.obj')
prob.setup(mode='fwd')
prob.driver = om.ScipyOptimizeDriver(disp=False)
# verify that the output is not being normalized
prob.run_model()
lhs = prob['f.y']
rhs = prob['g.y']
diff = lhs - rhs
assert_near_equal(prob['equal.y'], diff)
prob.run_driver()
assert_almost_equal(prob['equal.y'], np.zeros(n))
assert_almost_equal(prob['indep.x'], np.ones(n)*10.)
assert_almost_equal(prob['f.y'], np.ones(n)*27.)
assert_almost_equal(prob['g.y'], np.ones(n)*27.)
cpd = prob.check_partials(out_stream=None)
assert_check_partials(cpd, atol=1e-5, rtol=1e-5)
def test_scalar_with_mult(self):
prob = om.Problem()
model = prob.model
# find where 2*x == x^2
model.add_subsystem('indep', om.IndepVarComp('x', val=1.))
model.add_subsystem('multx', om.IndepVarComp('m', val=2.))
model.add_subsystem('f', om.ExecComp('y=x**2', x=1.))
model.add_subsystem('equal', om.EQConstraintComp('y', use_mult=True))
model.connect('indep.x', 'f.x')
model.connect('indep.x', 'equal.lhs:y')
model.connect('multx.m', 'equal.mult:y')
model.connect('f.y', 'equal.rhs:y')
model.add_design_var('indep.x', lower=0., upper=10.)
model.add_constraint('equal.y', equals=0.)
model.add_objective('f.y')
prob.setup(mode='fwd')
prob.driver = om.ScipyOptimizeDriver(disp=False)
prob.run_driver()
assert_near_equal(prob['equal.y'], 0., 1e-6)
assert_near_equal(prob['indep.x'], 2., 1e-6)
assert_near_equal(prob['f.y'], 4., 1e-6)
cpd = prob.check_partials(out_stream=None)
assert_check_partials(cpd, atol=1e-5, rtol=1e-5)
def test_complex_step(self):
prob = om.Problem()
model = prob.model
# find where 2*x == x^2
model.add_subsystem('indep', om.IndepVarComp('x', val=1.))
model.add_subsystem('multx', om.IndepVarComp('m', val=2.))
model.add_subsystem('f', om.ExecComp('y=x**2', x=1.))
model.add_subsystem('equal', om.EQConstraintComp('y', use_mult=True))
model.connect('indep.x', 'f.x')
model.connect('indep.x', 'equal.lhs:y')
model.connect('multx.m', 'equal.mult:y')
model.connect('f.y', 'equal.rhs:y')
model.add_design_var('indep.x', lower=0., upper=10.)
model.add_constraint('equal.y', equals=0.)
model.add_objective('f.y')
prob.setup(mode='fwd', force_alloc_complex=True)
prob.driver = om.ScipyOptimizeDriver(disp=False)
prob.run_driver()
with warnings.catch_warnings():
warnings.filterwarnings(action="error", category=np.ComplexWarning)
cpd = prob.check_partials(out_stream=None, method='cs')
assert_check_partials(cpd, atol=1e-10, rtol=1e-10)
def test_vectorized_with_mult(self):
prob = om.Problem()
model = prob.model
n = 100
# find where 2*x == x^2, vectorized
model.add_subsystem('indep', om.IndepVarComp('x', val=np.ones(n)))
model.add_subsystem('multx', om.IndepVarComp('m', val=np.ones(n)*2.))
model.add_subsystem('f', om.ExecComp('y=x**2', x=np.ones(n), y=np.ones(n)))
model.add_subsystem('equal', om.EQConstraintComp('y', val=np.ones(n),
use_mult=True, add_constraint=True))
model.add_subsystem('obj_cmp', om.ExecComp('obj=sum(y)', y=np.zeros(n)))
model.connect('indep.x', 'f.x')
model.connect('indep.x', 'equal.lhs:y')
model.connect('multx.m', 'equal.mult:y')
model.connect('f.y', 'equal.rhs:y')
model.connect('f.y', 'obj_cmp.y')
model.add_design_var('indep.x', lower=np.zeros(n), upper=np.ones(n)*10.)
model.add_objective('obj_cmp.obj')
prob.setup(mode='fwd')
prob.driver = om.ScipyOptimizeDriver(disp=False)
prob.run_driver()
assert_near_equal(prob['equal.y'], np.zeros(n), 1e-6)
assert_near_equal(prob['indep.x'], np.ones(n)*2., 1e-6)
assert_near_equal(prob['f.y'], np.ones(n)*4., 1e-6)
cpd = prob.check_partials(out_stream=None)
assert_check_partials(cpd, atol=1e-5, rtol=1e-5)
def test_vectorized_with_default_mult(self):
prob = om.Problem()
model = prob.model
n = 100
# find where 2*x == x^2, vectorized
model.add_subsystem('indep', om.IndepVarComp('x', val=np.ones(n)))
model.add_subsystem('f', om.ExecComp('y=x**2', x=np.ones(n), y=np.ones(n)))
model.add_subsystem('equal', om.EQConstraintComp('y', val=np.ones(n),
use_mult=True, mult_val=2., add_constraint=True))
model.add_subsystem('obj_cmp', om.ExecComp('obj=sum(y)', y=np.zeros(n)))
model.connect('indep.x', 'f.x')
model.connect('indep.x', 'equal.lhs:y')
model.connect('f.y', 'equal.rhs:y')
model.connect('f.y', 'obj_cmp.y')
model.add_design_var('indep.x', lower=np.zeros(n), upper=np.ones(n)*10.)
model.add_objective('obj_cmp.obj')
prob.setup(mode='fwd')
prob.driver = om.ScipyOptimizeDriver(disp=False)
prob.run_driver()
assert_near_equal(prob['equal.y'], np.zeros(n), 1e-6)
assert_near_equal(prob['indep.x'], np.ones(n)*2., 1e-6)
assert_near_equal(prob['f.y'], np.ones(n)*4., 1e-6)
cpd = prob.check_partials(out_stream=None)
assert_check_partials(cpd, atol=1e-5, rtol=1e-5)
def test_rhs_val(self):
prob = om.Problem()
model = prob.model
# find where x^2 == 4
model.add_subsystem('indep', om.IndepVarComp('x', val=1.))
model.add_subsystem('f', om.ExecComp('y=x**2', x=1.))
model.add_subsystem('equal', om.EQConstraintComp('y', rhs_val=4.))
model.connect('indep.x', 'f.x')
model.connect('f.y', 'equal.lhs:y')
model.add_design_var('indep.x', lower=0., upper=10.)
model.add_constraint('equal.y', equals=0.)
model.add_objective('f.y')
prob.setup(mode='fwd')
prob.driver = om.ScipyOptimizeDriver(disp=False)
prob.run_driver()
assert_near_equal(prob['equal.y'], 0., 1e-6)
assert_near_equal(prob['indep.x'], 2., 1e-6)
assert_near_equal(prob['f.y'], 4., 1e-6)
cpd = prob.check_partials(out_stream=None)
assert_check_partials(cpd, atol=1e-5, rtol=1e-5)
def test_vectorized_rhs_val(self):
prob = om.Problem()
model = prob.model
n = 100
# find where x^2 == 4, vectorized
model.add_subsystem('indep', om.IndepVarComp('x', val=np.ones(n)))
model.add_subsystem('f', om.ExecComp('y=x**2', x=np.ones(n), y=np.ones(n)))
model.add_subsystem('equal', om.EQConstraintComp('y', val=np.ones(n),
rhs_val=np.ones(n)*4., use_mult=True, mult_val=2.))
model.add_subsystem('obj_cmp', om.ExecComp('obj=sum(y)', y=np.zeros(n)))
model.connect('indep.x', 'f.x')
model.connect('indep.x', 'equal.lhs:y')
model.connect('f.y', 'obj_cmp.y')
model.add_design_var('indep.x', lower=np.zeros(n), upper=np.ones(n)*10.)
model.add_constraint('equal.y', equals=0.)
model.add_objective('obj_cmp.obj')
prob.setup(mode='fwd')
prob.driver = om.ScipyOptimizeDriver(disp=False)
prob.run_driver()
assert_near_equal(prob['equal.y'], np.zeros(n), 1e-6)
assert_near_equal(prob['indep.x'], np.ones(n)*2., 1e-6)
assert_near_equal(prob['f.y'], np.ones(n)*4., 1e-6)
cpd = prob.check_partials(out_stream=None)
assert_check_partials(cpd, atol=2e-5, rtol=2e-5)
def test_specified_shape_rhs_val(self):
prob = om.Problem()
model = prob.model
shape = (3, 2, 4)
rhs = np.zeros(shape)
model.add_subsystem('indep', om.IndepVarComp('x', val=np.ones(shape)))
model.add_subsystem('equal', om.EQConstraintComp('y', val=np.ones(shape),
rhs_val=rhs))
model.connect('indep.x', 'equal.lhs:y')
prob.setup()
prob.run_model()
assert_near_equal(prob['equal.y'], | np.ones(shape) | numpy.ones |
# Utility Functions
# Authors: <NAME>
# Edited by: <NAME>
'''
Used by the user to define channels that are hard coded for analysis.
'''
# Imports necessary for this function
import numpy as np
import re
from itertools import combinations
def splitpatient(patient):
stringtest = patient.find('seiz')
if stringtest == -1:
stringtest = patient.find('sz')
if stringtest == -1:
stringtest = patient.find('aw')
if stringtest == -1:
stringtest = patient.find('aslp')
if stringtest == -1:
stringtest = patient.find('_')
if stringtest == -1:
print("Not sz, seiz, aslp, or aw! Please add additional naming possibilities, or tell data gatherers to rename datasets.")
else:
pat_id = patient[0:stringtest]
seiz_id = patient[stringtest:]
# remove any underscores
pat_id = re.sub('_', '', pat_id)
seiz_id = re.sub('_', '', seiz_id)
return pat_id, seiz_id
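# For example, splitpatient('pt1sz2') returns ('pt1', 'sz2') and
# splitpatient('la01_ictal2') returns ('la01', 'ictal2').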
def returnindices(pat_id, seiz_id=None):
included_indices, onsetelecs, clinresult = returnnihindices(
pat_id, seiz_id)
if included_indices.size == 0:
included_indices, onsetelecs, clinresult = returnlaindices(
pat_id, seiz_id)
if included_indices.size == 0:
included_indices, onsetelecs, clinresult = returnummcindices(
pat_id, seiz_id)
if included_indices.size == 0:
included_indices, onsetelecs, clinresult = returnjhuindices(
pat_id, seiz_id)
if included_indices.size == 0:
included_indices, onsetelecs, clinresult = returntngindices(
pat_id, seiz_id)
return included_indices, onsetelecs, clinresult
def returntngindices(pat_id, seiz_id):
included_indices = np.array([])
onsetelecs = None
clinresult = -1
if pat_id == 'id001ac':
# included_indices = np.concatenate((np.arange(0,4), np.arange(5,55),
# np.arange(56,77), np.arange(78,80)))
included_indices = np.array([0, 1, 5, 6, 7, 8, 9, 10, 11, 12, 13,
15, 16, 17, 18, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
32, 33, 35, 36, 37, 38, 39, 40, 41, 42, 43, 45, 46, 47, 48,
49, 50, 51, 52, 53, 58, 59, 60, 61, 62, 63, 64, 65, 66, 68,
69, 70, 71, 72, 73, 74, 75, 76, 78, 79])
elif pat_id == 'id002cj':
# included_indices = np.array(np.arange(0,184))
included_indices = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
30, 31, 32, 33, 34, 35, 36, 37, 38,
45, 46, 47, 48, 49, 50, 51, 52, 53,
60, 61, 62, 63, 64, 65, 66, 67, 70, 71, 72, 73, 74, 75, 76, 85, 86, 87, 88, 89,
90, 91, 92, 93, 100, 101, 102, 103, 104, 105,
106, 107, 108, 115, 116, 117, 118, 119,
120, 121, 122, 123, 129, 130, 131, 132, 133,
134, 135, 136, 137,
# np.arange(143, 156)
143, 144, 145, 146, 147,
148, 149, 150, 151, 157, 158, 159, 160, 161,
162, 163, 164, 165, 171, 172, 173, 174, 175,
176, 177, 178, 179, 180, 181, 182])
elif pat_id == 'id003cm':
included_indices = np.concatenate((np.arange(0,13), np.arange(25,37),
np.arange(40,50), np.arange(55,69), np.arange(70,79)))
elif pat_id == 'id004cv':
# removed OC'10, SC'5, CC'14/15
included_indices = np.concatenate((np.arange(0,23), np.arange(25,39),
np.arange(40,59), np.arange(60,110)))
elif pat_id == 'id005et':
included_indices = np.concatenate((np.arange(0,39), np.arange(39,47),
np.arange(52,62), np.arange(62,87)))
elif pat_id == 'id006fb':
included_indices = np.concatenate((np.arange(10,19), np.arange(40,50),
np.arange(115,123)))
elif pat_id == 'id008gc':
included_indices = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17,
18, 19, 20, 21, 22, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 48, 49, 50, 51, 52, 53, 54, 56, 57, 58, 61, 62, 63, 64, 65,
71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 83, 84, 85, 86, 87, 88, 89, 90, 92, 93,
94, 95, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 110, 111])
elif pat_id == 'id009il':
included_indices = np.concatenate((np.arange(0,10), np.arange(10,152)))
elif pat_id == 'id010js':
included_indices = np.concatenate((np.arange(0,14),
np.arange(15,29), np.arange(30,42), np.arange(43,52),
np.arange(53,65), np.arange(66,75), np.arange(76,80),
np.arange(81,85), np.arange(86,94), np.arange(95,98),
np.arange(99,111),
np.arange(112,124)))
elif pat_id == 'id011ml':
included_indices = np.concatenate((np.arange(0,18), np.arange(21,68),
np.arange(69,82), np.arange(82,125)))
elif pat_id == 'id012pc':
included_indices = np.concatenate((np.arange(0,4), np.arange(9,17),
np.arange(18,28), np.arange(31,41), np.arange(44,56),
np.arange(57,69), np.arange(70,82), np.arange(83,96),
np.arange(97,153)))
elif pat_id == 'id013pg':
included_indices = np.array([2, 3, 4, 5, 15, 18, 19, 20, 21, 23, 24,
25, 30, 31, 32, 33, 34, 35, 36, 37, 38, 50, 51, 52, 53, 54, 55, 56,
57, 58, 60, 61, 62, 63, 64, 65, 66, 67, 68, 70, 71, 72, 73, 74, 75,
76, 77, 78])
elif pat_id == 'id014rb':
included_indices = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101,
102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129,
130, 131, 132, 133, 135, 136, 140, 141, 142, 143, 144, 145, 146,
147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
160, 161, 162, 163, 164])
elif pat_id == 'id015sf':
included_indices = np.concatenate((np.arange(0,37), np.arange(38,77),
np.arange(78,121)))
return included_indices, onsetelecs, clinresult
def returnnihindices(pat_id, seiz_id):
included_indices = np.array([])
onsetelecs = None
clinresult = -1
if pat_id == 'pt1':
included_indices = np.concatenate((np.arange(0, 36), np.arange(41, 43),
np.arange(45, 69), np.arange(71, 95)))
onsetelecs = set(['ATT1', 'ATT2', 'AD1', 'AD2', 'AD3', 'AD4',
'PD1', 'PD2', 'PD3', 'PD4'])
resectelecs = set(['ATT1', 'ATT2', 'ATT3', 'ATT4', 'ATT5', 'ATT6', 'ATT7', 'ATT8',
'AST1', 'AST2', 'AST3', 'AST4',
'PST1', 'PST2', 'PST3', 'PST4',
'AD1', 'AD2', 'AD3', 'AD4',
'PD1', 'PD2', 'PD3', 'PD4',
'PLT5', 'PLT6', 'SLT1'])
clinresult = 1
elif pat_id == 'pt2':
# [1:14 16:19 21:25 27:37 43 44 47:74]
included_indices = np.concatenate((np.arange(0, 14), np.arange(15, 19),
np.arange(
20, 25), np.arange(
26, 37), np.arange(
42, 44),
np.arange(46, 74)))
onsetelecs = set(['MST1', 'PST1', 'AST1', 'TT1'])
resectelecs = set(['TT1', 'TT2', 'TT3', 'TT4', 'TT6', 'TT6',
'G1', 'G2', 'G3', 'G4', 'G9', 'G10', 'G11', 'G12', 'G18', 'G19',
'G20', 'G26', 'G27',
'AST1', 'AST2', 'AST3', 'AST4',
'MST1', 'MST2', 'MST3', 'MST4'])
clinresult = 1
elif pat_id == 'pt3':
# [1:19 21:37 42:43 46:69 71:107]
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 37),
np.arange(41, 43), np.arange(45, 69), np.arange(70, 107)))
onsetelecs = set(['SFP1', 'SFP2', 'SFP3',
'IFP1', 'IFP2', 'IFP3',
'MFP2', 'MFP3',
'OF1', 'OF2', 'OF3', 'OF4'])
resectelecs = set(['FG1', 'FG2', 'FG9', 'FG10', 'FG17', 'FG18', 'FG25',
'SFP1', 'SFP2', 'SFP3', 'SFP4', 'SFP5', 'SFP6', 'SFP7', 'SFP8',
'MFP1', 'MFP2', 'MFP3', 'MFP4', 'MFP5', 'MFP6',
'IFP1', 'IFP2', 'IFP3', 'IFP4',
'OF3', 'OF4'])
clinresult = 1
elif pat_id == 'pt4':
# [1:19 21:37 42:43 46:69 71:107]
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 26),
np.arange(28, 36)))
onsetelecs = set([])
resectelecs = set([])
clinresult = -1
elif pat_id == 'pt5':
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 26),
np.arange(28, 36)))
onsetelecs = set([])
resectelecs = set([])
clinresult = -1
elif pat_id == 'pt6':
# [1:36 42:43 46 52:56 58:71 73:95]
included_indices = np.concatenate((np.arange(0, 36), np.arange(41, 43),
np.arange(45, 46), np.arange(51, 56), np.arange(57, 71), np.arange(72, 95)))
onsetelecs = set(['LA1', 'LA2', 'LA3', 'LA4',
'LAH1', 'LAH2', 'LAH3', 'LAH4',
'LPH1', 'LPH2', 'LPH3', 'LPH4'])
resectelecs = set(['LALT1', 'LALT2', 'LALT3', 'LALT4', 'LALT5', 'LALT6',
'LAST1', 'LAST2', 'LAST3', 'LAST4',
'LA1', 'LA2', 'LA3', 'LA4', 'LPST4',
'LAH1', 'LAH2', 'LAH3', 'LAH4',
'LPH1', 'LPH2'])
clinresult = 2
elif pat_id == 'pt7':
# [1:17 19:35 37:38 41:62 67:109]
included_indices = np.concatenate((np.arange(0, 17), np.arange(18, 35),
np.arange(36, 38), np.arange(40, 62), np.arange(66, 109)))
onsetelecs = set(['MFP1', 'LFP3',
'PT2', 'PT3', 'PT4', 'PT5',
'MT2', 'MT3',
'AT3', 'AT4',
'G29', 'G30', 'G39', 'G40', 'G45', 'G46'])
resectelecs = set(['G28', 'G29', 'G30', 'G36', 'G37', 'G38', 'G39',
'G41', 'G44', 'G45', 'G46',
'LFP1', 'LFP2', 'LSF3', 'LSF4'])
clinresult = 3
elif pat_id == 'pt8':
# [1:19 21 23 30:37 39:40 43:64 71:76]
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 21),
np.arange(
22, 23), np.arange(
29, 37), np.arange(
38, 40),
np.arange(42, 64), np.arange(70, 76)))
onsetelecs = set(['G19', 'G23', 'G29', 'G30', 'G31',
'TO6', 'TO5',
'MST3', 'MST4',
'O8', 'O9'])
resectelecs = set(['G22', 'G23', 'G27', 'G28', 'G29', 'G30', 'G31',
'MST2', 'MST3', 'MST4', 'PST2', 'PST3', 'PST4'])
clinresult = 1
elif pat_id == 'pt10':
# [1:3 5:19 21:35 48:69]
included_indices = np.concatenate((np.arange(0, 3), np.arange(4, 19),
np.arange(20, 35), np.arange(47, 69)))
onsetelecs = set(['TT1', 'TT2', 'TT4', 'TT6',
'MST1',
'AST2'])
resectelecs = set(['G3', 'G4', 'G5', 'G6', 'G11', 'G12', 'G13', 'G14',
'TT1', 'TT2', 'TT3', 'TT4', 'TT5', 'TT6', 'AST1', 'AST2', 'AST3', 'AST4'])
clinresult = 2
elif pat_id == 'pt11':
# [1:19 21:35 37 39 40 43:74 76:81 83:84]
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 35),
np.arange(
36, 37), np.arange(
38, 40), np.arange(
42, 74),
np.arange(75, 81), np.arange(82, 84)))
onsetelecs = set(['RG29', 'RG30', 'RG31', 'RG37', 'RG38', 'RG39',
'RG44', 'RG45'])
        resectelecs = set(['RG4', 'RG5', 'RG6', 'RG7', 'RG12', 'RG13', 'RG14', 'RG15',
                           'RG21', 'RG22', 'RG23', 'RG29', 'RG30', 'RG31', 'RG37', 'RG38',
                           'RG39', 'RG45', 'RG46', 'RG47'])
clinresult = 1
elif pat_id == 'pt12':
# [1:15 17:33 38:39 42:61]
included_indices = np.concatenate((np.arange(0, 15), np.arange(16, 33),
np.arange(37, 39), np.arange(41, 61)))
onsetelecs = set(['AST1', 'AST2',
'TT2', 'TT3', 'TT4', 'TT5'])
resectelecs = set(['G19', 'G20', 'G21', 'G22', 'G23', 'G27', 'G28', 'G29', 'G30', 'G31',
'TT1', 'TT2', 'TT3', 'TT4', 'TT5', 'TT6',
'AST1', 'AST2', 'AST3', 'AST4',
'MST1', 'MST2', 'MST3', 'MST4'])
clinresult = 2
elif pat_id == 'pt13':
# [1:36 39:40 43:66 69:74 77 79:94 96:103 105:130]
included_indices = np.concatenate((np.arange(0, 36), np.arange(38, 40),
np.arange(
42, 66), np.arange(
68, 74), np.arange(
76, 77),
np.arange(78, 94), np.arange(95, 103), np.arange(104, 130)))
onsetelecs = set(['G1', 'G2', 'G9', 'G10', 'G17', 'G18'])
resectelecs = set(['G1', 'G2', 'G3', 'G4', 'G9', 'G10', 'G11',
'G17', 'G18', 'G19',
'AP2', 'AP3', 'AP4'])
clinresult = 1
elif pat_id == 'pt14':
# [1:19 21:37 41:42 45:61 68:78]
included_indices = np.concatenate((np.arange(0, 3), np.arange(6, 10),
np.arange(
11, 17), np.arange(
18, 19), np.arange(
20, 37),
np.arange(40, 42), np.arange(44, 61), np.arange(67, 78)))
onsetelecs = set(['MST1', 'MST2',
'TT1', 'TT2', 'TT3',
'AST1', 'AST2'])
resectelecs = set(['TT1', 'TT2', 'TT3', 'AST1', 'AST2',
'MST1', 'MST2', 'PST1'])
clinresult = 4
elif pat_id == 'pt15':
# [2:7 9:30 32:36 41:42 45:47 49:66 69 71:85];
included_indices = np.concatenate((np.arange(1, 7), np.arange(8, 30),
np.arange(
31, 36), np.arange(
40, 42), np.arange(
44, 47),
np.arange(48, 66), np.arange(68, 69), np.arange(70, 85)))
onsetelecs = set(['TT1', 'TT2', 'TT3', 'TT4',
'MST1', 'MST2', 'AST1', 'AST2', 'AST3'])
resectelecs = set(['G2', 'G3', 'G4', 'G5', 'G10', 'G11', 'G12', 'G13',
'TT1', 'TT2', 'TT3', 'TT4', 'TT5',
'AST1', 'AST2', 'AST3', 'AST4',
'MST1', 'MST2', 'MST3', 'MST4'])
clinresult = 1
elif pat_id == 'pt16':
# [1:19 21:37 42:43 46:53]
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 37),
np.arange(41, 43), np.arange(45, 53)))
onsetelecs = set(['TT1', 'TT2', 'TT3', 'TT4', 'TT5', 'TT6',
'AST1', 'AST2', 'AST3', 'AST4',
'MST3', 'MST4',
'G26', 'G27', 'G28', 'G18', 'G19', 'G20', 'OF4'])
resectelecs = set(['G18', 'G19', 'G20', 'G26', 'G27', 'G28',
'G29', 'G30', 'TT1', 'TT2', 'TT3', 'TT4', 'TT5', 'TT6',
'AST1', 'AST2', 'AST3', 'AST4',
'MST1', 'MST2', 'MST3', 'MST4'
])
clinresult = 1
elif pat_id == 'pt17':
# [1:19 21:37 42:43 46:51]
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 37),
np.arange(41, 43), np.arange(45, 51)))
onsetelecs = set(['TT1', 'TT2'])
resectelecs = set(['G27', 'G28', 'G29', 'G30',
'TT', 'TT2', 'TT3', 'TT4', 'TT5', 'TT6',
'AST1', 'AST2', 'AST3', 'AST4',
'MST1', 'MST2', 'MST3', 'MST4'])
clinresult = 1
return included_indices, onsetelecs, clinresult
def returnlaindices(pat_id, seiz_id):
included_indices = np.array([])
onsetelecs = None
clinresult = -1
spreadelecs = None
if pat_id == 'la01':
# [1 3 7:8 11:13 17:19 22:26 32 34:35 37 42 50:55 58 ...
# 62:65 70:72 77:81 84:97 100:102 105:107 110:114 120:121 130:131];
# onset_electrodes = {'Y''1', 'X''4', ...
# 'T''5', 'T''6', 'O''1', 'O''2', 'B1', 'B2',...% rare onsets
# }
included_indices = np.concatenate((np.arange(0, 3), np.arange(6, 8), np.arange(10, 13),
np.arange(
16, 19), np.arange(
21, 26), np.arange(
31, 32),
np.arange(
33, 35), np.arange(
36, 37), np.arange(
41, 42),
np.arange(
49, 55), np.arange(
57, 58), np.arange(
61, 65),
np.arange(
69, 72), np.arange(
76, 81), np.arange(
83, 97),
np.arange(
99, 102), np.arange(
104, 107), np.arange(
109, 114),
np.arange(119, 121), np.arange(129, 131)))
onsetelecs = ["X'4", "T'5", "T'6", "O'1", "O'2", "B1", "B2"]
spreadelecs = ["P1", "P2", 'P6', "X1", "X8", "X9", "E'2", "E'3"
"T'1"]
if seiz_id == 'inter2':
included_indices = np.concatenate((np.arange(0, 1), np.arange(7, 16), np.arange(21, 28),
np.arange(
33, 36), np.arange(
39, 40), np.arange(
42, 44), np.arange(
46, 50),
np.arange(
56, 58), np.arange(
62, 65), np.arange(
66, 68), np.arange(
69, 75),
np.arange(76, 83), np.arange(85, 89), np.arange(96, 103),
np.arange(106, 109), np.arange(111, 115), np.arange(116, 117),
np.arange(119, 123), np.arange(126, 127), np.arange(130, 134),
np.arange(136, 137), np.arange(138, 144), np.arange(146, 153)))
if seiz_id == 'ictal2':
included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 19), np.arange(20, 33),
np.arange(
34, 37), np.arange(
38, 40), np.arange(
42, 98),
np.arange(107, 136), np.arange(138, 158)))
onsetelecs = ["Y'1"]
clinresult = 1
elif pat_id == 'la02':
# [1:4 7 9 11:12 15:18 21:28 30:34 47 50:62 64:67 ...
# 70:73 79:87 90 95:99]
included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 7), np.arange(8, 9),
np.arange(
10, 12), np.arange(
14, 18), np.arange(
20, 28),
np.arange(
29, 34), np.arange(
46, 47), np.arange(
49, 62),
np.arange(
63, 67), np.arange(
69, 73), np.arange(
78, 87),
np.arange(89, 90), np.arange(94, 99)))
onsetelecs = ["L'2", "L'3", "L'4"]
clinresult = 1
elif pat_id == 'la03':
# [1:3 6:33 36:68 77:163]
included_indices = np.concatenate((np.arange(0, 3), np.arange(5, 33),
np.arange(35, 68), np.arange(76, 163)))
onsetelecs = ["L7"]
clinresult = 2
elif pat_id == 'la04':
# [1:4 9:13 15:17 22 24:32 44:47 52:58 60 63:64 ...
# 67:70 72:74 77:84 88:91 94:96 98:101 109:111 114:116 121 123:129];
included_indices = np.concatenate((np.arange(0, 4), np.arange(8, 13),
np.arange(
14, 17), np.arange(
21, 22), np.arange(
23, 32),
np.arange(43, 47), np.arange(51, 58), np.arange(59, 60),
np.arange(62, 64), np.arange(66, 70), np.arange(71, 74),
np.arange(76, 84), np.arange(87, 91), np.arange(93, 96),
np.arange(97, 101), np.arange(108, 111), np.arange(113, 116),
np.arange(120, 121), np.arange(122, 129)))
# FIRST ABLATION WAS A FAILURE
onsetelecs = ["L'4", "G'1", # 2ND RESECTION REMOVED ALL OF M' ELECTRODES
"M'1", "M'2", "M'3", "M'4", "M'5", "M'6", "M'7",
"M'8", "M'9", "M'10", "M'11", "M'12", "M'13", "M'14", "M'15", "M'16"]
clinresult = 2
elif pat_id == 'la05':
# [2:4 7:15 21:39 42:82 85:89 96:101 103:114 116:121 ...
# 126:145 147:152 154:157 160:161 165:180 182:191];
included_indices = np.concatenate((np.arange(1, 4), np.arange(6, 15),
np.arange(
20, 39), np.arange(
41, 82), np.arange(
84, 89),
np.arange(95, 101), np.arange(102, 114), np.arange(115, 121),
np.arange(125, 145), np.arange(146, 152), np.arange(153, 157),
np.arange(159, 161), np.arange(164, 180), np.arange(181, 191)))
onsetelecs = ["T'1", "T'2", "D'1", "D'2"]
clinresult = 1
elif pat_id == 'la06':
# [1:4 7:12 14:17 19 21:33 37 46:47 50:58 61:62 70:73 77:82 ...
# 84:102 104:112 114:119];
included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 12),
np.arange(
13, 17), np.arange(
18, 19), np.arange(
20, 33),
np.arange(36, 37), np.arange(45, 47), np.arange(49, 58),
np.arange(60, 62), np.arange(69, 73), np.arange(76, 82),
np.arange(83, 102), np.arange(103, 112), np.arange(113, 119)))
onsetelecs = ["Q'3", "Q'4", "R'3", "R'4"]
clinresult = 2
elif pat_id == 'la07':
# [1:18 22:23 25 34:37 44 48:51 54:55 57:69 65:66 68:78 ...
# 82:83 85:93 96:107 114:120];
included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 18), np.arange(21, 23),
np.arange(
24, 25), np.arange(
33, 37), np.arange(
43, 44),
np.arange(47, 51), np.arange(53, 55), np.arange(56, 69),
np.arange(64, 66), np.arange(67, 78), np.arange(81, 83),
np.arange(84, 93), np.arange(95, 107), np.arange(113, 120)))
onsetelecs = ["T'1", "T'3", "R'8", "R'9"]
clinresult = 1
elif pat_id == 'la08':
# [1:2 8:13 15:19 22 25 27:30 34:35 46:48 50:57 ...
# 65:68 70:72 76:78 80:84 87:93 100:102 105:108 110:117 123:127 130:131 133:137 ...
# 140:146]
included_indices = np.concatenate((np.arange(0, 2), np.arange(7, 13),
np.arange(
14, 19), np.arange(
21, 22), np.arange(
24, 25),
np.arange(26, 30), np.arange(33, 35), np.arange(45, 48),
np.arange(49, 57), np.arange(64, 68), np.arange(69, 72),
np.arange(75, 78), np.arange(79, 84), np.arange(86, 93),
np.arange(99, 102), np.arange(104, 108), np.arange(109, 117),
np.arange(122, 127), np.arange(129, 131), np.arange(132, 137),
np.arange(139, 146)))
onsetelecs = ["Q2"]
clinresult = 2
elif pat_id == 'la09':
# [3:4 7:17 21:28 33:38 42:47 51:56 58:62 64:69 ...
# 73:80 82:84 88:92 95:103 107:121 123 126:146 150:161 164:169 179:181 ...
# 183:185 187:191]
# 2/7/18 - got rid of F10 = looking at edf was super noisy
included_indices = np.concatenate((np.arange(2, 3), np.arange(6, 17),
np.arange(
20, 28), np.arange(
32, 38), np.arange(
41, 47),
np.arange(
50, 56), np.arange(
57, 62), np.arange(
63, 66), np.arange(
67, 69),
np.arange(72, 80), np.arange(81, 84), np.arange(87, 92),
np.arange(94, 103), np.arange(106, 121), np.arange(122, 123),
np.arange(125, 146), np.arange(149, 161), np.arange(163, 169),
np.arange(178, 181), np.arange(182, 185), np.arange(186, 191)))
onsetelecs = ["X'1", "X'2", "X'3", "X'4", "U'1", "U'2"]
if seiz_id == 'ictal2':
included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 19),
np.arange(20, 39), np.arange(41, 189)))
onsetelecs = ["P'1", "P'2"]
clinresult = 2
elif pat_id == 'la10':
# [1:4 7:13 17:19 23:32 36:37 46:47 50 54:59 62:66 68:79 82:96 ...
# 99:106 108:113 117:127 135:159 163:169 172:173 176:179 181:185];
included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 13),
np.arange(
16, 19), np.arange(
22, 32), np.arange(
35, 37),
np.arange(45, 47), np.arange(49, 50), np.arange(53, 59),
np.arange(61, 66), np.arange(67, 79), np.arange(81, 96),
np.arange(98, 106), np.arange(107, 113), np.arange(116, 127),
np.arange(134, 159), np.arange(162, 169), np.arange(171, 173),
np.arange(175, 179), np.arange(180, 185)))
onsetelecs = ["S1", "S2", "R2", "R3"]
clinresult = 2
elif pat_id == 'la11':
# [3:4 7:16 22:30 33:39 42 44:49 53:62 64:87 91:100 ...
# 102:117 120:127 131:140 142:191];
included_indices = np.concatenate((np.arange(2, 4), np.arange(6, 16),
np.arange(
21, 30), np.arange(
32, 39), np.arange(
41, 42), np.arange(
43, 49),
np.arange(
52, 62), np.arange(
63, 87), np.arange(
90, 100), np.arange(
101, 117),
np.arange(119, 127), np.arange(130, 140), np.arange(141, 191)))
onsetelecs = ["D6", "Z10"]
clinresult = 2
elif pat_id == 'la12':
included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 15),
np.arange(
19, 23), np.arange(
24, 31), np.arange(
34, 36), np.arange(
42, 44), np.arange(
47, 48),
np.arange(
49, 59), np.arange(
61, 66), np.arange(
68, 86), np.arange(
87, 90),
np.arange(
91, 100), np.arange(
101, 119), np.arange(
121, 129), np.arange(
131, 134),
np.arange(136, 150), np.arange(153, 154), np.arange(156, 161),
np.arange(167, 178), np.arange(187, 191)))
onsetelecs = ["S1", "S2", "R2", "R3"]
clinresult = 3
elif pat_id == 'la13':
# [1:4 7:12 23:33 36:37 44:45 48:70 72:93]
included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 12),
np.arange(
22, 33), np.arange(
35, 37), np.arange(
43, 45),
np.arange(47, 70), np.arange(71, 93)))
onsetelecs = ["Y13", "Y14"]
clinresult = 2
elif pat_id == 'la15':
# included_channels = [1:4 9:12 15:19 21:27 30:34 36:38 43:57 62:66 ...
# 68:71 76:85 89:106 108:112 114:115 118:124 127:132 135:158 ...
# 161:169 171:186]
included_indices = np.concatenate((np.arange(0, 4), np.arange(8, 12),
np.arange(
14, 19), np.arange(
20, 27), np.arange(
29, 34),
np.arange(35, 38), np.arange(42, 57), np.arange(61, 66),
np.arange(67, 71), np.arange(75, 85), np.arange(88, 106),
np.arange(107, 112), np.arange(113, 115), np.arange(117, 124),
np.arange(126, 132), np.arange(134, 158), np.arange(160, 169), np.arange(170, 186)))
if seiz_id == 'ictal':
included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 19),
np.arange(
20, 39), np.arange(
41, 95), np.arange(
96, 112),
np.arange(113, 132), np.arange(134, 187)))
onsetelecs = ["R1", "R2", "R3"]
clinresult = 4
elif pat_id == 'la16':
# [1:3 10:16 23:24 28 31:35 37:39 42:44 46:47 ...
# 49:54 58:62 64:65 68:70 76:89 93:98 100:101 105:124 126 128:130 ...
# 132:134 136:140 142:144 149:156 158:163 165:166 168:170 173:181
# 183:189];
included_indices = np.concatenate((np.arange(0, 3), np.arange(9, 16),
np.arange(
22, 24), np.arange(
27, 28), np.arange(
30, 35),
np.arange(36, 39), np.arange(41, 44), np.arange(45, 47),
np.arange(48, 54), np.arange(57, 62), np.arange(63, 65),
np.arange(67, 70), np.arange(75, 89), np.arange(92, 98),
np.arange(99, 101), np.arange(104, 124), np.arange(125, 126),
np.arange(127, 130), np.arange(131, 134), np.arange(135, 140),
np.arange(141, 144), np.arange(148, 156), np.arange(157, 163),
np.arange(164, 166), np.arange(167, 170), np.arange(172, 181),
np.arange(182, 189)))
onsetelecs = ["Q7", "Q8"]
clinresult = 4
elif pat_id == 'la17':
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 39),
np.arange(41, 64)))
onsetelecs = ["X'1", "Y'1"]
clinresult = 4
return included_indices, onsetelecs, clinresult
def returnummcindices(pat_id, seiz_id):
included_indices = np.array([])
onsetelecs = None
clinresult = -1
if pat_id == 'ummc001':
# included_channels = [1:22 24:29 31:33 35:79 81:92];
included_indices = np.concatenate((np.arange(0, 22), np.arange(23, 29), np.arange(30, 33),
np.arange(34, 79), np.arange(80, 92)))
onsetelecs = ["GP13", 'GP21', 'GP29']
clinresult = 4
elif pat_id == 'ummc002':
# included_channels = [1:22 24:29 31:33 35:52];
included_indices = np.concatenate((np.arange(0, 22), np.arange(23, 29), np.arange(30, 33),
np.arange(34, 52)))
onsetelecs = ['ANT1', 'ANT2', 'ANT3',
'MEST1', 'MEST2', 'MEST3', 'MEST4', 'GRID17', 'GRID25']
# onsetelecs = ['ATT1', 'ATT2', 'ATT3',
# 'MEST1', 'MEST2', 'MEST3', 'MEST4', 'GRID17', 'GRID25']
clinresult = 1
elif pat_id == 'ummc003':
included_indices = np.concatenate((np.arange(0, 22), np.arange(23, 29), np.arange(30, 33),
np.arange(34, 48)))
onsetelecs = ['MEST4', 'MEST5', 'GRID4', 'GRID10', 'GRID12',
'GRID18', 'GRID19', 'GRID20', 'GRID26', 'GRID27']
clinresult = 1
elif pat_id == 'ummc004':
included_indices = np.concatenate((np.arange(0, 22), np.arange(23, 29), np.arange(30, 33),
np.arange(34, 49)))
onsetelecs = ['AT1', 'GRID1', 'GRID9', 'GRID10', 'GRID17', 'GRID18']
clinresult = 1
elif pat_id == 'ummc005':
included_indices = np.concatenate(
(np.arange(0, 33), np.arange(34, 48)))
onsetelecs = ['AT2', 'G17', 'G19', 'G25', 'G27', 'AT1', 'AT2', 'AT3', 'AT4',
'AT5', 'AT6']
onsetelecs = ['AT1']
# , 'GRID1', 'GRID9', 'GRID10', 'GRID17', 'GRID18']
clinresult = 1
elif pat_id == 'ummc005':
included_indices = np.concatenate(
(np.arange(0, 33), np.arange(34, 48)))
onsetelecs = ['AT2', 'G17', 'G19', 'G25', 'G27']
# , 'AT1', 'AT2', 'AT3', 'AT4','AT5', 'AT6']
clinresult = 1
elif pat_id == 'ummc006':
included_indices = np.concatenate((np.arange(0, 22), np.arange(23, 26), np.arange(27, 29),
                                           np.arange(30, 33),
from scipy.special import logsumexp
from scipy.stats import poisson
import numpy as np
from .estimation import log_posterior_sum, log_mat_mul
from .utils import log_inv
from .sparsebase import _BaseSparseHMM
from sklearn.utils import check_random_state
from sklearn import cluster
class PoissonHMM(_BaseSparseHMM):
def __init__(self, n_components=2,
startprob_prior=1.0, transmat_prior=1.0,
algorithm="viterbi", random_state=None,
n_iter=10, tol=1e-2, verbose=False,
params="str", init_params="str"):
super().__init__(n_components,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior,
algorithm=algorithm,
random_state=random_state,
n_iter=n_iter, tol=tol, verbose=verbose,
params=params, init_params=init_params)
def _get_n_fit_scalars_per_param(self):
nc = self.n_components
return {
"s": nc - 1,
"t": nc * (nc - 1),
"r": nc
}
def _init(self, X, lengths=None):
self._check_and_set_n_features(X)
super()._init(X, lengths=lengths)
self.random_state = check_random_state(self.random_state)
if 'r' in self.init_params:
kmeans = cluster.KMeans(n_clusters=self.n_components,
random_state=self.random_state)
kmeans.fit(X)
self.rate_ = np.sort(kmeans.cluster_centers_.flatten())
def _check(self):
super()._check()
assert self.rate_.shape == (self.n_components,), (self.rate_.shape, (self.n_components,))
def _compute_log_likelihood(self, X):
return poisson.logpmf(X.reshape((-1, 1)), self.rate_)
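    # A minimal fitting sketch (synthetic counts; assumes the hmmlearn-style
    # fit/predict interface supplied by the _BaseSparseHMM parent class):
    #   counts = np.random.poisson(lam=5, size=(200, 1))
    #   model = PoissonHMM(n_components=2, n_iter=20)
    #   model.fit(counts)
    #   hidden_states = model.predict(counts)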
def _initialize_sufficient_statistics(self):
stats = super()._initialize_sufficient_statistics()
stats['counts'] = np.zeros(self.n_components)
stats['posts'] = np.zeros(self.n_components)
return stats
def _accumulate_sufficient_statistics(self, stats, X, framelogprob,
posteriors, fwdlattice, bwdlattice, rls):
super()._accumulate_sufficient_statistics(
stats, X, framelogprob, posteriors, fwdlattice, bwdlattice, rls)
if 'r' in self.params:
inv_mat, s_inv_mat = log_inv(np.log(self.transmat_)+ framelogprob[0][None, :], np.sign(self.transmat_))
            first_f, sf = log_mat_mul((np.log(self.startprob_)
#!/usr/bin/python
'''
Program:
    This is a library program for image registration
Usage:
1. from register_lib import [func name] or import curvefit
2. use it in your lovely code.
Editor:
Jacob975
#################################
update log
20180628 version alpha 1
1. Remove some
'''
import numpy as np
# Calculate the inner product and its error for two sides: from star_1 to star_2 and from star_1 to star_3.
def inner_product(star_1, star_2, star_3, sigma):
try:
inner_prod = (star_2[0] - star_1[0])*(star_3[0] - star_1[0]) + (star_2[1] - star_1[1])*(star_3[1] - star_1[1])
x_part_1 = np.power(star_1[0] - star_2[0], 2)
x_error_1 = (2 * np.power(sigma, 2))/x_part_1
x_part_2 = np.power(star_1[0] - star_3[0], 2)
x_error_2 = (2 * np.power(sigma, 2))/x_part_2
y_part_1 = np.power(star_1[1] - star_2[1], 2)
y_error_1 = (2 * np.power(sigma, 2))/y_part_1
y_part_2 = np.power(star_1[1] - star_3[1], 2)
y_error_2 = (2 * np.power(sigma, 2))/y_part_2
var = x_part_1*x_part_2*(x_error_1 + x_error_2) + y_part_1*y_part_2*(y_error_1 + y_error_2)
error = np.power(var, 0.5)
except :
return 0, 0
else:
return inner_prod, error
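# Usage sketch (the coordinates below are made up, not from any catalogue):
# the star arguments are (x, y) positions and sigma is the positional
# uncertainty in pixels; the return values are the dot product of the side
# vectors AB and AC and its propagated 1-sigma error.
#   prod, err = inner_product((10., 12.), (15., 20.), (3., 7.), sigma=2.0)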
# Count how many inner products of the reference star match those of the other star, then return that number.
def num_relation_lister(ref_star, star, error):
valid_inner_prod = 0
for ref_inner_prod in ref_star:
for i in xrange(len(star)):
if ref_inner_prod <= star[i] + error[i] and ref_inner_prod >= star[i] - error[i]:
valid_inner_prod = valid_inner_prod + 1
continue
return valid_inner_prod
# Choose a star as the target, then choose two other stars and calculate the inner products.
def get_inner_product(iraf_table, infos = None):
inner_prod_star_list = []
inner_prod_error_list = []
sigma = 2.0
# choose a star, named A
for i in xrange(len(iraf_table)):
inner_prod_star = np.array([])
inner_prod_error = np.array([])
        # Choose two other stars, named B and C, to get the inner products of the two sides AB and AC.
for j in xrange(len(iraf_table)):
if i == j:
continue
for k in xrange(len(iraf_table)):
if k == i:
continue
if k <= j:
continue
inner_prod, error = inner_product(iraf_table[i,1:3], iraf_table[j,1:3], iraf_table[k,1:3], sigma)
inner_prod_star = np.append(inner_prod_star, inner_prod)
inner_prod_error = np.append(inner_prod_error, error)
        # Collect all the inner products into a list; it acts like a fingerprint (DNA) of this star.
inner_prod_star_list.append(inner_prod_star)
inner_prod_error_list.append(inner_prod_error)
inner_prod_star_list = np.array(inner_prod_star_list)
inner_prod_error_list = np.array(inner_prod_error_list)
return inner_prod_star_list, inner_prod_error_list
# Choose a star as the target, then choose two other stars and calculate the inner products.
def get_inner_product_SE(SE_table):
inner_prod_star_list = []
inner_prod_error_list = []
sigma = 2.0
# choose a star, named A
for i in xrange(len(SE_table)):
inner_prod_star = np.array([])
inner_prod_error = np.array([])
        # Choose two other stars, named B and C, to get the inner products of the two sides AB and AC.
for j in xrange(len(SE_table)):
if i == j:
continue
for k in xrange(len(SE_table)):
if k == i:
continue
if k <= j:
continue
inner_prod, error = inner_product(SE_table[i,2:4], SE_table[j,2:4], SE_table[k,2:4], sigma)
inner_prod_star = np.append(inner_prod_star, inner_prod)
inner_prod_error = np.append(inner_prod_error, error)
        # Collect all the inner products into a list; it acts like a fingerprint (DNA) of this star.
inner_prod_star_list.append(inner_prod_star)
inner_prod_error_list.append(inner_prod_error)
inner_prod_star_list = np.array(inner_prod_star_list)
inner_prod_error_list = np.array(inner_prod_error_list)
return inner_prod_star_list, inner_prod_error_list
#--------------------------------------------------------------------
# This is a function to wipe out exotic (outlier) numbers in a list.
# This one is made for matching images.
def get_rid_of_exotic_severe(value_list, VERBOSE = 0):
answer_value_list = value_list[:]
std = np.std(answer_value_list)
# resursive condition
while std > 1 :
mean = np.mean(answer_value_list)
# get the error of each value to the mean, than delete one with largest error.
sub_value_list = np.subtract(answer_value_list, mean)
abs_value_list = np.absolute(sub_value_list)
index_max = np.argmax(abs_value_list)
answer_value_list= np.delete(answer_value_list, index_max)
        std = np.std(answer_value_list)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
import numpy as np
import cv2
from lib.show_images import debugShowBoxes
class BaseContoursHeatmap(object):
cv_thresh = cv2.THRESH_BINARY
cv_contour_method = cv2.CHAIN_APPROX_NONE
contour_mode = cv2.RETR_TREE
def __init__(self):
pass
def determenistic_boxes(self, orig, hmap, thresh=0.7, draw=False):
dfunc = partial(self._deterministic_threshold, thresh=thresh)
return self._base_get_bboxes(thresh_func=dfunc, orig=orig, hmap=hmap, draw=draw)
def edge_boxes(self, orig, hmap, draw=False):
return self._base_get_bboxes(thresh_func=self._edges_thresh, orig=orig, hmap=hmap, draw=draw)
def _base_get_bboxes(self, thresh_func, orig, hmap, draw=False):
o_shape = orig.shape
h_shape = hmap.shape
edges = thresh_func(hmap=hmap)
conts = self._get_contours(threshed_hmap=edges)
boxes = self._bboxes_from_contours(conts=conts)
if boxes.shape[0] > 0:
scales = [o_shape[0] / float(h_shape[0]), o_shape[1]/float(h_shape[1])]
scales = np.array(scales+scales)
boxes = boxes*scales
if draw:
debugShowBoxes(orig, boxes=boxes, wait=3000)
return boxes
return np.zeros(shape=(1, 4))
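    # Pipeline sketch: threshold the heatmap, extract contours, turn them into
    # bounding boxes and rescale the boxes from heatmap resolution back to the
    # original image. Hypothetical call (hmap is an HxW float map in [0, 1]):
    #   boxes = BaseContoursHeatmap().determenistic_boxes(orig=image, hmap=hmap, thresh=0.7)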
def _deterministic_threshold(self, hmap, thresh=0.7, scale=255):
hmap = (hmap*scale).astype(np.uint8)
_, thresh = cv2.threshold(hmap, int(scale * thresh), scale, self.cv_thresh)
return thresh
def _edges_thresh(self, hmap, thresh=0.5, scale=255):
hmap = (hmap * scale).astype(np.uint8)
edges = cv2.Canny(hmap, scale*thresh, scale)
return edges
def _binomial_threshold(self, hmap):
orig_shape = hmap.shape
p = hmap.flatten()
        thresh = np.random.binomial(n=1, p=p)
import numpy
from .. import system
from . import ss
class DTTF(system.System):
r"""
:py:class:`pyctrl.system.DTTF` implements a single-input-single-output (SISO) transfer-function.
The underlying model is of the form:
.. math::
        den[0] y_k + den[1] y_{k-1} + \cdots + den[n] y_{k-n} = num[0] u_k + num[1] u_{k-1} + \cdots + num[m] u_{k-m}
which corresponds to the transfer-function:
.. math::
G(z) = \frac{\sum_{i = 0}^m z^{-i} num[i]}{\sum_{i = 0}^n z^{-i} den[i]}
Denominator is always normalized so that :math:`den[0] = 1`.
Model is implementated in terms of the auxiliary variable $z$ as follows. Let
.. math::
z_k + den[1] z_{k-1} + \cdots + den[n] z_{k-n} = u_k
By linearity:
.. math::
y_k = num[0] z_k + num[1] z_{k-1} + \cdots + den[n] z_{k-n}
:param num: numpy m-dimensional 1D-vector numerator (default [1])
:param den: numpy n-dimensional 1D-vector denominator (default [1])
:param state: numpy n-dimensional 1D-vector representing vector z (default `None`)
"""
def __init__(self,
num = numpy.array((1,)),
den = numpy.array((1,)),
state = None):
# make sure it is numpy array
num = numpy.array(num)
den = numpy.array(den)
# must be proper
n = num.size - 1
m = den.size - 1
#print('n = {}\nm = {}'.format(n, m))
# Make vectors same size
self.den = den.astype(float)
self.num = num.astype(float)
if m < n:
self.den.resize(num.shape)
m = n
elif m > n:
self.num.resize(den.shape)
n = m
        # improper?
if not self.den[0]:
raise system.SystemException('Order of numerator cannot be greater than order of the denominator')
# normalize denominator
self.num = self.num / self.den[0]
self.den = self.den / self.den[0]
if state is None:
self.state = numpy.zeros((n,), dtype=float)
elif state.size == n:
self.state = state.astype(float)
else:
raise system.SystemException('Order of state must match order of denominator')
#print('num = {}'.format(self.num))
#print('den = {}'.format(self.den))
#print('state = {}'.format(self.state))
def set_output(self, yk):
r"""
Sets the internal state of the :py:class:`pyctrl.system.DTTF` so that a call to `update` with `uk = 0` yields `yk`.
It is calculated as follows. With :math:`u_k = 0`
.. math::
z_k + den[1] z_{k-1} + \cdots + den[n] z_{k-n} = 0
and
.. math::
y_k &= num[0] z_k + num[1] z_{k-1} + \cdots + num[n] z_{k-n} \\
&= num[1] z_{k-1} + \cdots + num[n] z_{k-n} - num[0] (den[1] z_{k-1} + \cdots + den[n] z_{k-n}) \\
and :math:`y_k =` `yk` if :math:`num[1] \neq num[0] den[1]` and
.. math::
z_{k-1} = \frac{y_k - \sum_{i = 2}^{n} (num[i] - num[0] den[i]) z_{k-i}}{num[1] - num[0] den[1]}
TODO: if :math:`num[1] \neq num[0] den[1]` then choose next nonzero coefficient.
:param yk: scalar desired `yk`
"""
self.state[1:] = 0
if yk != 0:
self.state[0] = (yk - self.state[1:].dot(self.num[2:]) + self.num[0] * self.state[1:].dot(self.den[2:]) ) / (self.num[1] - self.num[0] * self.den[1])
elif self.state.size > 0:
self.state[0] = 0
#print('state = {}'.format(self.state))
def shape(self):
return (1,1,len(self.state))
def update(self, uk):
r"""
Update :py:class:`pyctrl.system.DTTF` model. Implements the recursion:
.. math::
z_k + den[1] z_{k-1} + \cdots + den[n] z_{k-n} &= u_k \\
y_k &= num[0] z_k + num[1] z_{k-1} + \cdots + den[n] z_{k-n}
:param numpy.array uk: input at time k
"""
#print('uk = {}, state = {}'.format(uk, self.state))
zk = uk - self.state.dot(self.den[1:])
yk = self.num[0] * zk + self.state.dot(self.num[1:])
if self.state.size > 0:
if self.state.size > 1:
# shift state
self.state[1:] = self.state[:-1]
self.state[0] = zk
return yk
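    # Usage sketch (coefficients chosen only for illustration): a first-order
    # filter G(z) = 0.5 / (1 - 0.5 z^-1) driven by a unit step.
    #   sys = DTTF(num=numpy.array((0.5,)), den=numpy.array((1., -0.5)))
    #   y = [sys.update(1.) for _ in range(4)]   # 0.5, 0.75, 0.875, 0.9375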
def as_DTSS(self):
"""
:returns: a state-space representation (:py:class:`pyctrl.system.DTSS`) of the :py:class:`pyctrl.system.DTTF`.
"""
n = self.num.size - 1
m = 1
p = 1
A = numpy.zeros((n,n))
B = numpy.zeros((n,m))
        C = numpy.zeros((p,n))
import math
from PIL import Image
import numpy as np
from numpy.ma import masked_array
from matplotlib import colors, cm
import matplotlib.gridspec as gridspec
from matplotlib.lines import Line2D
import matplotlib.pyplot as plt
from tensorboard import summary as summary_lib
from tensorboard.plugins.custom_scalar import layout_pb2
FIG_DPI = 150
class MidpointNormalize(colors.Normalize):
"""custom colormap with two linear ranges as per https://matplotlib.org/users/colormapnorms.html#custom-normalization-two-linear-ranges"""
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
colors.Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
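    # Example of the two linear ranges: MidpointNormalize(vmin=0, vmax=10,
    # midpoint=1) maps 0 -> 0.0, 1 -> 0.5 and 10 -> 1.0, which is how the gamma
    # plots below pivot a diverging colormap at gamma == 1.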
def figure2array(fig):
fig.canvas.draw()
buf, (width, height) = fig.canvas.print_to_buffer()
# buffer is rgba
d = np.frombuffer(buf, dtype=np.uint8)
d = d.reshape((height, width, 4))
d = d[None, :, :, :] # TF2 requires a batch axis in front
plt.close(fig)
return d
def save_figure_array(figarr, f):
"""save array representing colored figure (produced by figure2array) to file"""
# figarr by default has shape [1,H,W,3] for compat. with tensorboard
Image.fromarray(figarr[0]).save(f)
ignored_cmap = None
def get_ignored_cmap():
global ignored_cmap
if ignored_cmap is None:
# ignored_cmap = colors.LinearSegmentedColormap('ignored', {'red': ((0,0.1,0.1), (1,0.1,0.1)), 'green': ((0,0.5,0.5), (1,0.5,0.5)), 'blue': ((0,0.1,0.1), (1,0.1,0.1))})
ignored_cmap = colors.LinearSegmentedColormap('ignored', {'red': ((0,0.325,0.325), (1,0.325,0.325)), 'green': ((0,0.325,0.325), (1,0.325,0.325)), 'blue': ((0,0.325,0.325), (1,0.325,0.325))})
return ignored_cmap
def create_volume_dose_figure(arr, col_labels=[], dpi=FIG_DPI, cmap='viridis', own_scale=False, return_fig=False, ndiff_cols=1):
"""create figure of axes rows mapped to each slice in numpy array [Rows, Cols, H, W]
with each row containing imshow() instances with matched value limits (vmin, vmax) according
to data (min, max)
"""
nrows, ncols, H, W = arr.shape
axwidth_inches = 1.25
width_ratios=[1.0]*ncols + [0.25]*2
figwidth = axwidth_inches*np.sum(width_ratios)
figheight = axwidth_inches/W*H*nrows/0.98
fig = plt.figure(figsize=(figwidth, figheight), dpi=dpi)
spec = gridspec.GridSpec(
nrows=nrows,
ncols=ncols+2,
width_ratios=width_ratios,
wspace=0,
hspace=0,
left=0.0, right=1.0,
bottom=0.0, top=0.98,
figure=fig,
)
# annotation settings
annotate_fontsize = 5
annotate_margin = 0.03 # as fraction of imshow axes
# create axes and set data
row_minmax = []
row_diff_absmax = []
for row in range(nrows):
rowarr = arr[row]
if ndiff_cols>0:
vmin, vmax = np.amin(rowarr[:-ndiff_cols]), np.amax(rowarr[:-ndiff_cols])
diffmin, diffmax = np.amin(rowarr[-ndiff_cols:]), np.amax(rowarr[-ndiff_cols:])
diffabsmax = max(abs(diffmin), abs(diffmax))
row_diff_absmax.append(diffabsmax)
else:
vmin, vmax = np.amin(rowarr), np.amax(rowarr)
row_minmax.append((vmin, vmax))
for col in range(ncols):
if col>=ncols-ndiff_cols:
# logarithmic cmap with diverging (difference map)
# this_norm = colors.SymLogNorm(vmin=-diffabsmax, vmax=diffabsmax, linthresh=0.01, linscale=0.01)
this_norm = colors.Normalize(vmin=-diffabsmax, vmax=diffabsmax)
this_cmap = 'RdBu_r'
this_fontcolor = 'black'
else:
# linear cmap with default colors
this_norm = colors.Normalize(vmin=vmin, vmax=vmax)
this_cmap = cmap
this_fontcolor = 'white'
# draw array as image
cellarr = rowarr[col]
ax = fig.add_subplot(spec[row, col])
ax.set_xticks([])
ax.set_yticks([])
ax.imshow(cellarr,
interpolation='none', aspect='equal',
cmap=this_cmap,
norm=this_norm if not own_scale else None)
# add image min/max as annotation
margin = 0.03
fmt = '{:0.2f}'
cellmin, cellmax = np.amin(cellarr), np.amax(cellarr)
ax.text(margin, 1.0-margin, fmt.format(cellmax),
fontsize=annotate_fontsize,
color=this_fontcolor,
horizontalalignment='left', verticalalignment='top',
transform=ax.transAxes)
ax.text(margin, margin, fmt.format(cellmin),
fontsize=annotate_fontsize,
color=this_fontcolor,
horizontalalignment='left', verticalalignment='bottom',
transform=ax.transAxes)
# add column headings
if row==0 and col < len(col_labels):
ax.text(0.5, 1.01, col_labels[col],
horizontalalignment='center', verticalalignment='bottom',
transform=ax.transAxes)
# add shared colorbar (standard)
cbar_ax = fig.add_subplot(spec[:, -2])
fig.colorbar(
cm.ScalarMappable(norm=colors.Normalize(vmin=0, vmax=1), cmap='viridis'),
cax=cbar_ax,
ticks=[],
)
if ndiff_cols > 0:
# add shared colorbar (difference)
cbar_ax2 = fig.add_subplot(spec[:, -1])
fig.colorbar(
cm.ScalarMappable(norm=colors.Normalize(vmin=-1, vmax=1), cmap='RdBu_r'),
cax=cbar_ax2,
ticks=[],
)
# add row-wise vlimit labels to colorbar
for row in range(nrows):
ypos_high = 1.0 - (float(row+margin)/nrows)
ypos_low = 1.0 - (float(row+1-margin)/nrows)
row_min, row_max = row_minmax[row]
cbar_ax.text(0.5, ypos_high, '{:0.2f}'.format(row_max),
fontsize=annotate_fontsize,
horizontalalignment='center', verticalalignment='top',
transform=cbar_ax.transAxes)
cbar_ax.text(0.5, ypos_low, '{:0.2f}'.format(row_min),
fontsize=annotate_fontsize,
horizontalalignment='center', verticalalignment='bottom',
transform=cbar_ax.transAxes)
if ndiff_cols > 0:
row_diffabsmax = row_diff_absmax[row]
cbar_ax2.text(0.5, ypos_high, '{:0.2f}'.format(row_diffabsmax),
fontsize=annotate_fontsize,
horizontalalignment='center', verticalalignment='top',
transform=cbar_ax2.transAxes)
cbar_ax2.text(0.5, ypos_low, '{:0.2f}'.format(-row_diffabsmax),
fontsize=annotate_fontsize,
horizontalalignment='center', verticalalignment='bottom',
transform=cbar_ax2.transAxes)
if return_fig:
return fig
else:
return figure2array(fig)
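# Sketch of the expected input (shapes illustrative): one row comparing a ground
# truth dose, a prediction and their difference, where the trailing ndiff_cols
# columns are drawn on a diverging colormap:
#   arr = np.stack([gt, pred, gt - pred])[None]   # -> [1, 3, H, W]
#   img = create_volume_dose_figure(arr, col_labels=['gt', 'pred', 'diff'])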
def make_image_figure(colorbar=True, dpi=FIG_DPI):
"""makes standard figure with large imshow axes and colorbar to right"""
fig = plt.figure(dpi=dpi)
if colorbar:
ax_im = fig.add_axes([0, 0, 0.87, 1.0])
ax_cbar = fig.add_axes([0.89, 0.05, 0.04, 0.9])
else:
ax_im = fig.add_axes([0, 0, 1.0, 1.0])
ax_cbar = None
return (fig, ax_im, ax_cbar)
def plot_dose(*args, colorbar=True, dpi=FIG_DPI, **kwargs):
"""See _plot_gamma_components for function signature"""
fig, ax_im, ax_cbar = make_image_figure(colorbar, dpi)
_plot_dose(ax_im, ax_cbar, *args, **kwargs)
return figure2array(fig)
def _plot_dose(ax_im, ax_cbar, arr, **kwargs):
"""plots array using imshow with colorbar then converts back to png compatible rgb array"""
kwargs['cmap'] = kwargs.get('cmap', 'viridis')
im = ax_im.imshow(arr, interpolation='nearest', **kwargs)
ax_im.set_axis_off()
if ax_cbar is not None:
plt.colorbar(im, ax_cbar)
return (ax_im, ax_cbar)
def plot_gamma(*args, colorbar=True, dpi=FIG_DPI, **kwargs):
"""See _plot_gamma_components for function signature"""
fig, ax_im, ax_cbar = make_image_figure(colorbar, dpi)
_plot_gamma(fig, ax_im, ax_cbar, *args, **kwargs)
return figure2array(fig)
def _plot_gamma(fig, ax_im, ax_cbar, arr, annotate=None, **kwargs):
"""plots array using imshow with colorbar then converts back to png compatible rgb array"""
ignored_cmap = get_ignored_cmap()
im = ax_im.imshow(arr, cmap='RdBu_r', interpolation='nearest', norm=MidpointNormalize(0, 10, 1), **kwargs)
im1 = masked_array(arr,arr>=0) # shows ignored values
ax_im.imshow(im1, cmap=ignored_cmap, interpolation='nearest')
ax_im.set_axis_off()
if ax_cbar is not None:
plt.colorbar(im, ax_cbar)
if annotate:
ax_im.text(0.02,0.02, str(annotate),
fontsize=11,
bbox={'facecolor':'white', 'alpha':1.0},
horizontalalignment='left',
verticalalignment='bottom',
transform=fig.transFigure,
)
return (ax_im, ax_cbar)
def plot_gamma_components(*args, colorbar=True, dpi=FIG_DPI, **kwargs):
"""See _plot_gamma_components for function signature"""
fig, ax_im, ax_cbar = make_image_figure(colorbar, dpi)
_plot_gamma_components(fig, ax_im, ax_cbar, *args, **kwargs)
return figure2array(fig)
def _plot_gamma_components(fig, ax_im, ax_cbar, arr_dd, arr_dta, annotate=None, array_spacing=2, **kwargs):
"""plots array using imshow with colorbar then converts back to png compatible rgb array"""
ignored_cmap = get_ignored_cmap()
arr = np.concatenate([arr_dd, -1*np.ones((arr_dd.shape[0], array_spacing)), arr_dta], axis=1)
im = ax_im.imshow(arr, cmap='RdBu_r', interpolation='nearest', norm=MidpointNormalize(0, 10, 1), **kwargs)
im1 = masked_array(arr,arr>=0) # shows ignored values
ax_im.imshow(im1, cmap=ignored_cmap, interpolation='nearest')
ax_im.set_axis_off()
# annotate with component specific passing percentages
try: dd_passing = (np.count_nonzero(arr_dd<=1)-np.count_nonzero(arr_dd<0))/np.count_nonzero(arr_dd>=0) # passing score for our purpose
except: dd_passing = np.nan
ax_im.text(0.02,0.02, 'dd: {:0.2f}%'.format(dd_passing*100),
fontsize=9,
bbox={'facecolor':'white', 'alpha':1.0},
horizontalalignment='left',
verticalalignment='bottom',
transform=ax_im.transAxes,
)
try: dta_passing = (np.count_nonzero(arr_dta<=1)-np.count_nonzero(arr_dta<0))/np.count_nonzero(arr_dta>=0) # passing score for our purpose
except: dta_passing = np.nan
ax_im.text(0.52,0.02, 'dta: {:0.2f}%'.format(dta_passing*100),
fontsize=9,
bbox={'facecolor':'white', 'alpha':1.0},
horizontalalignment='left',
verticalalignment='bottom',
transform=ax_im.transAxes,
)
if ax_cbar is not None:
plt.colorbar(im, ax_cbar)
if annotate:
ax_im.text(0.02,0.02, str(annotate),
fontsize=11,
bbox={'facecolor':'white', 'alpha':1.0},
horizontalalignment='left',
verticalalignment='bottom',
transform=fig.transFigure,
)
return (ax_im, ax_cbar)
def plot_profile(arr_pred, arr_true=None, annotate=None, dpi=FIG_DPI, **kwargs):
"""plots line profiles at various depths then converts back to png compatible rgb array"""
idx = [0.1, 0.3, 0.5, 0.7, 0.9]
color = ['green', 'red', 'yellow', 'skyblue', 'blue']
fig = plt.figure(dpi=dpi)
ax = fig.add_subplot(1,1,1)
x_axis = [i for i in range(arr_pred.shape[1])]
for j in range(len(idx)):
sliceidx = int(idx[j]*arr_pred.shape[0])
dose_pred = arr_pred[sliceidx,:].tolist()
ax.plot(x_axis,dose_pred,color[j],label='profile_at_%d_pixel'%sliceidx, **kwargs)
if arr_true is not None:
dose_true = arr_true[sliceidx,:].tolist()
ax.plot(x_axis,dose_true,color[j],linestyle=':',label=None, **kwargs)
plt.legend()
ax.set_ylabel('dose')
if annotate:
ax.text(0.02,0.02, str(annotate),
fontsize=11,
bbox={'facecolor':'white', 'alpha':1.0},
horizontalalignment='left',
verticalalignment='bottom',
transform=fig.transFigure,
)
return figure2array(fig)
def plot_gamma_scatter(*args, dpi=FIG_DPI, **kwargs):
fig = plt.figure(dpi=dpi)
ax = fig.add_subplot(1,1,1)
_plot_gamma_scatter(fig, ax, *args, **kwargs)
fig.tight_layout()
return figure2array(fig)
def _plot_gamma_scatter(fig, ax, arr_dd, arr_dta, arr_gamma, dd_thresh, dta_thresh, **kwargs):
"""place voxels on scatter-plot based on coordinates in dd-dta space"""
select = np.logical_and(arr_gamma>=0, np.isfinite(arr_gamma))
dd_flat = np.ravel(arr_dd[select])*dd_thresh*100
dta_flat = np.ravel(arr_dta[select])*dta_thresh
gamma_flat = np.ravel(arr_gamma[select])
scat = ax.scatter(dd_flat, dta_flat, s=4, marker='o', color='black')#, c=gamma_flat, cmap=ignored_cmap, norm=MidpointNormalize(0,10,1))
dd_max = np.max(dd_flat)
dta_max = np.max(dta_flat)
axis_buffer = 0.01
ax.set_xlim([-axis_buffer*dd_max, dd_max+axis_buffer*dd_max])
ax.set_ylim([-axis_buffer*dta_max, dta_max+axis_buffer*dta_max])
ax.set_xlabel('Percent dose difference')
ax.set_ylabel('Distance to agreement (mm)')
# criteria lines
lineargs = {'linewidth': 1, 'linestyle': '-', 'color': 'black'}
if dd_max > dd_thresh*100:
ax.add_line(Line2D(ax.get_xlim(), [dta_thresh, dta_thresh], **lineargs))
if dta_max > dta_thresh:
ax.add_line(Line2D([dd_thresh*100, dd_thresh*100], ax.get_ylim(), **lineargs))
# text annotation
try: gamma_passing = (np.count_nonzero(arr_gamma<=1)-np.count_nonzero(arr_gamma<0))/np.count_nonzero(arr_gamma>=0) # passing score for our purpose
except: gamma_passing = np.nan
try: dd_passing = (np.count_nonzero(arr_dd<=1)-np.count_nonzero(arr_dd<0))/np.count_nonzero(arr_dd>=0) # passing score for our purpose
except: dd_passing = np.nan
try: dta_passing = (np.count_nonzero(arr_dta<=1)-np.count_nonzero(arr_dta<0))/np.count_nonzero(arr_dta>=0) # passing score for our purpose
except: dta_passing = np.nan
nautofail = np.count_nonzero(np.isinf(arr_gamma))/np.count_nonzero(arr_gamma>=0)
ax.text(1.0, 1.0, 'gamma: {:0.2f}%\ndd: {:0.2f}%\ndta: {:0.2f}%\ninf: {:0.2f}%'.format(gamma_passing*100, dd_passing*100, dta_passing*100, nautofail*100),
fontsize=11,
bbox={'facecolor':'white', 'alpha':1.0},
horizontalalignment='right',
verticalalignment='top',
transform=ax.transAxes,
)
return ax
def plot_gamma_summary(arr_dd, arr_dta, arr_gamma, dd_thresh, dta_thresh, colorbar=True, annotate=None, dpi=FIG_DPI*2, **kwargs):
fig = plt.figure(dpi=dpi)
gs_l = gridspec.GridSpec(2, 2, fig, right=0.87)
gs_r = gridspec.GridSpec(1, 1, fig, left=0.89)
ax_im_gamma = fig.add_subplot(gs_l[0,0])
ax_im_scatter = fig.add_subplot(gs_l[0,1])
ax_im_components = fig.add_subplot(gs_l[1,:])
ax_cbar = fig.add_subplot(gs_r[:,:])
_plot_gamma(fig, ax_im_gamma, ax_cbar, arr_gamma)
_plot_gamma_scatter(fig, ax_im_scatter, arr_dd, arr_dta, arr_gamma, dd_thresh, dta_thresh)
_plot_gamma_components(fig, ax_im_components, None, arr_dd, arr_dta)
fig.tight_layout()
return figure2array(fig)
def register_custom_scalars_layout(writer):
"""define custom plotting in 'Custom Scalars' tab of TensorBoard"""
layout_summary = summary_lib.custom_scalar_pb(
layout_pb2.Layout(category=[
layout_pb2.Category(
title="all",
chart=[
layout_pb2.Chart(
title='loss',
multiline=layout_pb2.MultilineChartContent(
tag=[r'train/loss',r'eval/loss/test', r'eval/loss/train'],
)
),
layout_pb2.Chart(
title='eval-avg_gammapass/0.1mm_0.1%',
multiline=layout_pb2.MultilineChartContent(
tag=[r'eval-avg_gammapass/0.1mm_0.1%/.*'],
)
),
layout_pb2.Chart(
title='eval-avg_gammapass/0.2mm_0.2%',
multiline=layout_pb2.MultilineChartContent(
tag=[r'eval-avg_gammapass/0.2mm_0.2%/.*'],
)
),
layout_pb2.Chart(
title='eval-avg_gammapass/0.5mm_0.5%',
multiline=layout_pb2.MultilineChartContent(
tag=[r'eval-avg_gammapass/0.5mm_0.5%/.*'],
)
),
layout_pb2.Chart(
title='eval-avg_gammapass/1.0mm_1.0%',
multiline=layout_pb2.MultilineChartContent(
tag=[r'eval-avg_gammapass/1.0mm_1.0%/.*'],
)
),
layout_pb2.Chart(
title='eval-avg_gammapass/2.0mm_2.0%',
multiline=layout_pb2.MultilineChartContent(
tag=[r'eval-avg_gammapass/2.0mm_2.0%/.*'],
)
),
layout_pb2.Chart(
title='MSE',
multiline=layout_pb2.MultilineChartContent(
tag=[r'.*mse.*'],
)
),
],
),
])
)
writer.add_summary(layout_summary)
def tile(array_list, perrow, square=False, pad_width=5, pad_intensity=1000):
"""Takes a list of arrays and number of images per row and constructs a tiled array for margin-less
visualization
Args:
array_list -- list of np.ndarrays to be tiled in row-major order
perrow -- integer specifying number of images per row
Optional Args:
square -- Try to make length and width equal by tiling vertical columns side-by-side
pad_width -- # columns between vertical tiling columns
pad_intensity -- # intensity value of padding cells
Returns:
numpy matrix/2dArray
"""
# setup
if (not isinstance(array_list, list)):
array_list_old = array_list
ndims = len(array_list_old.shape)
if (ndims == 3):
array_list = []
array_list_old_2dshape = (array_list_old.shape[1], array_list_old.shape[2])
for i in range(array_list_old.shape[0]):
array_list.append(array_list_old[i, :, :].reshape(array_list_old_2dshape))
elif (ndims == 2):
array_list = [array_list_old]
nimages = len(array_list)
expect_row_shape = (array_list[0].shape[0], perrow * array_list[0].shape[1])
# make concatenated rows
rows_list = []
this_row_array = None
for i in range(nimages+1):
        if (i % perrow == 0) or i >= nimages:
# add previous row to list
if (i > 0):
rows_list.append(this_row_array)
this_row_array = None
# start new row
if i < nimages:
this_row_array = array_list[i]
else:
# add to row
this_row_array = np.concatenate((this_row_array, array_list[i]), axis=1)
# extend short rows with zeros
for i, row in enumerate(rows_list):
if (row.shape != expect_row_shape):
extra = np.zeros((expect_row_shape[0], expect_row_shape[1] - row.shape[1]))
row = np.concatenate((row, extra), axis=1)
rows_list[i] = row
# concatenate rows into matrix
if (square):
# try to make length and width equal by tiling vertically, leaving a space and continuing in
# another column to the right
if (pad_width >= 0):
pad = pad_width
else:
pad = 0
if (pad_intensity <= 0):
pad_intensity = 0
rows = len(rows_list) * expect_row_shape[0]
cols = expect_row_shape[1]
# get area, then find side length that will work best
area = rows * cols
pref_rows = math.ceil((math.sqrt(area) / expect_row_shape[0]))
# pref_cols = int(area / (pref_rows * expect_row_shape[0]) / expect_row_shape[1]) + 1
# construct matrix
cols_list = []
this_col_array = []
for i in range(len(rows_list)+1):
if (i%pref_rows == 0) or i >= len(rows_list):
if (i>0):
# add previous column to list
cols_list.append(this_col_array)
if i>= len(rows_list):
break
if (pad > 0 and i < len(rows_list)-1):
# add padding column
cols_list.append(pad_intensity * np.ones((pref_rows * expect_row_shape[0], pad)))
# start new column
this_col_array = rows_list[i]
else:
# add to column
this_col_array = np.concatenate((this_col_array, rows_list[i]), axis=0)
# extend short cols with zeros
for i, col in enumerate(cols_list):
if (col.shape[0] != pref_rows * expect_row_shape[0]):
extra = np.zeros((expect_row_shape[0] * pref_rows - col.shape[0], expect_row_shape[1]))
row = np.concatenate((col, extra), axis=0)
cols_list[i] = row
tiled_array = np.concatenate(cols_list, axis=1)
else:
tiled_array = np.concatenate(rows_list, axis=0)
return tiled_array
def vis_slice(arrbot, arrtop, thresh=1e-4, opacity=0.5, ax=None):
if ax is None:
fig = plt.figure(figsize=(10,10))
ax = fig.add_axes([0,0,1,1])
ax.set_xticks([])
ax.set_yticks([])
cmap = cm.viridis
ntop = colors.Normalize()(arrtop)
ctop = cmap(ntop)
    alphamap = np.ones_like(arrtop)
# Copyright (c) OpenMMLab. All rights reserved.
import functools
import operator
import cv2
import numpy as np
import pyclipper
import torch
from mmcv.ops import contour_expand, pixel_group
from numpy.fft import ifft
from numpy.linalg import norm
from shapely.geometry import Polygon
from skimage.morphology import skeletonize
from mmocr.core import points2boundary
from mmocr.core.evaluation.utils import boundary_iou
def filter_instance(area, confidence, min_area, min_confidence):
return bool(area < min_area or confidence < min_confidence)
def decode(
decoding_type='pan', # 'pan' or 'pse'
**kwargs):
if decoding_type == 'pan':
return pan_decode(**kwargs)
if decoding_type == 'pse':
return pse_decode(**kwargs)
if decoding_type == 'db':
return db_decode(**kwargs)
if decoding_type == 'textsnake':
return textsnake_decode(**kwargs)
if decoding_type == 'fcenet':
return fcenet_decode(**kwargs)
if decoding_type == 'drrg':
return drrg_decode(**kwargs)
raise NotImplementedError
def pan_decode(preds,
text_repr_type='poly',
min_text_confidence=0.5,
min_kernel_confidence=0.5,
min_text_avg_confidence=0.85,
min_text_area=16):
"""Convert scores to quadrangles via post processing in PANet. This is
partially adapted from https://github.com/WenmuZhou/PAN.pytorch.
Args:
preds (tensor): The head output tensor of size 6xHxW.
text_repr_type (str): The boundary encoding type 'poly' or 'quad'.
min_text_confidence (float): The minimal text confidence.
min_kernel_confidence (float): The minimal kernel confidence.
min_text_avg_confidence (float): The minimal text average confidence.
min_text_area (int): The minimal text instance region area.
Returns:
boundaries: (list[list[float]]): The instance boundary and its
instance confidence list.
"""
preds[:2, :, :] = torch.sigmoid(preds[:2, :, :])
preds = preds.detach().cpu().numpy()
text_score = preds[0].astype(np.float32)
text = preds[0] > min_text_confidence
kernel = (preds[1] > min_kernel_confidence) * text
embeddings = preds[2:].transpose((1, 2, 0)) # (h, w, 4)
region_num, labels = cv2.connectedComponents(
kernel.astype(np.uint8), connectivity=4)
contours, _ = cv2.findContours((kernel * 255).astype(np.uint8),
cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
kernel_contours = np.zeros(text.shape, dtype='uint8')
cv2.drawContours(kernel_contours, contours, -1, 255)
text_points = pixel_group(text_score, text, embeddings, labels,
kernel_contours, region_num,
min_text_avg_confidence)
boundaries = []
for text_inx, text_point in enumerate(text_points):
text_confidence = text_point[0]
text_point = text_point[2:]
text_point = np.array(text_point, dtype=int).reshape(-1, 2)
area = text_point.shape[0]
if filter_instance(area, text_confidence, min_text_area,
min_text_avg_confidence):
continue
vertices_confidence = points2boundary(text_point, text_repr_type,
text_confidence)
if vertices_confidence is not None:
boundaries.append(vertices_confidence)
return boundaries
def pse_decode(preds,
text_repr_type='poly',
min_kernel_confidence=0.5,
min_text_avg_confidence=0.85,
min_kernel_area=0,
min_text_area=16):
"""Decoding predictions of PSENet to instances. This is partially adapted
from https://github.com/whai362/PSENet.
Args:
preds (tensor): The head output tensor of size nxHxW.
text_repr_type (str): The boundary encoding type 'poly' or 'quad'.
min_text_confidence (float): The minimal text confidence.
min_kernel_confidence (float): The minimal kernel confidence.
min_text_avg_confidence (float): The minimal text average confidence.
min_kernel_area (int): The minimal text kernel area.
min_text_area (int): The minimal text instance region area.
Returns:
boundaries: (list[list[float]]): The instance boundary and its
instance confidence list.
"""
preds = torch.sigmoid(preds) # text confidence
score = preds[0, :, :]
masks = preds > min_kernel_confidence
text_mask = masks[0, :, :]
kernel_masks = masks[0:, :, :] * text_mask
score = score.data.cpu().numpy().astype(np.float32) # to numpy
kernel_masks = kernel_masks.data.cpu().numpy().astype(np.uint8) # to numpy
region_num, labels = cv2.connectedComponents(
kernel_masks[-1], connectivity=4)
# labels = pse(kernel_masks, min_kernel_area)
labels = contour_expand(kernel_masks, labels, min_kernel_area, region_num)
labels = np.array(labels)
label_num = np.max(labels)
boundaries = []
for i in range(1, label_num + 1):
points = np.array(np.where(labels == i)).transpose((1, 0))[:, ::-1]
area = points.shape[0]
score_instance = np.mean(score[labels == i])
if filter_instance(area, score_instance, min_text_area,
min_text_avg_confidence):
continue
vertices_confidence = points2boundary(points, text_repr_type,
score_instance)
if vertices_confidence is not None:
boundaries.append(vertices_confidence)
return boundaries
def box_score_fast(bitmap, _box):
h, w = bitmap.shape[:2]
box = _box.copy()
xmin = np.clip(np.floor(box[:, 0].min()).astype(np.int32), 0, w - 1)
xmax = np.clip(np.ceil(box[:, 0].max()).astype(np.int32), 0, w - 1)
ymin = np.clip(np.floor(box[:, 1].min()).astype(np.int32), 0, h - 1)
ymax = np.clip(np.ceil(box[:, 1].max()).astype(np.int32), 0, h - 1)
mask = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8)
box[:, 0] = box[:, 0] - xmin
box[:, 1] = box[:, 1] - ymin
cv2.fillPoly(mask, box.reshape(1, -1, 2).astype(np.int32), 1)
return cv2.mean(bitmap[ymin:ymax + 1, xmin:xmax + 1], mask)[0]
def unclip(box, unclip_ratio=1.5):
poly = Polygon(box)
distance = poly.area * unclip_ratio / poly.length
offset = pyclipper.PyclipperOffset()
offset.AddPath(box, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)
expanded = np.array(offset.Execute(distance))
return expanded
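# The offset distance above is area * unclip_ratio / perimeter of the shrunk
# polygon, which matches the box dilation rule described in the DB paper.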
def db_decode(preds,
text_repr_type='poly',
mask_thr=0.3,
min_text_score=0.3,
min_text_width=5,
unclip_ratio=1.5,
max_candidates=3000):
"""Decoding predictions of DbNet to instances. This is partially adapted
from https://github.com/MhLiao/DB.
Args:
preds (Tensor): The head output tensor of size nxHxW.
text_repr_type (str): The boundary encoding type 'poly' or 'quad'.
mask_thr (float): The mask threshold value for binarization.
min_text_score (float): The threshold value for converting binary map
to shrink text regions.
min_text_width (int): The minimum width of boundary polygon/box
predicted.
unclip_ratio (float): The unclip ratio for text regions dilation.
max_candidates (int): The maximum candidate number.
Returns:
boundaries: (list[list[float]]): The predicted text boundaries.
"""
prob_map = preds[0, :, :]
text_mask = prob_map > mask_thr
score_map = prob_map.data.cpu().numpy().astype(np.float32)
text_mask = text_mask.data.cpu().numpy().astype(np.uint8) # to numpy
contours, _ = cv2.findContours((text_mask * 255).astype(np.uint8),
cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
boundaries = []
for i, poly in enumerate(contours):
if i > max_candidates:
break
epsilon = 0.01 * cv2.arcLength(poly, True)
approx = cv2.approxPolyDP(poly, epsilon, True)
points = approx.reshape((-1, 2))
if points.shape[0] < 4:
continue
score = box_score_fast(score_map, points)
if score < min_text_score:
continue
poly = unclip(points, unclip_ratio=unclip_ratio)
if len(poly) == 0 or isinstance(poly[0], list):
continue
poly = poly.reshape(-1, 2)
if text_repr_type == 'quad':
poly = points2boundary(poly, text_repr_type, score, min_text_width)
elif text_repr_type == 'poly':
poly = poly.flatten().tolist()
if score is not None:
poly = poly + [score]
if len(poly) < 8:
poly = None
else:
raise ValueError(f'Invalid text repr type {text_repr_type}')
if poly is not None:
boundaries.append(poly)
return boundaries
def fill_hole(input_mask):
h, w = input_mask.shape
canvas = np.zeros((h + 2, w + 2), np.uint8)
canvas[1:h + 1, 1:w + 1] = input_mask.copy()
mask = np.zeros((h + 4, w + 4), np.uint8)
cv2.floodFill(canvas, mask, (0, 0), 1)
canvas = canvas[1:h + 1, 1:w + 1].astype(np.bool)
return ~canvas | input_mask
def centralize(points_yx,
normal_sin,
normal_cos,
radius,
contour_mask,
step_ratio=0.03):
h, w = contour_mask.shape
top_yx = bot_yx = points_yx
step_flags = np.ones((len(points_yx), 1), dtype=np.bool)
    step = step_ratio * radius * np.hstack([normal_sin, normal_cos])
import numpy as np
import tensorflow as tf
from BeamSearch import BeamSearch
from BeamSearch_Poisson import BeamSearch_Poisson
from LengthModule import LengthModule
import glob
import os
from keras.utils import np_utils
from keras.optimizers import rmsprop
from keras.models import load_model
import metrics
import VisualModel_utils
print (tf.VERSION) # 1.13.1, and Keras version is 2.2.4, PYTHON VERSION 3
dir="./"
var_save_path='./'
num_epochs=41
keep_p=0.895851031640025
hard_label=False
beam_size=150
n_classes=48
n_atomic_actions=14
n_length=7 #output of the length model (# last nodes)
regul=0.0001
obj_regul=0.0001
BG=0 #background index
#exponennts for the action selector module:
zeta_=1 #for the object selector
beta_=40 #for the verb selector
lambda_=1 #for the action recognizer
#params for the TCFPN
n_nodes = [48, 64, 96]
conv_len = 25
vis_n_epoch=100
vis_batch_size=8
# End of params for the TCFPN
nObjects=19
Video_Info_Size=n_atomic_actions#13
Video_Obj_Info_Size=nObjects#18
def FindStep4AtomicActions(Sec_Ahead,nlength_bin,mode):
step = {}
gamma={}
for a, l in Sec_Ahead:
L = l #
if a in step:
step[a].append(L)
else:
step[a]=[]
step[a].append(L)
for a in step:
gamma[a] = max(int(np.average(step[a])), 1)
if mode=='mid-median': #median is the middle bin
step[a] = int(np.median(step[a]) // ((nlength_bin // 2) + 1))
elif mode=='median': #median is the max
step[a]=int( np.median(step[a]) // nlength_bin )
elif mode == 'average': #average is the max
step[a] = int(np.average(step[a]) // nlength_bin)
elif mode=='maximum': #max is the max length
step[a] = int(np.max(step[a]) // nlength_bin)
else:
assert 1==0," mode not specified!!!!!"
if step[a]==0:
step[a]=1
# print("step 0 modified")
return step,gamma
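# step[a] is an integer bin width (>= 1) used to quantise the remaining duration
# of atomic action a into the length bins, picked by `mode` (median-, average-
# or maximum-based); gamma[a] is the rounded mean observed duration of action a.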
def map_action2label(action_obj_list):
action2label_dict={}
with open(action_obj_list, 'r') as action_obj_file:
for (i, line) in enumerate(action_obj_file):
item= line.split(' ')
if item[1] not in action2label_dict:
action2label_dict[item[1]]=[item[0],item[2],item[3],item[4][:-1]]
return action2label_dict
def FindMinDuration4Video(pseudo_gt):
min_length=[]
for video in pseudo_gt:
temp = video[1:] - video[0:-1]
idx = np.where(temp != 0)
if np.sum(idx)==0:
min_length.append(len(video))
continue
idx = idx[0] + 1
idx = np.append(idx, len(video))
MIN=np.min(idx[1:]-idx[0:-1])
MIN=min(MIN,idx[0])
min_length.append(MIN)
return min_length
def FindMaxDuration4Actions(X_actions,N,sample_rate):
length_cap = {}
for a, l in zip(X_actions, N):
L=(l+1)*sample_rate #
if a in length_cap:
if L> length_cap[a]:
length_cap[a] =L
else:
length_cap[a] = L
return length_cap
def ignore_repetition(actions):
actions = np.asarray(actions)
temp = actions[1:] - actions[0:-1]
idx = np.where(temp != 0)
if np.sum(idx)==0:
u_action=np.asarray([actions[-1]])
return u_action
u_action = actions[idx]
    if len(actions) != 0:
u_action = np.append(u_action, actions[-1])
return u_action
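# Example (illustrative): ignore_repetition([3, 3, 5, 5, 5, 2]) -> array([3, 5, 2]);
# consecutive duplicates collapse while the order of distinct segments is kept.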
def determine_class_weight(labels,nclasses,hard):
#labels : [#samples], assuming 0<=labels[i]<nclasses
w=[x for x in [0]*nclasses]
for i in range(len(labels)):
if type(labels[i])==list:
for j in range(len(labels[i])):
if not hard:
klasse=np.argmax(labels[i][:])
else:
klasse = labels[i][j]
w[klasse]=w[klasse]+1
else:
if not hard:
klasse = np.argmax(labels[i][:])
w[klasse] = w[klasse] + 1
elif labels[i]<nclasses:
klasse = labels[i]
w[klasse] = w[klasse] + 1
# assert np.sum(np.asarray(w))==len(labels),'Error in weight calculation'
w=np.asarray(w)
class_cts = (1 / w) ** 0.5
class_cts /= (1 / nclasses * np.sum(class_cts))
# print("The distribution of classes for the training set: ")
# print(w/np.sum(w))
return class_cts
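# The returned weights are inverse square-root class frequencies, rescaled so the
# mean class weight is 1: rare classes get weights above 1, frequent ones below.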
def Read_Dataset(dir):
dict_word_to_index_file_name = 'action_object_list.txt'
action2label =map_action2label(dict_word_to_index_file_name)
data = [[] for i in range(8)] # x,y for 4 splits
gt_per_sec=[[] for i in range(4)]
all = ["P%02d" % i for i in range(3, 56)]
splits = [all[:13], all[13:26], all[26:39], all[39:52]]
features = glob.glob(os.path.join(dir,'features','*.txt'))
transcripts = glob.glob(os.path.join(dir, 'labels','*.txt'))
features.sort()
transcripts.sort()
print('Loading the dataset...Hold On Please...')
for f_file, t_file in zip(features, transcripts):
person=t_file.split('_')[0].split('/')[-1]
for i, split in enumerate(splits):
if person in split:
feature =np.loadtxt(f_file)
actions = open(t_file).readlines()
gt_per_frame = np.repeat(0, int(actions[-1].split()[0].split('-')[1]))
for act in actions:
tm, lb = act.split()
gt_per_frame[(int(tm.split('-')[0]) - 1):int(tm.split('-')[1])] = int(action2label[lb][0])
n = min(len(feature), len(gt_per_frame)) # make sure the same length
action_labels = ignore_repetition(gt_per_frame[5:n:15]) # make sure there is no consecutive repetition
data[2 * i].append(feature[5:n, 1:]) # feature
data[2 * i + 1].append(action_labels) # labels
gt_per_sec[i].append(gt_per_frame[5:n:15])
return data,gt_per_sec
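# Layout note: data[2*i] holds the per-frame features and data[2*i+1] the
# de-duplicated action transcripts of split i+1, while gt_per_sec[i] keeps the
# per-second labels (every 15th frame, starting at frame 5) used for evaluation.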
data,gt_per_sec=Read_Dataset(dir)
# gt_per_sec=np.load("gt_per_sec.npy")
# data=np.load("data.npy")
print("Data Loaded")
action2atomic_action={0:0,1:2,2:1,3:1,4:3,5:2,6:1,7:1,8:8,9:3,10:1,11:5,12:6,13:7,14:2,15:8,16:2,17:9,18:4,19:2,20:11,21:1,22:2,23:2,24:8,25:3,26:8,27:3,28:1,29:6,30:7,
31:1,32:10,33:7,34:12,35:3,36:4,37:2,38:13,39:2,40:7,41:7,42:3,43:1,44:6,45:8,46:1,47:3}
action2object_map={0:18,1:0,2:1,3:2,4:0,5:3,6:4,7:3,8:3,9:3,10:5,11:6,12:5,13:6,14:7,15:7,16:6,17:8,18:9,19:11,20:11,21:11,22:3,23:10,24:12,25:3,26:17,27:0,
28:5,29:5,30:13,31:17,32:9,33:9,34:9,35:0,36:14,37:8,38:10,39:15,40:15,41:14,42:0,43:5,44:5,45:16,46:4,47:3}
for split in [1,2,3,4]:
######### Generate the data for this split###############
#########################################################
x_train=[]
y_train=[]
per_sec_y_train=[]
x_val=[]
y_val=[]
ft=True
print("Split Number: "+ str(split))
for i in [1,2,3,4]:
if i!=split:
x_train=x_train+data[(i-1)*2]
y_train=y_train+data[(i-1)*2+1]
per_sec_y_train =per_sec_y_train+ gt_per_sec[i - 1]
x_test=data[(split-1)*2]
y_test=data[(split-1)*2+1]
per_sec_y_test = gt_per_sec[split - 1]
# frame(second)-level pseudo-gt of the baseline (alignment output of their trained model on the training data)
pseudo_gt_total = np.load('predictions%d.npy' % split)
# Trained TCFPN model
Vis_model = load_model('Vis_model%d_final.h5' % split)
pseudo_gt_val=[]
pseudo_gt = []
end=0
pseudo_gt_total = list(pseudo_gt_total)
pseudo_gt=pseudo_gt_total
######### End ###############
#########################################################
# Vis_model belongs to <NAME>
# https://github.com/Zephyr-D/TCFPN-ISBA
Vis_model.compile(optimizer=rmsprop(lr=1e-6), loss='categorical_crossentropy', sample_weight_mode="temporal")
########################################################################
########################################################################
########################################################################
print(' ')
x_train_per_sec = [i[::15] for i in x_train]
x_test_per_sec = [i[::15] for i in x_test]
x_val_per_sec = [i[::15] for i in x_val]
max_len = int(np.max([i.shape[0] for i in x_train_per_sec + x_test_per_sec+x_val_per_sec]))
max_len = int(np.ceil(np.float(max_len) / (2 ** len(n_nodes)))) * 2 **len(n_nodes)
class_cts = np.array([sum([np.sum(j == i) for j in y_train]) for i in range(n_classes)])
class_cts = (1 / class_cts) ** 0.5
class_cts /= (1 / n_classes * np.sum(class_cts))
class_weight = dict(zip(range(n_classes), class_cts))
# One-hot encoding
Y_test = [np_utils.to_categorical(y, n_classes) for y in per_sec_y_test]
X_test_m, Y_test_m, M_test = VisualModel_utils.mask_data(x_test_per_sec, Y_test, max_len, mask_value=-1)
VisModelTempPred_test_pre1 = Vis_model.predict(X_test_m[0:], verbose=1)
VisModelTempPred_test_pre = VisualModel_utils.unmask(VisModelTempPred_test_pre1, M_test[0:])
prev_meth_pre = [np.argmax(video, axis=1) for video in VisModelTempPred_test_pre]
X_train_m, _, M_train = VisualModel_utils.mask_data(x_train_per_sec, [], max_len, mask_value=-1)
VisModelTempPred = Vis_model.predict(X_train_m, verbose=1)
g_1 = tf.Graph()
with g_1.as_default():
LengthNet = LengthModule(graph=g_1,nClasses=n_atomic_actions,nActions=n_classes,nObjects=nObjects, length=n_length,
video_info_size=Video_Info_Size,video_obj_info_size=Video_Obj_Info_Size,pre_f_size=64, h_size=64, emb_size=32,num_layers=1, feature_size=64,
batch_size=64, step=3, duration=1*60, sample_rate=4, hard_label=hard_label)
##### adding video-level labels
vid_object_info_test=np.squeeze(LengthNet.create_categorical_map_label(action2object_map, y_test, nObjects))
vid_object_info_tr = np.squeeze(LengthNet.create_categorical_map_label(action2object_map, y_train, nObjects))
vid_info_test=np.squeeze(LengthNet.create_categorical_map_label(action2atomic_action, y_test, n_atomic_actions))
vid_info_tr =np.squeeze( LengthNet.create_categorical_map_label(action2atomic_action, y_train,n_atomic_actions))
##### end of video-level labels
######### Training the length model #####################
#########################################################
print('Training the length model')
dt_4_length_pred = LengthNet.generate_X_Y(x_train, pseudo_gt,action2atomic_action,action2object_map,True,pseudo_gt,vid_info_tr,vid_object_info_tr)
dt_4_length_pred_val = LengthNet.generate_X_Y(x_train, per_sec_y_train,action2atomic_action,action2object_map,False,per_sec_y_train,vid_info_tr,vid_object_info_tr)
X_features, X_actions, Y, N, Sec_Ahead, X_48actions, objects, per_vid_info, per_vid_obj_info = dt_4_length_pred
data_f_val, data_a_val, labels_val, nSeg_val, Sec_Ahead_val, X_48actions_val, objects_val, per_vid_info_val, per_vid_obj_info_val = dt_4_length_pred_val
objects_categorical = LengthNet.create_categorical_label( objects, nObjects)
objects_categorical_val = LengthNet.create_categorical_label(objects_val, nObjects)
with g_1.as_default():
LengthNet.model()
if ft:
lr=5.062003853160735e-05
obj_lr=5.062003853160735e-05
else:
lr=5.062003853160735e-05
obj_lr=lr
min_length_list=FindMinDuration4Video(pseudo_gt)
length_cap=FindMaxDuration4Actions(X_48actions,N,LengthNet.sample_rate)
length_cap_sorted = sorted(length_cap.items(), key=lambda s: s[1])
action2step_map,gamma = FindStep4AtomicActions(Sec_Ahead, n_length, 'median')
action2bin_size,gamma = FindStep4AtomicActions(Sec_Ahead, n_length, 'mid-median')
Y = LengthNet.create_adaptive_soft_labels(action2bin_size, Y, n_length,X_actions) # input Y is int, but the output is the "soft" one-hot vector
labels_val = LengthNet.create_adaptive_soft_labels(action2bin_size, labels_val, n_length, data_a_val)
length_class_weight=determine_class_weight(Y,n_length,hard_label)
base_class_weight = determine_class_weight(X_actions, n_atomic_actions, True)
object_class_weight = determine_class_weight(objects, nObjects, True)
N = LengthNet.rescale_N(action2bin_size, N, n_length, X_actions)
nSeg_val = LengthNet.rescale_N(action2bin_size, nSeg_val, n_length, data_a_val)
training_pack = [X_features, X_actions, Y, N, X_48actions, objects_categorical, per_vid_info, per_vid_obj_info]
valid_pack = [data_f_val, data_a_val, labels_val, nSeg_val, X_48actions_val, objects_categorical_val,per_vid_info_val, per_vid_obj_info_val]
weights=[length_class_weight,base_class_weight,object_class_weight]
with g_1.as_default():
LengthNet.train(ft,training_pack,valid_pack,weights,keep_p, var_save_path,num_epochs, lr=lr, regul=regul)
# clean up: drop the references directly so the large arrays can be garbage-collected
del training_pack, valid_pack, X_features, X_actions, Y, N, data_f_val, data_a_val, labels_val, nSeg_val
ft=False
########################################################
########################################################
######## Find the best alignment with beam search #######
#########################################################
print('Finding the best alignment with beam search')
# beam_alg=BeamSearch_Poisson(zeta_,beta_,lambda_)#uncomment for the Poisson Model
beam_alg = BeamSearch(zeta_,beta_,lambda_) #uncomment for the Duration Network
VisModelTempPred=VisualModel_utils.unmask(VisModelTempPred,M_train)
# LAtuples = beam_alg.search(var_save_path, x_train[:], y_train[:], vid_info_tr[:], vid_object_info_tr[:], VisModelTempPred[:],min_length_list, LengthNet, beam_size, length_cap, action2atomic_action,action2object_map, True, action2step_map, action2bin_size,gamma) #uncomment for the poisson model
LAtuples = beam_alg.search(var_save_path, x_train, y_train,vid_info_tr,vid_object_info_tr, VisModelTempPred,min_length_list, LengthNet,beam_size,action2atomic_action,action2object_map,True,action2step_map,action2bin_size) # a list of (lengths,actions) for all videos #uncomment for the Duration model
prev_pseudo_gt=pseudo_gt
pseudo_gt=[]
for (lengths,actions) in LAtuples: #iterate thru vids
video_length_sec=np.sum(lengths)
_y=[]
for L,A in zip(lengths,actions):
temp = list(A*np.ones(L,dtype=np.int64))
_y=_y+temp
_y=np.asarray(_y)
pseudo_gt.append(_y)
#######################################################
prev_pseudo_gt=prev_pseudo_gt[:]
per_sec_y_train=per_sec_y_train[:]
t = [np.sum(pseudo_gt[i] != prev_pseudo_gt[i]) for i in range(len(pseudo_gt))]
T = [len(pseudo_gt[i]) for i in range(len(pseudo_gt))]
diff_percentage = np.sum(t) / np.sum(T)
# diff_percentage=[np.sum(pseudo_gt[i]!=prev_pseudo_gt[i])/len(pseudo_gt[i]) for i in range(len(pseudo_gt))]
print("The average frame diff percentage is " + str(np.average(diff_percentage)))
from __future__ import division, print_function, absolute_import
import numpy as np
# Equation A29 to A33
def kichain(xhix):
xhix2 = xhix**2
xhix3 = xhix**3
xhix4 = xhix2**2
xhix_1 = (1 - xhix)
xhix_13 = xhix_1**3
k0 = -np.log(xhix_1) + (42*xhix - 39*xhix2 + 9*xhix3 - 2*xhix4)/(6*xhix_13)
k1 = (xhix4 + 6*xhix2 - 12*xhix)/(2*xhix_13)
k2 = -3*xhix2/(8*xhix_1**2)
k3 = (-xhix4 + 3*xhix2 + 3*xhix)/(6*xhix_13)
return np.array([k0, k1, k2, k3])
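# Note added for clarity: the coefficients above (Eqs. A29-A33) enter the
# hard-sphere cavity function used below as
#   g^HS(x0) = exp(k0 + k1*x0 + k2*x0**2 + k3*x0**3).
# Minimal sketch (the packing fraction value is illustrative only):
def _example_kichain():
    return kichain(0.3)  # -> array([k0, k1, k2, k3]) evaluated at xhix = 0.3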
def dkichain_dxhi00(xhix, dxhix_dxhi00):
xhix2 = xhix**2
xhix3 = xhix2*xhix
xhix4 = xhix3*xhix
xhix_1 = (1 - xhix)
xhix_13 = xhix_1**3
xhix_14 = xhix_13*xhix_1
k0 = -np.log(xhix_1) + (42*xhix - 39*xhix2 + 9*xhix3 - 2*xhix4)/(6*xhix_13)
k1 = (xhix4 + 6*xhix2 - 12*xhix)/(2*xhix_13)
k2 = -3*xhix2/(8*xhix_1**2)
k3 = (-xhix4 + 3*xhix2 + 3*xhix)/(6*xhix_13)
dk0 = 24 + xhix * (-6 + xhix * (3 - 7 * xhix + xhix2))
dk0 *= dxhix_dxhi00 / 3. / xhix_14
dk1 = 12 + xhix * (2+xhix) * (6 - 6 * xhix + xhix2)
dk1 *= -dxhix_dxhi00 / 2. / xhix_14
dk2 = 3*xhix
dk2 *= dxhix_dxhi00 / 4. / -xhix_13
dk3 = 3 + xhix * (12 + (-3 + xhix) * (-xhix + xhix2))
dk3 *= dxhix_dxhi00 / 6. / xhix_14
dks = np.array([[k0, k1, k2, k3], [dk0, dk1, dk2, dk3]])
return dks
def d2kichain_dxhi00(xhix, dxhix_dxhi00):
dxhix_dxhi00_2 = dxhix_dxhi00**2
xhix2 = xhix**2
xhix3 = xhix2*xhix
xhix4 = xhix3*xhix
xhix_1 = (1 - xhix)
xhix_13 = xhix_1**3
xhix_14 = xhix_13*xhix_1
xhix_15 = xhix_14*xhix_1
k0 = -np.log(xhix_1) + (42*xhix - 39*xhix2 + 9*xhix3 - 2*xhix4)/(6*xhix_13)
k1 = (xhix4 + 6*xhix2 - 12*xhix)/(2*xhix_13)
k2 = -3*xhix2/(8*xhix_1**2)
k3 = (-xhix4 + 3*xhix2 + 3*xhix)/(6*xhix_13)
dk0 = 24 + xhix * (-6 + xhix * (3 - 7 * xhix + xhix2))
dk0 *= dxhix_dxhi00 / 3. / xhix_14
dk1 = 12 + xhix * (2+xhix) * (6 - 6 * xhix + xhix2)
dk1 *= -dxhix_dxhi00 / 2. / xhix_14
dk2 = 3*xhix
dk2 *= dxhix_dxhi00 / 4. / -xhix_13
dk3 = 3 + xhix * (12 + (-3 + xhix) * (-xhix + xhix2))
dk3 *= dxhix_dxhi00 / 6. / xhix_14
d2k0 = 3 * (-30 + xhix * (1+xhix) * (4 + xhix))
d2k0 *= dxhix_dxhi00_2 / 3. / -xhix_15
d2k1 = 12 * (5 - 2 * (-1 + xhix) * xhix)
d2k1 *= dxhix_dxhi00_2 / 2. / -xhix_15
d2k2 = -3*(1+2*xhix)
d2k2 *= dxhix_dxhi00_2 / 4. / xhix_14
d2k3 = 6*(-4 + xhix * (-7 + xhix))
d2k3 *= dxhix_dxhi00_2 / 6. / -xhix_15
d2ks = np.array([[k0, k1, k2, k3],
[dk0, dk1, dk2, dk3],
[d2k0, d2k1, d2k2, d2k3]])
return d2ks
def dkichain_dx(xhix, dxhix_dx):
xhix2 = xhix**2
xhix3 = xhix2*xhix
xhix4 = xhix3*xhix
xhix_1 = (1 - xhix)
xhix_13 = xhix_1**3
xhix_14 = xhix_13*xhix_1
k0 = -np.log(xhix_1) + (42*xhix - 39*xhix2 + 9*xhix3 - 2*xhix4)/(6*xhix_13)
k1 = (xhix4 + 6*xhix2 - 12*xhix)/(2*xhix_13)
k2 = -3*xhix2/(8*xhix_1**2)
k3 = (-xhix4 + 3*xhix2 + 3*xhix)/(6*xhix_13)
dk0 = 24 + xhix * (-6 + xhix * (3 - 7 * xhix + xhix2))
dk0 *= dxhix_dx / 3. / xhix_14
dk1 = 12 + xhix * (2+xhix) * (6 - 6 * xhix + xhix2)
dk1 *= -dxhix_dx / 2. / xhix_14
dk2 = 3*xhix
dk2 *= dxhix_dx / 4. / -xhix_13
dk3 = 3 + xhix * (12 + (-3 + xhix) * (-xhix + xhix2))
dk3 *= dxhix_dx / 6. / xhix_14
ks = np.array([k0, k1, k2, k3])
dksx = np.array([dk0, dk1, dk2, dk3]).T
return ks, dksx
def dkichain_dx_dxhi00(xhix, dxhix_dxhi00, dxhix_dx):
xhix2 = xhix**2
xhix3 = xhix2*xhix
xhix4 = xhix3*xhix
xhix_1 = (1 - xhix)
xhix_13 = xhix_1**3
xhix_14 = xhix_13*xhix_1
k0 = -np.log(xhix_1) + (42*xhix - 39*xhix2 + 9*xhix3 - 2*xhix4)/(6*xhix_13)
k1 = (xhix4 + 6*xhix2 - 12*xhix)/(2*xhix_13)
k2 = -3*xhix2/(8*xhix_1**2)
k3 = (-xhix4 + 3*xhix2 + 3*xhix)/(6*xhix_13)
aux_k0 = 24 + xhix * (-6 + xhix * (3 - 7 * xhix + xhix2))
aux_k0 /= 3.*xhix_14
aux_k1 = 12 + xhix * (2+xhix) * (6 - 6 * xhix + xhix2)
aux_k1 /= -2.*xhix_14
aux_k2 = 3*xhix / 4. / -xhix_13
aux_k3 = 3 + xhix * (12 + (-3 + xhix) * (-xhix + xhix2))
aux_k3 /= 6.*xhix_14
dks = np.array([[k0, k1, k2, k3], [aux_k0, aux_k1, aux_k2, aux_k3]])
dksx = np.multiply.outer(dxhix_dx, dks[1])
dks[1] *= dxhix_dxhi00
return dks, dksx
def gdHS(x0i_matrix, xhix):
ks = kichain(xhix)
# x0i_matrix = np.array([x0i**0, x0i, x0i**2, x0i**3])
g = np.exp(np.dot(ks, x0i_matrix))
return g
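# Minimal usage sketch (added; the numerical values are illustrative only):
# build the [1, x0, x0**2, x0**3] vector for a single component and evaluate
# the hard-sphere cavity function at an effective packing fraction of 0.3.
def _example_gdHS():
    x0i = 1.02
    x0i_matrix = np.array([x0i**0, x0i, x0i**2, x0i**3])
    return gdHS(x0i_matrix, 0.3)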
def dgdHS_dxhi00(x0i_matrix, xhix, dxhix_dxhi00):
dks = dkichain_dxhi00(xhix, dxhix_dxhi00)
# x0i_matrix = np.array([x0i**0, x0i, x0i**2, x0i**3])
dg = np.dot(dks, x0i_matrix)
dg[0] = np.exp(dg[0])
dg[1] *= dg[0]
return dg
def d2gdHS_dxhi00(x0i_matrix, xhix, dxhix_dxhi00):
d2ks = d2kichain_dxhi00(xhix, dxhix_dxhi00)
# x0i_matrix = np.array([x0i**0, x0i, x0i**2, x0i**3])
d2g = np.matmul(d2ks, x0i_matrix)
d2g[0] = np.exp(d2g[0])
d2g[2] += d2g[1]**2
d2g[2] *= d2g[0]
d2g[1] *= d2g[0]
return d2g
def dgdHS_dx(x0i_matrix, xhix, dxhix_dx):
ks, dksx = dkichain_dx(xhix, dxhix_dx)
# x0i_matrix = np.array([x0i**0, x0i, x0i**2, x0i**3])
g = np.exp(np.dot(ks, x0i_matrix))
# -*- coding: utf-8 -*-
#
# Copyright 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Utility functions for data loading and training of VGSL networks.
"""
import json
import regex
import torch
import traceback
import unicodedata
import numpy as np
import pkg_resources
import bidi.algorithm as bd
import shapely.geometry as geom
import torch.nn.functional as F
import torchvision.transforms.functional as tf
from os import path
from functools import partial
from shapely.ops import split, snap
from PIL import Image, ImageDraw
from itertools import groupby
from collections import Counter, defaultdict
from torchvision import transforms
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence
from typing import Dict, List, Tuple, Iterable, Sequence, Callable, Optional, Any, Union, cast
from skimage.draw import polygon
from kraken.lib.xml import parse_alto, parse_page, parse_xml
from kraken.lib.util import is_bitonal
from kraken.lib.codec import PytorchCodec
from kraken.lib.models import TorchSeqRecognizer
from kraken.lib.segmentation import extract_polygons, calculate_polygonal_environment
from kraken.lib.exceptions import KrakenInputException
from kraken.lib.lineest import CenterNormalizer, dewarp
from kraken.lib import functional_im_transforms as F_t
__all__ = ['BaselineSet', 'PolygonGTDataset', 'GroundTruthDataset', 'compute_error', 'generate_input_transforms', 'preparse_xml_data']
import logging
logger = logging.getLogger(__name__)
def generate_input_transforms(batch: int, height: int, width: int, channels: int, pad: int, valid_norm: bool = True, force_binarization=False) -> transforms.Compose:
"""
Generates a torchvision transformation converting a PIL.Image into a
tensor usable in a network forward pass.
Args:
batch (int): mini-batch size
height (int): height of input image in pixels
width (int): width of input image in pixels
channels (int): color channels of input
pad (int): Amount of padding on horizontal ends of image
valid_norm (bool): Enables/disables baseline normalization as a valid
preprocessing step. If disabled we will fall back to
standard scaling.
force_binarization (bool): Forces binarization of input images using
the nlbin algorithm.
Returns:
A torchvision transformation composition converting the input image to
the appropriate tensor.
"""
scale = (height, width) # type: Tuple[int, int]
center_norm = False
mode = 'RGB' if channels == 3 else 'L'
if height == 1 and width == 0 and channels > 3:
perm = (1, 0, 2)
scale = (channels, 0)
if valid_norm:
center_norm = True
mode = 'L'
elif height > 1 and width == 0 and channels in (1, 3):
perm = (0, 1, 2)
if valid_norm and channels == 1:
center_norm = True
elif height == 0 and width > 1 and channels in (1, 3):
perm = (0, 1, 2)
# fixed height and width image => bicubic scaling of the input image, disable padding
elif height > 0 and width > 0 and channels in (1, 3):
perm = (0, 1, 2)
pad = 0
elif height == 0 and width == 0 and channels in (1, 3):
perm = (0, 1, 2)
pad = 0
else:
raise KrakenInputException('Invalid input spec {}, {}, {}, {}, {}'.format(batch,
height,
width,
channels,
pad))
if mode != 'L' and force_binarization:
raise KrakenInputException('Invalid input spec {}, {}, {}, {} in'
' combination with forced binarization.'.format(batch,
height,
width,
channels,
pad))
out_transforms = []
out_transforms.append(transforms.Lambda(partial(F_t.pil_to_mode, mode=mode)))
if force_binarization:
out_transforms.append(transforms.Lambda(F_t.pil_to_bin))
# dummy transforms to ensure we can determine color mode of input material
# from first two transforms. It's stupid but it works.
out_transforms.append(transforms.Lambda(F_t.dummy))
if scale != (0, 0):
if center_norm:
lnorm = CenterNormalizer(scale[0])
out_transforms.append(transforms.Lambda(partial(F_t.pil_dewarp, lnorm=lnorm)))
out_transforms.append(transforms.Lambda(partial(F_t.pil_to_mode, mode=mode)))
else:
out_transforms.append(transforms.Lambda(partial(F_t.pil_fixed_resize, scale=scale)))
if pad:
out_transforms.append(transforms.Pad((pad, 0), fill=255))
out_transforms.append(transforms.ToTensor())
# invert
out_transforms.append(transforms.Lambda(F_t.tensor_invert))
out_transforms.append(transforms.Lambda(partial(F_t.tensor_permute, perm=perm)))
return transforms.Compose(out_transforms)
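# Usage sketch (added for illustration; the parameter values are assumptions,
# not taken from any particular model spec): a 48 px high, variable-width,
# grayscale recognition input with 16 px of horizontal padding.
def _example_line_transform(img_path):
    ts = generate_input_transforms(batch=1, height=48, width=0, channels=1, pad=16)
    return ts(Image.open(img_path))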
def _fast_levenshtein(seq1: Sequence[Any], seq2: Sequence[Any]) -> int:
oneago = None
thisrow = list(range(1, len(seq2) + 1)) + [0]
rows = [thisrow]
for x in range(len(seq1)):
oneago, thisrow = thisrow, [0] * len(seq2) + [x + 1]
for y in range(len(seq2)):
delcost = oneago[y] + 1
addcost = thisrow[y - 1] + 1
subcost = oneago[y - 1] + (seq1[x] != seq2[y])
thisrow[y] = min(delcost, addcost, subcost)
rows.append(thisrow)
return thisrow[len(seq2) - 1]
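# Illustrative check (added; not part of the original module): the classic
# 'kitten' -> 'sitting' pair needs three edits.
def _example_levenshtein():
    return _fast_levenshtein('kitten', 'sitting')  # expected: 3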
def global_align(seq1: Sequence[Any], seq2: Sequence[Any]) -> Tuple[int, List[str], List[str]]:
"""
Computes a global alignment of two strings.
Args:
seq1 (Sequence[Any]):
seq2 (Sequence[Any]):
Returns a tuple (distance, list(algn1), list(algn2))
"""
# calculate cost and direction matrix
cost = [[0] * (len(seq2) + 1) for x in range(len(seq1) + 1)]
for i in range(1, len(cost)):
cost[i][0] = i
for i in range(1, len(cost[0])):
cost[0][i] = i
direction = [[(0, 0)] * (len(seq2) + 1) for x in range(len(seq1) + 1)]
direction[0] = [(0, x) for x in range(-1, len(seq2))]
for i in range(-1, len(direction) - 1):
direction[i + 1][0] = (i, 0)
for i in range(1, len(cost)):
for j in range(1, len(cost[0])):
delcost = ((i - 1, j), cost[i - 1][j] + 1)
addcost = ((i, j - 1), cost[i][j - 1] + 1)
subcost = ((i - 1, j - 1), cost[i - 1][j - 1] + (seq1[i - 1] != seq2[j - 1]))
best = min(delcost, addcost, subcost, key=lambda x: x[1])
cost[i][j] = best[1]
direction[i][j] = best[0]
d = cost[-1][-1]
# backtrace
algn1: List[Any] = []
algn2: List[Any] = []
i = len(direction) - 1
j = len(direction[0]) - 1
while direction[i][j] != (-1, 0):
k, l = direction[i][j]
if k == i - 1 and l == j - 1:
algn1.insert(0, seq1[i - 1])
algn2.insert(0, seq2[j - 1])
elif k < i:
algn1.insert(0, seq1[i - 1])
algn2.insert(0, '')
elif l < j:
algn1.insert(0, '')
algn2.insert(0, seq2[j - 1])
i, j = k, l
return d, algn1, algn2
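# Illustrative sketch (added): a pure substitution alignment; gaps, when they
# occur, are marked with '' in the returned lists.
def _example_global_align():
    dist, algn1, algn2 = global_align('word', 'ward')
    return dist, algn1, algn2  # dist == 1, both alignment lists have length 4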
def compute_confusions(algn1: Sequence[str], algn2: Sequence[str]):
"""
Compute confusion matrices from two globally aligned strings.
Args:
align1 (Sequence[str]): sequence 1
align2 (Sequence[str]): sequence 2
Returns:
A tuple (counts, scripts, ins, dels, subs) with `counts` being per-character
confusions, `scripts` per-script counts, `ins` a dict with per script
insertions, `del` an integer of the number of deletions, `subs` per
script substitutions.
"""
counts: Dict[Tuple[str, str], int] = Counter()
with pkg_resources.resource_stream(__name__, 'scripts.json') as fp:
script_map = json.load(fp)
def _get_script(c):
for s, e, n in script_map:
if ord(c) == s or (e and s <= ord(c) <= e):
return n
return 'Unknown'
scripts: Dict[Tuple[str, str], int] = Counter()
ins: Dict[Tuple[str, str], int] = Counter()
dels: int = 0
subs: Dict[Tuple[str, str], int] = Counter()
for u,v in zip(algn1, algn2):
counts[(u, v)] += 1
for k, v in counts.items():
if k[0] == '':
dels += v
else:
script = _get_script(k[0])
scripts[script] += v
if k[1] == '':
ins[script] += v
elif k[0] != k[1]:
subs[script] += v
return counts, scripts, ins, dels, subs
def compute_error(model: TorchSeqRecognizer, validation_set: Iterable[Dict[str, torch.Tensor]]) -> Tuple[int, int]:
"""
Computes error report from a model and a list of line image-text pairs.
Args:
model (kraken.lib.models.TorchSeqRecognizer): Model used for recognition
validation_set (list): List of tuples (image, text) for validation
Returns:
A tuple with total number of characters and edit distance across the
whole validation set.
"""
total_chars = 0
error = 0
for batch in validation_set:
preds = model.predict_string(batch['image'], batch['seq_lens'])
total_chars += batch['target_lens'].sum()
for pred, text in zip(preds, batch['target']):
error += _fast_levenshtein(pred, text)
return total_chars, error
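# Usage sketch (added; `model` and `val_loader` are assumed to be built elsewhere
# with the classes below): turn the returned totals into a character accuracy.
def _example_character_accuracy(model, val_loader):
    chars, errs = compute_error(model, val_loader)
    return 1.0 - errs / chars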
def preparse_xml_data(filenames, format_type='xml', repolygonize=False):
"""
Loads training data from a set of xml files.
Extracts line information from Page/ALTO xml files for training of
recognition models.
Args:
filenames (list): List of XML files.
format_type (str): Either `page`, `alto` or `xml` for
autodetermination.
repolygonize (bool): (Re-)calculates polygon information using the
kraken algorithm.
Returns:
A list of dicts {'text': text, 'baseline': [[x0, y0], ...], 'boundary':
[[x0, y0], ...], 'image': PIL.Image}.
"""
training_pairs = []
if format_type == 'xml':
parse_fn = parse_xml
elif format_type == 'alto':
parse_fn = parse_alto
elif format_type == 'page':
parse_fn = parse_page
else:
raise Exception(f'invalid format {format_type} for preparse_xml_data')
for fn in filenames:
try:
data = parse_fn(fn)
except KrakenInputException as e:
logger.warning(e)
continue
try:
with open(data['image'], 'rb') as fp:
Image.open(fp)
except FileNotFoundError as e:
logger.warning(f'Could not open file {e.filename} in {fn}')
continue
if repolygonize:
logger.info('repolygonizing {} lines in {}'.format(len(data['lines']), data['image']))
data['lines'] = _repolygonize(data['image'], data['lines'])
for line in data['lines']:
training_pairs.append({'image': data['image'], **line})
return training_pairs
def _repolygonize(im: Image.Image, lines):
"""
Helper function taking an output of the lib.xml parse_* functions and
recalculating the contained polygonization.
Args:
im (Image.Image): Input image
lines (list): List of dicts [{'boundary': [[x0, y0], ...], 'baseline': [[x0, y0], ...], 'text': 'abcvsd'}, {...]
Returns:
A data structure `lines` with a changed polygonization.
"""
im = Image.open(im).convert('L')
polygons = calculate_polygonal_environment(im, [x['baseline'] for x in lines])
return [{'boundary': polygon, 'baseline': orig['baseline'], 'text': orig['text'], 'script': orig['script']} for orig, polygon in zip(lines, polygons)]
def collate_sequences(batch):
"""
Sorts and pads sequences.
"""
sorted_batch = sorted(batch, key=lambda x: x['image'].shape[2], reverse=True)
seqs = [x['image'] for x in sorted_batch]
seq_lens = torch.LongTensor([seq.shape[2] for seq in seqs])
max_len = seqs[0].shape[2]
seqs = torch.stack([F.pad(seq, pad=(0, max_len-seq.shape[2])) for seq in seqs])
if isinstance(sorted_batch[0]['target'], str):
labels = [x['target'] for x in sorted_batch]
else:
labels = torch.cat([x['target'] for x in sorted_batch]).long()
label_lens = torch.LongTensor([len(x['target']) for x in sorted_batch])
return {'image': seqs, 'target': labels, 'seq_lens': seq_lens, 'target_lens': label_lens}
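# Illustrative sketch (added; the tensors are dummies): the batch is sorted by
# width and right-padded to the widest line image.
def _example_collate():
    batch = [{'image': torch.zeros(1, 48, 12), 'target': 'abc'},
             {'image': torch.zeros(1, 48, 8), 'target': 'de'}]
    return collate_sequences(batch)  # out['image'].shape == (2, 1, 48, 12)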
class InfiniteDataLoader(DataLoader):
"""
Version of DataLoader that auto-reinitializes the iterator once it is
exhausted.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dataset_iter = super().__iter__()
def __iter__(self):
return self
def __next__(self):
try:
sample = next(self.dataset_iter)
except StopIteration:
self.dataset_iter = super().__iter__()
sample = next(self.dataset_iter)
return sample
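# Usage sketch (added; `train_ds` is assumed to be one of the dataset classes
# below): the loader transparently restarts its iterator after every epoch.
def _example_infinite_loader(train_ds):
    loader = InfiniteDataLoader(train_ds, batch_size=8, shuffle=True,
                                collate_fn=collate_sequences)
    return next(iter(loader))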
class PolygonGTDataset(Dataset):
"""
Dataset for training a line recognition model from polygonal/baseline data.
"""
def __init__(self,
normalization: Optional[str] = None,
whitespace_normalization: bool = True,
reorder: bool = True,
im_transforms: Callable[[Any], torch.Tensor] = transforms.Compose([]),
preload: bool = True,
augmentation: bool = False) -> None:
self._images = [] # type: Union[List[Image], List[torch.Tensor]]
self._gt = [] # type: List[str]
self.alphabet = Counter() # type: Counter
self.text_transforms = [] # type: List[Callable[[str], str]]
# split image transforms into two. one part giving the final PIL image
# before conversion to a tensor and the actual tensor conversion part.
self.head_transforms = transforms.Compose(im_transforms.transforms[:2])
self.tail_transforms = transforms.Compose(im_transforms.transforms[2:])
self.transforms = im_transforms
self.preload = preload
self.aug = None
self.seg_type = 'baselines'
# built text transformations
if normalization:
self.text_transforms.append(partial(F_t.text_normalize, normalization=normalization))
if whitespace_normalization:
self.text_transforms.append(F_t.text_whitespace_normalize)
if reorder:
self.text_transforms.append(F_t.text_reorder)
if augmentation:
from albumentations import (
Compose, ToFloat, FromFloat, Flip, OneOf, MotionBlur, MedianBlur, Blur,
ShiftScaleRotate, OpticalDistortion, ElasticTransform, RandomBrightnessContrast,
)
self.aug = Compose([
ToFloat(),
OneOf([
MotionBlur(p=0.2),
MedianBlur(blur_limit=3, p=0.1),
Blur(blur_limit=3, p=0.1),
], p=0.2),
ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=3, p=0.2),
OneOf([
OpticalDistortion(p=0.3),
ElasticTransform(p=0.1),
], p=0.2),
], p=0.5)
self.im_mode = '1'
def add(self, *args, **kwargs):
"""
Adds a line to the dataset.
Args:
im (path): Path to the whole page image
text (str): Transcription of the line.
baseline (list): A list of coordinates [[x0, y0], ..., [xn, yn]].
boundary (list): A polygon mask for the line.
"""
if 'preparse' not in kwargs or not kwargs['preparse']:
kwargs = self.parse(*args, **kwargs)
if kwargs['preload']:
self.im_mode = kwargs['im_mode']
self._images.append(kwargs['image'])
else:
self._images.append((kwargs['image'], kwargs['baseline'], kwargs['boundary']))
self._gt.append(kwargs['text'])
self.alphabet.update(kwargs['text'])
def parse(self, image: Union[str, Image.Image], text: str, baseline: List[Tuple[int, int]], boundary: List[Tuple[int, int]], *args, **kwargs):
"""
Parses a sample for the dataset and returns it.
This function is mainly used for parallelized loading of training data.
Args:
im (path): Path to the whole page image
text (str): Transcription of the line.
baseline (list): A list of coordinates [[x0, y0], ..., [xn, yn]].
boundary (list): A polygon mask for the line.
"""
for func in self.text_transforms:
text = func(text)
if not text:
raise KrakenInputException('Text line is empty after transformations')
if not baseline:
raise KrakenInputException('No baseline given for line')
if not boundary:
raise KrakenInputException('No boundary given for line')
if self.preload:
if not isinstance(image, Image.Image):
im = Image.open(image)
try:
im, _ = next(extract_polygons(im, {'type': 'baselines', 'lines': [{'baseline': baseline, 'boundary': boundary}]}))
except IndexError:
raise KrakenInputException('Patch extraction failed for baseline')
try:
im = self.head_transforms(im)
im = self.tail_transforms(im)
except ValueError:
raise KrakenInputException(f'Image transforms failed on {image}')
self._images.append(im)
return {'text': text, 'image': im, 'baseline': baseline, 'boundary': boundary, 'im_mode': im.mode, 'preload': True, 'preparse': True}
else:
return {'text': text, 'image': image, 'baseline': baseline, 'boundary': boundary, 'preload': False, 'preparse': True}
def encode(self, codec: Optional[PytorchCodec] = None) -> None:
"""
Adds a codec to the dataset and encodes all text lines.
Has to be run before sampling from the dataset.
"""
if codec:
self.codec = codec
else:
self.codec = PytorchCodec(''.join(self.alphabet.keys()))
self.training_set = [] # type: List[Tuple[Union[Image, torch.Tensor], torch.Tensor]]
for im, gt in zip(self._images, self._gt):
self.training_set.append((im, self.codec.encode(gt)))
def no_encode(self) -> None:
"""
Creates an unencoded dataset.
"""
self.training_set = [] # type: List[Tuple[Union[Image, torch.Tensor], str]]
for im, gt in zip(self._images, self._gt):
self.training_set.append((im, gt))
def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor]:
if self.preload:
x, y = self.training_set[index]
if self.aug:
x = x.permute((1, 2, 0)).numpy()
o = self.aug(image=x)
x = torch.tensor(o['image'].transpose(2, 0, 1))
return {'image': x, 'target': y}
else:
item = self.training_set[index]
try:
logger.debug(f'Attempting to load {item[0]}')
im = item[0][0]
if not isinstance(im, Image.Image):
im = Image.open(im)
im, _ = next(extract_polygons(im, {'type': 'baselines', 'lines': [{'baseline': item[0][1], 'boundary': item[0][2]}]}))
im = self.head_transforms(im)
if not is_bitonal(im):
self.im_mode = im.mode
im = self.tail_transforms(im)
if self.aug:
im = im.permute((1, 2, 0)).numpy()
o = self.aug(image=im)
im = torch.tensor(o['image'].transpose(2, 0, 1))
return {'image': im, 'target': item[1]}
except Exception:
idx = np.random.randint(0, len(self.training_set))
logger.debug(traceback.format_exc())
logger.info(f'Failed. Replacing with sample {idx}')
return self[np.random.randint(0, len(self.training_set))]
def __len__(self) -> int:
return len(self.training_set)
class GroundTruthDataset(Dataset):
"""
Dataset for training a line recognition model.
All data is cached in memory.
"""
def __init__(self, split: Callable[[str], str] = F_t.default_split,
suffix: str = '.gt.txt',
normalization: Optional[str] = None,
whitespace_normalization: bool = True,
reorder: bool = True,
im_transforms: Callable[[Any], torch.Tensor] = transforms.Compose([]),
preload: bool = True,
augmentation: bool = False) -> None:
"""
Reads a list of image-text pairs and creates a ground truth set.
Args:
split (func): Function for generating the base name without
extensions from paths
suffix (str): Suffix to attach to image base name for text
retrieval
mode (str): Image color space. Either RGB (color) or L
(grayscale/bw). Only L is compatible with vertical
scaling/dewarping.
scale (int, tuple): Target height or (width, height) of dewarped
line images. Vertical-only scaling is through
CenterLineNormalizer, resizing with Lanczos
interpolation. Set to 0 to disable.
normalization (str): Unicode normalization for gt
whitespace_normalization (str): Normalizes unicode whitespace and
strips whitespace.
reorder (bool): Whether to rearrange code points in "display"/LTR
order
im_transforms (func): Function taking an PIL.Image and returning a
tensor suitable for forward passes.
preload (bool): Enables preloading and preprocessing of image files.
"""
self.suffix = suffix
self.split = partial(F_t.suffix_split, split=split, suffix=suffix)
self._images = [] # type: Union[List[Image], List[torch.Tensor]]
self._gt = [] # type: List[str]
self.alphabet = Counter() # type: Counter
self.text_transforms = [] # type: List[Callable[[str], str]]
# split image transforms into two. one part giving the final PIL image
# before conversion to a tensor and the actual tensor conversion part.
self.head_transforms = transforms.Compose(im_transforms.transforms[:2])
self.tail_transforms = transforms.Compose(im_transforms.transforms[2:])
self.aug = None
self.preload = preload
self.seg_type = 'bbox'
# built text transformations
if normalization:
self.text_transforms.append(partial(F_t.text_normalize, normalization=normalization))
if whitespace_normalization:
self.text_transforms.append(F_t.text_whitespace_normalize)
if reorder:
self.text_transforms.append(F_t.text_reorder)
if augmentation:
from albumentations import (
Compose, ToFloat, FromFloat, Flip, OneOf, MotionBlur, MedianBlur, Blur,
ShiftScaleRotate, OpticalDistortion, ElasticTransform, RandomBrightnessContrast,
)
self.aug = Compose([
ToFloat(),
OneOf([
MotionBlur(p=0.2),
MedianBlur(blur_limit=3, p=0.1),
Blur(blur_limit=3, p=0.1),
], p=0.2),
ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
OneOf([
OpticalDistortion(p=0.3),
ElasticTransform(p=0.1),
], p=0.2),
], p=0.5)
self.im_mode = '1'
def add(self, *args, **kwargs) -> None:
"""
Adds a line-image-text pair to the dataset.
Args:
image (str): Input image path
"""
if 'preparse' not in kwargs or not kwargs['preparse']:
kwargs = self.parse(*args, **kwargs)
if kwargs['preload']:
self.im_mode = kwargs['im_mode']
self._images.append(kwargs['image'])
self._gt.append(kwargs['text'])
self.alphabet.update(kwargs['text'])
def parse(self, image: Union[str, Image.Image], *args, **kwargs) -> Dict:
"""
Parses a sample for this dataset.
This is mostly used to parallelize populating the dataset.
Args:
image (str): Input image path
"""
with open(self.split(image), 'r', encoding='utf-8') as fp:
gt = fp.read().strip('\n\r')
for func in self.text_transforms:
gt = func(gt)
if not gt:
raise KrakenInputException(f'Text line is empty ({fp.name})')
if self.preload:
try:
im = Image.open(image)
im = self.head_transforms(im)
im = self.tail_transforms(im)
except ValueError:
raise KrakenInputException(f'Image transforms failed on {image}')
return {'image': im, 'text': gt, 'im_mode': im.mode, 'preload': True, 'preparse': True}
else:
return {'image': image, 'text': gt, 'preload': False, 'preparse': True}
def add_loaded(self, image: Image.Image, gt: str) -> None:
"""
Adds an already loaded line-image-text pair to the dataset.
Args:
image (PIL.Image.Image): Line image
gt (str): Text contained in the line image
"""
if self.preload:
try:
im = self.head_transforms(image)
if not is_bitonal(im):
self.im_mode = im.mode
im = self.tail_transforms(im)
except ValueError:
raise KrakenInputException(f'Image transforms failed on {image}')
self._images.append(im)
else:
self._images.append(image)
for func in self.text_transforms:
gt = func(gt)
self._gt.append(gt)
self.alphabet.update(gt)
def encode(self, codec: Optional[PytorchCodec] = None) -> None:
"""
Adds a codec to the dataset and encodes all text lines.
Has to be run before sampling from the dataset.
"""
if codec:
self.codec = codec
else:
self.codec = PytorchCodec(''.join(self.alphabet.keys()))
self.training_set = [] # type: List[Tuple[Union[Image, torch.Tensor], torch.Tensor]]
for im, gt in zip(self._images, self._gt):
self.training_set.append((im, self.codec.encode(gt)))
def no_encode(self) -> None:
"""
Creates an unencoded dataset.
"""
self.training_set = [] # type: List[Tuple[Union[Image, torch.Tensor], str]]
for im, gt in zip(self._images, self._gt):
self.training_set.append((im, gt))
def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor]:
if self.preload:
x, y = self.training_set[index]
if self.aug:
im = x.permute((1, 2, 0)).numpy()
o = self.aug(image=im)
im = torch.tensor(o['image'].transpose(2, 0, 1))
return {'image': im, 'target': y}
return {'image': x, 'target': y}
else:
item = self.training_set[index]
try:
logger.debug(f'Attempting to load {item[0]}')
im = item[0]
if not isinstance(im, Image.Image):
im = Image.open(im)
im = self.head_transforms(im)
if not is_bitonal(im):
self.im_mode = im.mode
im = self.tail_transforms(im)
if self.aug:
im = im.permute((1, 2, 0)).numpy()
o = self.aug(image=im)
im = torch.tensor(o['image'].transpose(2, 0, 1))
return {'image': im, 'target': item[1]}
except Exception:
idx = np.random.randint(0, len(self.training_set))
logger.debug(traceback.format_exc())
logger.info(f'Failed. Replacing with sample {idx}')
return self[np.random.randint(0, len(self.training_set))]
def __len__(self) -> int:
return len(self.training_set)
class BaselineSet(Dataset):
"""
Dataset for training a baseline/region segmentation model.
"""
def __init__(self, imgs: Sequence[str] = None,
suffix: str = '.path',
line_width: int = 4,
im_transforms: Callable[[Any], torch.Tensor] = transforms.Compose([]),
mode: str = 'path',
augmentation: bool = False,
valid_baselines: Sequence[str] = None,
merge_baselines: Dict[str, Sequence[str]] = None,
valid_regions: Sequence[str] = None,
merge_regions: Dict[str, Sequence[str]] = None):
"""
Reads a list of image-json pairs and creates a data set.
Args:
imgs (list):
suffix (int): Suffix to attach to image base name to load JSON
files from.
line_width (int): Height of the baseline in the scaled input.
target_size (tuple): Target size of the image as a (height, width) tuple.
mode (str): Either path, alto, page, xml, or None. In alto, page,
and xml mode the baseline paths and image data is
retrieved from an ALTO/PageXML file. In `None` mode
data is iteratively added through the `add` method.
augmentation (bool): Enable/disable augmentation.
valid_baselines (list): Sequence of valid baseline identifiers. If
`None` all are valid.
merge_baselines (dict): Sequence of baseline identifiers to merge.
Note that merging occurs after entities not
in valid_* have been discarded.
valid_regions (list): Sequence of valid region identifiers. If
`None` all are valid.
merge_regions (dict): Sequence of region identifiers to merge.
Note that merging occurs after entities not
in valid_* have been discarded.
"""
super().__init__()
self.mode = mode
self.im_mode = '1'
self.aug = None
self.targets = []
# n-th entry contains semantic of n-th class
self.class_mapping = {'aux': {'_start_separator': 0, '_end_separator': 1}, 'baselines': {}, 'regions': {}}
self.class_stats = {'baselines': defaultdict(int), 'regions': defaultdict(int)}
self.num_classes = 2
self.mbl_dict = merge_baselines if merge_baselines is not None else {}
self.mreg_dict = merge_regions if merge_regions is not None else {}
self.valid_baselines = valid_baselines
self.valid_regions = valid_regions
if mode in ['alto', 'page', 'xml']:
if mode == 'alto':
fn = parse_alto
elif mode == 'page':
fn = parse_page
elif mode == 'xml':
fn = parse_xml
im_paths = []
self.targets = []
for img in imgs:
try:
data = fn(img)
im_paths.append(data['image'])
lines = defaultdict(list)
for line in data['lines']:
if valid_baselines is None or line['script'] in valid_baselines:
lines[self.mbl_dict.get(line['script'], line['script'])].append(line['baseline'])
self.class_stats['baselines'][self.mbl_dict.get(line['script'], line['script'])] += 1
regions = defaultdict(list)
for k, v in data['regions'].items():
if valid_regions is None or k in valid_regions:
regions[self.mreg_dict.get(k, k)].extend(v)
self.class_stats['regions'][self.mreg_dict.get(k, k)] += len(v)
data['regions'] = regions
self.targets.append({'baselines': lines, 'regions': data['regions']})
except KrakenInputException as e:
logger.warning(e)
continue
# get line types
imgs = im_paths
# calculate class mapping
line_types = set()
region_types = set()
for page in self.targets:
for line_type in page['baselines'].keys():
line_types.add(line_type)
for reg_type in page['regions'].keys():
region_types.add(reg_type)
idx = -1
for idx, line_type in enumerate(line_types):
self.class_mapping['baselines'][line_type] = idx + self.num_classes
self.num_classes += idx + 1
idx = -1
for idx, reg_type in enumerate(region_types):
self.class_mapping['regions'][reg_type] = idx + self.num_classes
self.num_classes += idx + 1
elif mode == 'path':
pass
elif mode is None:
imgs = []
else:
raise Exception('invalid dataset mode')
if augmentation:
from albumentations import (
Compose, ToFloat, FromFloat, RandomRotate90, Flip, OneOf, MotionBlur, MedianBlur, Blur,
ShiftScaleRotate, OpticalDistortion, ElasticTransform, RandomBrightnessContrast,
HueSaturationValue,
)
self.aug = Compose([
ToFloat(),
RandomRotate90(),
Flip(),
OneOf([
MotionBlur(p=0.2),
MedianBlur(blur_limit=3, p=0.1),
Blur(blur_limit=3, p=0.1),
], p=0.2),
ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
OneOf([
OpticalDistortion(p=0.3),
ElasticTransform(p=0.1),
], p=0.2),
HueSaturationValue(hue_shift_limit=20, sat_shift_limit=0.1, val_shift_limit=0.1, p=0.3),
], p=0.5)
self.imgs = imgs
self.line_width = line_width
# split image transforms into two. one part giving the final PIL image
# before conversion to a tensor and the actual tensor conversion part.
self.head_transforms = transforms.Compose(im_transforms.transforms[:2])
self.tail_transforms = transforms.Compose(im_transforms.transforms[2:])
self.seg_type = None
def add(self,
image: Union[str, Image.Image],
baselines: List[List[List[Tuple[int, int]]]] = None,
regions: Dict[str, List[List[Tuple[int, int]]]] = None,
*args,
**kwargs):
"""
Adds a page to the dataset.
Args:
im (path): Path to the whole page image
baseline (dict): A list containing dicts with a list of coordinates
and script types [{'baseline': [[x0, y0], ...,
[xn, yn]], 'script': 'script_type'}, ...]
regions (dict): A dict containing list of lists of coordinates {'region_type_0': [[x0, y0], ..., [xn, yn]]], 'region_type_1': ...}.
"""
if self.mode:
raise Exception(f'The `add` method is incompatible with dataset mode {self.mode}')
baselines_ = defaultdict(list)
for line in baselines:
line_type = self.mbl_dict.get(line['script'], line['script'])
if self.valid_baselines is None or line['script'] in self.valid_baselines:
baselines_[line_type].append(line['baseline'])
self.class_stats['baselines'][line_type] += 1
if line_type not in self.class_mapping['baselines']:
self.num_classes += 1
self.class_mapping['baselines'][line_type] = self.num_classes - 1
regions_ = defaultdict(list)
for k, v in regions.items():
reg_type = self.mreg_dict.get(k, k)
if self.valid_regions is None or reg_type in self.valid_regions:
regions_[reg_type].extend(v)
self.class_stats['regions'][reg_type] += len(v)
if reg_type not in self.class_mapping['regions']:
self.num_classes += 1
self.class_mapping['regions'][reg_type] = self.num_classes - 1
self.targets.append({'baselines': baselines_, 'regions': regions_})
self.imgs.append(image)
def __getitem__(self, idx):
im = self.imgs[idx]
if self.mode != 'path':
target = self.targets[idx]
else:
with open('{}.path'.format(path.splitext(im)[0]), 'r') as fp:
target = json.load(fp)
if not isinstance(im, Image.Image):
try:
logger.debug(f'Attempting to load {im}')
im = Image.open(im)
im, target = self.transform(im, target)
return {'image': im, 'target': target}
except Exception:
idx = np.random.randint(0, len(self.imgs))
logger.debug(traceback.format_exc())
logger.info(f'Failed. Replacing with sample {idx}')
return self[np.random.randint(0, len(self.imgs))]
im, target = self.transform(im, target)
return {'image': im, 'target': target}
@staticmethod
def _get_ortho_line(lineseg, point, line_width, offset):
lineseg = np.array(lineseg)
"""
Mask R-CNN
Display and Visualization Functions.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by <NAME>
"""
import math
import random
import itertools
import colorsys
import numpy as np
import IPython.display
import tensorflow as tf
import keras.backend as KB
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.lines as lines
import skimage.util
from skimage.measure import find_contours
from PIL import Image
from matplotlib.patches import Polygon
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import mrcnn.utils as utils
from mrcnn.datagen import load_image_gt
############################################################
# Visualization
############################################################
def get_ax(rows=1, cols=1, size=8):
"""Return a Matplotlib Axes array to be used in
all visualizations in the notebook. Provide a
central point to control graph sizes.
Change the default size attribute to control the size
of rendered images
"""
_, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))
return ax
def random_colors(N, bright=True):
"""
Generate random colors.
To get visually distinct colors, generate them in HSV space then
convert to RGB.
"""
brightness = 1.0 if bright else 0.7
hsv = [(i / N, 1, brightness) for i in range(N)]
colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
random.shuffle(colors)
return colors
def apply_mask(image, mask, color, alpha=0.5):
"""Apply the given mask to the image.
"""
for c in range(3):
image[:, :, c] = np.where(mask == 1,
image[:, :, c] * (1 - alpha) + alpha * color[c] * 255,
image[:, :, c])
return image
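# Illustrative sketch (added; the values are dummies): blend a red mask over the
# top-left quadrant of a blank 4x4 RGB float image with 50% opacity.
def _example_apply_mask():
    img = np.zeros((4, 4, 3), dtype=np.float32)
    mask = np.zeros((4, 4), dtype=np.uint8)
    mask[:2, :2] = 1
    return apply_mask(img, mask, color=(1.0, 0.0, 0.0), alpha=0.5)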
##----------------------------------------------------------------------
## display_image
## figsize : tuple of integers, optional: (width, height) in inches
## default: None
## If not provided, defaults to rc figure.figsize.
##----------------------------------------------------------------------
def display_image(image, title='', cmap=None, norm=None,
interpolation=None, figsize=(10,10), ax=None):
"""
Display a single image, optionally with a title.
image: image tensor in HWC format.
title: optional. A title string displayed above the image.
cmap: Optional. Color map to use. For example, "Blues".
norm: Optional. A Normalize instance to map values to colors.
interpolation: Optional. Image interpolation to use for display.
"""
plt.figure(figsize=figsize)
# if title is None:
title += " H x W={}x{}".format(image.shape[0], image.shape[1])
plt.title(title, fontsize=12)
plt.imshow(image, cmap=cmap,
norm=norm, interpolation=interpolation)
##----------------------------------------------------------------------
## display_image
## figsize : tuple of integers, optional: (width, height) in inches
## default: None
## If not provided, defaults to rc figure.figsize.
##----------------------------------------------------------------------
def display_image_bw(image, title="B/W Display" , cmap=None, norm=None,
interpolation=None, figsize=(10,10), ax=None):
"""
Display a single grayscale image, optionally with a title.
image: image tensor in HWC format.
title: optional. A title string displayed above the image.
cmap: Optional. Color map to use. For example, "Blues".
norm: Optional. A Normalize instance to map values to colors.
interpolation: Optional. Image interpolation to use for display.
"""
plt.figure(figsize=figsize)
plt.title(title, fontsize=12)
arr = np.asarray(image)
# print(type(image), image.shape)
# print(type(arr), arr.shape)
# plt.imshow(image.astype(np.uint8), cmap=cmap,
# norm=norm, interpolation=interpolation)
plt.imshow(arr, cmap='gray')
##----------------------------------------------------------------------
## display_images
##----------------------------------------------------------------------
def display_images(images, titles=None, cols=4, cmap=None, norm=None,
interpolation=None, width=14):
"""
Display the given set of images, optionally with titles.
images: list or array of image tensors in HWC format.
titles: optional. A list of titles to display with each image.
cols: number of images per row
cmap: Optional. Color map to use. For example, "Blues".
norm: Optional. A Normalize instance to map values to colors.
interpolation: Optional. Image interpolation to use for display.
"""
titles = titles if titles is not None else [""] * len(images)
rows = len(images) // cols + 1
plt.figure(figsize=(width, width * rows // cols))
i = 1
for image, title in zip(images, titles):
title += " H x W={}x{}".format(image.shape[0], image.shape[1])
plt.subplot(rows, cols, i)
plt.title(title, fontsize=9)
plt.axis('off')
plt.imshow(image.astype(np.uint8), cmap=cmap,
norm=norm, interpolation=interpolation)
i += 1
plt.show()
##------------------------------------------------------------------------------------
## display_training_batch()
##------------------------------------------------------------------------------------
def display_image_gt(dataset, config, image_ids, masks= False, only_classes = None, size=12):
'''
display images in a mrcnn train_batch
'''
from mrcnn.datagen import data_gen_simulate
if not isinstance(image_ids, list):
image_ids = [image_ids]
for image_id in image_ids:
image = dataset.load_image(image_id)
# molded_image, image_meta, class_ids, bbox = load_image_gt(dataset, config, image_id)
_, image_meta, _, _ = load_image_gt(dataset, config, image_id)
mask, class_ids = dataset.load_mask(image_id)
bbox = utils.extract_bboxes(mask)
class_names = [str(dataset.class_names[class_id]) for class_id in class_ids]
print(' Image_id : ', image_id, ' Reference: ', dataset.image_reference(image_id) , 'Coco Id:', dataset.image_info[image_id]['id'])
print(' Image meta : ', image_meta[:10])
print(' Class ids : ', class_ids.shape, ' ' , class_ids)
print(' Class Names : ', class_names)
# display_top_masks(image, mask, class_ids, dataset.class_names)
if masks:
display_instances_with_mask(image, bbox, mask, class_ids, dataset.class_names, size =size)
else:
display_instances(image, bbox, class_ids, dataset.class_names, only_classes = only_classes, size=size)
return
##------------------------------------------------------------------------------------
## display_training_batch()
##------------------------------------------------------------------------------------
def display_training_batch(dataset, batch_x, masks= False):
'''
display images in a mrcnn train_batch
'''
# replaced following two lines with next line to avoid the need to pass model to this fuction
# imgmeta_idx = mrcnn_model.keras_model.input_names.index('input_image_meta')
# img_meta = train_batch_x[imgmeta_idx]
img_meta = batch_x[1]
for img_idx in range(img_meta.shape[0]):
image_id = img_meta[img_idx,0]
print('image id : ', image_id)
image = dataset.load_image(image_id)
mask, class_ids = dataset.load_mask(image_id)
bbox = utils.extract_bboxes(mask)
class_names = [str(dataset.class_names[class_id]) for class_id in class_ids]
print(' Image_id : ', image_id, ' Reference: ', dataset.image_reference(image_id) , 'Coco Id:', dataset.image_info[image_id]['id'])
print(' Image meta : ', img_meta[img_idx, :8])
print(' Class ids : ', class_ids.shape, ' ' , class_ids)
print(' Class Names : ', class_names) # print('Classes (1: circle, 2: square, 3: triangle ): ',class_ids)
if masks:
display_top_masks(image, mask, class_ids, dataset.class_names)
display_instances_with_mask(image, bbox, mask, class_ids, dataset.class_names)
else:
display_instances(image, bbox, class_ids, dataset.class_names)
return
##----------------------------------------------------------------------
## display_instances
##----------------------------------------------------------------------
def display_instances(image, boxes, class_ids, class_names,
scores=None, title="", only_classes = None,
figsize=(16, 16), ax=None, score_range = (-1.0, 1.0), size = 16):
"""
boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates.
masks: [num_instances, height, width]
class_ids: [num_instances]
class_names: list of class names of the dataset
scores: (optional) confidence scores for each box
figsize: (optional) the size of the image.
max_score: show instances with score less than this
"""
# Number of instances
N = boxes.shape[0]
if not N:
print("\n*** No instances to display *** \n")
else:
assert boxes.shape[0] == class_ids.shape[0], " boxes.shape[0]: {:d} must be ==class_ids.shape[0]: {:d}".format(boxes.shape[0], class_ids.shape[0])
# assert boxes.shape[0] == class_ids.shape[0]
# print(' display_instances() : Image shape: ', image.shape)
if not ax:
ax = get_ax(rows =1, cols = 1, size= size)
# _, ax = plt.subplots(1, figsize=figsize)
# Generate random colors
colors = random_colors(N)
# Show area outside image boundaries.
height, width = image.shape[:2]
ax.set_ylim(height + 10, -10)
ax.set_xlim(-10, width + 10)
# ax.axis('off')
ax.set_title(title)
masked_image = image.astype(np.uint32).copy()
for i in range(N):
class_id = class_ids[i]
if only_classes is not None:
if class_id not in only_classes:
continue
if scores is not None:
# print(' boxes ' ,i,' ' , boxes[i], 'score: ', scores[i], ' ', score_range)
if scores[i] <= score_range[0] or scores[i] >= score_range[1]:
continue
color = colors[i]
# Bounding box
if not np.any(boxes[i]):
# Skip this instance. Has no bbox. Likely lost in image cropping.
continue
y1, x1, y2, x2 = boxes[i]
p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
alpha=0.7, linestyle="dashed",
edgecolor=color, facecolor='none')
ax.add_patch(p)
# Label
score = scores[i] if scores is not None else None
if class_id >= 0 :
label = "{:2d}-{:2d} {}".format(i,class_id, class_names[class_id])
else:
label = "{:2d}-{:2d} {}".format(i,class_id, class_names[-class_id]) + ' (CROWD)'
x = random.randint(x1, (x1 + x2) // 2)
caption = "{} {:.3f}".format(label, score) if score else label
t = ax.text(x1, y1 + 8, caption, color='k', size=8, backgroundcolor="w")
t.set_bbox(dict(facecolor='w', alpha=0.5, edgecolor='w'))
ax.imshow(masked_image.astype(np.uint8))
plt.show()
return
##----------------------------------------------------------------------
## display_instances_with_mask
##----------------------------------------------------------------------
def display_instances_with_mask(image, boxes, masks, class_ids, class_names,
scores=None, title="",
figsize=(16, 16), ax=None):
"""
boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates.
masks: [num_instances, height, width]
class_ids: [num_instances]
class_names: list of class names of the dataset
scores: (optional) confidence scores for each box
figsize: (optional) the size of the image.
max_score: show instances with score less than this
"""
# Number of instances
# print(' display_instances WITH MASK() : Image shape: ', image.shape)
N = boxes.shape[0]
if not N:
print("\n*** No instances to display *** \n")
else:
assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]
if not ax:
_, ax = plt.subplots(1, figsize=figsize)
# Generate random colors
colors = random_colors(N)
# Show area outside image boundaries.
height, width = image.shape[:2]
ax.set_ylim(height + 10, -10)
ax.set_xlim(-10, width + 10)
# ax.axis('off')
ax.set_title(title)
masked_image = image.astype(np.uint32).copy()
for i in range(N):
color = colors[i]
# Bounding box
if not np.any(boxes[i]):
# Skip this instance. Has no bbox. Likely lost in image cropping.
continue
y1, x1, y2, x2 = boxes[i]
p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
alpha=0.7, linestyle="dashed",
edgecolor=color, facecolor='none')
ax.add_patch(p)
# Label
score = scores[i] if scores is not None else None
class_id = class_ids[i]
# label = class_names[class_id]
# if class_id >= 0 :
# label = class_names[class_id]
# else:
# label = class_names[-class_id] + ' (CROWD)'
# x = random.randint(x1, (x1 + x2) // 2)
# caption = "{} {:.3f}".format(label, score) if score else label
# ax.text(x1, y1 + 8, caption, color='k', size=11, backgroundcolor="w")
if class_id >= 0 :
label = "{:2d}-{:2d} {}".format(i,class_id, class_names[class_id])
else:
label = "{:2d}-{:2d} {}".format(i,class_id, class_names[-class_id]) + ' (CROWD)'
x = random.randint(x1, (x1 + x2) // 2)
caption = "{} {:.3f}".format(label, score) if score else label
t = ax.text(x1, y1 + 8, caption, color='k', size=8, backgroundcolor="w")
t.set_bbox(dict(facecolor='w', alpha=0.5, edgecolor='w'))
# Mask
mask = masks[:, :, i]
masked_image = apply_mask(masked_image, mask, color)
# Mask Polygon
# Pad to ensure proper polygons for masks that touch image edges.
padded_mask = np.zeros((mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)
padded_mask[1:-1, 1:-1] = mask
contours = find_contours(padded_mask, 0.5)
for verts in contours:
# Subtract the padding and flip (y, x) to (x, y)
verts = np.fliplr(verts) - 1
p = Polygon(verts, facecolor="none", edgecolor=color)
ax.add_patch(p)
ax.imshow(masked_image.astype(np.uint8))
plt.show()
return
##----------------------------------------------------------------------
## display_instances from pr_scores
##----------------------------------------------------------------------
def display_instances_from_prscores(image, pr_scores, class_names,
title="", only_classes = None,
figsize=(16, 16), ax=None, score_range = (0.0, 1.0)):
"""
boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates.
masks: [num_instances, height, width]
class_ids: [num_instances]
class_names: list of class names of the dataset
scores: (optional) confidence scores for each box
figsize: (optional) the size of the image.
max_score: show instances with score less than this
"""
# Number of instances
boxes = pr_scores[:,:4]
class_ids = pr_scores[:,4].astype(int)
scores = pr_scores[:,5]
det_ind = pr_scores[:,6].astype(int)
sequences = pr_scores[:,7].astype(int)
N = boxes.shape[0]
if not N:
print("\n*** No instances to display *** \n")
else:
assert boxes.shape[0] == class_ids.shape[0], " boxes.shape[0]: {:d} must be ==class_ids.shape[0]: {:d}".format(boxes.shape[0], class_ids.shape[0])
print(' display_instances() : Image shape: ', image.shape)
if not ax:
_, ax = plt.subplots(1, figsize=figsize)
# Generate random colors
colors = random_colors(N)
# Show area outside image boundaries.
height, width = image.shape[:2]
ax.set_ylim(height + 10, -10)
ax.set_xlim(-10, width + 10)
# ax.axis('off')
ax.set_title(title)
masked_image = image.astype(np.uint32).copy()
for i in range(N):
class_id = class_ids[i]
if only_classes is not None:
if class_id not in only_classes:
continue
if scores is not None:
if scores[i] <= score_range[0] or scores[i] >= score_range[1]:
continue
color = colors[i]
# Bounding box
if not np.any(boxes[i]):
# Skip this instance. Has no bbox. Likely lost in image cropping.
continue
y1, x1, y2, x2 = boxes[i]
p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
alpha=0.7, linestyle="dashed",
edgecolor=color, facecolor='none')
ax.add_patch(p)
# Label
score = scores[i] if scores is not None else None
if det_ind[i] == -1:
det_ttl = ' ADDED FP'
else:
det_ttl = ''
if class_id >= 0 :
label = class_names[class_id] + det_ttl
else:
label = class_names[-class_id] + ' (CROWD)'
x = random.randint(x1, (x1 + x2) // 2)
caption = "{:2d}-{} {:.4f}".format(class_id, label, score) if score else label
ax.text(x1, y1 - 8, caption, color='k', size=9, backgroundcolor="w")
ax.imshow(masked_image.astype(np.uint8))
plt.show()
return
##----------------------------------------------------------------------
## display_instances
##----------------------------------------------------------------------
def display_instances_two_scores(image, boxes, class_ids, class_names,
scores1=None, scores2= None , title="", only_classes = None,
figsize=(16, 16), ax=None, score_range = (-1.0, 1.0), size = 16):
"""
    boxes: [num_instance, (y1, x1, y2, x2)] in image coordinates.
    class_ids: [num_instances]
    class_names: list of class names of the dataset
    scores1, scores2: (optional) two sets of confidence scores shown side by side
    only_classes: (optional) list of class ids to display; all classes if None
    figsize: (optional) the size of the image.
    ax: (optional) matplotlib axes to draw on; a new figure/axes is created if omitted
"""
# Number of instances
N = boxes.shape[0]
if not N:
print("\n*** No instances to display *** \n")
else:
assert boxes.shape[0] == class_ids.shape[0]
if scores2 is not None :
assert scores2.shape == scores1.shape
# print(' display_instances() : Image shape: ', image.shape)
if not ax:
ax = get_ax(rows =1, cols = 1, size= size)
# _, ax = plt.subplots(1, figsize=figsize)
# Generate random colors
colors = random_colors(N)
# Show area outside image boundaries.
height, width = image.shape[:2]
ax.set_ylim(height + 10, -10)
ax.set_xlim(-10, width + 10)
# ax.axis('off')
ax.set_title(title)
masked_image = image.astype(np.uint32).copy()
for i in range(N):
class_id = class_ids[i]
if only_classes is not None:
if class_id not in only_classes:
continue
# if scores1 is not None:
# print(' boxes ' ,i,' ' , boxes[i], 'score: ', scores[i], ' ', score_range)
# if scores1[i] <= score_range[0] or scores1[i] >= score_range[1]:
# continue
color = colors[i]
# Bounding box
if not np.any(boxes[i]):
# Skip this instance. Has no bbox. Likely lost in image cropping.
continue
y1, x1, y2, x2 = boxes[i]
p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
alpha=0.7, linestyle="dashed",
edgecolor=color, facecolor='none')
ax.add_patch(p)
# Label
score1 = scores1[i] if scores1 is not None else None
score2 = scores2[i] if scores2 is not None else None
if class_id >= 0 :
label = "{:2d}-{:2d} {}".format(i,class_id, class_names[class_id])
else:
label = "{:2d}-{:2d} {}".format(i,class_id, class_names[-class_id]) + ' (CROWD)'
x = random.randint(x1, (x1 + x2) // 2)
caption = "{} {:5.4f}".format(label, score1) if score1 else label
caption += "--> {:5.4f}".format(score2) if score2 else ''
t = ax.text(x1, y1 + 8, caption, color='k', size=8, backgroundcolor="w")
t.set_bbox(dict(facecolor='w', alpha=0.5, edgecolor='w'))
ax.imshow(masked_image.astype(np.uint8))
plt.show()
return
##----------------------------------------------------------------------
## draw_rois (along with the refined_rois)
##----------------------------------------------------------------------
# def draw_rois_with_refinements(image, rois, refined_rois, mask, class_ids, class_names, limit=10):
def draw_rois_with_refinements(image, rois, refined_rois, class_ids, class_names, limit=0,
ids = None, random = False, size = 16):
"""
rois: [n, 4 : {y1, x1, y2, x2}] list of anchors in image coordinates.
refined_rois: [n, 4 : {y1, x1, y2, x2}] the same anchors but refined to fit objects better.
"""
masked_image = image.copy()
# Pick random anchors in case there are too many.
print(' rois.shape[0]: ',rois.shape[0], ' limit = ', limit)
if limit == 0 :
limit = max(rois.shape[0], limit)
print(' limit : ', limit)
if ids is None:
ids = np.arange(limit, dtype=np.int32)
if random:
ids = np.random.choice(ids, limit, replace=False) if ids.shape[0] > limit else ids
print(' ids : ', ids)
fig, ax = plt.subplots(1,1, figsize=(size, size))
if rois.shape[0] > limit:
plt.title("Showing {} random ROIs out of {}".format(
len(ids), rois.shape[0]))
else:
plt.title("{} ROIs".format(len(ids)))
# Show area outside image boundaries.
ax.set_ylim(image.shape[0] + 20, -20)
ax.set_xlim(-50, image.shape[1] + 20)
# ax.axis('off')
for i, id in enumerate(ids):
# print('i: ', i, 'id :', id)
color = np.random.rand(3)
class_id = class_ids[id]
# ROI
y1, x1, y2, x2 = rois[id]
p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
edgecolor=color if class_id else "gray",
facecolor='none', linestyle="dashed")
ax.add_patch(p)
# Refined ROI
if class_id:
ry1, rx1, ry2, rx2 = refined_rois[id]
p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1, linewidth=2,
edgecolor=color, facecolor='none')
ax.add_patch(p)
# Connect the top-left corners of the anchor and proposal for easy visualization
ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color))
# Label
label = class_names[class_id]
ax.text(rx1, ry1 + 8, "{}".format(label),
color='w', size=11, backgroundcolor="none")
# Mask
# m = utils.unmold_mask(mask[id], rois[id]
# [:4].astype(np.int32), image.shape)
# masked_image = apply_mask(masked_image, m, color)
ax.imshow(masked_image)
# Print stats
print("Positive ROIs: ", class_ids[class_ids > 0].shape[0])
print("Negative ROIs: ", class_ids[class_ids == 0].shape[0])
print("Positive Ratio: {:.2f}".format(
class_ids[class_ids > 0].shape[0] / class_ids.shape[0]))
##----------------------------------------------------------------------
## draw rois proposals (w/o refinements)
##----------------------------------------------------------------------
def draw_rois(image, rois, class_ids, class_names, bbox_ids = None , limit=0, random = False, display_bg = False):
"""
anchors: [n, (y1, x1, y2, x2)] list of anchors in image coordinates.
proposals: [n, 4] the same anchors but refined to fit objects better.
bbox_ids : list of bbox ids that will be displayed. If not specified will use limit
"""
masked_image = image.copy()
# Pick random anchors in case there are too many.
print(' rois.shape[0]: ',rois.shape[0], ' limit = ', limit)
if bbox_ids:
pass
else:
bbox_ids = np.arange(rois.shape[0])
print(' num of bbox_ids : ', len(bbox_ids))
print(' limit to display : ', limit)
if limit == 0 :
limit = len(bbox_ids)
else:
limit = min(len(bbox_ids), limit)
print(' limit to display : ', limit)
# bbox_ids = np.arange(limit, dtype=np.int32)
if random:
        bbox_ids = np.random.choice(bbox_ids, limit, replace=False) if len(bbox_ids) > limit else bbox_ids
# print(' bbox_ids : ', bbox_ids)
fig, ax = plt.subplots(1, figsize=(18, 18))
if rois.shape[0] > limit:
plt.title("Showing {} random ROIs out of {}".format(
len(bbox_ids), rois.shape[0]))
else:
plt.title("{} ROIs".format(len(bbox_ids)))
# Show area outside image boundaries.
ax.set_ylim(image.shape[0] + 20, -20)
ax.set_xlim(-50, image.shape[1] + 20)
# ax.axis('off')
for i, id in enumerate(bbox_ids):
if i == limit:
break
color = np.random.rand(3)
class_id = class_ids[id]
# ROI
if not display_bg and not class_id:
# Skip this instance. Has no class id
# print('index: ', i, 'box_id :', id, 'class_id: ', class_id,' Skipping box ',i)
continue
# print('index: ', i, 'box_id :', id, 'class_id: ', class_id)
y1, x1, y2, x2 = rois[id]
p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
edgecolor=color if class_id else "gray",
facecolor='none', linestyle="dashed")
ax.add_patch(p)
# Refined ROI
# if not class_id:
# Skip this instance. Has no class id
# print(' Skipping box ',i)
# continue
# ry1, rx1, ry2, rx2 = refined_rois[id]
# p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1, linewidth=2,
# edgecolor=color, facecolor='none')
# ax.add_patch(p)
# Connect the top-left corners of the anchor and proposal for easy visualization
# ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color))
# Label
label = class_names[class_id]
# ax.text(rx1, ry1 + 8, "{}".format(label),
# color='w', size=11, backgroundcolor="none")
ax.text(x1, y1 + 8, "{}".format(label),
color='w', size=11, backgroundcolor="none")
ax.imshow(masked_image)
# Print stats
print("Positive ROIs: ", class_ids[class_ids > 0].shape[0])
print("Negative ROIs: ", class_ids[class_ids == 0].shape[0])
print("Positive Ratio: {:.2f}".format(
class_ids[class_ids > 0].shape[0] / class_ids.shape[0]))
##----------------------------------------------------------------------
## draw_box
##----------------------------------------------------------------------
# TODO: Replace with matplotlib equivalent?
def draw_box(image, box, color):
"""Draw 3-pixel width bounding boxes on the given image array.
color: list of 3 int values for RGB.
"""
y1, x1, y2, x2 = box
image[y1:y1 + 2, x1:x2] = color
image[y2:y2 + 2, x1:x2] = color
image[y1:y2, x1:x1 + 2] = color
image[y1:y2, x2:x2 + 2] = color
return image
##----------------------------------------------------------------------
## display_top_masks
##----------------------------------------------------------------------
def display_top_masks(image, mask, class_ids, class_names, limit=4):
"""Display the given image and the top few class masks."""
to_display = []
titles = []
to_display.append(image)
titles.append("H x W={}x{}".format(image.shape[0], image.shape[1]))
# Pick top prominent classes in this image
unique_class_ids = np.unique(class_ids)
mask_area = [np.sum(mask[:, :, np.where(class_ids == i)[0]])
for i in unique_class_ids]
top_ids = [v[0] for v in sorted(zip(unique_class_ids, mask_area),
key=lambda r: r[1], reverse=True) if v[1] > 0]
# Generate images and titles
for i in range(limit):
class_id = top_ids[i] if i < len(top_ids) else -1
# Pull masks of instances belonging to the same class.
        m = mask[:, :, np.where(class_ids == class_id)[0]]
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import pytest
import numpy as np
import logging
from scipy.sparse import csr_matrix, eye
from interpret_community.common.explanation_utils import _convert_to_list, _generate_augmented_data, \
_get_raw_feature_importances, _is_one_to_many, _sort_values, _sort_feature_list_single, \
_sort_feature_list_multiclass, _two_dimensional_slice, _get_feature_map_from_list_of_indexes, \
_is_identity
from raw_explain.utils import _get_feature_map_from_indices_list
from interpret_community.common.constants import Scipy
from constants import owner_email_tools_and_ux
test_logger = logging.getLogger(__name__)
@pytest.mark.owner(email=owner_email_tools_and_ux)
@pytest.mark.usefixtures('clean_dir')
class TestExplanationUtils(object):
def test_working(self):
assert True
def test_convert_to_list_1d(self):
numpy_1d = np.ones(4)
list_1d = [1] * 4
assert _convert_to_list(numpy_1d) == list_1d
def test_convert_to_list_2d_full_numpy(self):
numpy_2d = np.ones((3, 4))
list_2d = [[1] * 4] * 3
assert _convert_to_list(numpy_2d) == list_2d
def test_convert_to_list_2d_list_of_numpy(self):
numpy_2d = np.ones(4)
numpy_list = [numpy_2d] * 3
list_2d = [[1] * 4] * 3
assert _convert_to_list(numpy_list) == list_2d
def test_sort_values(self):
feature_list = ['feature0', 'feature1', 'feature2', 'feature3']
order = [2, 3, 0, 1]
assert np.array_equal(_sort_values(feature_list, order),
np.array(['feature2', 'feature3', 'feature0', 'feature1']))
def test_sort_feature_list_single(self):
feature_list = ['feature0', 'feature1', 'feature2', 'feature3']
order = [2, 3, 0, 1]
assert _sort_feature_list_single(feature_list, order) == ['feature2', 'feature3', 'feature0', 'feature1']
def test_sort_feature_list_multiclass(self):
feature_list = ['feature0', 'feature1', 'feature2', 'feature3']
order = [
[2, 3, 0, 1],
[1, 3, 2, 0]
]
output = [
['feature2', 'feature3', 'feature0', 'feature1'],
['feature1', 'feature3', 'feature2', 'feature0']
]
assert _sort_feature_list_multiclass(feature_list, order) == output
def test_two_dimensional_slice(self):
big_list = [
['feature2', 'feature3', 'feature0', 'feature1'],
['feature1', 'feature3', 'feature2', 'feature0']
]
output = [
['feature2', 'feature3'],
['feature1', 'feature3']
]
assert _two_dimensional_slice(big_list, 2) == output
def test_generate_augmented_data_ndarray(self):
x = np.ones((3, 6))
x_augmented = _generate_augmented_data(x)
assert x_augmented.shape[0] == 6 and x_augmented.shape[1] == 6
def test_generate_augmented_data_sparse(self):
x = csr_matrix(np.zeros((3, 6)))
x_augmented = _generate_augmented_data(x)
assert x_augmented.shape[0] == 6 and x_augmented.shape[1] == 6
def test_get_raw_feats_regression(self):
feat_imps = np.ones((2, 5))
feat_imps[1] = 2 * np.ones(5)
raw_feat_indices = [[0, 1, 2], [3, 4]]
feature_map = _get_feature_map_from_indices_list(raw_feat_indices, 2, 5)
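        # Raw feature 0 groups engineered columns {0, 1, 2} and raw feature 1 groups
        # {3, 4}, so all-ones importances map to [3, 2] and all-twos to [6, 4].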
raw_imps = _get_raw_feature_importances(feat_imps, [feature_map])
assert np.all(raw_imps == [[3, 2], [6, 4]])
raw_imps = _get_raw_feature_importances(feat_imps, [csr_matrix(feature_map)])
assert np.all(raw_imps == [[3, 2], [6, 4]])
def test_get_raw_feats_classification(self):
feat_imps = np.ones((2, 3, 5))
feat_imps[1] = 2 * np.ones((3, 5))
raw_feat_indices = [[0, 1, 2], [3, 4]]
feature_map = _get_feature_map_from_indices_list(raw_feat_indices, num_raw_cols=2, num_generated_cols=5)
raw_imps = _get_raw_feature_importances(feat_imps, [feature_map])
raw_feat_imps_truth = \
[
[
[3, 2],
[3, 2],
[3, 2]
],
[
[6, 4],
[6, 4],
[6, 4]
],
]
assert np.all(raw_imps == raw_feat_imps_truth)
def test_get_raw_feats_regression_many_to_many(self):
feat_imps = np.ones((2, 5))
feat_imps[1] = 2 * np.ones(5)
raw_feat_indices = [[0, 1, 2], [3, 4, 1]]
feature_map = _get_feature_map_from_indices_list(raw_feat_indices, 2, 5)
feature_map[0, 1] = 0.5
feature_map[1, 1] = 0.5
raw_imps = _get_raw_feature_importances(feat_imps, [feature_map])
assert np.all(raw_imps == [[2.5, 2.5], [5, 5]])
raw_imps = _get_raw_feature_importances(feat_imps, [csr_matrix(feature_map)])
assert np.all(raw_imps == [[2.5, 2.5], [5, 5]])
def test_get_raw_feats_classification_many_to_many(self):
feat_imps = np.ones((2, 3, 5))
feat_imps[1] = 2 * np.ones((3, 5))
raw_feat_indices = [[0, 1, 2], [3, 4, 1]]
feature_map = _get_feature_map_from_indices_list(raw_feat_indices, num_raw_cols=2, num_generated_cols=5)
feature_map[0, 1] = 0.5
feature_map[1, 1] = 0.5
raw_imps = _get_raw_feature_importances(feat_imps, [feature_map])
raw_feat_imps_truth = \
[
[
[2.5, 2.5],
[2.5, 2.5],
[2.5, 2.5]
],
[
[5, 5],
[5, 5],
[5, 5]
],
]
assert np.all(raw_imps == raw_feat_imps_truth)
# check for sparse feature map
raw_imps = _get_raw_feature_importances(feat_imps, [csr_matrix(feature_map)])
assert np.all(raw_imps == raw_feat_imps_truth)
# check for un-normalized many to many weights
feature_map[0, 1] = 1
feature_map[1, 1] = 1
raw_imps = _get_raw_feature_importances(feat_imps, [feature_map])
        assert np.all(raw_imps == raw_feat_imps_truth)
from cho_util.math.common import *
import numpy as np
def from_matrix(x, out=None):
x = np.asarray(x)
if out is None:
out = np.empty(shape=np.shape(x)[:-2] + (4,))
m00, m01, m02 = [x[..., 0, i] for i in range(3)]
m10, m11, m12 = [x[..., 1, i] for i in range(3)]
m20, m21, m22 = [x[..., 2, i] for i in range(3)]
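    # Standard axis-angle extraction from a rotation matrix: the unnormalized axis
    # is (m21 - m12, m02 - m20, m10 - m01) and the angle satisfies
    # trace(R) = 1 + 2*cos(angle); both are computed below with a clipped arccos.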
np.subtract(m21, m12, out=out[..., 0])
np.subtract(m02, m20, out=out[..., 1])
np.subtract(m10, m01, out=out[..., 2])
out[..., :3] = uvec(out[..., :3])
out[..., 3] = np.arccos(np.clip((m00 + m11 + m22 - 1)*0.5, -1.0, 1.0))
return out
def from_quaternion(x, out=None):
x = np.asarray(x)
if out is None:
out = np.empty(shape=np.shape(x)[:-1] + (4,))
qw = x[..., 3:]
out[..., :3] = uvec(x[..., :3])
out[..., 3:] = 2 * np.arccos(np.clip(qw, -1.0, 1.0))
return out
def from_euler(x, out=None):
x = np.asarray(x)
if out is None:
out = np.empty(shape=np.shape(x)[:-1] + (4,))
x, y, z = [x[..., i] for i in range(3)]
x0 = np.cos(x)
    x6 = np.sin(x)
#!usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from operator import itemgetter
from collections import Counter
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction import DictVectorizer
from keras.utils import np_utils
import numpy as np
from model import build_model
def index_characters(tokens, focus_repr='recurrent', v2u=False):
if v2u:
vocab = {ch for tok in tokens for ch in tok.lower().replace('v', 'u')}
else:
vocab = {ch for tok in tokens for ch in tok.lower()}
if focus_repr == 'recurrent':
vocab = vocab.union({'$', '|', '%'})
char_vocab = tuple(sorted(vocab))
char_vector_dict, char_idx = {}, {}
filler = np.zeros(len(char_vocab), dtype='float32')
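    # One-hot encode each character: char_vector_dict maps char -> one-hot vector,
    # char_idx maps the position index back to the character.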
for idx, char in enumerate(char_vocab):
ph = filler.copy()
ph[idx] = 1
char_vector_dict[char] = ph
char_idx[idx] = char
return char_vector_dict, char_idx
def vectorize_tokens(tokens, char_vector_dict, focus_repr,
max_len=15, v2u=False):
X = []
for token in tokens:
token = token.lower()
if v2u:
            token = token.replace('v', 'u')
x = vectorize_token(seq=token,
char_vector_dict=char_vector_dict,
max_len=max_len,
focus_repr=focus_repr)
X.append(x)
return np.asarray(X, dtype='float32')
def vectorize_lemmas(lemmas, char_vector_dict,
max_len=15):
X = []
for lemma in lemmas:
lemma = lemma.lower()
x = vectorize_lemma(seq=lemma,
char_vector_dict=char_vector_dict,
max_len=max_len)
X.append(x)
X = np.asarray(X, dtype='float32')
return X
def vectorize_token(seq, char_vector_dict, max_len, focus_repr):
if focus_repr == 'recurrent':
# cut, if needed:
seq = seq[:(max_len - 2)]
seq = '%' + seq + '|'
seq = seq[::-1] # reverse order (cf. paper)!
elif focus_repr == 'convolutions':
seq = seq[:max_len]
filler = np.zeros(len(char_vector_dict), dtype='float32')
seq_X = []
for char in seq:
try:
seq_X.append(char_vector_dict[char])
except KeyError:
seq_X.append(filler)
while len(seq_X) < max_len:
seq_X.append(filler)
    return np.array(seq_X, dtype='float32')
#!/usr/bin/python
'''Module for computing the pure rotational Raman spectra from H2, HD and D2'''
import math
import numpy as np
import matplotlib.pyplot as plt
import boltzmann_popln as bp
# Constants ------------------------------
K = np.float64(1.38064852e-23) # J/K
H = np.float64(6.626070040e-34) # J.s
C = np.float64(2.99792458e+10) # cm/s
# ----------------------------------------
# Laser properties------------------------
omega = 18789.9850 # laser freq in absolute cm-1
# ----------------------------------------
omega_sc = omega/1e4 # scaled frequency (for better numerical accuracy)
# ----------------------------------------
# Load data on the energy levels and the polarizability anisotropy
# Data on the rovibrational energy levels has been extracted from the
# calculated dissociation
# energy data published in the following works :
# a) <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# and <NAME>, J. Chem. Theory and Comput. 7, 3105 (2011).
#
# b) <NAME> and <NAME>, Phys. Chem. Chem. Phys. 12, 9188 (2010).
eJH2v0 = np.loadtxt("./energy_levels_and_gamma/H2eV0.dat")
eJH2v1 = np.loadtxt("./energy_levels_and_gamma/H2eV1.dat")
eJHDv0 = np.loadtxt("./energy_levels_and_gamma/HDeV0.dat")
eJHDv1 = np.loadtxt("./energy_levels_and_gamma/HDeV1.dat")
eJD2v0 = np.loadtxt("./energy_levels_and_gamma/D2eV0.dat")
import numpy as np
from ilc_models.base import ILCBase, g2
class Quad2DLin(ILCBase):
"""
Same as Quad2D but no sines and cosines
"""
n_state = 6
n_control = 2
n_out = 2
control_normalization = np.array((1e-1, 1e-3))
g_vec = g2
def get_ABCD(self, state, control, dt):
X = slice(0, 2)
V = slice(2, 4)
TH = slice(4, 5)
OM = slice(5, 6)
U = slice(0, 1)
AA = slice(1, 2)
theta = state[TH][0]
u = control[U][0]
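        # First-order (Euler) discretization of the linearized planar quadrotor:
        # position integrates velocity, horizontal acceleration ~ -u * theta
        # (small-angle), vertical acceleration ~ u, and pitch integrates the
        # commanded angular acceleration through the rate state.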
A = np.zeros((self.n_state, self.n_state))
B = np.zeros((self.n_state, self.n_control))
C = np.zeros((self.n_out, self.n_state))
D = np.zeros((self.n_out, self.n_control))
A[X, X] = np.eye(2)
A[V, V] = np.eye(2)
A[X, V] = dt * np.eye(2)
A[V, TH] = u * dt * np.array(((-1, 0),)).T
A[TH, TH] = A[OM, OM] = 1
A[TH, OM] = dt
B[V, U] = dt * np.array(((-theta, 1),)).T
B[OM, AA] = dt
C[X, X] = np.eye(2)
return A, B, C, D
def simulate(self, t_end, fun, dt):
        pos = np.zeros(2)
"""Class to generate realistic input parameters for atmospheric PSF sims."""
import numpy as np
import pickle
import pandas as pd
import pathlib
from psfws import utils
class ParameterGenerator():
"""Class to generate realistic input parameters for atmospheric PSF sims.
This class uses as main input global circulation model weather forecasting
outputs, from either the NOAA Global Forecasting System (GFS) analysis or
the European Center for Midrange Weather Forecasting (ECMWF) reanalysis
dataset ERA5.
Optionally, local wind measurements from the site of interest may be used
to improve the accuracy of the outputs. The package contains these data
for Cerro Pachon, and all defaults are set up to match this location.
Use of the code to generate parameters for Cerro Pachon (and nearby Cerro
Telolo, nearby) is straightforward, but for use at other observatories,
users must supply input data: instructions for downloading and formatting
forecasting data/telemetry are in the README.
Attributes
----------
data_fa : pandas dataframe
Above ground forecasting data, with DateTimes as index and columns 'u',
'v', 'speed', 'dir', 't', and 'p'. Each entry is a ndarray of values
for each altitude, with speed/velocity components in m/s, directions in
degrees, temperatures in Kelvin, and pressures in mbar. The u/v
components of velocity correspond to north/south winds, respectively,
and the wind direction is given as degrees west of north.
        To select data in the free atmosphere use the fa_start attribute, for
example: data_fa.at[pt,'v'][fa_start:]
data_gl : pandas dataframe
Ground layer data, with DateTimes as index and columns 'speed','dir',
'u', 'v', 't', and optionally 'j_gl' (see rho_jv below). These values
are temporally matched to data_fa, so have identical indicies.
The data are either telemetry, if a data file was given, or forecast
data interpolated to ground altitude.
h : ndarray
        Altitudes of free atmosphere forecasting data, in km.
h0 : float
Altitude of observatory, in km.
fa_start : int
Index of h corresponding to the start of the free atmosphere ~1km above
ground, to use when selecting for free atmosphere parameters.
j_pdf : dict
Dictionary containing parameters for lognormal PDFs of turbulence
integrals for both the ground layer and the free atmosphere values.
        Keys are 'gl' and 'fa' respectively.
rho_jv : float or None (default)
Correlation coefficient between the ground layer wind speed and ground
turbulence integral. If None, no correlation is included.
N : int
Number of matched datasets.
Methods
-------
get_raw_measurements(pt)
Get a matched set of measurements from datapoint with index pt.
get_param_interpolation(pt, h_out, s=None)
Get set of parameters from datapoint with index pt, interpolated
to new altitudes h_out. Smoothness of cubic interpolation can be
specified with keyword s: None for scipy optimized value, 0 for no
smoothing.
get_fa_cn2(pt)
Get free atmosphere Cn2 profile for requested datapoint.
get_turbulence_integral(pt, layers='auto')
Get set of turbulence integrals associated for requested datapoint pt.
Centers of integration regions set by layers keyword; either array of
values, or 'auto' for them to be automatically calculated based on wind
and turbulence maximums.
get_cn2_all()
Get free atmosphere Cn2 profiles for entire dataset, returned as array.
draw_parameters(layers='auto')
Randomly draw a set of parameters: wind speed, wind direction,
turbulence integrals. These are returned in a dict along with layer
heights.
Output altitudes are set by layers keyword; either array of values, or
'auto' for them to be automatically calculated based on wind and
turbulence maximums.
Notes
-----
Code is written to output quantities formatted to match desired inputs for
    GalSim atmospheric PSF simulations.
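
    Examples
    --------
    Illustrative sketch; assumes the packaged Cerro Pachon data files are
    available and uses only the methods documented above.

    >>> pg = ParameterGenerator(seed=42)
    >>> params = pg.draw_parameters(layers='auto')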
"""
def __init__(self, location='cerro-pachon', seed=None,
date_range=['2019-05-01', '2019-10-31'],
forecast_file='data/ecmwf_-30.25_-70.75_20190501_20191031.p',
telemetry_file='data/tel_dict_CP_20190501-20191101.pkl',
rho_j_wind=None):
"""Initialize generator and process input data.
Parameters
----------
location : str
The name of desired mountaintop (default is 'cerro-pachon').
Valid options: 'cerro-paranal', 'cerro-pachon', 'cerro-telolo',
'mauna-kea', and 'la-palma'.
To customize to another observatory, input instead a dict with keys
'altitude' (value in km) and 'turbulence_params', itself a nested
dict of lognormal PDF parameters 's' (sigma) and 'scale' (exp(mu))
for ground layer and free atmosphere, e.g. {'gl':{'s':, 'scale':}}.
seed : int
Seed to initialize random number generator (default is None)
telemetry_file : str or None
Path to file of telemetry data (default is
'data/tel_dict_CP_20190501-20191101.pkl'). If None, forecast data
will be used for ground layer information.
forecast_file : str
Path to file of weather forecast data (default is
            'data/ecmwf_-30.25_-70.75_20190501_20191031.p').
date_range : list
List of two strings representing dates, e.g. '2019-05-01'.
Data date range to use. Allows user to select subset of telemetry
(default: ['2019-05-01', '2019-10-31'])
rho_j_wind : float (default is None)
Desired correlation coefficient between ground wind speed and
turbulence integral. If None, no correlation is included. If a
float value is specified, the joint PDF of wind values and ground
turbulence is generated and the turbulence values are stored in
data_gl as the 'j_gl' column.
"""
# set up the paths to data files, and check they exist.
psfws_base = pathlib.Path(__file__).parents[0].absolute()
self._paths = \
{'forecast_data': pathlib.Path.joinpath(psfws_base, forecast_file),
'p_and_h': pathlib.Path.joinpath(psfws_base, 'data/p_and_h.p')}
if telemetry_file is not None:
self._paths['telemetry'] = pathlib.Path.joinpath(psfws_base,
telemetry_file)
for file_path in self._paths.values():
if not file_path.is_file():
print(f'code running from: {psfws_base}')
raise FileNotFoundError(f'file {file_path} not found!')
# set up random number generator with seed, if given
self._rng = np.random.default_rng(seed)
# set ground + telescope height, turbulence pdf (location specific)
self.h0, self.j_pdf = utils.initialize_location(location)
# TO DO: put this rho in the location specific utils?
self.rho_jv = rho_j_wind
# load and match forecast/telemetry data
        self._load_data(dr=date_range)
# if using correlation between wind speed and ground turbulence,
# draw values in advance and perform correlation of marginals
if self.rho_jv is not None:
# draw JGL values
j_gl = self.j_pdf['gl'].rvs(size=self.N, random_state=self._rng)
# correlate and store modified dataframe
self.data_gl = utils.correlate_marginals(self.data_gl,
j_gl,
self.rho_jv,
self._rng)
def _load_data(self, use_telemetry=True, dr=['2019-05-01', '2019-10-31']):
"""Load data from forecast, telemetry files, match, and store."""
forecast = pickle.load(open(self._paths['forecast_data'], 'rb'))
forecast = utils.process_forecast(forecast)
try:# first, find forecast dates within the date range desired
forecast_dates = forecast[dr[0]:dr[1]].index
except KeyError:
print("Requested dates are not within range of available data!")
# load heights and pressures
p_and_h = pickle.load(open(self._paths['p_and_h'], 'rb'))
self.src = 'ecmwf' if len(forecast['u'].iat[0]) > 50 else 'noaa'
# reverse to match forecast data order, convert to m
h = np.array([h/1000 for h in p_and_h[self.src]['h'][::-1]])
# find lower gl cutoff:
where_h0 = np.where(h > self.h0)[0][0]
# free atm ends at high altitude
        where_end = np.where(h > self.h0 + 23)[0][0]
import numpy as np
import pandas as pd
import pytest
import tensorflow as tf
from probflow.distributions import Bernoulli, Normal
from probflow.utils import metrics
def is_close(a, b, tol=1e-5):
return np.abs(a - b) < tol
def test_as_numpy():
"""Tests probflow.utils.metrics.as_numpy"""
@metrics.as_numpy
def func(a, b):
assert isinstance(a, np.ndarray)
assert isinstance(b, np.ndarray)
assert a.ndim == b.ndim
assert all(a.shape[d] == b.shape[d] for d in range(a.ndim))
# Input data types to test
N1 = np.random.randn(5)
N2 = np.random.randn(5, 1)
D = pd.DataFrame(np.random.randn(5, 1))
S = pd.Series(np.random.randn(5))
T = tf.random.normal([5])
T2 = tf.random.normal([5, 1])
# Test different combinations
func(N1, N2)
func(N1, D)
func(N1, S)
func(N1, T)
func(N1, T2)
func(N2, N1)
func(N2, D)
func(N2, S)
func(N2, T)
func(N2, T2)
func(D, N1)
func(D, N2)
func(D, S)
func(D, T)
func(D, T2)
func(S, N1)
func(S, N2)
func(S, D)
func(S, T)
func(S, T2)
func(T, N1)
func(T, N2)
func(T, D)
func(T, S)
func(T, T2)
func(T2, N1)
func(T2, N2)
func(T2, D)
func(T2, S)
func(T2, T)
def test_accuracy():
"""Tests probflow.utils.metrics.accuracy"""
# Predictive dist
probs = tf.constant([1, 1, 1, 1, 1, 1], dtype=tf.float32)
pred_dist = Bernoulli(probs=probs)
y_true = np.array([1, 0, 1, 1, 0, 1]).astype("float32")
# Compare metric
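    # Four of the six labels match the all-ones prediction, so accuracy = 4/6 = 2/3.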
assert is_close(metrics.accuracy(y_true, pred_dist.mean()), 2.0 / 3.0)
def test_mean_squared_error():
"""Tests probflow.utils.metrics.mean_squared_error"""
# Predictive dist
preds = tf.constant([0, 1, 2, 0, 0, 0], dtype=tf.float32)
pred_dist = Normal(preds, 1)
y_true = np.array([0, 0, 0, 0, 1, 2]).astype("float32")
# Compare metric
assert is_close(
metrics.mean_squared_error(y_true, pred_dist.mean()), 10.0 / 6.0
)
def test_sum_squared_error():
"""Tests probflow.utils.metrics.sum_squared_error"""
# Predictive dist
preds = tf.constant([0, 1, 2, 0, 0, 0], dtype=tf.float32)
pred_dist = Normal(preds, 1)
y_true = np.array([0, 0, 0, 0, 1, 2]).astype("float32")
# Compare metric
assert is_close(metrics.sum_squared_error(y_true, pred_dist.mean()), 10.0)
def test_mean_absolute_error():
"""Tests probflow.utils.metrics.mean_absolute_error"""
# Predictive dist
preds = tf.constant([0, 1, 2, 0, 0, 0], dtype=tf.float32)
pred_dist = Normal(preds, 1)
y_true = np.array([0, 0, 0, 0, 1, 2]).astype("float32")
# Compare metric
assert is_close(metrics.mean_absolute_error(y_true, pred_dist.mean()), 1.0)
def test_r_squared():
"""Tests probflow.utils.metrics.r_squared"""
# Predictive dist
preds = tf.constant([0, 1, 2, 2, 2], dtype=tf.float32)
pred_dist = Normal(preds, 1)
y_true = np.array([0, 1, 2, 3, 4]).astype("float32")
# Compare metric
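    # mean(y_true) = 2, SS_tot = 4+1+0+1+4 = 10, SS_res = 0+0+0+1+4 = 5,
    # so R^2 = 1 - 5/10 = 0.5.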
assert is_close(metrics.r_squared(y_true, pred_dist.mean()), 0.5)
def test_true_positive_rate():
"""Tests probflow.utils.metrics.true_positive_rate"""
# Predictive dist
probs = tf.constant([1, 1, 1, 1, 1, 0], dtype=tf.float32)
pred_dist = Bernoulli(probs=probs)
y_true = np.array([1, 0, 1, 1, 0, 1]).astype("float32")
# Compare metric
assert is_close(metrics.true_positive_rate(y_true, pred_dist.mean()), 0.75)
def test_true_negative_rate():
"""Tests probflow.utils.metrics.true_negative_rate"""
# Predictive dist
probs = tf.constant([1, 1, 1, 1, 1, 0], dtype=tf.float32)
pred_dist = Bernoulli(probs=probs)
    y_true = np.array([1, 0, 1, 1, 0, 0]).astype("float32")
from __future__ import division, print_function
import numpy as np
import operator
# from highway_env import utils
from rl_agents.agents import utils
from rl_agents.agents.abstract import AbstractAgent
class TTCVIAgent(AbstractAgent):
"""
Implementation of Value Iteration over a Time-To-Collision (TTC) representation of the state.
The state reward is defined from a occupancy grid over different TTCs and lanes. The grid cells encode the
probability that the ego-vehicle will collide with another vehicle if it is located on a given lane in a given
duration, under the hypothesis that every vehicles observed will maintain a constant velocity (including the
ego-vehicle) and not change lane (excluding the ego-vehicle).
For instance, in a three-lane road with a vehicle on the left lane with collision predicted in 5s the grid will
be:
[0, 0, 0, 0, 1, 0, 0,
0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0]
The TTC-state is a coordinate (lane, time) within this grid.
If the ego-vehicle has the ability to change its velocity, an additional layer is added to the occupancy grid
to iterate over the different velocity choices available.
"""
def __init__(self, env, config=None):
"""
New instance of TTCVI agent.
"""
super(TTCVIAgent, self).__init__(config)
self.env = env
self.state = None
self.grids = self.value = None
self.V = self.L = self.T = None
        self.action_reward = self.state_reward = None
self.lane = self.speed = None
@classmethod
def default_config(cls):
return dict(gamma=1.0, # The discount factor used for planning, in [0, 1].
time_quantization=1.0, # The time quantization used in the state representation [s]
horizon=10.0) # The time horizon used in the state representation [s]
def plan(self, state):
"""
Perform a value iteration and return the sequence of optimal actions.
Compute the TTC-grid to build the associated reward function, and run the value iteration.
:param state: an observation of the state
:return: a list of optimal actions
"""
# Use the true environment state, not the observation
state = self.env.unwrapped
if not hasattr(state, "vehicle"):
raise EnvironmentError("This agent is only able to interact with the highway-env environment")
# Initialize variables if needed
if self.grids is None:
self.grids = np.zeros((state.vehicle.SPEED_COUNT,
len(state.vehicle.road.lanes),
int(self.config["horizon"] / self.config["time_quantization"])))
self.V, self.L, self.T = np.shape(self.grids)
self.value = np.zeros(np.shape(self.grids))
# Update state and reward
self.state = state
self.update_ttc_state()
self.action_reward = {0: state.LANE_CHANGE_REWARD,
1: 0,
2: state.LANE_CHANGE_REWARD,
3: 0,
4: 0}
lanes = np.arange(self.L)/(self.L - 1)
vels = np.arange(self.V)/(self.V - 1)
self.state_reward = \
+ state.COLLISION_REWARD * self.grids \
+ state.RIGHT_LANE_REWARD * np.tile(lanes[np.newaxis, :, np.newaxis], (self.V, 1, self.T)) \
+ state.HIGH_VELOCITY_REWARD * np.tile(vels[:, np.newaxis, np.newaxis], (1, self.L, self.T))
# Run value iteration
self.value.fill(0)
self.value_iteration()
# Return chosen trajectory
path, actions = self.pick_trajectory()
return actions
def reset(self):
pass
def seed(self, seed=None):
return None
def act(self, state):
return self.plan(state)[0]
def record(self, state, action, reward, next_state, done):
pass
def update_ttc_state(self):
"""
Extract the TTC-grid and TTC-state (velocity, lane, time=0) from the current MDP state.
"""
self.fill_ttc_grid()
self.lane = self.state.vehicle.lane_index
self.speed = self.state.vehicle.speed_index()
def fill_ttc_grid(self):
"""
For each ego-velocity and lane, compute the predicted time-to-collision to each vehicle within the lane and
store the results in an occupancy grid.
"""
self.grids.fill(0)
for velocity_index in range(self.grids.shape[0]):
ego_velocity = self.state.vehicle.index_to_speed(velocity_index)
for other in self.state.road.vehicles:
if (other is self.state.vehicle) or (ego_velocity == other.velocity):
continue
margin = other.LENGTH / 2 + self.state.vehicle.LENGTH / 2
collision_points = [(0, 1), (-margin, 0.5), (margin, 0.5)]
for m, cost in collision_points:
distance = self.state.vehicle.lane_distance_to(other) + m
time_to_collision = distance / utils.not_zero(ego_velocity - other.velocity)
if time_to_collision < 0:
continue
# Quantize time-to-collision to both upper and lower values
lane = other.lane_index
for time in [int(time_to_collision / self.config["time_quantization"]),
int(np.ceil(time_to_collision / self.config["time_quantization"]))]:
if 0 <= lane < np.shape(self.grids)[1] and 0 <= time < np.shape(self.grids)[2]:
self.grids[velocity_index, lane, time] = max(self.grids[velocity_index, lane, time], cost)
def value_iteration(self, steps=50):
"""
Perform a value iteration over the TTC-state and reward.
:param steps: number of backup operations.
"""
for _ in range(steps):
self.backup()
def backup(self):
"""
Apply the Bellman optimal operator to the estimated value function.
"""
new_value = np.zeros(np.shape(self.value))
for h in range(self.V):
for i in range(self.L):
for j in range(self.T):
q_values = self.get_q_values(h, i, j)
if q_values:
new_value[h, i, j] = self.config["gamma"] * np.max(list(q_values.values()))
else:
new_value[h, i, j] = self.state_reward[h, i, j]
self.value = new_value
def clip_position(self, h, i, j):
"""
Clip a position in the TTC grid, so that it stays within bounds.
:param h: velocity index
:param i: lane index
:param j: time index
:return: the clipped position
"""
o = min(max(h, 0), np.shape(self.value)[0] - 1)
p = min(max(i, 0), np.shape(self.value)[1] - 1)
        q = min(max(j, 0), np.shape(self.value)[2] - 1)
"""
After spending several days customizing and optimizing, I finally implemented the original version and
three variant versions of the PathFinder Algorithm.
class: BasePFA_old is the very first attempt to implement PathFinder. It worked, but slowly; I keep it at the end
of this file for readers who want to see how the better version was developed.
class: BasePFA is the final version of the original PFA
class: OPFA is an enhanced version of PFA based on Opposition-based Learning
class: LPFA is an enhanced version of PFA based on Levy-flight trajectory
class: IPFA is an improved version of PFA based on both Opposition-based Learning and Levy-flight
        (our proposal in the paper)
Simple test with CEC14:
Let's try the C1 objective function
BasePFA: after 12 loops, it reaches value 100.0
OPFA: after 10 loops, it reaches value 100.0 (a little improvement)
LPFA: after 4 loops, it reaches value 100.0 (a huge improvement)
IPFA: after 2 loops, it reaches value 100.0 (best improvement)
"""
import numpy as np
from copy import deepcopy
from math import gamma, pi
from models.multiple_solution.root_multiple import RootAlgo
class BasePFA(RootAlgo):
"""
A new meta-heuristic optimizer: Pathfinder algorithm
"""
ID_POS = 0
ID_FIT = 1
def __init__(self, root_algo_paras=None, pfa_paras = None):
RootAlgo.__init__(self, root_algo_paras)
self.epoch = pfa_paras["epoch"]
self.pop_size = pfa_paras["pop_size"]
def _train__(self):
# Init pop and calculate fitness
pop = [self._create_solution__(minmax=0) for _ in range(self.pop_size)]
# Find the pathfinder
pop = sorted(pop, key=lambda temp: temp[self.ID_FIT])
g_best = deepcopy(pop[0])
gbest_present = deepcopy(g_best)
for i in range(self.epoch):
alpha, beta = np.random.uniform(1, 2, 2)
A = np.random.uniform(self.domain_range[0], self.domain_range[1]) * np.exp(-2 * (i + 1) / self.epoch)
## Update the position of pathfinder and check the bound
            # Pathfinder position update (PFA): x_p(t+1) = x_p(t) + 2*r3*(x_p(t) - x_p(t-1)) + A
            temp = gbest_present[self.ID_POS] + 2 * np.random.uniform() * (gbest_present[self.ID_POS] - g_best[self.ID_POS]) + A
# -*- coding:utf-8 -*-
# Train Sarsa in Sokoban environment
import math, os, time, sys
import pdb
import numpy as np
import random, gym
from gym.wrappers import Monitor
from agent import SarsaAgent, SarsaAgent_sokoban
import gym_sokoban
##### START CODING HERE #####
# This code block is optional. You can import other libraries or define your utility functions if necessary.
from matplotlib import pyplot as plt
##### END CODING HERE #####
# construct the environment
env = gym.make('Sokoban-hw2-v0')
# get the size of action space
num_actions = env.action_space.n
all_actions = np.arange(num_actions)
# set random seed and make the result reproducible
RANDOM_SEED = 0
env.seed(RANDOM_SEED)
random.seed(RANDOM_SEED)
np.random.seed(RANDOM_SEED)
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt
import os
import argparse
import sys
import pickle
from tqdm import tqdm
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from torch.optim.lr_scheduler import ReduceLROnPlateau
from utils import epoch
from models import MLP
parser = argparse.ArgumentParser()
parser.add_argument('data_path', metavar='DATA_PATH', help='path to datasets')
parser.add_argument('--output_dir', type=str, default='./', help='output directory. Default=Current folder')
parser.add_argument('--epochs', type=int, default=200, help='number of epochs. Default=200')
parser.add_argument('--batch_size', type=int, default=512, help='batch size. Default=512')
parser.add_argument('--eval_batch_size', type=int, default=512, help='batch size for eval mode. Default=512')
parser.add_argument('--lr', type=float, default=1e-1, help='initial learning rate. Default=1e-1')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum. Default=0.9')
parser.add_argument('--layers', type=int, default=5, help='number of hidden layers. Default=5')
parser.add_argument('--units', type=int, default=256, help='number of hidden units in each layer. Default=256')
parser.add_argument('--dropout', type=float, default=0.5, help='Dropout rate at input layer. Default=0.5')
parser.add_argument('--l1', type=float, default=0.0, help='L1 regularization. Default=0')
parser.add_argument('--clr', type=float, default=0.0, help='Cross-Lipschitz regularization. Default=0')
parser.add_argument('--no-cuda', dest='cuda', action='store_false', help='NOT use cuda')
parser.add_argument('--seed', type=int, default=0, help='random seed to use. Default=0')
parser.set_defaults(cuda=True)
def train_mlp(args, loaders, model, criterion, optimizer, scheduler, l1_factor=0.0, clr_factor=0.0, model_name='model'):
train_loader = loaders['train_loader']
valid_loader = loaders['valid_loader']
test_loader = loaders['test_loader']
if args.cuda:
model = model.cuda()
best_valid_loss = sys.float_info.max
train_losses = []
valid_losses = []
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
for i_epoch in tqdm(range(args.epochs), desc='Epochs'):
# Train
train_labels, train_preds, train_loss = epoch(train_loader, model, train=True, criterion=criterion, optimizer=optimizer, l1_factor=l1_factor, clr_factor=clr_factor)
train_losses.append(train_loss)
# Validation
valid_labels, valid_preds, valid_loss = epoch(valid_loader, model, criterion=criterion)
# Learning rate decay
if scheduler is not None:
scheduler.step(valid_loss)
valid_losses.append(valid_loss)
# remember best valid loss and save checkpoint
is_best = valid_loss < best_valid_loss
if is_best:
best_valid_loss = valid_loss
# evaluate on test set
test_labels, test_preds, test_loss = epoch(test_loader, model, criterion=criterion)
with open(args.output_dir + model_name + '_result.txt', 'w') as f:
f.write('Best Validation Epoch: {}\n'.format(i_epoch))
f.write('Best Validation Loss: {}\n'.format(best_valid_loss))
f.write('Train Loss: {}\n'.format(train_loss))
f.write('Test Loss: {}\n'.format(test_loss))
# Save entire model
torch.save(model, args.output_dir + model_name + '.pth')
# Save model params
torch.save(model.state_dict(), args.output_dir + model_name + '_params.pth')
# plot
plt.figure()
plt.plot(np.arange(len(train_losses)), np.array(train_losses), label='Training Loss')
    plt.plot(np.arange(len(valid_losses)), np.array(valid_losses), label='Validation Loss')
# -*- coding: utf-8 -*-
from __future__ import print_function
from lmfit import minimize, Parameters, Parameter, report_fit, Minimizer
from lmfit.minimizer import (SCALAR_METHODS, HAS_EMCEE,
MinimizerResult, _lnpost, _nan_policy)
from lmfit.lineshapes import gaussian
import numpy as np
from numpy import pi
from numpy.testing import (assert_, decorators, assert_raises,
assert_almost_equal, assert_equal,
assert_allclose)
import unittest
import nose
from nose import SkipTest
def check(para, real_val, sig=3):
err = abs(para.value - real_val)
print('Check Param w/ stderr: ', para.name, para.value, real_val, para.stderr)
assert(err < sig * para.stderr)
def check_wo_stderr(para, real_val, sig=0.1):
err = abs(para.value - real_val)
print('Check Param w/o stderr: ', para.name, para.value, real_val, sig)
assert(err < sig)
def check_paras(para_fit, para_real, sig=3):
for i in para_fit:
check(para_fit[i], para_real[i].value, sig=sig)
def test_simple():
# create data to be fitted
np.random.seed(1)
x = np.linspace(0, 15, 301)
data = (5. * np.sin(2 * x - 0.1) * np.exp(-x*x*0.025) +
np.random.normal(size=len(x), scale=0.2))
# define objective function: returns the array to be minimized
def fcn2min(params, x, data):
""" model decaying sine wave, subtract data"""
amp = params['amp']
shift = params['shift']
omega = params['omega']
decay = params['decay']
model = amp * np.sin(x * omega + shift) * np.exp(-x*x*decay)
return model - data
# create a set of Parameters
params = Parameters()
params.add('amp', value= 10, min=0)
params.add('decay', value= 0.1)
params.add('shift', value= 0.0, min=-pi / 2., max=pi / 2)
params.add('omega', value= 3.0)
# do fit, here with leastsq model
result = minimize(fcn2min, params, args=(x, data))
# calculate final result
final = data + result.residual
# write error report
print(" --> SIMPLE --> ")
print(result.params)
report_fit(result.params)
#assert that the real parameters are found
for para, val in zip(result.params.values(), [5, 0.025, -.1, 2]):
check(para, val)
def test_lbfgsb():
p_true = Parameters()
p_true.add('amp', value=14.0)
p_true.add('period', value=5.33)
p_true.add('shift', value=0.123)
p_true.add('decay', value=0.010)
def residual(pars, x, data=None):
amp = pars['amp']
per = pars['period']
shift = pars['shift']
decay = pars['decay']
if abs(shift) > pi/2:
shift = shift - np.sign(shift) * pi
model = amp * np.sin(shift + x / per) * np.exp(-x * x * decay * decay)
if data is None:
return model
return (model - data)
n = 2500
xmin = 0.
xmax = 250.0
noise = np.random.normal(scale=0.7215, size=n)
x = np.linspace(xmin, xmax, n)
data = residual(p_true, x) + noise
fit_params = Parameters()
fit_params.add('amp', value=11.0, min=5, max=20)
fit_params.add('period', value=5., min=1., max=7)
fit_params.add('shift', value=.10, min=0.0, max=0.2)
fit_params.add('decay', value=6.e-3, min=0, max=0.1)
init = residual(fit_params, x)
out = minimize(residual, fit_params, method='lbfgsb', args=(x,), kws={'data':data})
fit = residual(fit_params, x)
for name, par in out.params.items():
nout = "%s:%s" % (name, ' '*(20-len(name)))
print("%s: %s (%s) " % (nout, par.value, p_true[name].value))
for para, true_para in zip(out.params.values(), p_true.values()):
check_wo_stderr(para, true_para.value)
def test_derive():
def func(pars, x, data=None):
model= pars['a'] * np.exp(-pars['b'] * x) + pars['c']
if data is None:
return model
return model - data
def dfunc(pars, x, data=None):
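        # Analytic Jacobian of model = a*exp(-b*x) + c with respect to (a, b, c):
        # d/da = exp(-b*x), d/db = -a*x*exp(-b*x), d/dc = 1.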
v = np.exp(-pars['b']*x)
return np.array([v, -pars['a']*x*v, np.ones(len(x))])
def f(var, x):
return var[0]* np.exp(-var[1] * x)+var[2]
params1 = Parameters()
params1.add('a', value=10)
params1.add('b', value=10)
params1.add('c', value=10)
params2 = Parameters()
params2.add('a', value=10)
params2.add('b', value=10)
params2.add('c', value=10)
a, b, c = 2.5, 1.3, 0.8
x = np.linspace(0,4,50)
y = f([a, b, c], x)
data = y + 0.15*np.random.normal(size=len(x))
# fit without analytic derivative
min1 = Minimizer(func, params1, fcn_args=(x,), fcn_kws={'data':data})
out1 = min1.leastsq()
fit1 = func(out1.params, x)
# fit with analytic derivative
min2 = Minimizer(func, params2, fcn_args=(x,), fcn_kws={'data':data})
out2 = min2.leastsq(Dfun=dfunc, col_deriv=1)
fit2 = func(out2.params, x)
print ('''Comparison of fit to exponential decay
with and without analytic derivatives, to
model = a*exp(-b*x) + c
for a = %.2f, b = %.2f, c = %.2f
==============================================
Statistic/Parameter| Without | With |
----------------------------------------------
N Function Calls | %3i | %3i |
Chi-square | %.4f | %.4f |
a | %.4f | %.4f |
b | %.4f | %.4f |
c | %.4f | %.4f |
----------------------------------------------
''' % (a, b, c,
out1.nfev, out2.nfev,
out1.chisqr, out2.chisqr,
out1.params['a'].value, out2.params['a'].value,
out1.params['b'].value, out2.params['b'].value,
out1.params['c'].value, out2.params['c'].value ))
check_wo_stderr(out1.params['a'], out2.params['a'].value, 0.00005)
check_wo_stderr(out1.params['b'], out2.params['b'].value, 0.00005)
check_wo_stderr(out1.params['c'], out2.params['c'].value, 0.00005)
def test_peakfit():
def residual(pars, x, data=None):
g1 = gaussian(x, pars['a1'], pars['c1'], pars['w1'])
g2 = gaussian(x, pars['a2'], pars['c2'], pars['w2'])
model = g1 + g2
if data is None:
return model
return (model - data)
n = 601
xmin = 0.
xmax = 15.0
noise = np.random.normal(scale=.65, size=n)
x = np.linspace(xmin, xmax, n)
org_params = Parameters()
org_params.add_many(('a1', 12.0, True, None, None, None),
('c1', 5.3, True, None, None, None),
('w1', 1.0, True, None, None, None),
('a2', 9.1, True, None, None, None),
('c2', 8.1, True, None, None, None),
('w2', 2.5, True, None, None, None))
data = residual(org_params, x) + noise
fit_params = Parameters()
fit_params.add_many(('a1', 8.0, True, None, 14., None),
('c1', 5.0, True, None, None, None),
('w1', 0.7, True, None, None, None),
('a2', 3.1, True, None, None, None),
('c2', 8.8, True, None, None, None))
fit_params.add('w2', expr='2.5*w1')
myfit = Minimizer(residual, fit_params,
fcn_args=(x,), fcn_kws={'data': data})
myfit.prepare_fit()
init = residual(fit_params, x)
out = myfit.leastsq()
# print(' N fev = ', myfit.nfev)
# print(myfit.chisqr, myfit.redchi, myfit.nfree)
report_fit(out.params)
fit = residual(out.params, x)
check_paras(out.params, org_params)
def test_scalar_minimize_has_no_uncertainties():
# scalar_minimize doesn't calculate uncertainties.
# when a scalar_minimize is run the stderr and correl for each parameter
# should be None. (stderr and correl are set to None when a Parameter is
# initialised).
# This requires a reset after a leastsq fit has been done.
# Only when scalar_minimize calculates stderr and correl can this test
# be removed.
np.random.seed(1)
x = np.linspace(0, 15, 301)
data = (5. * np.sin(2 * x - 0.1) * np.exp(-x*x*0.025) +
np.random.normal(size=len(x), scale=0.2) )
# define objective function: returns the array to be minimized
def fcn2min(params, x, data):
""" model decaying sine wave, subtract data"""
amp = params['amp']
shift = params['shift']
omega = params['omega']
decay = params['decay']
model = amp * np.sin(x * omega + shift) * np.exp(-x*x*decay)
return model - data
# create a set of Parameters
params = Parameters()
params.add('amp', value= 10, min=0)
params.add('decay', value= 0.1)
params.add('shift', value= 0.0, min=-pi / 2., max=pi / 2)
params.add('omega', value= 3.0)
mini = Minimizer(fcn2min, params, fcn_args=(x, data))
out = mini.minimize()
assert_(np.isfinite(out.params['amp'].stderr))
print(out.errorbars)
assert_(out.errorbars == True)
out2 = mini.minimize(method='nelder-mead')
assert_(out2.params['amp'].stderr is None)
assert_(out2.params['decay'].stderr is None)
assert_(out2.params['shift'].stderr is None)
assert_(out2.params['omega'].stderr is None)
assert_(out2.params['amp'].correl is None)
assert_(out2.params['decay'].correl is None)
assert_(out2.params['shift'].correl is None)
assert_(out2.params['omega'].correl is None)
assert_(out2.errorbars == False)
def test_scalar_minimize_reduce_fcn():
# test that the reduce_fcn option for scalar_minimize
# gives different and improved results with outliers
np.random.seed(2)
x = np.linspace(0, 10, 101)
yo = 1.0 + 2.0*np.sin(4*x) * np.exp(-x / 5)
y = yo + np.random.normal(size=len(yo), scale=0.250)
outliers = np.random.random_integers(int(len(x)/3.0), len(x)-1,
int(len(x)/12))
y[outliers] += 5*np.random.random(len(outliers))
# define objective function: returns the array to be minimized
def objfunc(pars, x, data):
decay = pars['decay']
offset= pars['offset']
omega = pars['omega']
amp = pars['amp']
model = offset + amp * np.sin(x*omega) * np.exp(-x/decay)
return model - data
# create a set of Parameters
params = Parameters()
params.add('offset', 2.0)
params.add('omega', 3.3)
params.add('amp', 2.5)
params.add('decay', 1.0)
method='L-BFGS-B'
out1 = minimize(objfunc, params, args=(x, y), method=method)
out2 = minimize(objfunc, params, args=(x, y), method=method,
reduce_fcn='neglogcauchy')
#print assert all
assert_allclose(out1.params['omega'].value, 4.0, rtol=0.01)
assert_allclose(out1.params['decay'].value, 7.6, rtol=0.01)
assert_allclose(out2.params['omega'].value, 4.0, rtol=0.01)
assert_allclose(out2.params['decay'].value, 5.8, rtol=0.01)
def test_multidimensional_fit_GH205():
# test that you don't need to flatten the output from the objective
# function. Tests regression for GH205.
pos = np.linspace(0, 99, 100)
xv, yv = np.meshgrid(pos, pos)
f = lambda xv, yv, lambda1, lambda2: (np.sin(xv * lambda1)
+ np.cos(yv * lambda2))
data = f(xv, yv, 0.3, 3)
assert_(data.ndim, 2)
def fcn2min(params, xv, yv, data):
""" model decaying sine wave, subtract data"""
model = f(xv, yv, params['lambda1'], params['lambda2'])
return model - data
# create a set of Parameters
params = Parameters()
params.add('lambda1', value=0.4)
params.add('lambda2', value=3.2)
mini = Minimizer(fcn2min, params, fcn_args=(xv, yv, data))
res = mini.minimize()
class CommonMinimizerTest(unittest.TestCase):
def setUp(self):
"""
test scale minimizers except newton-cg (needs jacobian) and
anneal (doesn't work out of the box).
"""
p_true = Parameters()
p_true.add('amp', value=14.0)
p_true.add('period', value=5.33)
p_true.add('shift', value=0.123)
p_true.add('decay', value=0.010)
self.p_true = p_true
n = 2500
xmin = 0.
xmax = 250.0
noise = np.random.normal(scale=0.7215, size=n)
self.x = np.linspace(xmin, xmax, n)
self.data = self.residual(p_true, self.x) + noise
fit_params = Parameters()
fit_params.add('amp', value=11.0, min=5, max=20)
fit_params.add('period', value=5., min=1., max=7)
fit_params.add('shift', value=.10, min=0.0, max=0.2)
fit_params.add('decay', value=6.e-3, min=0, max=0.1)
self.fit_params = fit_params
self.mini = Minimizer(self.residual, fit_params, [self.x, self.data])
def residual(self, pars, x, data=None):
amp = pars['amp']
per = pars['period']
shift = pars['shift']
decay = pars['decay']
if abs(shift) > pi/2:
            shift = shift - np.sign(shift) * pi
import torch
import torchvision.transforms as transforms
import numpy as np
import os
import math
import random
import time
import pyrr
import sys
import copy
from GQN.model import GenerativeQueryNetwork
from argparse import ArgumentParser
from util.datasets import RTRenderedDataset
from util.config import configure, read_checkpoint
from util.settings import *
from PIL import Image
from renderer.interface import RenderInterface
'''
This script lets you walk around scenes and visualize network predictions
'''
parser = ArgumentParser()
parser.add_argument('--checkpoint', type=str, default='', help='Checkpoint to load')
parser.add_argument('--config_dir', type=str, default='', help='Where config file is located')
parser.add_argument('--config', type=str, default='', help='Which config to read')
parser.add_argument('--device', type=str, default='', help='Device to run on')
parser.add_argument('--find_checkpoints', action='store_true', help='Attempt to find matching checkpoints automatically')
parser.add_argument('--scene_file', type=str, default='')
args = parser.parse_args()
cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if cuda else "cpu")
settings = configure(args, ignore_data=True)
checkpoint, iteration = read_checkpoint(args, settings)
# This is a trick to use the DataLoader instead of creating everything from scratch
settings.batch_size = 1
samples = settings.samples_per_pixel
settings.samples_per_pixel = 1
dataset = RTRenderedDataset(settings, device)
iterator = iter(dataset)
t = 0.0
data = next(iterator)
dataset.samples = samples
observation_samples = 256
def init_data():
global data, dataset
dataset.samples = samples
queries = dataset.get_current_view()
data["query_images"] = queries[0]
data["query_poses"] = queries[1]
random_observations()
def random_scene():
global data, iterator
dataset.renderer.random_scene()
init_data()
def set_spp(num_samples):
global dataset
dataset.samples = num_samples
def random_observations():
global data, dataset
# Create observations manually
dataset.samples = observation_samples
dataset.renderer.random_view()
view1 = dataset.get_current_view()
dataset.renderer.random_view()
view2 = dataset.get_current_view()
dataset.renderer.random_view()
view3 = dataset.get_current_view()
for key in data["observation_images"].keys():
data["observation_images"][key][0][0] = view1[0][key][0]
data["observation_images"][key][0][1] = view2[0][key][0]
data["observation_images"][key][0][2] = view3[0][key][0]
data["observation_poses"][0][0] = view1[1][0]
data["observation_poses"][0][1] = view2[1][0]
data["observation_poses"][0][2] = view3[1][0]
dataset.samples = samples
if args.scene_file != '':
if not os.path.isfile(args.scene_file):
print("Provided scene file does not exist!")
quit()
dataset.renderer.load_scene_file(args.scene_file)
init_data()
else:
random_scene()
iteration = checkpoint['iteration']
# Create network
net = GenerativeQueryNetwork(settings, iteration)
if 'representation_state' in checkpoint and 'generator_state' in checkpoint:
net.representation.load_state_dict(checkpoint['representation_state'])
net.generator.load_state_dict(checkpoint['generator_state'])
else:
net.load_state_dict(checkpoint['model_state'])
for i in range(len(net.representation.representations)):
net.representation.representations[i].iteration = iteration
net = net.to(device)
net.eval()
print(settings)
def format_buffer(buf):
tmp = buf.clone()
if tmp.shape[0] == 1:
tmp = tmp.repeat(3, 1, 1)
return tmp.detach().cpu().permute(1, 2, 0) ** (1 / 2.2)
import moderngl
import moderngl_window as mglw
from moderngl_window import geometry
from moderngl_window.integrations.imgui import ModernglWindowRenderer
from moderngl_window.scene.camera import KeyboardCamera
import imgui
class WindowEvents(mglw.WindowConfig):
gl_version = (3, 3)
window_size = (1200, 600)
aspect_ratio = window_size[0] / window_size[1]
title = "Neural Renderer"
def __init__(self, **kwargs):
super().__init__(**kwargs)
imgui.create_context()
self.wnd.ctx.error
self.imgui = ModernglWindowRenderer(self.wnd)
self.space_down = False
self.prog = self.ctx.program(vertex_shader="""
#version 330
uniform mat4 transform;
uniform vec3 clr;
uniform float aspect;
in vec3 in_vert;
out vec3 color;
out vec2 uv;
void main() {
vec3 pos = vec3(in_vert.x, in_vert.y * aspect, 0.0);
gl_Position = transform * vec4(pos, 1.0);
uv = vec2(in_vert.x * 0.5 + 0.5, in_vert.y * 0.5 + 0.5);
uv.y = 1.0 - uv.y;
color = vec3(1, 0, 0);
}
""",
fragment_shader="""
#version 330
uniform sampler2D tex_sampler;
out vec4 fragColor;
in vec3 color;
in vec2 uv;
void main() {
fragColor = vec4(texture(tex_sampler, uv).rgb, 1.0);
}
""",
)
self.reference_texture = self.ctx.texture((dataset.render_size, dataset.render_size), components=3)
self.prediction_texture = self.ctx.texture((dataset.render_size, dataset.render_size), components=3)
self.reference_texture.repeat_x = False
self.reference_texture.repeat_y = False
self.prediction_texture.repeat_x = False
self.prediction_texture.repeat_y = False
self.reference_texture.use(5)
self.prediction_texture.use(6)
self.prog['aspect'].value = 12 / 6
T = pyrr.matrix44.create_from_translation(np.array([-0.5, 0.15, 0]))
        T2 = pyrr.matrix44.create_from_translation(np.array([0.5, 0.15, 0]))
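        # --- Hedged sketch (assumption, not in the original script): keep the two
        # placement matrices for render(), and show how a frame shaped by
        # format_buffer() could be uploaded into one of the textures. The names
        # self.T_ref / self.T_pred and the torch.rand frame are illustrative only. ---
        self.T_ref, self.T_pred = T.astype('f4'), T2.astype('f4')
        example_frame = format_buffer(torch.rand(3, dataset.render_size, dataset.render_size))
        self.reference_texture.write((example_frame.numpy() * 255).astype('u1').tobytes())
        # In render(), one would typically write self.T_ref or self.T_pred into the
        # 'transform' uniform, point 'tex_sampler' at texture unit 5 or 6, and draw a quad.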
'''
@author: <NAME>
@copyright: Copyright 2016-2019, <NAME>.
@license: MIT
@contact: <EMAIL>
'''
from __future__ import division
import numpy as np
from numpy.fft import rfft
from numpy import argmax, mean, diff, log
from scipy.signal import blackmanharris
from scipy.fftpack import fft
from scipy.signal import butter, lfilter, filtfilt
Nan = float("nan") # Not-a-number capitalized like None, True, False
Inf = float("inf") # infinite value capitalized ...
eps = np.finfo("float32").eps
def mad(a, normalize=True, axis=0):
from scipy.stats import norm
c = norm.ppf(3/4.) if normalize else 1
return np.median(np.abs(a - np.median(a)) / c, axis=axis)
def rssq(x):
return np.sqrt(np.sum(np.abs(x)**2))
def peak2rms(x):
num = max(abs(x))
den = rms (x)
return num/den
def rms(x):
return np.sqrt(np.mean(x**2))
def range_bytes (win):
return range(win)
def energy(x):
energy = np.sum(x**2) / len(x) # axis = 1 is column sum
return energy
def zcr_2(frame):
count = len(frame)
countZ = np.sum(np.abs(np.diff(np.sign(frame)))) / 2
return (np.float64(countZ) / np.float64(count-1.0))
def zcr(x):
count = (np.diff(np.sign(x)) != 0).sum()
rate = count/len(x)
return rate
""" Frequency-domain features """
def peakfreq_from_fft(sig, fs):
"""
Estimate frequency from peak of FFT
"""
# Compute Fourier transform of windowed signal
windowed = sig * blackmanharris(len(sig))
f = rfft(windowed)
# Find the peak and interpolate to get a more accurate peak
i = argmax(abs(f)) - 1 # Just use this for less-accurate, naive version
true_i = parabolic(log(abs(f)), i)[0]
# Convert to equivalent frequency
return fs * true_i / len(windowed)
def parabolic(f, x):
xv = 1/2 * (f[x-1] - f[x+1]) / (f[x-1] - 2 * f[x] + f[x+1]) + x
yv = f[x] - 1/4 * (f[x-1] - f[x+1]) * (xv - x)
return (xv, yv)
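# Illustrative usage (assumption: the tone parameters below are example values,
# not taken from the original module). A pure f0 tone sampled at fs should give
# a peak-frequency estimate very close to f0.
def _demo_peakfreq(fs=8000.0, f0=440.0):
    t = np.arange(int(fs)) / fs
    return peakfreq_from_fft(np.sin(2 * np.pi * f0 * t), fs)  # ~f0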
def spectralCentroidAndSpread(x, fs):
"""Computes spectral centroid of frame (given abs(FFT))"""
X = abs(fft(x)) # get fft magnitude
ind = (np.arange(1, len(X) + 1)) * (fs/(2.0 * len(X)))
Xt = X.copy()
Xt = Xt / Xt.max()
NUM = np.sum(ind * Xt)
DEN = np.sum(Xt) + eps
# Centroid:
C = (NUM / DEN)
# Spread:
S = np.sqrt(np.sum(((ind - C) ** 2) * Xt) / DEN)
# Normalize:
C = C / (fs / 2.0)
S = S / (fs / 2.0)
return (C, S)
def spectralRollOff(x, c, fs):
"""Computes spectral roll-off"""
X = abs(fft(x)) # get fft magnitude
    totalEnergy = np.sum(X ** 2)
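    # --- Hedged completion (assumption: the usual roll-off definition, i.e. the
    # normalized position below which a fraction c of the spectral energy lies;
    # the original lines were truncated). ---
    fft_length = len(X)
    thres = c * totalEnergy
    cum_sum = np.cumsum(X ** 2) + eps
    above, = np.nonzero(cum_sum > thres)
    if len(above) > 0:
        return np.float64(above[0]) / float(fft_length)
    return 0.0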
import numpy as np
import matplotlib.pyplot as plt
from mpi4py import MPI
# Make sure you have this to ensure numpy doesn't automatically use multiple threads on a single compute node
# export OPENBLAS_NUM_THREADS=1
# Mode of the reduction (method of snapshots or SVD)
mos_mode = False
# Method of snapshots to accelerate
def generate_right_vectors_mos(Y):
'''
Y - Snapshot matrix - shape: NxS
returns V - truncated right singular vectors
'''
new_mat = np.matmul(np.transpose(Y),Y)
w, v = np.linalg.eig(new_mat)
svals = np.sqrt(np.abs(w))
rval = np.argmax(svals<0.0001) # eps0
return v[:,:rval], np.sqrt(np.abs(w[:rval])) # Covariance eigenvectors, singular values
def generate_right_vectors_svd(Y):
# Run a local SVD and threshold
_, slocal, vt = np.linalg.svd(Y)
rval = np.argmax(slocal<0.0001) # eps0
slocal = slocal[:rval]
vlocal = vt.T[:,:rval]
return vlocal, slocal
if __name__ == '__main__':
# Initialize MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
nprocs = comm.Get_size()
# Check what data you have to grab
# Here we assume that the snapshots are already segregated into different files
# Should be (points per rank) x (snapshots) - total data matrix is nprocs*points x snapshots
local_data = np.load('points_rank_'+str(rank)+'.npy')
if mos_mode: # Method of snapshots
vlocal, slocal = generate_right_vectors_mos(local_data)
else: # SVD
vlocal, slocal = generate_right_vectors_svd(local_data)
# Find Wr
    wlocal = np.matmul(vlocal, np.diag(slocal))
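    # --- Hedged continuation (assumption: the usual APMOS gather step; the names
    # wglobal, x, lam, r and phi_local are illustrative, not from the original). ---
    # Each W_i is only S x k_i, so gathering them and doing a serial SVD is cheap:
    # Y^T Y = sum_i W_i W_i^T = W W^T, hence the left singular vectors of W are the
    # global right singular vectors of the full snapshot matrix.
    wglobal = np.concatenate(comm.allgather(wlocal), axis=1)
    x, lam, _ = np.linalg.svd(wglobal, full_matrices=False)
    r = int(np.sum(lam > 1e-10))
    # Local rows of the global POD modes: Phi_i = Y_i X Lambda^{-1}
    phi_local = np.matmul(local_data, x[:, :r]) / lam[:r]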
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2018 <NAME> <<EMAIL>>
#
# Distributed under terms of the MIT license.
"""
testMagicWrapper.py
We test whether autograd can be used, demonstrating it on the toy problem.
"""
import sys, os, time
import numpy as np
import autograd.numpy as numpy
import matplotlib.pyplot as plt
import logging
import autograd.numpy as numpy
from trajoptlib import DaeSystemWrapper
from trajoptlib import DaeSystem, TrajOptCollocProblem
from trajoptlib import LqrObj
from trajoptlib.utility import show_sol
from trajoptlib import OptConfig, OptSolver
def sysFunOrder2(t, X, u, p):
x, dx, ddx = X
return numpy.array([ddx - u[0]])
def sysFunOrder1(t, X, u, p):
x, v, dx, dv = X
y0 = dx - v
y1 = dv - u[0]
return numpy.array([y0, y1])
def main():
# prob = constructOrderOne()
prob = constructOrderTwo()
# construct a solver for the problem
cfg = OptConfig(print_level=5)
slv = OptSolver(prob, cfg)
rst = slv.solve_rand()
print(rst.flag)
if rst.flag == 1:
# parse the solution
sol = prob.parse_sol(rst.sol)
show_sol(sol)
def constructOrderOne():
"""Test the wrapper class for this naive problem"""
sys = DaeSystemWrapper(sysFunOrder1, 4, 1, 0, 2)
N = 20
t0 = 0.0
tf = 10.0
prob = TrajOptCollocProblem(sys, N, t0, tf)
prob.xbd = [np.array([-1e20, -1e20, -1e20, -1e20]), np.array([1e20, 1e20, 1e20, 1e20])]
prob.ubd = [np.array([-1.5]), np.array([1.5])]
prob.x0bd = [np.array([0, 0, -1e20, -1e20]), np.array([0, 0, 1e20, 1e20])]
prob.xfbd = [np.array([np.pi, 0, -1e20, -1e20]), np.array([np.pi, 0, 1e20, 1e20])]
lqr = LqrObj(R=np.ones(1))
prob.add_lqr_obj(lqr)
prob.pre_process() # construct the problem
return prob
def constructOrderTwo():
"""Test the wrapper class for yet another naive problem."""
sys = DaeSystemWrapper(sysFunOrder2, 3, 1, 0, 1)
N = 20
t0 = 0.0
tf = 10.0
prob = TrajOptCollocProblem(sys, N, t0, tf)
    prob.xbd = [np.array([-1e20, -1e20, -1e20]), np.array([1e20, 1e20, 1e20])]
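    # --- Hedged continuation (assumption: mirrors constructOrderOne above, with
    # the state reduced to [x, dx, ddx]; the original lines were truncated). ---
    prob.ubd = [np.array([-1.5]), np.array([1.5])]
    prob.x0bd = [np.array([0, 0, -1e20]), np.array([0, 0, 1e20])]
    prob.xfbd = [np.array([np.pi, 0, -1e20]), np.array([np.pi, 0, 1e20])]
    lqr = LqrObj(R=np.ones(1))
    prob.add_lqr_obj(lqr)
    prob.pre_process()  # construct the problem
    return prob
# Assumed entry point (not shown in the truncated original):
if __name__ == '__main__':
    main()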
import argparse
import math
import h5py
import numpy as np
import tensorflow as tf
import socket
import importlib
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, 'utils'))
import provider
import tf_util
""" argument parser """
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--model', default='model', help='Model name: pointnet_cls or pointnet_cls_basic [default: pointnet_cls]')
parser.add_argument('--log_dir', default='log', help='Log dir [default: log]')
parser.add_argument('--num_point', type=int, default=512, help='Point Number [256/512/1024/2048] [default: 1024]')
parser.add_argument('--max_epoch', type=int, default=250, help='Epoch to run [default: 250]')
parser.add_argument('--batch_size', type=int, default=16, help='Batch Size during training [default: 16]')
parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]')
parser.add_argument('--momentum', type=float, default=0.9, help='Initial learning rate [default: 0.9]')
parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]')
parser.add_argument('--decay_step', type=int, default=200000, help='Decay step for lr decay [default: 200000]')
parser.add_argument('--decay_rate', type=float, default=0.7, help='Decay rate for lr decay [default: 0.8]')
FLAGS = parser.parse_args()
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
MAX_EPOCH = FLAGS.max_epoch
BASE_LEARNING_RATE = FLAGS.learning_rate
GPU_INDEX = FLAGS.gpu
MOMENTUM = FLAGS.momentum
OPTIMIZER = FLAGS.optimizer
DECAY_STEP = FLAGS.decay_step
DECAY_RATE = FLAGS.decay_rate
""" model and training file """
MODEL = importlib.import_module(FLAGS.model) # import network module
MODEL_FILE = os.path.join(BASE_DIR, FLAGS.model+'.py')
LOG_DIR = FLAGS.log_dir
if not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR)
os.system('cp %s %s' % (MODEL_FILE, LOG_DIR)) # bkp of model def
os.system('cp train.py %s' % (LOG_DIR)) # bkp of train procedure
""" log """
LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
""" learning parameters """
MAX_NUM_POINT = 512
NUM_CLASSES = 8
BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99
HOSTNAME = socket.gethostname()
""" import train/test data set """
TRAIN_FILES = provider.getDataAllFiles( \
os.path.abspath(os.path.join(BASE_DIR, '../data/train')))
TEST_FILES = provider.getDataAllFiles(\
os.path.abspath(os.path.join(BASE_DIR, '../data/test')))
def log_string(out_str):
LOG_FOUT.write(out_str+'\n')
LOG_FOUT.flush()
print(out_str)
def get_learning_rate(batch):
learning_rate = tf.train.exponential_decay(
BASE_LEARNING_RATE, # Base learning rate.
batch * BATCH_SIZE, # Current index into the dataset.
DECAY_STEP, # Decay step.
DECAY_RATE, # Decay rate.
staircase=True)
learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE!
return learning_rate
def get_bn_decay(batch):
bn_momentum = tf.train.exponential_decay(
BN_INIT_DECAY,
batch * BATCH_SIZE,
BN_DECAY_DECAY_STEP,
BN_DECAY_DECAY_RATE,
staircase=True)
bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum)
return bn_decay
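# Worked example of the schedule above (illustrative; the helper below mirrors
# get_learning_rate() in plain Python and is not used by the TensorFlow graph):
# with the default flags, lr = 0.001 * 0.7 = 7e-4 after 200000 samples,
# 0.001 * 0.7**2 = 4.9e-4 after 400000 samples, and it is clipped below at 1e-5.
def _decayed_learning_rate(samples_seen, base=0.001, rate=0.7, step=200000):
    return max(base * rate ** (samples_seen // step), 0.00001)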
def train():
with tf.Graph().as_default():
with tf.device('/gpu:'+str(GPU_INDEX)):
pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT)
is_training_pl = tf.placeholder(tf.bool, shape=())
print(is_training_pl)
# Note the global_step=batch parameter to minimize.
# That tells the optimizer to helpfully increment the 'batch' parameter for you every time it trains.
batch = tf.Variable(0)
bn_decay = get_bn_decay(batch)
tf.summary.scalar('bn_decay', bn_decay)
# Get model and loss
pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl, bn_decay=bn_decay)
loss = MODEL.get_loss(pred, labels_pl, end_points)
tf.summary.scalar('loss', loss)
correct = tf.equal(tf.argmax(pred, 1), tf.to_int64(labels_pl))
accuracy = tf.reduce_sum(tf.cast(correct, tf.float32)) / float(BATCH_SIZE)
tf.summary.scalar('accuracy', accuracy)
# Get training operator
learning_rate = get_learning_rate(batch)
tf.summary.scalar('learning_rate', learning_rate)
if OPTIMIZER == 'momentum':
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM)
elif OPTIMIZER == 'adam':
optimizer = tf.train.AdamOptimizer(learning_rate)
train_op = optimizer.minimize(loss, global_step=batch)
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
# Create a session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
config.log_device_placement = False
sess = tf.Session(config=config)
# Add summary writers
#merged = tf.merge_all_summaries()
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'),
sess.graph)
test_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'test'))
# Init variables
init = tf.global_variables_initializer()
# To fix the bug introduced in TF 0.12.1 as in
# http://stackoverflow.com/questions/41543774/invalidargumenterror-for-tensor-bool-tensorflow-0-12-1
#sess.run(init)
sess.run(init, {is_training_pl: True})
ops = {'pointclouds_pl': pointclouds_pl,
'labels_pl': labels_pl,
'is_training_pl': is_training_pl,
'pred': pred,
'loss': loss,
'train_op': train_op,
'merged': merged,
'step': batch}
for epoch in range(MAX_EPOCH):
log_string('**** EPOCH %03d ****' % (epoch))
sys.stdout.flush()
train_one_epoch(sess, ops, train_writer)
eval_one_epoch(sess, ops, test_writer)
# Save the variables to disk.
if epoch % 10 == 0:
save_path = saver.save(sess, os.path.join(LOG_DIR, "model.ckpt"))
log_string("Model saved in file: %s" % save_path)
def train_one_epoch(sess, ops, train_writer):
""" ops: dict mapping from string to tf ops """
is_training = True
# Shuffle train files
train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
class Sink:
def write(self, key, obj):
raise Exception('Virtual function is not overriden')
def flush(self):
raise Exception('Virtual function is not overriden')
class CompositeSink(Sink):
def __init__(self, sinks):
self._sinks = sinks
def write(self, key, obj):
for sink in self._sinks:
sink.write(key, obj)
def flush(self):
for sink in self._sinks:
sink.flush()
class HDFSink(Sink):
def __init__(self, ctx, file_path):
self._file_path = file_path
self._data = {}
self._logger = ctx.logger
def write(self, name, df, reset_index=True):
self._data[name] = df.reset_index() if reset_index else df
def flush(self):
store = pd.HDFStore(self._file_path, complib='blosc', complevel=9)
for name, data in self._data.items():
self._logger.info("saving dataset to hdf store '%s'", name)
store[name] = data
store.close()
self._data = {}
class CommonSink(Sink):
"""
The most general sink.
"""
def __init__(self, ctx, store):
self._ctx = ctx #model context
self._store = store
n_tick = int(ctx.ctrl.ticks)
self.n_agent = np.zeros(n_tick, int)
self.mean_age = np.zeros(n_tick)
self.n_female = np.zeros(n_tick, int)
self.skill_dist = np.zeros((5, n_tick), int)
self.mean_wealth = np.zeros(n_tick)
self.n_partnered = np.zeros(n_tick, int)
self.n_coupling = np.zeros(n_tick, int)
self.n_uncoupling = np.zeros(n_tick, int)
self.skill_dist_new = np.zeros((5, n_tick), int)
self.mean_savings = np.zeros(n_tick)
self.mean_interest = np.zeros(n_tick)
self.n_dying = np.zeros(n_tick, int)
self.mean_age_dying = np.zeros(n_tick)
self.n_baby = np.zeros(n_tick, int)
self.mean_age_mother = np.zeros(n_tick)
self.gini = np.zeros(n_tick)
self.gini_adult = np.zeros(n_tick)
self.gini_20_39 = np.zeros(n_tick)
self.gini_40_64 = np.zeros(n_tick)
self.gini_65p = np.zeros(n_tick)
self.direct_amount = np.zeros(n_tick)
self.n_direct = np.zeros(n_tick, int)
self.trust_amount = np.zeros(n_tick)
self.n_from_trust = np.zeros(n_tick, int)
self.max_age = 150
self.n_agent_by_age = np.zeros((n_tick, self.max_age), int)
self.n_female_by_age = np.zeros((n_tick, self.max_age), int)
self.mean_wealth_by_age = np.zeros((n_tick, self.max_age))
self.n_partnered_by_age = np.zeros((n_tick, self.max_age), int)
self.n_coupling_by_age = np.zeros((n_tick, self.max_age), int)
self.n_uncoupling_by_age = np.zeros((n_tick, self.max_age), int)
self.n_dying_by_age = np.zeros((n_tick, self.max_age), int)
        self.n_baby_by_age = np.zeros((n_tick, self.max_age), int)
"""
Interval unit commitment
@author:<NAME>
@e-mail:<EMAIL>
"""
from pypower import loadcase, ext2int, makeBdc
from scipy.sparse import csr_matrix as sparse
from numpy import zeros, c_, shape, ix_, ones, r_, arange, sum, concatenate, array, diag, eye
from solvers.mixed_integer_solvers_cplex import mixed_integer_linear_programming as lp
import pandas as pd
def problem_formulation(case, BETA=0.15, BETA_HYDRO=0.05, BETA_LOAD=0.03):
"""
:param case: The test case for unit commitment problem
:return:
"""
CAP_WIND = 1 # The capacity of wind farm
    # The disturbance range of wind farm
CAPVALUE = 10 # The capacity value
Price_energy = r_[ones(8), 3 * ones(8), ones(8)]
from pypower.idx_brch import F_BUS, T_BUS, BR_X, TAP, SHIFT, BR_STATUS, RATE_A
from pypower.idx_cost import MODEL, NCOST, PW_LINEAR, COST, POLYNOMIAL
from pypower.idx_bus import BUS_TYPE, REF, VA, VM, PD, GS, VMAX, VMIN, BUS_I
from pypower.idx_gen import GEN_BUS, VG, PG, QG, PMAX, PMIN, QMAX, QMIN
mpc = ext2int.ext2int(case)
baseMVA, bus, gen, branch, gencost = mpc["baseMVA"], mpc["bus"], mpc["gen"], mpc["branch"], mpc["gencost"]
nb = shape(mpc['bus'])[0] ## number of buses
nl = shape(mpc['branch'])[0] ## number of branches
ng = shape(mpc['gen'])[0] ## number of dispatchable injections
# Bbus = makeBdc.makeBdc(baseMVA, bus, branch)
# Distribution_factor = Bbus[1] * inv(Bbus[0])
Distribution_factor = array([
[0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[-0.005, -0.005, -0.005, -1.005, -0.005, -0.005, -0.005, -0.005, -0.005, -0.005, -0.005, -0.005, -0.005,
-0.005, ],
[0.47, 0.47, 0.47, 0.47, -0.03, -0.03, -0.03, -0.03, -0.03, -0.03, -0.03, -0.03, -0.03, -0.03],
[0.47, 0.47, 0.47, 0.47, -0.03, - 0.03, -0.03, -0.03, -0.03, -0.03, -0.03, -0.03, -0.03, -0.03],
[0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0.32, 0.32, 0.32, 0.32, 0.32, 0.32, -0.68, -0.68, 0.32, 0.32, 0.32, 0.32, 0.32, 0.32],
[0.32, 0.32, 0.32, 0.32, 0.32, 0.32, 0.32, 0.32, -0.68, -0.68, 0.32, 0.32, 0.32, 0.32],
[0.16, 0.16, 0.16, 0.16, 0.16, 0.16, 0.16, 0.16, 0.16, -0.84, 0.16, 0.16, 0.16, 0.16],
[-0.16, -0.16, -0.16, -0.16, -0.16, -0.16, -0.16, -0.16, -0.16, -0.16, -1.16, -0.16, -1.16, -0.16],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0],
[-0.16, -0.16, -0.16, -0.16, -0.16, -0.16, -0.16, -0.16, -0.16, -0.16, -0.16, -1.16, -0.16, -0.16],
[-0.08, -0.08, -0.08, -0.08, -0.08, -0.08, -0.08, -0.08, -0.08, -0.08, -0.08, -0.08, -0.08, -1.08],
])
Distribution_factor = sparse(Distribution_factor)
# Formulate connection matrix for wind farms
i = []
PWMAX = []
PWMIN = []
for index in range(ng):
if gen[index, PMIN] == 0:
i.append(index)
PWMAX.append(gen[index, PMAX])
PWMIN.append(gen[index, PMIN])
i = array(i)
nw = i.shape[0]
Cw = sparse((ones(nw), (gen[i, GEN_BUS], arange(nw))), shape=(nb, nw))
PWMAX = array(PWMAX).reshape((len(PWMAX), 1))
PWMIN = array(PWMIN).reshape((len(PWMIN), 1))
# Formulate the connection matrix for hydro power plants
i = []
PHMAX = []
PHMIN = []
for index in range(ng):
if gen[index, PMIN] > 0:
i.append(index)
PHMAX.append(gen[index, PMAX])
PHMIN.append(gen[index, PMIN])
i = array(i)
nh = i.shape[0]
Ch = sparse((ones(nh), (gen[i, GEN_BUS], arange(nh))), shape=(nb, nh))
PHMAX = array(PHMAX).reshape((len(PHMAX), 1))
PHMIN = array(PHMIN).reshape((len(PHMIN), 1))
# Formulate the external power systems
i = []
PEXMAX = []
PEXMIN = []
for index in range(ng):
if gen[index, PMIN] < 0:
i.append(index)
PEXMAX.append(gen[index, PMAX])
PEXMIN.append(gen[index, PMIN])
i = array(i)
nex = i.shape[0]
Cex = sparse((ones(nex), (gen[i, GEN_BUS], arange(nex))), shape=(nb, nex))
PEXMAX = array(PEXMAX).reshape((len(PEXMAX), 1))
PEXMIN = array(PEXMIN).reshape((len(PEXMIN), 1))
PLMAX = branch[:, RATE_A].reshape((nl, 1)) # The power flow limitation
T = 24
## Profiles
# Wind profile
WIND_PROFILE = array(
[591.35, 714.50, 1074.49, 505.06, 692.78, 881.88, 858.48, 609.11, 559.95, 426.86, 394.54, 164.47, 27.15, 4.47,
54.08, 109.90, 111.50, 130.44, 111.59, 162.38, 188.16, 216.98, 102.94, 229.53]).reshape((T, 1))
WIND_PROFILE = WIND_PROFILE / WIND_PROFILE.max()
WIND_PROFILE_FORECAST = zeros((T * nw, 1))
Delta_wind = zeros((T * nw, 1))
for i in range(T):
WIND_PROFILE_FORECAST[i * nw:(i + 1) * nw, :] = WIND_PROFILE[i] * PWMAX
Delta_wind[i * nw:(i + 1) * nw, :] = WIND_PROFILE[i] * PWMAX * BETA
# Load profile
LOAD_PROFILE = array([0.632596195634005, 0.598783973523217, 0.580981513054525, 0.574328051348912, 0.584214221241601,
0.631074282084712, 0.708620833751212, 0.797665730618795, 0.877125330124026, 0.926981579915087,
0.947428654208872, 0.921588439808779, 0.884707317888543, 0.877717046100358, 0.880387289807107,
0.892056129442049, 0.909233443653261, 0.926748403704075, 0.968646575067696, 0.999358974358974,
0.979169591816267, 0.913517534182463, 0.806453715775750, 0.699930632166617]).reshape((T, 1))
LOAD_FORECAST = zeros((T * nb, 1))
Delta_load = zeros((T * nb, 1))
load_base = bus[:, PD].reshape(nb, 1)
for i in range(T):
LOAD_FORECAST[i * nb:(i + 1) * nb, :] = load_base * LOAD_PROFILE[i]
Delta_load[i * nb:(i + 1) * nb, :] = load_base * BETA_LOAD
# Hydro information
HYDRO_INJECT = array([6, 2, 4, 3]).reshape((nh, 1))
HYDRO_INJECT_FORECAST = zeros((T * nh, 1))
Delta_hydro = zeros((T * nh, 1))
for i in range(T):
HYDRO_INJECT_FORECAST[i * nh:(i + 1) * nh, :] = HYDRO_INJECT
Delta_hydro[i * nh:(i + 1) * nh, :] = HYDRO_INJECT * BETA_HYDRO
MIN_DOWN = ones((nh, 1))
MIN_UP = ones((nh, 1))
QMIN = array([1.5, 1, 1, 1]).reshape((nh, 1))
QMAX = array([20, 10, 10, 10]).reshape((nh, 1))
VMIN = array([70, 50, 70, 40]).reshape((nh, 1))
VMAX = array([160, 140, 150, 130]).reshape((nh, 1))
V0 = array([110, 90, 100, 80]).reshape((nh, 1))
M_transfer = diag(array([8.8649, 6.4444, 6.778, 7.3333]))
C_TEMP = array([30, 2, 9, 4]).reshape((4, 1))
Q_TEMP = array([1.5, 1, 1, 1]).reshape((4, 1))
# Define the first stage decision variables
ON = 0
OFF = 1
IHG = 2
PHG = 3
RUHG = 4
RDHG = 5
QHG = 6
QUHG = 7
QDHG = 8
V = 9
S = 10
PWC = 11
PLC = 12
PEX = 13
CEX = 14
NX = PWC * nh * T + nw * T + nb * T + nex * T + 1
lb = zeros((NX, 1))
ub = zeros((NX, 1))
c = zeros((NX, 1))
vtypes = ["c"] * NX
for i in range(T):
for j in range(nh):
# lower boundary information
lb[ON * nh * T + i * nh + j] = 0
lb[OFF * nh * T + i * nh + j] = 0
lb[IHG * nh * T + i * nh + j] = 0
lb[PHG * nh * T + i * nh + j] = 0
lb[RUHG * nh * T + i * nh + j] = 0
lb[RDHG * nh * T + i * nh + j] = 0
lb[QHG * nh * T + i * nh + j] = 0
lb[QUHG * nh * T + i * nh + j] = 0
lb[QDHG * nh * T + i * nh + j] = 0
lb[V * nh * T + i * nh + j] = VMIN[j]
lb[S * nh * T + i * nh + j] = 0
# upper boundary information
ub[ON * nh * T + i * nh + j] = 1
ub[OFF * nh * T + i * nh + j] = 1
ub[IHG * nh * T + i * nh + j] = 1
ub[PHG * nh * T + i * nh + j] = PHMAX[j]
ub[RUHG * nh * T + i * nh + j] = PHMAX[j]
ub[RDHG * nh * T + i * nh + j] = PHMAX[j]
ub[QHG * nh * T + i * nh + j] = QMAX[j]
ub[QUHG * nh * T + i * nh + j] = QMAX[j]
ub[QDHG * nh * T + i * nh + j] = QMAX[j]
ub[V * nh * T + i * nh + j] = VMAX[j]
ub[S * nh * T + i * nh + j] = 10 ** 8
# objective value
c[S * nh * T + i * nh + j] = 1
c[RUHG * nh * T + i * nh + j] = -Price_energy[j]
c[RDHG * nh * T + i * nh + j] = Price_energy[j]
# variables types
vtypes[ON * nh * T + i * nh + j] = "D"
vtypes[OFF * nh * T + i * nh + j] = "D"
vtypes[IHG * nh * T + i * nh + j] = "D"
if i == T - 1:
lb[V * nh * T + i * nh + j] = V0[j]
ub[V * nh * T + i * nh + j] = V0[j]
for j in range(nw):
# lower boundary information
lb[PWC * nh * T + i * nw + j] = 0
# upper boundary information
ub[PWC * nh * T + i * nw + j] = WIND_PROFILE_FORECAST[i * nw + j]
# objective value
c[PWC * nh * T + i * nw + j] = 1
for j in range(nb):
# lower boundary information
lb[PWC * nh * T + nw * T + i * nb + j] = 0
# upper boundary information
ub[PWC * nh * T + nw * T + i * nb + j] = bus[j, PD] * LOAD_PROFILE[i]
# objective value
c[PWC * nh * T + nw * T + i * nb + j] = 10 ** 8
for j in range(nex):
# lower boundary information
lb[PWC * nh * T + nw * T + nb * T + i * nex + j] = PEXMIN[j]
# upper boundary information
ub[PWC * nh * T + nw * T + nb * T + i * nex + j] = PEXMAX[j]
# objective value
c[PWC * nh * T + nw * T + nb * T + i * nex + j] = -Price_energy[i]
# lower boundary information
lb[PWC * nh * T + nw * T + nb * T + nex * T] = PEXMIN[0]
# upper boundary information
ub[PWC * nh * T + nw * T + nb * T + nex * T] = PEXMAX[0]
# objective value
# c[PWC * nh * T + nw * T + nb * T + nex * T] = -CAPVALUE
# 2) Constraint set
# 2.1) Power balance equation
Aeq = zeros((T, NX))
beq = zeros((T, 1))
for i in range(T):
# For the hydro units
for j in range(nh):
Aeq[i, PHG * nh * T + i * nh + j] = 1
# For the wind farms
for j in range(nw):
Aeq[i, PWC * nh * T + i * nw + j] = -1
# For the loads
for j in range(nb):
Aeq[i, PWC * nh * T + nw * T + i * nb + j] = 1
# For the power exchange
for j in range(nex):
Aeq[i, PWC * nh * T + nw * T + nb * T + i * nex + j] = -1
beq[i] = sum(load_base) * LOAD_PROFILE[i] - sum(WIND_PROFILE_FORECAST[i * nw:(i + 1) * nw])
# 2.2) Status transformation of each unit
Aeq_temp = zeros((T * nh, NX))
beq_temp = zeros((T * nh, 1))
for i in range(T):
for j in range(nh):
Aeq_temp[i * nh + j, ON * nh * T + i * nh + j] = -1
Aeq_temp[i * nh + j, OFF * nh * T + i * nh + j] = 1
Aeq_temp[i * nh + j, IHG * nh * T + i * nh + j] = 1
if i != 0:
Aeq_temp[i * nh + j, IHG * nh * T + (i - 1) * nh + j] = -1
else:
beq_temp[i * T + j] = 0
Aeq = concatenate((Aeq, Aeq_temp), axis=0)
beq = concatenate((beq, beq_temp), axis=0)
# 2.3) water status change
Aeq_temp = zeros((T * nh, NX))
beq_temp = HYDRO_INJECT_FORECAST
for i in range(T):
for j in range(nh):
Aeq_temp[i * nh + j, V * nh * T + i * nh + j] = 1
Aeq_temp[i * nh + j, S * nh * T + i * nh + j] = 1
Aeq_temp[i * nh + j, QHG * nh * T + i * nh + j] = 1
if i != 0:
Aeq_temp[i * nh + j, V * nh * T + (i - 1) * nh + j] = -1
else:
beq_temp[i * T + j] += V0[j]
Aeq = concatenate((Aeq, Aeq_temp), axis=0)
beq = concatenate((beq, beq_temp), axis=0)
# 2.4) Power water transfering
Aeq_temp = zeros((T * nh, NX))
beq_temp = zeros((T * nh, 1))
for i in range(T):
for j in range(nh):
Aeq_temp[i * nh + j, PHG * nh * T + i * nh + j] = 1
Aeq_temp[i * nh + j, QHG * nh * T + i * nh + j] = -M_transfer[j, j]
Aeq_temp[i * nh + j, IHG * nh * T + i * nh + j] = -C_TEMP[j] + M_transfer[j, j] * Q_TEMP[j]
Aeq = concatenate((Aeq, Aeq_temp), axis=0)
beq = concatenate((beq, beq_temp), axis=0)
# 2.5) Power range limitation
Aineq = zeros((T * nh, NX))
bineq = zeros((T * nh, 1))
for i in range(T):
for j in range(nh):
Aineq[i * nh + j, ON * nh * T + i * nh + j] = 1
Aineq[i * nh + j, OFF * nh * T + i * nh + j] = 1
bineq[i * nh + j] = 1
Aineq_temp = zeros((T * nh, NX))
bineq_temp = zeros((T * nh, 1))
for i in range(T):
for j in range(nh):
Aineq_temp[i * nh + j, IHG * nh * T + i * nh + j] = PHMIN[j]
Aineq_temp[i * nh + j, PHG * nh * T + i * nh + j] = -1
Aineq_temp[i * nh + j, RDHG * nh * T + i * nh + j] = 1
Aineq = concatenate((Aineq, Aineq_temp), axis=0)
bineq = concatenate((bineq, bineq_temp), axis=0)
Aineq_temp = zeros((T * nh, NX))
bineq_temp = zeros((T * nh, 1))
for i in range(T):
for j in range(nh):
Aineq_temp[i * nh + j, IHG * nh * T + i * nh + j] = -PHMAX[j]
Aineq_temp[i * nh + j, PHG * nh * T + i * nh + j] = 1
Aineq_temp[i * nh + j, RUHG * nh * T + i * nh + j] = 1
Aineq = concatenate((Aineq, Aineq_temp), axis=0)
bineq = concatenate((bineq, bineq_temp), axis=0)
# 2.6) Water reserve constraints
Aineq_temp = zeros((T * nh, NX))
bineq_temp = zeros((T * nh, 1))
for i in range(T):
for j in range(nh):
Aineq_temp[i * nh + j, PHG * nh * T + i * nh + j] = 1
Aineq_temp[i * nh + j, RUHG * nh * T + i * nh + j] = 1
Aineq_temp[i * nh + j, IHG * nh * T + i * nh + j] = -C_TEMP[j] + M_transfer[j, j] * Q_TEMP[j]
Aineq_temp[i * nh + j, QHG * nh * T + i * nh + j] = -M_transfer[j, j]
Aineq_temp[i * nh + j, QUHG * nh * T + i * nh + j] = -M_transfer[j, j]
Aineq = concatenate((Aineq, Aineq_temp), axis=0)
bineq = concatenate((bineq, bineq_temp), axis=0)
Aineq_temp = zeros((T * nh, NX))
bineq_temp = zeros((T * nh, 1))
for i in range(T):
for j in range(nh):
Aineq_temp[i * nh + j, PHG * nh * T + i * nh + j] = -1
Aineq_temp[i * nh + j, RDHG * nh * T + i * nh + j] = 1
Aineq_temp[i * nh + j, IHG * nh * T + i * nh + j] = C_TEMP[j] - M_transfer[j, j] * Q_TEMP[j]
Aineq_temp[i * nh + j, QHG * nh * T + i * nh + j] = M_transfer[j, j]
Aineq_temp[i * nh + j, QDHG * nh * T + i * nh + j] = -M_transfer[j, j]
Aineq = concatenate((Aineq, Aineq_temp), axis=0)
bineq = concatenate((bineq, bineq_temp), axis=0)
# 2.7) water flow constraints
Aineq_temp = zeros((T * nh, NX))
bineq_temp = zeros((T * nh, 1))
for i in range(T):
for j in range(nh):
Aineq_temp[i * nh + j, IHG * nh * T + i * nh + j] = QMIN[j]
Aineq_temp[i * nh + j, QHG * nh * T + i * nh + j] = -1
Aineq_temp[i * nh + j, QDHG * nh * T + i * nh + j] = 1
Aineq = concatenate((Aineq, Aineq_temp), axis=0)
bineq = concatenate((bineq, bineq_temp), axis=0)
Aineq_temp = zeros((T * nh, NX))
bineq_temp = zeros((T * nh, 1))
for i in range(T):
for j in range(nh):
Aineq_temp[i * nh + j, IHG * nh * T + i * nh + j] = -QMAX[j]
Aineq_temp[i * nh + j, QHG * nh * T + i * nh + j] = 1
Aineq_temp[i * nh + j, QUHG * nh * T + i * nh + j] = 1
Aineq = concatenate((Aineq, Aineq_temp), axis=0)
bineq = concatenate((bineq, bineq_temp), axis=0)
# 2.8) Water reserve limitation
Aineq_temp = zeros((T * nh, NX))
bineq_temp = zeros((T * nh, 1))
for i in range(T):
for j in range(nh):
Aineq_temp[i * nh + j, V * nh * T + i * nh + j] = 1
Aineq_temp[i * nh + j, QHG * nh * T + i * nh + j] = -1
Aineq_temp[i * nh + j, QDHG * nh * T + i * nh + j] = 1
Aineq_temp[i * nh + j, S * nh * T + i * nh + j] = -1
bineq_temp[i * nh + j] = VMAX[j] - HYDRO_INJECT_FORECAST[i * nh + j]
Aineq = concatenate((Aineq, Aineq_temp), axis=0)
bineq = concatenate((bineq, bineq_temp), axis=0)
Aineq_temp = zeros((T * nh, NX))
bineq_temp = zeros((T * nh, 1))
for i in range(T):
for j in range(nh):
Aineq_temp[i * nh + j, V * nh * T + i * nh + j] = -1
Aineq_temp[i * nh + j, QHG * nh * T + i * nh + j] = 1
Aineq_temp[i * nh + j, QUHG * nh * T + i * nh + j] = 1
Aineq_temp[i * nh + j, S * nh * T + i * nh + j] = 1
bineq_temp[i * nh + j] = -VMIN[j] + HYDRO_INJECT_FORECAST[i * nh + j]
Aineq = concatenate((Aineq, Aineq_temp), axis=0)
bineq = concatenate((bineq, bineq_temp), axis=0)
# 2.9) Line flow limitation
Aineq_temp = zeros((T * nl, NX))
bineq_temp = zeros((T * nl, 1))
for i in range(T):
Aineq_temp[i * nl:(i + 1) * nl, PHG * nh * T + i * nh:PHG * nh * T + (i + 1) * nh] = -(
Distribution_factor * Ch).todense()
Aineq_temp[i * nl:(i + 1) * nl, PWC * nh * T + i * nw:PWC * nh * T + (i + 1) * nw] = (
Distribution_factor * Cw).todense()
Aineq_temp[i * nl:(i + 1) * nl,
PWC * nh * T + nw * T + i * nb:PWC * nh * T + nw * T + (i + 1) * nb] = -Distribution_factor.todense()
Aineq_temp[i * nl:(i + 1) * nl,
PWC * nh * T + nw * T + nb * T + i * nex:PWC * nh * T + nw * T + nb * T + (i + 1) * nex] = (
Distribution_factor * Cex).todense()
bineq_temp[i * nl:(i + 1) * nl, :] = PLMAX - Distribution_factor * (
(bus[:, PD] * LOAD_PROFILE[i]).reshape(nb, 1) - Cw * WIND_PROFILE_FORECAST[i * nw:(i + 1) * nw])
Aineq = concatenate((Aineq, Aineq_temp), axis=0)
bineq = concatenate((bineq, bineq_temp), axis=0)
Aineq_temp = zeros((T * nl, NX))
bineq_temp = zeros((T * nl, 1))
for i in range(T):
Aineq_temp[i * nl:(i + 1) * nl, PHG * nh * T + i * nh:PHG * nh * T + (i + 1) * nh] = (
Distribution_factor * Ch).todense()
Aineq_temp[i * nl:(i + 1) * nl, PWC * nh * T + i * nw:PWC * nh * T + (i + 1) * nw] = -(
Distribution_factor * Cw).todense()
Aineq_temp[i * nl:(i + 1) * nl,
PWC * nh * T + nw * T + i * nb:PWC * nh * T + nw * T + (i + 1) * nb] = Distribution_factor.todense()
Aineq_temp[i * nl:(i + 1) * nl,
PWC * nh * T + nw * T + nb * T + i * nex:PWC * nh * T + nw * T + nb * T + (i + 1) * nex] = -(
Distribution_factor * Cex).todense()
bineq_temp[i * nl:(i + 1) * nl, :] = PLMAX + Distribution_factor * (
(bus[:, PD] * LOAD_PROFILE[i]).reshape(nb, 1) - Cw * WIND_PROFILE_FORECAST[i * nw:(i + 1) * nw])
Aineq = concatenate((Aineq, Aineq_temp), axis=0)
bineq = concatenate((bineq, bineq_temp), axis=0)
# 2.10) Capacity limitation
Aineq_temp = zeros((T, NX))
bineq_temp = zeros((T, 1))
for i in range(T):
Aineq_temp[i, PWC * nh * T + nw * T + nb * T + nex * T] = 1
Aineq_temp[i, PWC * nh * T + nw * T + nb * T + i * nex] = -1
Aineq = concatenate((Aineq, Aineq_temp), axis=0)
bineq = concatenate((bineq, bineq_temp), axis=0)
# 2.11) Up and down reserve for the forecasting errors
# Up reserve limitation
Aineq_temp = zeros((T, NX))
bineq_temp = zeros((T, 1))
for i in range(T):
for j in range(nh):
Aineq_temp[i, RUHG * nh * T + i * nh + j] = -1
for j in range(nw):
bineq_temp[i] -= Delta_wind[i * nw + j]
for j in range(nb):
bineq_temp[i] -= Delta_load[i * nb + j]
Aineq = concatenate((Aineq, Aineq_temp), axis=0)
bineq = concatenate((bineq, bineq_temp), axis=0)
# Down reserve limitation
Aineq_temp = zeros((T, NX))
bineq_temp = zeros((T, 1))
for i in range(T):
for j in range(nh):
Aineq_temp[i, RDHG * nh * T + i * nh + j] = -1
for j in range(nw):
bineq_temp[i] -= Delta_wind[i * nw + j]
for j in range(nb):
bineq_temp[i] -= Delta_load[i * nb + j]
Aineq = concatenate((Aineq, Aineq_temp), axis=0)
bineq = concatenate((bineq, bineq_temp), axis=0)
model_first_stage = {"c": c,
"lb": lb,
"ub": ub,
"A": Aineq,
"b": bineq,
"Aeq": Aeq,
"beq": beq,
"vtypes": vtypes}
## Formualte the second stage decision making problem
phg = 0
qhg = 1
v = 2
s = 3
pwc = 4
plc = 5
pex = 6
cex = 7
nx = pwc * nh * T + nw * T + nb * T + nex * T + 1
# Generate the lower and boundary for the first stage decision variables
lb = zeros((nx, 1))
ub = zeros((nx, 1))
c = zeros((nx, 1))
vtypes = ["c"] * nx
nu = nh * T + nw * T + nb * T
u_mean = concatenate([HYDRO_INJECT_FORECAST, WIND_PROFILE_FORECAST, LOAD_FORECAST])
u_delta = concatenate([Delta_hydro, Delta_wind, Delta_load])
for i in range(T):
for j in range(nh):
# lower boundary information
lb[phg * nh * T + i * nh + j] = 0
lb[qhg * nh * T + i * nh + j] = 0
lb[v * nh * T + i * nh + j] = VMIN[j]
lb[s * nh * T + i * nh + j] = 0
# upper boundary information
ub[phg * nh * T + i * nh + j] = PHMAX[j]
ub[qhg * nh * T + i * nh + j] = QMAX[j]
ub[v * nh * T + i * nh + j] = VMAX[j]
ub[s * nh * T + i * nh + j] = 10 ** 8
# objective value
c[s * nh * T + i * nh + j] = 1
if i == T - 1:
lb[v * nh * T + i * nh + j] = V0[j]
ub[v * nh * T + i * nh + j] = V0[j]
for j in range(nw):
# lower boundary information
lb[pwc * nh * T + i * nw + j] = 0
# upper boundary information
ub[pwc * nh * T + i * nw + j] = 10 ** 4
# objective value
c[pwc * nh * T + i * nw + j] = 1
for j in range(nb):
# lower boundary information
lb[pwc * nh * T + nw * T + i * nb + j] = 0
# upper boundary information
ub[pwc * nh * T + nw * T + i * nb + j] = 10 ** 4
# objective value
c[pwc * nh * T + nw * T + i * nb + j] = 10 ** 6
for j in range(nex):
# lower boundary information
lb[pwc * nh * T + nw * T + nb * T + i * nex + j] = PEXMIN[j]
# upper boundary information
ub[pwc * nh * T + nw * T + nb * T + i * nex + j] = PEXMAX[j]
# objective value
# c[pwc * nh * T + nw * T + nb * T + i * nex + j] = -Price_energy[i]
# lower boundary information
lb[pwc * nh * T + nw * T + nb * T + nex * T] = PEXMIN[0]
# upper boundary information
ub[pwc * nh * T + nw * T + nb * T + nex * T] = PEXMAX[0]
# objective value
c[pwc * nh * T + nw * T + nb * T + nex * T] = -CAPVALUE
# Generate correlate constraints
# 3.1) Power balance constraints
E = zeros((T, NX))
M = zeros((T, nu))
G = zeros((T, nx))
h = beq[0:T]
for i in range(T):
# For the hydro units
for j in range(nh):
G[i, phg * nh * T + i * nh + j] = 1
# For the wind farms
for j in range(nw):
G[i, pwc * nh * T + i * nw + j] = -1
# For the loads
for j in range(nb):
G[i, pwc * nh * T + nw * T + i * nb + j] = 1
# For the power exchange
for j in range(nex):
G[i, pwc * nh * T + nw * T + nb * T + i * nex + j] = -1
# Update G,M,E,h
G = concatenate([G, -G])
M = concatenate([M, -M])
E = concatenate([E, -E])
h = concatenate([h, -h])
# 3.2) water status change
E_temp = zeros((T * nh, NX))
M_temp = zeros((T * nh, nu))
G_temp = zeros((T * nh, nx))
h_temp = HYDRO_INJECT_FORECAST
for i in range(T):
for j in range(nh):
G_temp[i * nh + j, v * nh * T + i * nh + j] = 1
G_temp[i * nh + j, s * nh * T + i * nh + j] = 1
G_temp[i * nh + j, qhg * nh * T + i * nh + j] = 1
if i != 0:
G_temp[i * nh + j, v * nh * T + (i - 1) * nh + j] = -1
else:
h_temp[i * T + j] = V0[j]
# M_temp[i * nh + j, i * nh + j] = -1
G = concatenate([G, G_temp, -G_temp])
M = concatenate([M, M_temp, -M_temp])
E = concatenate([E, E_temp, -E_temp])
h = concatenate([h, h_temp, -h_temp])
# 3.3) Power water transfering
E_temp = zeros((T * nh, NX))
M_temp = zeros((T * nh, nu))
G_temp = zeros((T * nh, nx))
h_temp = zeros((T * nh, 1))
for i in range(T):
for j in range(nh):
G_temp[i * nh + j, phg * nh * T + i * nh + j] = 1
G_temp[i * nh + j, qhg * nh * T + i * nh + j] = -M_transfer[j, j]
E_temp[i * nh + j, IHG * nh * T + i * nh + j] = -C_TEMP[j] + M_transfer[j, j] * Q_TEMP[j]
G = concatenate([G, G_temp, -G_temp])
M = concatenate([M, M_temp, -M_temp])
E = concatenate([E, E_temp, -E_temp])
h = concatenate([h, h_temp, -h_temp])
# 3.4) Power range limitation
# Some problem found
E_temp = zeros((T * nh, NX))
M_temp = zeros((T * nh, nu))
G_temp = zeros((T * nh, nx))
h_temp = zeros((T * nh, 1))
for i in range(T):
for j in range(nh):
G_temp[i * nh + j, phg * nh * T + i * nh + j] = 1
E_temp[i * nh + j, PHG * nh * T + i * nh + j] = -1
E_temp[i * nh + j, RDHG * nh * T + i * nh + j] = 1
G = concatenate([G, G_temp])
M = concatenate([M, M_temp])
E = concatenate([E, E_temp])
h = concatenate([h, h_temp])
E_temp = zeros((T * nh, NX))
M_temp = zeros((T * nh, nu))
G_temp = zeros((T * nh, nx))
h_temp = zeros((T * nh, 1))
for i in range(T):
for j in range(nh):
G_temp[i * nh + j, phg * nh * T + i * nh + j] = -1
E_temp[i * nh + j, PHG * nh * T + i * nh + j] = 1
E_temp[i * nh + j, RUHG * nh * T + i * nh + j] = 1
G = concatenate([G, G_temp])
M = concatenate([M, M_temp])
E = concatenate([E, E_temp])
h = concatenate([h, h_temp])
# 3.5) Water flow constraints
E_temp = zeros((T * nh, NX))
M_temp = zeros((T * nh, nu))
G_temp = zeros((T * nh, nx))
h_temp = zeros((T * nh, 1))
for i in range(T):
for j in range(nh):
G_temp[i * nh + j, qhg * nh * T + i * nh + j] = 1
E_temp[i * nh + j, QHG * nh * T + i * nh + j] = -1
E_temp[i * nh + j, QDHG * nh * T + i * nh + j] = 1
G = concatenate([G, G_temp])
M = concatenate([M, M_temp])
E = concatenate([E, E_temp])
h = concatenate([h, h_temp])
E_temp = zeros((T * nh, NX))
M_temp = zeros((T * nh, nu))
G_temp = zeros((T * nh, nx))
h_temp = zeros((T * nh, 1))
for i in range(T):
for j in range(nh):
G_temp[i * nh + j, qhg * nh * T + i * nh + j] = -1
E_temp[i * nh + j, QHG * nh * T + i * nh + j] = 1
E_temp[i * nh + j, QUHG * nh * T + i * nh + j] = 1
G = concatenate([G, G_temp])
M = concatenate([M, M_temp])
E = concatenate([E, E_temp])
h = concatenate([h, h_temp])
# 3.6) Line flow constraints
E_temp = zeros((T * nl, NX))
M_temp = zeros((T * nl, nu))
G_temp = zeros((T * nl, nx))
h_temp = zeros((T * nl, 1))
for i in range(T):
G_temp[i * nl:(i + 1) * nl, phg * nh * T + i * nh:phg * nh * T + (i + 1) * nh] = (
Distribution_factor * Ch).todense()
G_temp[i * nl:(i + 1) * nl, pwc * nh * T + i * nw: pwc * nh * T + (i + 1) * nw] = -(
Distribution_factor * Cw).todense()
G_temp[i * nl:(i + 1) * nl,
pwc * nh * T + nw * T + i * nb:pwc * nh * T + nw * T + (i + 1) * nb] = Distribution_factor.todense()
G_temp[i * nl:(i + 1) * nl,
pwc * nh * T + nw * T + nb * T + i * nex:pwc * nh * T + nw * T + nb * T + (i + 1) * nex] = -(
Distribution_factor * Cex).todense()
M_temp[i * nl:(i + 1) * nl, nh * T + i * nw: nh * T + (i + 1) * nw] = (
Distribution_factor * Cw).todense()
M_temp[i * nl:(i + 1) * nl,
nh * T + nw * T + i * nb: nh * T + nw * T + (i + 1) * nb] = -Distribution_factor.todense()
h_temp[i * nl:(i + 1) * nl, :] = -PLMAX
G = concatenate([G, G_temp])
M = concatenate([M, M_temp])
E = concatenate([E, E_temp])
h = concatenate([h, h_temp])
E_temp = zeros((T * nl, NX))
M_temp = zeros((T * nl, nu))
G_temp = zeros((T * nl, nx))
h_temp = zeros((T * nl, 1))
for i in range(T):
G_temp[i * nl:(i + 1) * nl, phg * nh * T + i * nh:phg * nh * T + (i + 1) * nh] = -(
Distribution_factor * Ch).todense()
G_temp[i * nl:(i + 1) * nl, pwc * nh * T + i * nw: pwc * nh * T + (i + 1) * nw] = (
Distribution_factor * Cw).todense()
G_temp[i * nl:(i + 1) * nl,
pwc * nh * T + nw * T + i * nb:pwc * nh * T + nw * T + (i + 1) * nb] = -Distribution_factor.todense()
G_temp[i * nl:(i + 1) * nl,
pwc * nh * T + nw * T + nb * T + i * nex:pwc * nh * T + nw * T + nb * T + (i + 1) * nex] = (
Distribution_factor * Cex).todense()
M_temp[i * nl:(i + 1) * nl, nh * T + i * nw: nh * T + (i + 1) * nw] = -(
Distribution_factor * Cw).todense()
M_temp[i * nl:(i + 1) * nl,
nh * T + nw * T + i * nb: nh * T + nw * T + (i + 1) * nb] = Distribution_factor.todense()
h_temp[i * nl:(i + 1) * nl, :] = -PLMAX
G = concatenate([G, G_temp])
M = concatenate([M, M_temp])
E = concatenate([E, E_temp])
h = concatenate([h, h_temp])
# 3.7) Capacity constraints
E_temp = zeros((T, NX))
M_temp = zeros((T, nu))
G_temp = zeros((T, nx))
h_temp = zeros((T, 1))
for i in range(T):
G_temp[i, pwc * nh * T + nw * T + nb * T + nex * T] = -1
G_temp[i, pwc * nh * T + nw * T + nb * T + i * nex] = 1
G = concatenate([G, G_temp])
M = concatenate([M, M_temp])
E = concatenate([E, E_temp])
h = concatenate([h, h_temp])
# 3.8) Dispatch range constraints
# Wind curtailment range
E_temp = zeros((T * nw, NX))
M_temp = zeros((T * nw, nu))
G_temp = zeros((T * nw, nx))
h_temp = zeros((T * nw, 1))
for i in range(T):
for j in range(nw):
G_temp[i * nw + j, pwc * nh * T + i * nw + j] = -1
M_temp[i * nw + j, nh * T + i * nw + j] = 1
G = concatenate([G, G_temp])
M = concatenate([M, M_temp])
E = concatenate([E, E_temp])
    h = concatenate([h, h_temp])
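    # --- Hedged sketch (assumption): the remaining dispatch-range blocks (e.g.
    # load curtailment) are omitted here; the second-stage model would then be
    # packaged analogously to model_first_stage. The dict keys and the return
    # signature below are illustrative, not from the original file. ---
    model_second_stage = {"c": c,
                          "lb": lb,
                          "ub": ub,
                          "G": G,
                          "M": M,
                          "E": E,
                          "h": h,
                          "u_mean": u_mean,
                          "u_delta": u_delta,
                          "vtypes": vtypes}
    return model_first_stage, model_second_stage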
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import time
import netCDF4 as nc
import matplotlib.pylab as pl
from scipy.interpolate import interp2d
from mpl_toolkits.basemap import Basemap
import operator
import datetime as dt
from matplotlib.patches import Rectangle
# Personal libraries
import biogeolib as bg
# Declaration of functions
# ||==========================================
# || ||
# ||              Miscellaneous              ||
# || ||
# || ||
# ==========================================||
def height_trapezoid(A, h1, delta):
"""
    Compute the height h of a trapezoid of area A whose short side lies at
    distance h1 from the circle center, for an opening angle delta.
    Solves the degree-2 polynomial obtained from L = 2*(h1 + h)*tan(delta/2)
    and A = (l + L)*h/2.
"""
l = 2 * h1 * np.tan(delta/2)
d = l**2 + 4 * np.tan(delta/2) * (h1 * l + 4 * A + h1**2 * np.tan(delta/2))
h = (2 * h1 * np.tan(delta/2) - l + np.sqrt(d)) / (4 * np.tan(delta/2))
return h - h1
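def _check_height_trapezoid(A=12.0, h1=3.0, delta=np.pi / 6):
    """Illustrative numeric check (not in the original module; the argument values
    are arbitrary): the returned height h must satisfy the annular-sector area
    identity A = tan(delta/2) * ((h1 + h)**2 - h1**2)."""
    h = height_trapezoid(A, h1, delta)
    return np.isclose(np.tan(delta / 2) * ((h1 + h) ** 2 - h1 ** 2), A)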
def compute_directions(speed, theta, nb_directions, bins_classes):
nb_classes = len(bins_classes) - 1
#flatten data and remove nan
    speed = speed[~np.isnan(speed)]
#!/usr/bin/env python
"""
buspower.py: Plot the current, voltage, and power of the HRC Primary bus
"""
__author__ = "Dr. <NAME>"
__license__ = "MIT"
import sys
import datetime as dt
from astropy.io import ascii
import matplotlib.pyplot as plt
from matplotlib.dates import epoch2num
try:
import plotly.plotly as py
import plotly.graph_objs as go
except ImportError:
print("plotly is not installed. Disabling this functionality.")
import numpy as np
from scipy import stats
def convert_time(rawtimes):
"""
Convert input CXC time (sec) to the time base required for the matplotlib
plot_date function (days since start of the Year 1 A.D - yes, really).
    :param rawtimes: iterable list of times, in units of CXCsec (sec since 1998.0)
:rtype: plot_date times (days since Year 1 A.D.)
"""
# rawtimes is in units of CXC seconds, or seconds since 1998.0
# Compute the Delta T between 1998.0 (CXC's Epoch) and 1970.0 (Unix Epoch)
seconds_since_1998_0 = rawtimes[0]
cxctime = dt.datetime(1998, 1, 1, 0, 0, 0)
unixtime = dt.datetime(1970, 1, 1, 0, 0, 0)
# Calculate the first offset from 1970.0, needed by matplotlib's plotdate
# The below is equivalent (within a few tens of seconds) to the command
# t0 = Chandra.Time.DateTime(times[0]).unix
delta_time = (cxctime - unixtime).total_seconds() + seconds_since_1998_0
plotdate_start = epoch2num(delta_time)
# Now we use a relative offset from plotdate_start
# the number 86,400 below is the number of seconds in a UTC day
times = ( | np.asarray(rawtimes) | numpy.asarray |
import time
import queue
import warnings
import numpy as np
import reip
class Rebuffer(reip.Block):
    '''Collect incoming numpy arrays and re-emit them merged into buffers of a different size (specified as a sample count or a duration).'''
def __init__(self, size=None, duration=None, *a, sr_key='sr', **kw):
assert size or duration, 'You must specify a size or duration.'
self.size = size
self.duration = duration
self.sr_key = sr_key
super().__init__(**kw)
def init(self):
self._q = queue.Queue()
self.sr = None
self.current_size = 0
def _calc_size(self, meta):
# calculate the size the first time using the sr in metadata
if not self.size and meta:
self.sr = self.sr or meta[self.sr_key]
self.size = self.duration * self.sr
def process(self, x, meta):
self._calc_size(meta)
# place buffer into queue. If buffer is full, gather buffers and emit.
if self._place_buffer(x, meta):
self.log.debug(f"full: {self.current_size} >= {self.size}")
return self._gather_buffers()
def _place_buffer(self, x, meta):
'''put items on the queue and return if the buffer is full.'''
xsize = self._get_size(x)
self.current_size += xsize
self._q.put((x, meta, xsize))
return self.current_size >= self.size
def _gather_buffers(self):
'''Read items from the queue and emit once it's reached full size.'''
size = 0
xs, metas = [], []
while size < self.size:
x, meta, xsize = self._q.get()
xs.append(x)
metas.append(meta)
size += xsize
self.current_size -= xsize
return [self._merge_buffers(xs)], self._merge_meta(metas)
def _get_size(self, buf):
'''Return the size of a buffer, based on it's type.'''
return len(buf)
def _merge_buffers(self, bufs):
return np.concatenate(bufs)
def _merge_meta(self, metas):
return metas[0]
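def _demo_rebuffer(n_blocks=16, block_len=4096, sr=48000):
    """Hedged usage sketch (assumptions: reip.Block can be instantiated standalone
    and driven by calling init()/process() directly; in a real deployment the block
    would be wired into a reip pipeline instead). Feeds fixed-size chunks until the
    block emits one merged buffer of roughly `sr` rows."""
    rb = Rebuffer(duration=1)
    rb.init()
    for _ in range(n_blocks):
        out = rb.process(np.zeros((block_len, 1)), {'sr': sr})
        if out is not None:
            (merged,), meta = out
            return merged.shape  # about (sr, 1) once a full second has accumulated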
class FastRebuffer(reip.Block):
def __init__(self, size=None, **kw):
assert size , 'You must specify a size.'
self.size = size
super().__init__(**kw)
def init(self):
self.buffers, self.meta = None, None
self.current_pos = 0
def process(self, x, meta):
# calculate the size the first time using the sr in metadata
if self.buffers is None:
shape = list(x.shape)
shape[0] = shape[0] * self.size
# self.buffers = np.concatenate([np.zeros_like(x)] * self.size)
self.buffers = np.zeros(tuple(shape), dtype=x.dtype)
self.meta = dict(meta)
self.current_pos = 0
l, i = x.shape[0], self.current_pos
self.buffers[i*l:(i+1)*l, ...] = x[...]
self.current_pos = (self.current_pos + 1) % self.size
if self.current_pos == 0:
ret = [self.buffers], self.meta
self.buffers, self.meta = None, None
else:
ret = None
return ret
class GatedRebuffer(reip.Block):
'''Rebuffer while also sampling sparsely in time. For example, sampling audio files
while leaving gaps between them for privacy reasons.'''
TIME_KEY = 'time'
def __init__(self, sampler, size=10, **kw):
self.sampler = (
sampler if callable(sampler) else
(lambda: sampler) if sampler else None)
self.size = size
self.pause_until = 0
self.allow_index = None
super().__init__(**kw)
def check_input_skip(self, meta):
if not self.sampler:
return
t0 = meta.get(self.TIME_KEY) or time.time()
# check if we are currently allowing buffers
if self.allow_index is not None:
self.log.debug(f'allow: {self.processed} - {self.allow_index} >= {self.size}')
if self.processed - self.allow_index < self.size: # under the limit
return
# we've exceeded
self.allow_index = None
s = self.sampler()
self.pause_until = t0 + s
self.log.debug(f'will be sleeping for {s:.3g}s')
# check if we should be pausing
if t0 < self.pause_until:
return True
# if not, record the processed count
self.allow_index = self.processed
self.log.debug(f'allow_index: {self.allow_index}')
def init(self):
self.buffers = []
self.meta = []
def finish(self):
self.clear()
def clear(self):
self.buffers.clear()
self.meta.clear()
def process(self, x, meta):
if self.check_input_skip(meta):
return
self.log.debug(f'collecting buffer: {x.shape}')
self.buffers.append(x)
self.meta.append(meta)
if len(self.buffers) >= self.size:
            X = np.concatenate(self.buffers)
import unittest
import numpy as np
class Test_UFUNCTests(unittest.TestCase):
#region UFUNC ADD tests
def test_UFUNC_AddAccumlate_1(self):
x = np.arange(8);
a = np.add.accumulate(x)
print(a)
x = np.arange(8).reshape((2,2,2))
b = np.add.accumulate(x)
print(b)
c = np.add.accumulate(x, 0)
print(c)
d = np.add.accumulate(x, 1)
print(d)
e = np.add.accumulate(x, 2)
print(e)
def test_UFUNC_AddReduce_1(self):
x = np.arange(8);
a = np.add.reduce(x)
print(a)
x = np.arange(8).reshape((2,2,2))
b = np.add.reduce(x)
print(b)
c = np.add.reduce(x, 0)
print(c)
d = np.add.reduce(x, 1)
print(d)
e = np.add.reduce(x, 2)
print(e)
def test_UFUNC_AddReduce_2(self):
x = np.arange(8).reshape((2,2,2))
b = np.add.reduce(x)
print(b)
c = np.add.reduce(x, (0,1))
print(c)
d = np.add.reduce(x, (1,2))
print(d)
e = np.add.reduce(x, (2,1))
print(e)
def test_UFUNC_AddReduceAt_1(self):
a =np.add.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2]
print(a)
print("**********")
x = np.linspace(0, 15, 16).reshape(4,4)
b = np.add.reduceat(x, [0, 3, 1, 2, 0])
print(b)
print("**********")
c = np.multiply.reduceat(x, [0, 3], axis = 1)
print(c)
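    def test_UFUNC_AddReduceAt_windows(self):
        # Illustrative addition (not in the original suite): with the interleaved
        # index list [0,4, 1,5, 2,6, 3,7], reduceat alternates between a 4-element
        # range reduction and a single-element slice, so the [::2] stride keeps the
        # sums of the sliding windows x[0:4], x[1:5], x[2:6], x[3:7].
        a = np.add.reduceat(np.arange(8), [0, 4, 1, 5, 2, 6, 3, 7])[::2]
        print(a)  # [ 6 10 14 18]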
def test_UFUNC_AddOuter_1(self):
x = np.arange(4);
a = np.add.outer(x, x)
print(a.shape)
print(a)
x = np.arange(6).reshape((3,2))
y = np.arange(6).reshape((2,3))
b = np.add.outer(x, y)
print(b.shape)
print(b)
#endregion
#region UFUNC SUBTRACT tests
def test_UFUNC_SubtractAccumulate_1(self):
x = np.arange(8);
a = np.subtract.accumulate(x)
print(a)
x = np.arange(8).reshape((2,2,2))
b = np.subtract.accumulate(x)
print(b)
c = np.subtract.accumulate(x, 0)
print(c)
d = np.subtract.accumulate(x, 1)
print(d)
e = np.subtract.accumulate(x, 2)
print(e)
def test_UFUNC_SubtractReduce_1(self):
x = np.arange(8);
a = np.subtract.reduce(x)
print(a)
x = np.arange(8).reshape((2,2,2))
b = np.subtract.reduce(x)
print(b)
c = np.subtract.reduce(x, 0)
print(c)
d = np.subtract.reduce(x, 1)
print(d)
e = np.subtract.reduce(x, 2)
print(e)
def test_UFUNC_SubtractReduceAt_1(self):
a =np.subtract.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2]
print(a)
print("**********")
x = np.linspace(0, 15, 16).reshape(4,4)
b = np.subtract.reduceat(x, [0, 3, 1, 2, 0])
print(b)
print("**********")
c = np.multiply.reduceat(x, [0, 3], axis = 1)
print(c)
def test_UFUNC_SubtractOuter_1(self):
x = np.arange(4);
a = np.subtract.outer(x, x)
print(a.shape)
print(a)
x = np.arange(6).reshape((3,2))
y = np.arange(6).reshape((2,3))
b = np.subtract.outer(x, y)
print(b.shape)
print(b)
#endregion
#region UFUNC MULTIPLY tests
def test_UFUNC_MultiplyAccumulate_1(self):
x = np.arange(8);
a = np.multiply.accumulate(x)
print(a)
x = np.arange(8).reshape((2,2,2))
b = np.multiply.accumulate(x)
print(b)
c = np.multiply.accumulate(x, 0)
print(c)
d = np.multiply.accumulate(x, 1)
print(d)
e = np.multiply.accumulate(x, 2)
print(e)
def test_UFUNC_MultiplyReduce_1(self):
x = np.arange(8);
a = np.multiply.reduce(x)
print(a)
x = np.arange(8).reshape((2,2,2))
b = np.multiply.reduce(x)
print(b)
c = np.multiply.reduce(x, 0)
print(c)
d = np.multiply.reduce(x, 1)
print(d)
e = np.multiply.reduce(x, 2)
print(e)
def test_UFUNC_MultiplyReduceAt_1(self):
a =np.multiply.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2]
print(a)
print("**********")
x = np.linspace(0, 15, 16).reshape(4,4)
b = np.multiply.reduceat(x, [0, 3, 1, 2, 0])
print(b)
print("**********")
c = np.multiply.reduceat(x, [0, 3], axis = 1)
print(c)
def test_UFUNC_MultiplyOuter_1(self):
x = np.arange(4);
a = np.multiply.outer(x, x)
print(a.shape)
print(a)
x = np.arange(6).reshape((3,2))
y = np.arange(6).reshape((2,3))
b = np.multiply.outer(x, y)
print(b.shape)
print(b)
#endregion
#region UFUNC DIVIDE tests
def test_UFUNC_DivideAccumulate_1(self):
x = np.arange(8, 16, dtype=np.float64);
a = np.divide.accumulate(x)
print(a)
x = np.arange(8, 16, dtype=np.float64).reshape((2,2,2))
b = np.divide.accumulate(x)
print(b)
c = np.divide.accumulate(x, 0)
print(c)
d = np.divide.accumulate(x, 1)
print(d)
e = np.divide.accumulate(x, 2)
print(e)
def test_UFUNC_DivideReduce_1(self):
x = np.arange(8, 16, dtype=np.float64);
a = np.divide.reduce(x)
print(a)
print("*****")
x = np.arange(8, 16, dtype=np.float64).reshape((2,2,2))
b = np.divide.reduce(x)
print(b)
print("*****")
c = np.divide.reduce(x, 0)
print(c)
print("*****")
d = np.divide.reduce(x, 1)
print(d)
print("*****")
e = np.divide.reduce(x, 2)
print(e)
def test_UFUNC_DivideReduceAt_1(self):
a =np.divide.reduceat(np.arange(8, 16, dtype=np.float64),[0,4, 1,5, 2,6, 3,7])[::2]
print(a)
print("**********")
x = np.linspace(0, 15, 16).reshape(4,4)
b = np.divide.reduceat(x, [0, 3, 1, 2, 0])
print(b)
print("**********")
c = np.divide.reduceat(x, [0, 3], axis = 1)
print(c)
def test_UFUNC_DivideOuter_1(self):
x = np.arange(4, 8, dtype=np.float64);
a = np.divide.outer(x, x)
print(a.shape)
print(a)
x = np.arange(8,14, dtype=np.float64).reshape((3,2))
y = np.arange(8,14, dtype=np.float64).reshape((2,3))
b = np.divide.outer(x, y)
print(b.shape)
print(b)
#endregion
#region UFUNC REMAINDER tests
def test_UFUNC_RemainderAccumulate_1(self):
x = np.arange(16, 8, -1, dtype=np.float64);
a = np.remainder.accumulate(x)
print(a)
x = np.arange(16, 8, -1, dtype=np.float64).reshape((2,2,2))
b = np.remainder.accumulate(x)
print(b)
c = np.remainder.accumulate(x, 0)
print(c)
d = np.remainder.accumulate(x, 1)
print(d)
e = np.remainder.accumulate(x, 2)
print(e)
def test_UFUNC_RemainderReduce_1(self):
x = np.arange(16, 8, -1, dtype=np.float64);
a = np.remainder.reduce(x)
print(a)
print("*****")
x = np.arange(16, 8, -1, dtype=np.float64).reshape((2,2,2))
b = np.remainder.reduce(x)
print(b)
print("*****")
        c = np.remainder.reduce(x, 0)
import numpy as np
from dnnv.nn.converters.tensorflow import *
from dnnv.nn.operations import *
def test_Concat_consts():
x0 = np.arange(5)
x1 = np.arange(10, 20)
op = Concat([x0, x1], 0)
tf_op = TensorflowConverter().visit(op)
result = tf_op().numpy()
y = np.array([0, 1, 2, 3, 4, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19])
assert np.all(result == y)
def test_Concat_x0_is_op():
x0 = np.arange(5)
x1 = np.arange(10, 20)
input_op0 = Input((5,), np.dtype(np.int64))
op = Concat([input_op0, x1], 0)
tf_op = TensorflowConverter().visit(op)
result = tf_op(x0).numpy()
y = np.array([0, 1, 2, 3, 4, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19])
assert np.all(result == y)
def test_Concat_x1_is_op():
x0 = np.arange(5)
x1 = np.arange(10, 20)
input_op1 = Input((10,), np.dtype(np.int64))
op = Concat([x0, input_op1], 0)
tf_op = TensorflowConverter().visit(op)
result = tf_op(x1).numpy()
y = np.array([0, 1, 2, 3, 4, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19])
assert np.all(result == y)
def test_Concat_x0_x1_are_op():
x0 = np.arange(5)
x1 = np.arange(10, 20)
input_op0 = Input((5,), np.dtype(np.int64))
input_op1 = Input((10,), np.dtype(np.int64))
op = Concat([input_op0, input_op1], 0)
tf_op = TensorflowConverter().visit(op)
result = tf_op(x0, x1).numpy()
y = np.array([0, 1, 2, 3, 4, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19])
assert np.all(result == y)
def test_Concat_1d():
x0 = np.array([1, 2], dtype=np.float32)
    x1 = np.array([3, 4], dtype=np.float32)
"""Implementation of local2global algorithm"""
# Copyright (c) 2021. <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import scipy as sp
from scipy import sparse as ss
from scipy.linalg import orthogonal_procrustes
import numpy as np
from scipy.sparse.linalg import lsqr, lsmr
from scipy.spatial import procrustes
import copy
from collections import defaultdict
from typing import List, Callable, Any
import networkx as nx
from pathlib import Path
import json
import ilupp
from tqdm.auto import tqdm
from .patch import Patch
rg = np.random.default_rng()
eps = np.finfo(float).eps
def seed(new_seed):
"""
Change seed of random number generator.
Args:
new_seed: New seed value
"""
global rg
rg = np.random.default_rng(new_seed)
def ensure_extension(filename, extension):
"""
check filename for extension and add it if necessary
Args:
filename: input filename
extension: desired extension (including `.`)
Returns:
filename with extension added
Raises:
ValueError: if filename has the wrong extension
"""
filename = Path(filename)
if filename.suffix == "":
filename = filename.with_suffix(extension)
elif filename.suffix != extension:
raise ValueError(f"filename should have extension {extension}, not {filename.suffix}")
return filename
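# Example behaviour: ensure_extension('patches', '.json') returns Path('patches.json'),
# ensure_extension('patches.json', '.json') returns Path('patches.json') unchanged, and
# ensure_extension('patches.txt', '.json') raises ValueError.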
def procrustes_error(coordinates1, coordinates2):
"""
compute the procrustes alignment error between two sets of coordinates
Args:
coordinates1: First set of coordinates (array-like)
coordinates2: Second set of coordinates (array-like)
Note that the two sets of coordinates need to have the same shape.
"""
return procrustes(coordinates1, coordinates2)[2]
def local_error(patch: Patch, reference_coordinates):
"""
compute the euclidean distance between patch coordinate and reference
coordinate for each node in patch
Args:
patch:
reference_coordinates:
Returns:
vector of error values
"""
return np.linalg.norm(reference_coordinates[patch.nodes, :] - patch.coordinates, axis=1)
def transform_error(transforms):
"""
Compute the recovery error based on tracked transformations.
After recovery, all transformations should be constant across patches
as we can recover the embedding only up to a global scaling/rotation/translation.
The error is computed as the mean over transformation elements of the standard deviation over patches.
Args:
transforms: list of transforms
"""
return np.mean(np.std(transforms, axis=0))
def orthogonal_MSE_error(rots1, rots2):
"""
Compute the MSE between two sets of orthogonal transformations up to a global transformation
Args:
rots1: First list of orthogonal matrices
rots2: Second list of orthogonal matrices
"""
dim = len(rots1[0])
rots1 = np.asarray(rots1)
rots1 = rots1.transpose((0, 2, 1))
rots2 = np.asarray(rots2)
combined = np.mean(rots1 @ rots2, axis=0)
_, s, _ = sp.linalg.svd(combined)
return 2*(dim - np.sum(s))
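# Quick sanity check (illustrative, 2D): for two identical lists of rotations the
# error is 0, and it stays 0 when one list is pre-multiplied by a common global
# rotation, since the error is only defined up to a global transformation.
#
#     theta = np.pi / 7
#     R = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
#     rots = [np.eye(2), R]
#     orthogonal_MSE_error(rots, rots)                     # ~0.0
#     orthogonal_MSE_error(rots, [R @ r for r in rots])    # ~0.0 (2D rotations commute)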
def _cov_svd(coordinates1: np.ndarray, coordinates2: np.ndarray):
"""
Compute SVD of covariance matrix between two sets of coordinates
Args:
coordinates1: First set of coordinates (array-like)
coordinates2: Second set of coordinates (array-like)
Note that the two sets of coordinates need to have the same shape.
"""
coordinates1 = coordinates1 - coordinates1.mean(axis=0)
coordinates2 = coordinates2 - coordinates2.mean(axis=0)
cov = coordinates1.T @ coordinates2
return sp.linalg.svd(cov)
def relative_orthogonal_transform(coordinates1, coordinates2):
"""
Find the best orthogonal transformation aligning two sets of coordinates for the same nodes
Args:
coordinates1: First set of coordinates (array-like)
coordinates2: Second set of coordinates (array-like)
Note that the two sets of coordinates need to have the same shape.
"""
# Note this is completely equivalent to the approach in
# "Closed-Form Solution of Absolute Orientation using Orthonormal Matrices"
# Journal of the Optical Society of America A · July 1988
U, s, Vh = _cov_svd(coordinates1, coordinates2)
return U @ Vh
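# Illustrative usage: if coordinates2 is an orthogonally transformed copy of
# coordinates1 (coordinates2 = coordinates1 @ Q), the function recovers Q, i.e.
# the transform W with coordinates1 @ W ~= coordinates2 after centering.
#
#     coords1 = np.array([[0., 0.], [1., 0.], [0., 1.], [2., 3.]])
#     theta = 0.3
#     Q = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
#     W = relative_orthogonal_transform(coords1, coords1 @ Q)
#     # W ~= Q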
def nearest_orthogonal(mat):
"""
Compute nearest orthogonal matrix to a given input matrix
Args:
mat: input matrix
"""
U, s, Vh = sp.linalg.svd(mat)
return U @ Vh
def relative_scale(coordinates1, coordinates2, clamp=1e8):
"""
compute relative scale of two sets of coordinates for the same nodes
Args:
coordinates1: First set of coordinates (array-like)
coordinates2: Second set of coordinates (array-like)
Note that the two sets of coordinates need to have the same shape.
"""
scale1 = np.linalg.norm(coordinates1 - np.mean(coordinates1, axis=0))
scale2 = np.linalg.norm(coordinates2 - np.mean(coordinates2, axis=0))
if scale1 > clamp * scale2:
print('extremely large scale clamped')
return clamp
if scale1 * clamp < scale2:
print('extremely small scale clamped')
return 1/clamp
return scale1 / scale2
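# Illustrative behaviour: scaling one copy of a point set changes its spread
# proportionally, so the ratio of spreads is recovered directly.
#
#     coords = np.array([[0., 0.], [1., 0.], [0., 1.]])
#     relative_scale(coords, 2.0 * coords)   # -> 0.5
#     relative_scale(2.0 * coords, coords)   # -> 2.0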
class AlignmentProblem:
"""
Implements the standard local2global algorithm using an unweighted patch graph
"""
n_nodes = None
"""total number of nodes"""
n_patches = None
"""number of patches"""
dim = None
"""embedding dimension"""
scales = None
"""tracks scale transformations applied to patches (updated by :meth:`scale_patches`)"""
rotations = None
"""tracks orthogonal transformations applied to patches (updated by :meth:`rotate_patches`)"""
shifts = None
"""tracks translation transformations applied to patches (updated by :meth:`scale_patches`,
:meth:`rotate_patches`, and :meth:`translate_patches`)"""
verbose = False
"""print debug output if `True`"""
def weight(self, i, j):
"""Compute the weighting factor for a pair of patches
Args:
i: First patch index
j: Second patch index
Returns:
1
Override this in subclasses for weighted alignment
"""
return 1
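    # Subclassing sketch (hypothetical): a weighted variant only needs to override
    # `weight`, e.g. using the size of the overlap between patches i and j, which is
    # available as self.patch_overlap[i, j] once __init__ below has run.
    #
    #     class OverlapWeightedProblem(AlignmentProblem):
    #         def weight(self, i, j):
    #             return len(self.patch_overlap[i, j])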
def __init__(self, patches: List[Patch], patch_edges=None,
min_overlap=None, copy_data=True, self_loops=False, verbose=False):
"""
Initialise the alignment problem with a list of patches
Args:
patches: List of patches to synchronise
patch_edges: if provided, only compute relative transformations for given patch edges (all pairs of patches
with at least ``min_overlap`` points in common are included by default)
min_overlap (int): minimum number of points in the overlap required for two patches to be considered
connected (defaults to `dim+1`) where `dim` is the embedding dimension of the patches
copy_data (bool): if ``True``, input patches are copied (default: ``True``)
self_loops (bool): if ``True``, self-loops from a patch to itself are included in the synchronisation problem
(default: ``False``)
verbose(bool): if True print diagnostic information (default: ``False``)
"""
if copy_data:
self.patches = [copy.copy(patch) for patch in patches]
else:
self.patches = patches
self.verbose = verbose
self.n_nodes = max(max(patch.index.keys()) for patch in self.patches) + 1
self.n_patches = len(self.patches)
self.dim = self.patches[0].shape[1]
self.scales = np.ones(self.n_patches)
self.rotations = np.tile(np.eye(self.dim), (self.n_patches, 1, 1))
self.shifts = np.zeros((self.n_patches, self.dim))
self._aligned_embedding = None
if min_overlap is None:
min_overlap = self.dim + 1
# create an index for the patch membership of each node
self.patch_index = [[] for _ in range(self.n_nodes)]
for i, patch in enumerate(self.patches):
for node in patch.nodes:
self.patch_index[node].append(i)
# find patch overlaps
self.patch_overlap = defaultdict(list)
for i, patch in enumerate(self.patches):
for node in patch.index:
for j in self.patch_index[node]:
if self_loops or i != j:
self.patch_overlap[i, j].append(node)
# restrict to patch edges if provided
if patch_edges is not None:
self.patch_overlap = {e: self.patch_overlap[e] for e in patch_edges}
# remove small overlaps
keys = list(self.patch_overlap.keys())
for e in keys:
if self_loops or e[0] != e[1]:
if len(self.patch_overlap[e]) < min_overlap:
if patch_edges is None:
del self.patch_overlap[e]
else:
raise RuntimeError("Patch edges do not satisfy minimum overlap")
else:
del self.patch_overlap[e] # remove spurious self-loops
# find patch degrees
self.patch_degrees = [0] * self.n_patches
for i, j in self.patch_overlap.keys():
self.patch_degrees[i] += 1
patch_graph = nx.Graph()
patch_graph.add_edges_from(self.patch_overlap.keys())
if nx.number_connected_components(patch_graph) > 1:
raise RuntimeError("patch graph is not connected")
if self.verbose:
print(f'mean patch degree: {np.mean(self.patch_degrees)}')
def scale_patches(self, scale_factors=None):
"""
Synchronise scales of the embeddings for each patch
Args:
scale_factors: if provided apply the given scales instead of synchronising
"""
if scale_factors is None:
scale_factors = [1 / x for x in self.calc_synchronised_scales()]
for i, scale in enumerate(scale_factors):
self.patches[i].coordinates *= scale
# track transformations
self.scales[i] *= scale
self.shifts[i] *= scale
return self
def calc_synchronised_scales(self, max_scale=1e8):
"""
Compute the scaling transformations that best align the patches
Args:
max_scale: maximum allowed scale (all scales are clipped to the range [``1/max_scale``, ``max_scale``])
(default: 1e8)
Returns:
list of scales
"""
scaling_mat = self._transform_matrix(lambda ov1, ov2: relative_scale(ov1, ov2, max_scale), 1)
vec = self._synchronise(scaling_mat, 1)
vec = vec.flatten()
vec = np.abs(vec)
vec /= vec.mean()
        vec = np.clip(vec, a_min=1/max_scale, a_max=max_scale, out=vec)
        return vec
# Imports
import numpy as np
import cv2
import os
import torch
import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor
# get_transforms below refers to a transforms module as T; torchvision.transforms is
# imported here so the name resolves (the original detection tutorial uses a local
# references/detection transforms helper instead).
import torchvision.transforms as T
saved_model = "saved_model" # Output directory of the saved model
num_classes = 3 # Number of classes
threshold = 0.1 # Minimum threshold for pixel-wise mask segmentation
filename = "beagle_1.jpg" # Image filename
img_path = "OxfordDataset/Images_val/" + filename
def get_model(num_classes):
# Load instance segmentation model pre-trained on COCO
model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained = True)
# Get the number of input features
in_features = model.roi_heads.box_predictor.cls_score.in_features
# Replace the pre-trained head with a new head
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
# Get the number of input features for mask classification
in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
hidden_layer = 256
# Replace the mask predictor with new one
model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, hidden_layer, num_classes)
return model
def get_transforms(train):
transforms = []
# Convert numpy image to PyTorch Tensor
transforms.append(T.ToTensor())
if train:
# Data augmentation
transforms.append(T.RandomHorizontalFlip(0.5))
return T.Compose(transforms)
img = cv2.imread(img_path)
# Create a copy of the original image
img_cp = img.copy()
# Convert BGR to RGB
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# Convert numpy to torch tensor and reshape
img = torchvision.transforms.ToTensor()(img)
# Load the model
loaded_model = get_model(num_classes)
loaded_model.load_state_dict(torch.load(os.path.join(saved_model, 'model'), map_location = 'cpu'))
loaded_model.eval()
with torch.no_grad():
prediction = loaded_model([img])
# Get bounding box from prediction
box = prediction[0]['boxes'][0]
box = np.array(box).astype("int")
# Get label id from prediction
label_id = prediction[0]['labels']
label_id = np.array(label_id)
label_id = label_id[0]
# Get score from prediction
score = prediction[0]['scores']
score = np.array(score)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 28 13:25:28 2020
@author: <NAME>, <NAME>
"""
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import operator
import sklearn as sklearn
import xgboost as xgb
from Functions_downselection_training_RF import plot_RF_test #plot_heatmap, plot_RF_test, splitAndScale, scale, inverseScale, compare_features_barplot
# In[4]:
def analyze_XGB_for_multiple_seeds(list_X, list_y, ho_params = None, n_seeds = 20, save_pickle = False, bar_plot = True, groups = None, groups_only_for_plotting = False, test_proportion = 0.21, top_n = 20, plotting=True, saveas = None, title=True):
n_datasets = len(list_X)
# Let's repeat y stratification. At the same, let's create a dataset for
# RF hyperparameter optimization.
R2_all2 = np.zeros((n_seeds,n_datasets))
RMSE_all2 = np.zeros((n_seeds,n_datasets))
top_features_all2 = []
features_all2 = []
X_tests = []
y_tests = []
X_trains = []
y_trains = []
regressors = []
filenames = ['X_tests_imp', 'y_tests_imp', 'X_tests', 'y_tests',
'X_trains_imp', 'y_trains_imp', 'X_trains', 'y_trains']
for j in range(n_datasets):
if ho_params is not None:
n_estimators = ho_params[j]['n_estimators']
max_depth = ho_params[j]['max_depth']
gamma = ho_params[j]['gamma']
eta = ho_params[j]['eta']
top_features_temp = []
features_temp = []
X_tests_temp = []
y_tests_temp = []
X_trains_temp = []
y_trains_temp = []
regressors_temp = []
if title is not None:
title_temp = True
else:
title_temp = None
for i in range(n_seeds):
if saveas is not None:
saveas_temp = saveas+str(i)
else:
saveas_temp = saveas
if ho_params is None:
feature_weights, top_feature_weights, regressor, R2, RMSE, scaler_test, X_test, y_test, y_pred, X_train, y_train = XGB_feature_analysis(
list_X[j], list_y[j], groups=groups,
groups_only_for_plotting = groups_only_for_plotting,
test_indices = None, test_proportion = test_proportion,
top_n = top_n, i='', random_state = i,
sample_weighing = False, plotting=plotting, saveas = saveas_temp, title = title_temp)
else:
feature_weights, top_feature_weights, regressor, R2, RMSE, scaler_test, X_test, y_test, y_pred, X_train, y_train = XGB_feature_analysis(
list_X[j], list_y[j], groups=groups,
groups_only_for_plotting = groups_only_for_plotting,
test_indices = None, test_proportion = test_proportion,
top_n = top_n, i='', random_state = i,
sample_weighing = False, plotting=plotting, saveas = saveas_temp, title = title_temp,
max_depth= int(max_depth), gamma = gamma, n_estimators=n_estimators, eta = eta)
R2_all2[i,j] = R2
RMSE_all2[i,j] = RMSE
top_features_temp.append(top_feature_weights.copy())
features_temp.append(feature_weights.copy())
X_tests_temp.append(X_test.copy())
y_tests_temp.append(y_test.copy())
X_trains_temp.append(X_train.copy())
y_trains_temp.append(y_train.copy())
regressors_temp.append(regressor)
top_features_all2.append(top_features_temp)
features_all2.append(features_temp)
X_tests.append(X_tests_temp)
y_tests.append(y_tests_temp)
X_trains.append(X_trains_temp)
y_trains.append(y_trains_temp)
regressors.append(regressors_temp)
print('R2 and RMSE for dataset ', j, ': ', R2_all2[:,j], RMSE_all2[:,j])
print('Mean: ', np.mean(R2_all2[:,j]), np.mean(RMSE_all2[:,j]))
print('Std: ', np.std(R2_all2[:,j]), np.std(RMSE_all2[:,j]))
print('Min: ', np.min(R2_all2[:,j]), np.min(RMSE_all2[:,j]))
print('Max: ', np.max(R2_all2[:,j]), np.max(RMSE_all2[:,j]))
if save_pickle == True:
# Pickles for HO:
if j == 0:
save_to_pickle(X_tests, filenames[2])
save_to_pickle(y_tests, filenames[3])
save_to_pickle(X_trains, filenames[6])
save_to_pickle(y_trains, filenames[7])
if j == 1:
save_to_pickle(X_tests, filenames[0])
save_to_pickle(y_tests, filenames[1])
save_to_pickle(X_trains, filenames[4])
save_to_pickle(y_trains, filenames[5])
# Plot the results. Compare feature weights of two methods. E.g., here the top
# 50 feature weights of FilteredImportant dataset are compared to the top 50
# feature weights of the Filtered dataset.
if (bar_plot == True) and (n_datasets>1):
compare_features_barplot(top_features_all2[0][0], top_features_all2[1][0])
return R2_all2, RMSE_all2, top_features_all2, features_all2, X_tests, y_tests, X_trains, y_trains, regressors
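# Illustrative call (variable names X_filtered, X_important, y are placeholders):
#
#     R2s, RMSEs, top_feats, feats, X_te, y_te, X_tr, y_tr, models = \
#         analyze_XGB_for_multiple_seeds([X_filtered, X_important], [y, y],
#                                        n_seeds=20, test_proportion=0.21, top_n=20)
#
# R2s and RMSEs are (n_seeds, n_datasets) arrays; the remaining outputs are lists
# with one entry per dataset, each holding the per-seed results.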
# In[ ]:
# In[6]:
def XGB_feature_analysis(X, y, groups = None, groups_only_for_plotting = False,
test_indices = None, test_proportion = 0.1, top_n = 5,
n_estimators = 100, max_depth = 10,
gamma = 2, eta = 0.5, i='',
random_state = None, sample_weighing = None,
plotting = True, saveas = None, title = True):
"""
Splits 'X' and 'y' to train and test sets so that 'test_proportion' of
    samples is in the test set. Fits an XGBoost regression model to the data
    according to the XGB parameters ('n_estimators', 'max_depth', 'gamma',
    'eta'). Estimates feature importances and determines 'top_n' most
    important features. A plot and printouts for describing the results.
    Parameters:
        X (df): X data (features in columns, samples in rows)
        y (df): y data (one column, samples in rows)
        test_proportion (float, optional): Proportion of the test size from the original data.
        top_n (float, optional): The number of features in output 'top_feature_weights'
        n_estimators (int, optional): Number of boosting rounds (trees)
        max_depth (int, optional): Maximum depth of a tree
        gamma (float, optional): Minimum loss reduction required to make a further split on a leaf node
        eta (float, optional): Learning rate (step size shrinkage)
TO DO: Add value range that works for 5K dataset
i (int, optional): Optional numeric index for figure filename.
random_state (int, optional): Seed for train test split.
Returns:
feature_weights (df): weights of all the features
top_feature_weights (df): weights of the features with the most weight
        regressor (xgb.XGBRegressor): fitted XGBoost regressor
R2 (float): R2 value of the prediction for the test set.
"""
if test_proportion == 0:
# Use the whole dataset for both training and "testing".
X_train = X.copy()
X_test = X.copy()
y_train = y.copy()
y_test = y.copy()
elif test_proportion == None:
# Assume X and y are lists with two datasets...
# Use dataset 0 as train and dataset 1 as test.
X_train = X[0].copy()
X_test = X[1].copy()
y_train = y[0].copy()
y_test = y[1].copy()
else:
# Split into test and train sets, and scale with StandardScaler.
if test_indices is None:
if groups is not None:
if groups_only_for_plotting == False:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_proportion, random_state=random_state, stratify=groups)
else:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_proportion, random_state=random_state)
#shufflesplit = sklearn.model_selection.ShuffleSplit(n_splits=1, test_size=test_proportion, random_state=random_state)
#X_train, X_test, y_train, y_test = shufflesplit.split(X, y, groups=groups)
else:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_proportion, random_state=random_state)
else:
#X_test = X.copy() # Are these needed?
#y_test = y.copy() # Are these needed?
X_test = X[test_indices].copy()
y_test = y[test_indices].copy()
#X_train = X.copy()
#y_train = y.copy()
X_train = X[~test_indices].copy()
y_train = y[~test_indices].copy()
print(y_test)
if sample_weighing:
#sample_weight = np.divide(1,y_train.iloc[:,0]+0.1)
#sample_weight = np.abs(y_train.iloc[:,0]-8.5)
#sample_weight = np.abs(y_train.iloc[:,0]-4.1)
sample_weight = y_train.copy()
sample_weight[y_train<=3] = 5
sample_weight[y_train>=8] = 5
sample_weight[(y_train>3)&(y_train<8)] = 1
sample_weight = sample_weight.squeeze()
else:
sample_weight = None
params = {'eta': eta,
'gamma': gamma,
'max_depth': max_depth,
'n_estimators': n_estimators}
# if you want to do weighting, you can do it manually on y_train.
#print(params)
#print(np.array(X_train))
#print(np.array(y_train))
    regressor = xgb.XGBRegressor(**params).fit(np.array(X_train), np.array(y_train), sample_weight=sample_weight)
'''
The forward backward algorithm of hidden markov model (HMM) .
Mainly used in the E-step of IOHMM given the
(1) initial probabilities, (2) transition probabilities, and (3) emission probabilities.
A feature of this implementation is that it is vectorized to the greatest extent
that we use numpy matrix operation as much as possible.
We have only one for loop in forward/backward calculation,
which is necessary due to dynamic programming (DP).
Another feature of this implementation is that it is calculated at the log level,
so that it is more robust to long sequences.
'''
from __future__ import division
from builtins import range
import warnings
import numpy as np
from scipy.misc import logsumexp
warnings.simplefilter("ignore")
def forward_backward(log_prob_initial, log_prob_transition, log_Ey, log_state={}):
"""
The forward_backward algorithm.
Parameters
----------
log_prob_initial : array-like of shape (k, )
where k is the number of states of the HMM
The log of the probability of initial state at timestamp 0.
log_prob_initial_{i} is the log of the probability of being in state i
at timestamp 0.
log_prob_transition : array-like of shape (t-1, k, k)
where t is the number of timestamps (length) of the sequence.
log_prob_transition_{t, i, j} is the log of the probability of transferring
to state j from state i at timestamp t.
log_Ey : array-like of shape (t, k)
log_Ey_{t, i} is the log of the probability of observing emission variables
from state i at timestamp t.
log_state: dict(int -> array-like of shape (k, ))
timestamp i is a key of log_state if we know the state of that timestamp.
Mostly used in semi-supervised and supervised IOHMM.
log_state[t][i] is 0 and log_state[t][~i] is -np.Infinity
if we know the state is i at timestamp t.
Returns
-------
(1) posterior state log probability of each timestamp.
(2) posterior "transition" log probability of each timestamp.
(3) log likelihood of the sequence.
see https://en.wikipedia.org/wiki/Forward-backward_algorithm for details.
"""
log_alpha = forward(log_prob_initial, log_prob_transition, log_Ey, log_state)
log_beta = backward(log_prob_transition, log_Ey, log_state)
log_likelihood = cal_log_likelihood(log_alpha)
log_gamma = cal_log_gamma(log_alpha, log_beta, log_likelihood, log_state)
log_epsilon = cal_log_epsilon(log_prob_transition, log_Ey, log_alpha,
log_beta, log_likelihood, log_state)
return log_gamma, log_epsilon, log_likelihood
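# Illustrative toy call (the numbers below are made up, not from any real model):
# a 2-state HMM observed over t = 3 timestamps.
#
#     log_pi = np.log([0.6, 0.4])                       # shape (k,)
#     A = np.array([[0.7, 0.3],
#                   [0.4, 0.6]])
#     log_A = np.log(np.tile(A, (2, 1, 1)))             # shape (t-1, k, k)
#     log_Ey = np.log([[0.9, 0.1],
#                      [0.2, 0.8],
#                      [0.5, 0.5]])                      # shape (t, k)
#     log_gamma, log_epsilon, ll = forward_backward(log_pi, log_A, log_Ey)
#     # log_gamma: (t, k) posterior state log probabilities
#     # log_epsilon: posterior transition log probabilities between consecutive timestamps
#     # ll: scalar log likelihood of the sequence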
def forward(log_prob_initial, log_prob_transition, log_Ey, log_state={}):
"""
The forward function to calculate log of forward variable alpha.
Parameters
----------
log_prob_initial : array-like of shape (k, )
where k is the number of states of the HMM
The log of the probability of initial state at timestamp 0.
log_prob_initial_{i} is the log of the probability of being in state i
at timestamp 0.
log_prob_transition : array-like of shape (t-1, k, k)
where t is the number of timestamps (length) of the sequence.
log_prob_transition_{t, i, j} is the log of the probability of transferring
to state j from state i at timestamp t.
log_Ey : array-like of shape (t, k)
log_Ey_{t, i} is the log of the probability of observing emission variables
from state i at timestamp t.
log_state: dict(int -> array-like of shape (k, ))
timestamp i is a key of log_state if we know the state of that timestamp.
Mostly used in semi-supervised and supervised IOHMM.
log_state[t][i] is 0 and log_state[t][~i] is -np.Infinity
if we know the state is i at timestamp t.
Returns
-------
log_alpha : array-like of shape (t, k)
log of forward variable alpha.
see https://en.wikipedia.org/wiki/Forward-backward_algorithm for details.
"""
assert log_prob_initial.ndim == 1
assert log_prob_transition.ndim == 3
assert log_Ey.ndim == 2
t = log_Ey.shape[0]
k = log_Ey.shape[1]
    log_alpha = np.zeros((t, k))
import unittest
import numpy
from cqcpy import test_utils
import cqcpy.spin_utils as spin_utils
class SpinUtilsTest(unittest.TestCase):
def setUp(self):
self.thresh = 1e-14
def test_F_sym(self):
noa = 2
nva = 3
nob = 2
nvb = 3
Faa = test_utils.make_random_F(noa, nva)
Fbb = test_utils.make_random_F(nob, nvb)
F = spin_utils.F_to_spin(Faa, Fbb, noa, nva, nob, nvb)
z = F.oo[:noa, noa:]
s = numpy.linalg.norm(z) < self.thresh
err = "non-zero ab block of Foo"
self.assertTrue(s, err)
z = F.ov[:noa, nva:]
s = numpy.linalg.norm(z) < self.thresh
err = "non-zero ab block of Fov"
self.assertTrue(s, err)
z = F.vo[:nva, noa:]
s = numpy.linalg.norm(z) < self.thresh
err = "non-zero ab block of Fvo"
self.assertTrue(s, err)
        z = F.vv[:nva, nva:]
s = numpy.linalg.norm(z) < self.thresh
err = "non-zero ab block of Fvv"
self.assertTrue(s, err)
def test_I_sym(self):
noa = 3
nob = 2
nva = 2
nvb = 3
# random integrals over a,b spatial orbitals
I_aaaa = test_utils.make_random_Ifull(noa, nva)
I_bbbb = test_utils.make_random_Ifull(nob, nvb)
I_abab = test_utils.make_random_Ifull_gen(
noa, nva, nob, nvb, noa, nva, nob, nvb)
I = spin_utils.int_to_spin(I_aaaa, I_bbbb, I_abab, noa, nva, nob, nvb)
# check a coupled selected blocks that should be zero
z = I.oooo[noa:, :noa, :noa, :noa]
s = numpy.linalg.norm(z) < self.thresh
err = "non-zero abbb block of Ioooo"
self.assertTrue(s, err)
z = I.ooov[noa:, :noa, :noa, :nva]
s = numpy.linalg.norm(z) < self.thresh
err = "non-zero abbb block of Iooov"
self.assertTrue(s, err)
z = I.vovo[nva:, :noa, :nva, :noa]
s = numpy.linalg.norm(z) < self.thresh
err = "non-zero abbb block of Ivovo"
self.assertTrue(s, err)
z = I.vvvv[nva:, :nva, :nva, :nva]
s = numpy.linalg.norm(z) < self.thresh
err = "non-zero abbb block of Ivvvv"
self.assertTrue(s, err)
def test_I(self):
noa = 3
nob = 2
nva = 2
nvb = 3
# random integrals over a,b spatial orbitals
Ia_ref = test_utils.make_random_I(noa, nva)
Ib_ref = test_utils.make_random_I(nob, nvb)
Iabab_ref = test_utils.make_random_Ifull_gen(
noa, nva, nob, nvb, noa, nva, nob, nvb)
I = spin_utils.int_to_spin2(
Ia_ref, Ib_ref, Iabab_ref, noa, nva, nob, nvb)
Ia, Ib, Iabab = spin_utils.int_to_spatial(I, noa, nob, nva, nvb)
test = Ia.vvvv - Ia_ref.vvvv
s = numpy.linalg.norm(test) < self.thresh
err = "error in Ia vvvv integrals"
self.assertTrue(s, err)
test = Ia.vvvo - Ia_ref.vvvo
s = numpy.linalg.norm(test) < self.thresh
err = "error in Ia vvvo integrals"
self.assertTrue(s, err)
test = Ia.vovv - Ia_ref.vovv
s = numpy.linalg.norm(test) < self.thresh
err = "error in Ia vovv integrals"
self.assertTrue(s, err)
test = Ia.vvoo - Ia_ref.vvoo
s = numpy.linalg.norm(test) < self.thresh
err = "error in Ia vvoo integrals"
self.assertTrue(s, err)
test = Ia.vovo - Ia_ref.vovo
s = numpy.linalg.norm(test) < self.thresh
err = "error in Ia vovo integrals"
self.assertTrue(s, err)
test = Ia.oovv - Ia_ref.oovv
s = numpy.linalg.norm(test) < self.thresh
err = "error in Ia oovv integrals"
self.assertTrue(s, err)
test = Ia.vooo - Ia_ref.vooo
s = numpy.linalg.norm(test) < self.thresh
err = "error in Ia vooo integrals"
self.assertTrue(s, err)
test = Ia.ooov - Ia_ref.ooov
s = numpy.linalg.norm(test) < self.thresh
err = "error in Ia ooov integrals"
self.assertTrue(s, err)
test = Ia.oooo - Ia_ref.oooo
s = numpy.linalg.norm(test) < self.thresh
err = "error in Ia oooo integrals"
self.assertTrue(s, err)
test = Ib.vvvv - Ib_ref.vvvv
s = numpy.linalg.norm(test) < self.thresh
err = "error in Ib vvvv integrals"
self.assertTrue(s, err)
test = Ib.vvvo - Ib_ref.vvvo
s = numpy.linalg.norm(test) < self.thresh
err = "error in Ib vvvo integrals"
self.assertTrue(s, err)
test = Ib.vovv - Ib_ref.vovv
s = numpy.linalg.norm(test) < self.thresh
err = "error in Ib vovv integrals"
self.assertTrue(s, err)
test = Ib.vvoo - Ib_ref.vvoo
s = numpy.linalg.norm(test) < self.thresh
err = "error in Ib vvoo integrals"
self.assertTrue(s, err)
test = Ib.vovo - Ib_ref.vovo
s = numpy.linalg.norm(test) < self.thresh
err = "error in Ib vovo integrals"
self.assertTrue(s, err)
test = Ib.oovv - Ib_ref.oovv
s = numpy.linalg.norm(test) < self.thresh
err = "error in Ib oovv integrals"
self.assertTrue(s, err)
test = Ib.vooo - Ib_ref.vooo
s = numpy.linalg.norm(test) < self.thresh
err = "error in Ib vooo integrals"
self.assertTrue(s, err)
test = Ib.ooov - Ib_ref.ooov
s = numpy.linalg.norm(test) < self.thresh
err = "error in Ib ooov integrals"
self.assertTrue(s, err)
test = Ib.oooo - Ib_ref.oooo
s = numpy.linalg.norm(test) < self.thresh
err = "error in Ib oooo integrals"
self.assertTrue(s, err)
test = Iabab.vvvv - Iabab_ref.vvvv
s = numpy.linalg.norm(test) < self.thresh
err = "error in Iab vvvv integrals"
self.assertTrue(s, err)
test = Iabab.vvvo - Iabab_ref.vvvo
s = numpy.linalg.norm(test) < self.thresh
err = "error in Iab vvvo integrals"
self.assertTrue(s, err)
test = Iabab.vvov - Iabab_ref.vvov
s = numpy.linalg.norm(test) < self.thresh
err = "error in Iab vvov integrals"
self.assertTrue(s, err)
test = Iabab.vovv - Iabab_ref.vovv
s = numpy.linalg.norm(test) < self.thresh
err = "error in Iab vovv integrals"
self.assertTrue(s, err)
test = Iabab.ovvv - Iabab_ref.ovvv
s = numpy.linalg.norm(test) < self.thresh
err = "error in Iab ovvv integrals"
self.assertTrue(s, err)
test = Iabab.vvoo - Iabab_ref.vvoo
s = numpy.linalg.norm(test) < self.thresh
err = "error in Iab vvoo integrals"
self.assertTrue(s, err)
test = Iabab.vovo - Iabab_ref.vovo
s = numpy.linalg.norm(test) < self.thresh
err = "error in Iab vovo integrals"
self.assertTrue(s, err)
test = Iabab.voov - Iabab_ref.voov
s = numpy.linalg.norm(test) < self.thresh
err = "error in Iab voov integrals"
self.assertTrue(s, err)
test = Iabab.ovov - Iabab_ref.ovov
s = numpy.linalg.norm(test) < self.thresh
err = "error in Iab ovov integrals"
self.assertTrue(s, err)
test = Iabab.ovvo - Iabab_ref.ovvo
s = numpy.linalg.norm(test) < self.thresh
err = "error in Iab ovvo integrals"
self.assertTrue(s, err)
test = Iabab.oovv - Iabab_ref.oovv
s = numpy.linalg.norm(test) < self.thresh
err = "error in Iab oovv integrals"
self.assertTrue(s, err)
test = Iabab.vooo - Iabab_ref.vooo
s = numpy.linalg.norm(test) < self.thresh
err = "error in Iab vooo integrals"
self.assertTrue(s, err)
test = Iabab.ovoo - Iabab_ref.ovoo
s = numpy.linalg.norm(test) < self.thresh
err = "error in Iab ovoo integrals"
self.assertTrue(s, err)
test = Iabab.ooov - Iabab_ref.ooov
s = numpy.linalg.norm(test) < self.thresh
err = "error in Iab ooov integrals"
self.assertTrue(s, err)
test = Iabab.oovo - Iabab_ref.oovo
s = numpy.linalg.norm(test) < self.thresh
err = "error in Iab oovo integrals"
self.assertTrue(s, err)
test = Iabab.oooo - Iabab_ref.oooo
s = numpy.linalg.norm(test) < self.thresh
err = "error in Iab oooo integrals"
self.assertTrue(s, err)
def test_Be_plus(self):
from pyscf import gto, scf
from cqcpy import integrals
mol = gto.M(
verbose=0,
atom='Be 0 0 0',
basis='sto-3G',
spin=1, charge=1)
mf = scf.UHF(mol)
mf.conv_tol = 1e-13
mf.scf()
mo_occ = mf.mo_occ
mo_occa = mo_occ[0]
mo_occb = mo_occ[1]
moa = mf.mo_coeff[0]
mob = mf.mo_coeff[1]
oa = (mf.mo_coeff[0])[:, mo_occa > 0]
va = (mf.mo_coeff[0])[:, mo_occa == 0]
ob = (mf.mo_coeff[1])[:, mo_occb > 0]
vb = (mf.mo_coeff[1])[:, mo_occb == 0]
noa = oa.shape[1]
nva = va.shape[1]
nob = ob.shape[1]
nvb = vb.shape[1]
Iaaaa = integrals.get_phys(mol, moa, moa, moa, moa)
Iaaaa = test_utils.make_two_e_blocks_full(
Iaaaa, noa, nva, noa, nva, noa, nva, noa, nva)
Ibbbb = integrals.get_phys(mol, mob, mob, mob, mob)
Ibbbb = test_utils.make_two_e_blocks_full(
Ibbbb, nob, nvb, nob, nvb, nob, nvb, nob, nvb)
Iabab = integrals.get_phys(mol, moa, mob, moa, mob)
Iabab = test_utils.make_two_e_blocks_full(
Iabab, noa, nva, nob, nvb, noa, nva, nob, nvb)
I = spin_utils.int_to_spin(Iaaaa, Ibbbb, Iabab, noa, nva, nob, nvb)
I_ref = integrals.eri_blocks(mf)
z = I.vvvv - I_ref.vvvv
s = numpy.linalg.norm(z) < self.thresh
err = "error in vvvv block"
self.assertTrue(s, err)
z = I.vvvo - I_ref.vvvo
s = numpy.linalg.norm(z) < self.thresh
err = "error in vvvo block"
self.assertTrue(s, err)
z = I.vovv - I_ref.vovv
s = numpy.linalg.norm(z) < self.thresh
err = "error in vovo block"
self.assertTrue(s, err)
z = I.vovo - I_ref.vovo
s = numpy.linalg.norm(z) < self.thresh
err = "error in vovo block"
self.assertTrue(s, err)
z = I.vvoo - I_ref.vvoo
        s = numpy.linalg.norm(z) < self.thresh
#This code reads the output files of fitAll.py for linear model and
#calculates split renal function (SRF) and total GFR
#and plots regreassion curve and Bland-Altman (BA) plot for SRF and total GFR.
#It also prints the correlation coefficient, mean difference, stdev difference,
#p-values of SRF and total GFR for entire group and for 3T and 1T subgroup separately.
##Choose model as 'Linear'
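# The split renal function computed below is the per-kidney share of total GFR,
# e.g. for the left kidney SRF_LK = GFR_LK / (GFR_LK + GFR_RK), so GFR_LK = 40 and
# GFR_RK = 60 ml/min give SRF_LK = 0.4; the same is done for the isotope reference
# values (Iso-SK-GFR) before the regression and Bland-Altman comparison.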
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
group = [ ['MRH008_wellcome','PANDA','NKRF'],['1Tdata']]
model = 'Linear'
for ii in range(2):
gfr = []
isogfr=[]
gfr_LK = []
gfr_RK = []
isogfr_LK = []
isogfr_RK = []
for name in group[ii]:
Z = np.genfromtxt('%s_%s.csv' %(name,model), dtype=None, delimiter=',',names=True, unpack = True)
ind = np.where((Z['Parameter']== b'SK-GFR') & (Z['Access']==1)& (Z['AccessSF']==1))
ind2 = np.where((Z['Parameter']== b'Iso-SK-GFR') & (Z['Access']==1)& (Z['AccessSF']==1))
ind_LK = np.where((Z['Parameter']== b'SK-GFR') & (Z['Kidney']==b'LK') & (Z['Access']==1)& (Z['AccessSF']==1))
#print(ind_LK)
ind_RK = np.where((Z['Parameter']== b'SK-GFR') & (Z['Kidney']==b'RK') & (Z['Access']==1)& (Z['AccessSF']==1))
ind2_LK = np.where((Z['Parameter']== b'Iso-SK-GFR') & (Z['Kidney']==b'LK') & (Z['Access']==1)& (Z['AccessSF']==1))
ind2_RK = np.where((Z['Parameter']== b'Iso-SK-GFR') & (Z['Kidney']==b'RK') & (Z['Access']==1)& (Z['AccessSF']==1))
correction = 'Value_%s' %model
gfr.extend(Z['%s' %correction][ind])
isogfr.extend(Z['%s' %correction][ind2])
gfr_LK.extend(Z['%s' %correction][ind_LK])
isogfr_LK.extend(Z['%s' %correction][ind2_LK])
gfr_RK.extend(Z['%s' %correction][ind_RK])
isogfr_RK.extend(Z['%s' %correction][ind2_RK])
gfr = np.array(gfr)
isogfr = np.array(isogfr)
gfr_LK = np.array(gfr_LK)
isogfr_LK = np.array(isogfr_LK)
gfr_RK = np.array(gfr_RK)
isogfr_RK = np.array(isogfr_RK)
gfr_tot = gfr_LK+gfr_RK
sf_LK = gfr_LK/gfr_tot
sf_RK = gfr_RK/gfr_tot
isogfr_tot = isogfr_LK + isogfr_RK
isosf_LK = isogfr_LK/isogfr_tot
isosf_RK = isogfr_RK/isogfr_tot
if ii==0:
sf_3T = sf_LK
isosf_3T =isosf_LK
gfr_tot_3T = gfr_tot
isogfr_tot_3T = isogfr_tot
else:
sf_1T = sf_LK
isosf_1T =isosf_LK
gfr_tot_1T = gfr_tot
isogfr_tot_1T = isogfr_tot
gfr_tot = np.concatenate((gfr_tot_3T,gfr_tot_1T))
isogfr_tot = np.concatenate((isogfr_tot_3T,isogfr_tot_1T))
sf = np.concatenate((sf_3T,sf_1T))
isosf = np.concatenate((isosf_3T,isosf_1T))
slope, intercept, r_value, p_value, std_err = stats.linregress(isosf,sf)
mean_sf_3T = np.mean([sf_3T,isosf_3T], axis=0)
diff_sf_3T = np.array(sf_3T)-np.array(isosf_3T)
mean_sf_1T = np.mean([sf_1T,isosf_1T], axis=0)
diff_sf_1T = np.array(sf_1T) - np.array(isosf_1T)
import cv2
import matplotlib.pyplot as plt
import sys
from actions_from_video import Action
import base64
from io import BytesIO
import numpy as np
# def open_video():
# capture = cv2.VideoCapture(-1)
# return 1
def analysis(file_path):
s = Action()
res = s.Offline_Analysis(file_path)
suggestion = 1
alarm_action = list(res.keys())
alarm_date = list(res.values())
return alarm_action,alarm_date,suggestion
def Online_Init():
return Action(reg_frame=9)
def Online_Analysis(action_class, img):
format, imgstr = img.split(';base64,')
img = base64.b64decode(imgstr)
    nparr = np.fromstring(img, np.uint8)
import numpy as np
# Photon history bits (see photon.h for source)
NO_HIT = 0x1 << 0
BULK_ABSORB = 0x1 << 1
SURFACE_DETECT = 0x1 << 2
SURFACE_ABSORB = 0x1 << 3
RAYLEIGH_SCATTER = 0x1 << 4
REFLECT_DIFFUSE = 0x1 << 5
REFLECT_SPECULAR = 0x1 << 6
SURFACE_REEMIT = 0x1 << 7
SURFACE_TRANSMIT = 0x1 << 8
BULK_REEMIT = 0x1 << 9
CHERENKOV = 0x1 << 10
SCINTILLATION = 0x1 << 11
NAN_ABORT = 0x1 << 31
class Steps(object):
def __init__(self,x,y,z,t,dx,dy,dz,ke,edep,qedep):
self.x = x
self.y = y
self.z = z
self.t = t
self.dx = dx
self.dy = dy
self.dz = dz
self.ke = ke
self.edep = edep
self.qedep = qedep
class Vertex(object):
def __init__(self, particle_name, pos, dir, ke, t0=0.0, pol=None, steps=None, children=None, trackid=-1, pdgcode=-1):
'''Create a particle vertex.
particle_name: string
Name of particle, following the GEANT4 convention.
Examples: e-, e+, gamma, mu-, mu+, pi0
pos: array-like object, length 3
Position of particle vertex (mm)
dir: array-like object, length 3
Normalized direction vector
ke: float
Kinetic energy (MeV)
t0: float
Initial time of particle (ns)
pol: array-like object, length 3
Normalized polarization vector. By default, set to None,
and the particle is treated as having a random polarization.
'''
self.particle_name = particle_name
self.pos = pos
self.dir = dir
self.pol = pol
self.ke = ke
self.t0 = t0
self.steps = steps
self.children = children
self.trackid = trackid
self.pdgcode = pdgcode
def __str__(self):
return 'Vertex('+self.particle_name+',ke='+str(self.ke)+',steps='+str(True if self.steps else False)+')'
__repr__ = __str__
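# Example construction (arbitrary values):
#
#     v = Vertex('e-', pos=[0.0, 0.0, 0.0], dir=[0.0, 0.0, 1.0], ke=1.0)
#     print(v)   # Vertex(e-,ke=1.0,steps=False)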
class Photons(object):
    def __init__(self, pos=np.empty((0,3)), dir=np.empty((0,3))):
import numpy as np
from ..env_cmd import CmdEnv
from UTILS.tensor_ops import dir2rad, np_softmax, reg_rad_at, reg_rad, reg_deg_at
def maneuver_cold_to_ms(uav):
unit_delta = uav.delta_oppsite_to_ms()
H2 = unit_delta * 100e3 + uav.pos2d
def check_dis(goto_location):
d = goto_location[0]
dis = np.linalg.norm(uav.pos3d - np.array([d['X'], d['Y'], d['Z']]))
assert dis > 10e3
goto_location = [
{
"X": H2[0], "Y": H2[1], "Z": uav.Z
}
]
check_dis(goto_location)
return goto_location
def maneuver_vertical_to_ms(uav):
unit_delta = uav.delta_oppsite_to_ms()
unit_delta = np.append(unit_delta, 0)
unit_delta = np.cross(unit_delta, np.array([0, 0, 1]))
H2 = unit_delta[:2] * 100e3 + uav.pos2d
def check_dis(goto_location):
d = goto_location[0]
dis = np.linalg.norm(uav.pos3d - np.array([d['X'], d['Y'], d['Z']]))
assert dis > 10e3
goto_location = [
{
"X": H2[0],
"Y": H2[1],
"Z": uav.Z
}
]
check_dis(goto_location)
return goto_location
def check_dis(goto_location, uav):
d = goto_location[0]
dis = np.linalg.norm(uav.pos3d - np.array([d['X'], d['Y'], d['Z']]))
assert dis > 10e3, ("distance %.2f"%dis, uav.pos3d, goto_location)
# def choose_maneuver_side(uav, angle):
# pos = +angle
# neg = -angle
# deg_ms_coming = dir2rad(uav.delta_oppsite_to_ms()[:2]) * 180/np.pi
def maneuver_angle_to_ms(uav, angle):
rad = angle * np.pi / 180
unit_delta = np.matmul(
uav.delta_oppsite_to_ms()[:2],
np.array([[np.cos(rad), np.sin(rad)],
[np.sin(-rad), np.cos(rad)]]))
if angle != 0 and angle != 180:
unit_delta_side2 = np.matmul(
uav.delta_oppsite_to_ms()[:2],
np.array([[np.cos(rad), np.sin(-rad)],
[np.sin(rad), np.cos(rad)]]))
rad1 = dir2rad(unit_delta)
rad2 = dir2rad(unit_delta_side2)
uav_head_rad = np.pi / 2 - uav.Heading
rad1 = reg_rad_at(rad1, uav_head_rad)
rad2 = reg_rad_at(rad2, uav_head_rad)
delta1 = np.abs(rad1 - uav_head_rad) * 180 / np.pi
delta2 = np.abs(rad2 - uav_head_rad) * 180 / np.pi
        if delta2 < delta1 - 3:  # maneuver to the other side
unit_delta = unit_delta_side2
H2 = unit_delta[:2] * 100e3 + uav.pos2d
goto_location = [
{
"X": H2[0],
"Y": H2[1],
"Z": uav.Z
}
]
check_dis(goto_location, uav)
return goto_location
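# Rotation convention used above (illustrative check): with a row vector v,
# np.matmul(v, [[cos, sin], [-sin, cos]]) rotates v counter-clockwise by `rad`,
# and the second matrix [[cos, -sin], [sin, cos]] rotates it clockwise, giving the
# two candidate headings on either side of the missile-opposite direction.
#
#     v = np.array([1.0, 0.0])
#     rad = np.pi / 2
#     np.matmul(v, np.array([[np.cos(rad), np.sin(rad)],
#                            [np.sin(-rad), np.cos(rad)]]))   # -> array([0., 1.])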
def maneuver_angle_to_op_vip(uav, vip, angle):
rad = angle * np.pi / 180
delta_oppsite_to_vip = uav.pos3d - vip.pos3d
unit_delta = np.matmul(
delta_oppsite_to_vip[:2],
np.array([[np.cos(rad), np.sin(rad)],
[np.sin(-rad), | np.cos(rad) | numpy.cos |
import time
import string
import pyautogui
import cv2
import numpy as np
import os
import random
from pynput import keyboard
from pynput import mouse
from pynput.mouse import Button as mouseButton
from sys import platform
class Reader:
def __init__(self):
self.running = False
self.listeners = False
self.start_button = '`'
self.object_on_mouse = 'Test'
if platform == "linux" or platform == "linux2":
            self.imagedir = os.path.dirname(os.path.abspath(__file__)) + '/images/'
elif platform == "win32":
self.imagedir = os.path.dirname(os.path.abspath(__file__)) + '\\images\\'
self.object_directory = ''
self.number_directory = os.path.join(self.imagedir, 'Numbers')
self.character_directory = os.path.join(self.imagedir, 'Characters')
self.currentMouseX, self.currentMouseY = pyautogui.position()
self.prevMouseX = 0
self.prevMouseY = 0
self.crop_size = 32
self.image_index = 0
self.lastTimestamp = round(time.perf_counter(), 3)
self.deltaTime = round((time.perf_counter() - self.lastTimestamp), 1)
def image_search(self, needle_filename, haystack_filename):
# Search for needle in a haystack
needle = cv2.imread(self.imagedir+needle_filename, cv2.IMREAD_UNCHANGED)
grayscale_needle = cv2.cvtColor(needle, cv2.COLOR_BGR2GRAY)
haystack = cv2.imread(self.imagedir+haystack_filename, cv2.IMREAD_UNCHANGED)
grayscale_haystack = cv2.cvtColor(haystack, cv2.COLOR_BGR2GRAY)
result = cv2.matchTemplate(grayscale_haystack, grayscale_needle, cv2.TM_CCOEFF_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
# Max location has the best match with max_val to be % accuracy
width = needle.shape[1]
height = needle.shape[0]
bottom_right = (max_loc[0] + width, max_loc[1] + height)
# Threshold is the % accuracy compared to original needle
threshold = .80
yloc, xloc = np.where(result >= threshold)
# Keep track of all matches and identify unique cases
matches = []
if len(xloc) > 0:
print("There are {0} total matches in the haystack.".format(len(xloc)))
for (x, y) in zip(xloc, yloc):
# Twice to ensure singles are kept after picking unique cases
matches.append([int(x), int(y), int(width), int(height)])
matches.append([int(x), int(y), int(width), int(height)])
# Grouping function
matches, weights = cv2.groupRectangles(matches, 1, 0.2)
print("There are {0} unique matches in the haystack.".format(len(matches)))
# Display image with rectangle
for (x, y, width, height) in matches:
cv2.rectangle(haystack, (x, y), (x + width, y + height), (255, 255, 0), 2)
# cv2.imshow('Haystack', haystack)
# cv2.waitKey()
# cv2.destroyAllWindows()
else:
print("There are no matches.")
return matches
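    # Example usage sketch (filenames are placeholders resolved against self.imagedir):
    #
    #     reader = Reader()
    #     boxes = reader.image_search('button.png', 'screenshot.png')
    #     for x, y, w, h in boxes:
    #         pyautogui.moveTo(x + w // 2, y + h // 2)   # centre of each unique match
    #
    # Each entry of `boxes` is [x, y, width, height] in haystack-image pixel coordinates.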
def number_search(self, threshold, haystack_filename):
# Keep track of all matches and identify unique cases
numbers = []
if os.path.exists(self.number_directory):
files = os.listdir(self.number_directory)
self.image_index = 1
haystack = cv2.imread(self.imagedir + haystack_filename, cv2.IMREAD_UNCHANGED)
grayscale_haystack = cv2.cvtColor(haystack, cv2.COLOR_BGR2GRAY)
match_number = 0
while match_number < 10:
for f in files:
if '.png' in f:
matches = []
image_path = os.path.join(self.number_directory, str(match_number) + '_' + str(self.image_index) + '.png')
print(image_path)
if not os.path.exists(image_path):
break
needle = cv2.imread(image_path, cv2.IMREAD_UNCHANGED)
grayscale_needle = cv2.cvtColor(needle, cv2.COLOR_BGR2GRAY)
result = cv2.matchTemplate(grayscale_haystack, grayscale_needle, cv2.TM_CCOEFF_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
# Max location has the best match with max_val to be % accuracy
width = needle.shape[1]
height = needle.shape[0]
bottom_right = (max_loc[0] + width, max_loc[1] + height)
# Threshold is the % accuracy compared to original needle
yloc, xloc = np.where(result >= threshold)
if len(xloc) > 0:
print("There are {0} total matches in the haystack.".format(len(xloc)))
for (x, y) in zip(xloc, yloc):
# Twice to ensure singles are kept after picking unique cases
matches.append([int(x), int(y), int(width), int(height)])
matches.append([int(x), int(y), int(width), int(height)])
# Grouping function
matches, weights = cv2.groupRectangles(matches, 1, 0.2)
print("There are {0} unique matches in the haystack.".format(len(matches)))
# Display image with rectangle
for (x, y, width, height) in matches:
if (x, y, width, height) not in numbers:
numbers.append([int(x), int(y), int(width), int(height), int(match_number)])
cv2.rectangle(haystack, (x, y), (x + width, y + height), (255, 255, 0), 2)
# cv2.imshow('Haystack', haystack)
# cv2.waitKey()
# cv2.destroyAllWindows()
else:
print("There are no matches.")
self.image_index += 1
print("Found " + str(len(numbers)) + " of numbers in " + haystack_filename)
match_number += 1
self.image_index = 1
else:
print("Numbers do not exist in screenshot.")
print(numbers)
return numbers
def sort_matches(self, sort_index, threshold, match_list):
# Bubble sort algo
match_len = len(match_list)
for y1 in range(match_len-1):
for y2 in range(0, match_len - y1 - 1):
# If index is greater than next index
if match_list[y2][sort_index] > match_list[y2+1][sort_index]:
# If difference of index values is greater than the threshold
if abs(match_list[y2][sort_index] - match_list[y2+1][sort_index]) > threshold:
match_list[y2], match_list[y2+1] = match_list[y2+1], match_list[y2]
def number_concat(self, threshold, price, matches):
# Concat data together to form usable numbers
match_len = len(matches)
temp_array = matches[0]
result_array = []
for index in range(0, match_len-1):
print(index)
if abs(matches[index][1] - matches[index+1][1]) < threshold:
print(temp_array)
# Sum width of each character
temp_array[2] = temp_array[2] + matches[index+1][2]
# Multiply index value by 10 and add index+1
temp_array[4] = (temp_array[4]*10) + matches[index+1][4]
print(temp_array)
if index == match_len - 2:
result_array.append(temp_array)
temp_array = matches[index + 1]
else:
result_array.append(temp_array)
temp_array = matches[index+1]
if price:
for number in result_array:
number[4] = number[4]/100
print(result_array)
def set_selection(self, x1, y1, x2, y2):
# Selection is used to only search for objects in an area
self.selection = [x1, y1, x2, y2]
def get_selection(self, x1, y1, x2, y2):
# Selection is used to only search for objects in an area
return self.selection
def split_image(self, directory, filename, width_split, height_split):
# Split image is used to split an image into smaller images
image_path = os.path.join(directory, filename)
filenames_list = []
if not os.path.exists(image_path):
print("Filepath does not exist for split_image method.")
return filenames_list
img = cv2.imread(image_path)
width = int(img.shape[0])
height = int(img.shape[1])
if width_split > width:
print("Width is too large.")
return filenames_list
if height_split > height:
print("Height is too large.")
return filenames_list
for index_a in range(0, width, int(width_split)):
for index_b in range(0, height, int(height_split)):
x = index_a
y = index_b
image_path = os.path.join(directory, f"{filename.replace('.png', '')}_{index_a}_{index_b}.png")
cv2.imwrite(image_path, img[index_a:index_a + int(width_split), index_b:index_b + int(height_split), :])
filenames_list.append([int(x), int(y), int(width), int(height), f"{filename.replace('.png', '')}_{index_a}_{index_b}.png"])
print(filenames_list)
return filenames_list
def image_difference(self, threshold, needle_directory, needle_filename, haystack_directory, haystack_filename, iterations):
# Recursive function to find differences with remaining iterations
# representing how many times it will be cut into four parts
remaining_iterations = iterations - 1
# Record similarities and differences between two first images
similarities = []
differences = []
# Load haystack image
haystack_image_path = os.path.join(haystack_directory, haystack_filename)
if not os.path.exists(haystack_image_path):
print("Haystack file does not exist.")
return differences
img = cv2.imread(haystack_image_path)
section_width = img.shape[0] / 2
section_height = img.shape[1] / 2
# Cut haystack image into 4 sections
haystack_images = self.split_image(haystack_directory, haystack_filename, section_width, section_height)
# Load needle image
needle_image_path = os.path.join(needle_directory, needle_filename)
if not os.path.exists(needle_image_path):
print("Needle file does not exist.")
return differences
img = cv2.imread(needle_image_path)
# Cut needle image into 4 sections
section_width = img.shape[0] / 2
section_height = img.shape[1] / 2
needle_images = self.split_image(needle_directory, needle_filename, section_width, section_height)
# Loop through each needle section and haystack section and compare
for haystack_section in haystack_images:
for needle_section in needle_images:
# Format for needle/haystack section [x, y, width, height, filename]
# Only do loop if the x, y positions match
if not haystack_section[0] == needle_section[0]:
continue
if not haystack_section[1] == needle_section[1]:
continue
haystack_path = os.path.join(haystack_directory, haystack_section[4])
if not os.path.exists(haystack_path):
print("Split haystack files do not exist.")
return differences
needle_path = os.path.join(needle_directory, needle_section[4])
if not os.path.exists(needle_path):
print("Split needle files do not exist.")
return differences
# Turn both images to grayscale to speed up process
haystack = cv2.imread(haystack_path, cv2.IMREAD_UNCHANGED)
grayscale_haystack = cv2.cvtColor(haystack, cv2.COLOR_BGR2GRAY)
needle = cv2.imread(needle_path, cv2.IMREAD_UNCHANGED)
grayscale_needle = cv2.cvtColor(needle, cv2.COLOR_BGR2GRAY)
# Match the results
matches = []
result = cv2.matchTemplate(grayscale_haystack, grayscale_needle, cv2.TM_CCOEFF_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
# Max location has the best match with max_val to be % accuracy
width = needle.shape[1]
height = needle.shape[0]
bottom_right = (max_loc[0] + width, max_loc[1] + height)
# Threshold is the % accuracy compared to original needle
yloc, xloc = np.where(result >= threshold)
if len(xloc) > 0:
# Similarity found
print("There are {0} total matches in the haystack.".format(len(xloc)))
for (x, y) in zip(xloc, yloc):
# Twice to ensure singles are kept after picking unique cases
matches.append([needle_section[0], needle_section[1], needle_section[2], needle_section[3]])
matches.append([needle_section[0], needle_section[1], needle_section[2], needle_section[3]])
# Grouping function
matches, weights = cv2.groupRectangles(matches, 1, 0.2)
print("There are {0} unique matches in the haystack.".format(len(matches)))
if remaining_iterations == 0:
# Display image with rectangle
for (x, y, width, height) in matches:
print("Similarity found:" + str(needle_section))
similarities.append(needle_section)
# cv2.rectangle(haystack, (x, y), (x + width, y + height), (255, 255, 0), 2)
# cv2.imshow('Haystack', haystack)
# cv2.waitKey()
# cv2.destroyAllWindows()
elif remaining_iterations == 0:
# Differences found in the last iteration
print("Difference found:" + str(needle_section))
differences.append(needle_section)
elif remaining_iterations > 0:
# If there are differences then iterate this section for efficiency
same_parts, different_parts = self.image_difference(threshold, needle_directory, needle_section[4], haystack_directory, haystack_section[4], remaining_iterations)
similarities.append(same_parts)
differences.append(different_parts)
else:
print("There are no matches.")
return similarities, differences
def character_concat(self, threshold, multiboxes, matches):
# Concat data together to form usable words
match_len = len(matches)
temp_array = matches[0]
result_array = []
if multiboxes:
skip_next = False
for index in range(1, match_len):
if abs(matches[index-1][1] - matches[index][1]) < threshold:
print(temp_array)
# Sum width of each character
temp_array[2] = temp_array[2] + (matches[index][0]-matches[index-1][0])
# Add character to the end
if multiboxes:
if not skip_next:
temp_array[4] = temp_array[4] + matches[index][4]
else:
skip_next = False
else:
temp_array[4] = temp_array[4] + matches[index][4]
print(temp_array)
if index < match_len-1 and abs((matches[index][0]+matches[index][2]) - matches[index+1][0]) > threshold and len(temp_array[4]) > 1:
if multiboxes:
skip_next = True
temp_array[2] = matches[index][2] + matches[index][0] - temp_array[0]
result_array.append(temp_array)
temp_array = matches[index+1]
index += 2
else:
temp_array[4] = temp_array[4] + ' '
if index == match_len - 1 and len(temp_array[4]) > 1:
result_array.append(temp_array)
temp_array = matches[index]
elif len(temp_array[4]) > 1:
result_array.append(temp_array)
temp_array = matches[index]
# Clean up results
print(result_array[0][4])
result_len = len(result_array)
for index in range(0, result_len-2):
if multiboxes:
temp_str = result_array[index][4]
result_array[index][4] = temp_str.replace("o8", "8").replace("co", "c").replace("og", "g")
if index > 0 and result_array[index][4] == 'rg':
result_array.pop(index)
result_array[index-1][4] = result_array[index-1][4] + 'g'
if result_array[index][4] == 'lng':
result_array.pop(index)
result_array[index-1][4] = result_array[index-1][4] + 'ng'
elif not multiboxes:
result_array[index][4] = result_array[index][4].replace("n r g", "ng").replace("c o", "c").replace("i l", "i").replace("n r ", "n")
return result_array
def proximity_combine(self, list_a, list_b):
# Combine elements in lists to form combinations
list_a_len = len(list_a)
list_b_len = len(list_b)
combined_list = []
for index_a in range(0, list_a_len):
for index_b in range(0, list_b_len):
distance_x = abs(list_a[index_a][0] - list_b[index_b][0])
distance_y = abs(list_a[index_a][1] - list_b[index_b][1])
# Finds smallest width
padding = (min(list_a[index_a][2], list_b[index_b][2]))*2
if distance_x < padding and distance_y < padding:
if list_a[index_a][0] < list_b[index_b][0]:
combined_str = str(list_a[index_a][4])+str(list_b[index_b][4])
new_x = list_a[index_a][0]
new_y = list_a[index_a][1]
else:
combined_str = str(list_b[index_b][4]) + str(list_a[index_a][4])
                        new_x = list_b[index_b][0]
                        new_y = list_b[index_b][1]
if combined_str not in combined_list:
combined_width = list_a[index_a][2]+list_b[index_b][2]
new_height = list_a[index_a][3]
combined_list.append([new_x, new_y, combined_width, new_height, combined_str])
return combined_list
def character_search(self, threshold, letters, haystack_filename):
# Keep track of all matches and identify unique cases
chars = []
chars_list = []
if letters:
chars_list = ['A', 'B', 'C', 'D', 'E',
'F', 'G', 'H', 'I', 'J',
'K', 'L', 'M', 'N', 'O',
'P', 'Q', 'R', 'S', 'T',
'U', 'V', 'W', 'X', 'Y',
'Z', '0', '1', '2', '3',
'4', '5', '6', '7', '8',
'9']
else:
chars_list = ['!', '@', '#', '$', '%', '^', '&', '*', '(', ')', '_', '-', '+', '=']
chars_list_len = len(chars_list)
if os.path.exists(self.character_directory):
files = os.listdir(self.character_directory)
self.image_index = 1
haystack = cv2.imread(self.imagedir + haystack_filename, cv2.IMREAD_UNCHANGED)
grayscale_haystack = cv2.cvtColor(haystack, cv2.COLOR_BGR2GRAY)
match_number = 0
while match_number < chars_list_len:
for f in files:
if '.png' in f:
matches = []
image_path = os.path.join(self.character_directory,
chars_list[match_number] + '_' + str(self.image_index) + '.png')
print(image_path)
if not os.path.exists(image_path):
break
needle = cv2.imread(image_path, cv2.IMREAD_UNCHANGED)
grayscale_needle = cv2.cvtColor(needle, cv2.COLOR_BGR2GRAY)
result = cv2.matchTemplate(grayscale_haystack, grayscale_needle, cv2.TM_CCOEFF_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
# Max location has the best match with max_val to be % accuracy
width = needle.shape[1]
height = needle.shape[0]
bottom_right = (max_loc[0] + width, max_loc[1] + height)
# Threshold is the % accuracy compared to original needle
yloc, xloc = np.where(result >= threshold)
if len(xloc) > 0:
print("There are {0} total matches in the haystack.".format(len(xloc)))
for (x, y) in zip(xloc, yloc):
# Twice to ensure singles are kept after picking unique cases
matches.append([int(x), int(y), int(width), int(height)])
matches.append([int(x), int(y), int(width), int(height)])
# Grouping function
matches, weights = cv2.groupRectangles(matches, 1, 0.2)
print("There are {0} unique matches in the haystack.".format(len(matches)))
# Display image with rectangle
for (x, y, width, height) in matches:
if (x, y, width, height) not in chars:
chars.append([int(x), int(y), int(width), int(height), str(chars_list[match_number]).lower()])
# cv2.rectangle(haystack, (x, y), (x + width, y + height), (255, 255, 0), 2)
# cv2.imshow('Haystack', haystack)
# cv2.waitKey()
# cv2.destroyAllWindows()
else:
print("There are no matches.")
self.image_index += 1
                print("Found " + str(len(chars)) + " characters in " + haystack_filename)
                match_number += 1
                self.image_index = 1
else:
print("Characters do not exist in screenshot.")
print(chars)
return chars
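    # Usage sketch (instance name, directories and file names are illustrative only):
    #
    #   finder.imagedir = 'images/'
    #   finder.character_directory = 'images/characters/'
    #   hits = finder.character_search(0.9, letters=True, haystack_filename='screen.png')
    #   finder.draw_info(hits, 'screen.png')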
def draw_info(self, matches, haystack_filename):
# Draw the matches on the original image
boxes = []
haystack = cv2.imread(self.imagedir + haystack_filename, cv2.IMREAD_UNCHANGED)
for (x, y, width, height, name) in matches:
if (x, y, width, height) not in boxes:
boxes.append([int(x), int(y), int(width), int(height)])
cv2.rectangle(haystack, (x, y), (x + width, y + height), (255, 255, 0), 2)
cv2.imshow('Haystack', haystack)
cv2.waitKey()
cv2.destroyAllWindows()
def object_search(self, object_name, threshold, haystack_filename):
# Keep track of all matches and identify unique cases
self.object_directory = os.path.join(self.imagedir, object_name)
objects = []
if os.path.exists(self.object_directory):
files = os.listdir(self.object_directory)
self.image_index = 1
haystack = cv2.imread(self.imagedir + haystack_filename, cv2.IMREAD_UNCHANGED)
grayscale_haystack = cv2.cvtColor(haystack, cv2.COLOR_BGR2GRAY)
for f in files:
if '.png' in f:
matches = []
image_path = os.path.join(self.object_directory, object_name+str(self.image_index)+'.png')
needle = cv2.imread(image_path, cv2.IMREAD_UNCHANGED)
grayscale_needle = cv2.cvtColor(needle, cv2.COLOR_BGR2GRAY)
result = cv2.matchTemplate(grayscale_haystack, grayscale_needle, cv2.TM_CCOEFF_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
# Max location has the best match with max_val to be % accuracy
width = needle.shape[1]
height = needle.shape[0]
bottom_right = (max_loc[0] + width, max_loc[1] + height)
# Threshold is the % accuracy compared to original needle
                    yloc, xloc = np.where(result >= threshold)
'''
Routines to plot a comparison of rivers and catchments between an evaluated dataset and a
reference dataset.
Created on Jul 21, 2016
@author: thomasriddick
'''
import numpy as np
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import matplotlib as mpl
import re
from HD_Plots.utilities import plotting_tools as pts
def find_catchment_numbers(ref_catchment_field,data_catchment_field,
data_catchment_field_original_scale,
data_original_scale_flowtocellfield,pair,
catchment_grid_changed,swap_ref_and_data_when_finding_labels=False,
use_original_scale_field_for_determining_data_and_ref_labels=False,
ref_original_scale_flowtocellfield=None,
ref_catchment_field_original_scale=None,
data_original_scale_grid_type='HD',
ref_original_scale_grid_type='HD',
data_original_scale_grid_kwargs={},
ref_original_scale_grid_kwargs={},
grid_type='HD',**grid_kwargs):
"""Find the catchment number of a reference and a data catchment"""
if use_original_scale_field_for_determining_data_and_ref_labels:
ref_course_coords=pair[0].get_coords()
ref_catchment_num =\
pts.find_data_catchment_number(ref_catchment_field,
ref_catchment_field_original_scale,
ref_original_scale_flowtocellfield,ref_course_coords,
catchment_grid_changed,ref_original_scale_grid_type,
ref_original_scale_grid_kwargs,grid_type,**grid_kwargs)[0]
data_course_coords=pair[1].get_coords()
data_catchment_num,scale_factor =\
pts.find_data_catchment_number(data_catchment_field,
data_catchment_field_original_scale,
data_original_scale_flowtocellfield,data_course_coords,
catchment_grid_changed,data_original_scale_grid_type,
data_original_scale_grid_kwargs,grid_type,**grid_kwargs)
elif swap_ref_and_data_when_finding_labels:
data_catchment_num = data_catchment_field[pair[0].get_coords()]
ref_course_coords=pair[1].get_coords()
ref_catchment_num,scale_factor =\
pts.find_data_catchment_number(ref_catchment_field,
data_catchment_field_original_scale,
data_original_scale_flowtocellfield,ref_course_coords,
catchment_grid_changed,data_original_scale_grid_type,
data_original_scale_grid_kwargs,grid_type,**grid_kwargs)
else:
ref_catchment_num = ref_catchment_field[pair[0].get_coords()]
data_course_coords=pair[1].get_coords()
data_catchment_num,scale_factor =\
pts.find_data_catchment_number(data_catchment_field,
data_catchment_field_original_scale,
data_original_scale_flowtocellfield,data_course_coords,
catchment_grid_changed,data_original_scale_grid_type,
data_original_scale_grid_kwargs,grid_type,**grid_kwargs)
return ref_catchment_num,data_catchment_num,scale_factor
def select_bounds_around_rivermouth(pair,border=10):
"""Select a set of bounds for a plot of a river mouth"""
imin = max(min(pair[0].get_lat(),pair[1].get_lat()) - border,0)
imax = max(pair[0].get_lat(),pair[1].get_lat()) + border
jmin = max(min(pair[0].get_lon(),pair[1].get_lon()) - border,0)
jmax = max(pair[0].get_lon(),pair[1].get_lon()) + border
return imin,imax,jmin,jmax
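# Worked example (hypothetical river-mouth pair): for reference and data mouths at grid
# points (40, 100) and (42, 103) with the default border of 10 cells, the returned bounds
# are imin, imax, jmin, jmax = 30, 52, 90, 113.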
def plot_orography_section(ax,cax,section,min_height,max_height,outflow_lat,outflow_lon,new_cb=False,
num_levels=50,plot_type='Image',alpha=1.0,cmap_name="terrain",
lat_offset=0,lon_offset=0):
"""Plot a section from an orography field"""
if plot_type=='Filled Contour' or plot_type=='Contour':
levels = np.linspace(min_height,max_height,num_levels)
if plot_type=='Filled Contour':
cs = ax.contourf(section,levels=levels,alpha=alpha,cmap=cm.get_cmap('viridis'))
else:
cs = ax.contour(section,levels=levels,alpha=alpha)
elif plot_type=='Image':
im = ax.imshow(section,interpolation='nearest',cmap=cm.get_cmap(cmap_name),
norm = cm.colors.Normalize(vmax=max_height,vmin=min_height),rasterized=True)
ax.format_coord = pts.OrogCoordFormatter(lon_offset,lat_offset)
ax.set_title(" Outflow"
" Lat: " + (lambda x: str((0.5*x - 90)*(-1 if x<=180 else 1))
+ r'$^{\circ}$' + ('N' if x <=180 else 'S'))(outflow_lat)
+ ' Lon: ' + (lambda y: str((0.5*y-180)*(-1 if y<= 360 else 1))
+ r'$^{\circ}$' + ('W' if y <= 360 else 'E'))(outflow_lon))
ax.axis('image')
if not new_cb:
cax.clear()
if plot_type=='Filled Contour' or plot_type=='Contour':
cb = plt.colorbar(cs,cax=cax)
elif plot_type=='Image':
cb = plt.colorbar(im,cax=cax)
cb.set_label("Height Above Sea Level (m)",labelpad=10)
def select_rivermaps_section(ref_flowtocellfield,data_flowtocellfield,
rdirs_field,imin,imax,jmin,jmax,threshold,
points_to_mark,mark_true_sinks=False,
data_true_sinks = None,
allow_new_sink_points=False):
"""Select a section of a flow to cell field"""
rmap_field = np.copy(ref_flowtocellfield)
rmap_field[rdirs_field <= 0] = 0
rmap_field[np.logical_and(threshold > rmap_field,rmap_field > 0)] = 1
rmap_field[rmap_field >= threshold] = 2
rmap_field[np.logical_and(rmap_field == 2,data_flowtocellfield >= threshold)] = 3
rmap_field[np.logical_and(rmap_field != 3,data_flowtocellfield >= threshold)] = 4
if mark_true_sinks:
rmap_field[rdirs_field == 5] = 0
if points_to_mark:
for i,point in enumerate(points_to_mark,start=5):
rmap_field[point] = i
if data_true_sinks is not None:
rmap_field[rdirs_field == 5] = 7
rmap_field[np.logical_and(rdirs_field == 5,data_true_sinks)] = 8
if np.any(np.logical_and(rdirs_field != 5,data_true_sinks)):
if allow_new_sink_points:
rmap_field[np.logical_and(rdirs_field != 5,data_true_sinks)] = 9
else:
raise RuntimeWarning("New true sink point has appeared in data")
return rmap_field[imin:imax,jmin:jmax]
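# The integer codes produced by select_rivermaps_section are, roughly: 0 sea (and masked
# true sinks), 1 land below the flow-to-cell threshold, 2 reference-only river path,
# 3 common river path, 4 data-only river path, 5 onwards the points passed in
# points_to_mark, 7 reference-only true sink, 8 common true sink and 9 a true sink present
# only in the data (permitted only when allow_new_sink_points is set).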
def plot_whole_river_flowmap(ax,pair,ref_flowtocellfield,data_flowtocellfield,rdirs_field,data_rdirs_field,
catchment_bounds,allow_new_sink_points=False,simplified_flowmap_plot=False,colors=None):
points_to_mark = [pair[0].get_coords(),pair[1].get_coords()]
rmap_threshold_wholecatch = 25
whole_catchment_rmap_section = select_rivermaps_section(ref_flowtocellfield,data_flowtocellfield,
rdirs_field,*catchment_bounds,
threshold=rmap_threshold_wholecatch,
points_to_mark=points_to_mark,
mark_true_sinks=True,
data_true_sinks=data_rdirs_field,
allow_new_sink_points=allow_new_sink_points)
plot_flowmap(ax, section=whole_catchment_rmap_section,colors=colors,reduced_map=simplified_flowmap_plot)
plt.subplots_adjust(hspace=0.25,left=0.1)
def plot_flowmap(ax,section,reduced_map=False,cax=None,
interpolation='none',alternative_colors=False,
remove_ticks_flag=True,colors=None):
if reduced_map:
num_colors = 5
else:
num_colors = 9
cmap_wholec,norm_wholec = create_colormap(section,num_colors=num_colors,
alternative_colors=alternative_colors,
colors=colors)
ax.imshow(section,interpolation=interpolation,cmap=cmap_wholec,norm=norm_wholec,rasterized=True)
if remove_ticks_flag:
pts.remove_ticks(ax)
if reduced_map:
mappable_wc = mpl.cm.ScalarMappable(norm=norm_wholec,cmap=cmap_wholec)
mappable_wc.set_array(section)
cb_wc = plt.colorbar(mappable_wc,ax=ax,cax=cax)
tic_labels_wc = ['Sea', 'Land','Reference River Path','Common River Path','Data River Path']
tic_loc_wc = np.arange(5) + 0.5
cb_wc.set_ticks(tic_loc_wc)
cb_wc.set_ticklabels(tic_labels_wc)
else:
mappable_wc = mpl.cm.ScalarMappable(norm=norm_wholec,cmap=cmap_wholec)
mappable_wc.set_array(section)
cb_wc = plt.colorbar(mappable_wc,ax=ax,cax=cax)
tic_labels_wc = ['Sea', 'Land','Reference River Path','Common River Path','Data River Path',
'Reference River Mouth','Data River Mouth','Reference True Sink',
'Common True Sink']
        tic_loc_wc = np.arange(10) + 0.5
        cb_wc.set_ticks(tic_loc_wc)
        cb_wc.set_ticklabels(tic_labels_wc)
from __future__ import print_function
import numpy as np
import fitsio
from legacypipe.image import LegacySurveyImage
from legacypipe.bits import DQ_BITS
from legacypipe.survey import create_temp
'''
This class handles Hyper-SuprimeCam CALEXP calibrated image files
produced by the LSST software stack.
These are one file per CCD, with variance maps, flags, WCS, and PsfEx
models included in BINTABLE HDUs.
'''
class HscImage(LegacySurveyImage):
def __init__(self, survey, ccd, image_fn=None, image_hdu=0):
if ccd is not None:
ccd.plver = 'xxx'
ccd.procdate = 'xxx'
ccd.plprocid = 'xxx'
super().__init__(survey, ccd, image_fn=image_fn, image_hdu=image_hdu)
self.dq_hdu = 2
self.wt_hdu = 3
# Adjust zeropoint for exposure time
self.ccdzpt += 2.5 * np.log10(self.exptime)
# Nominal zeropoints
# These are used only for "ccdskybr", so are not critical.
# These are just from DECam!!
self.zp0 = dict(
g = 25.001,
r = 25.209,
r2 = 25.209,
# i,Y from DESY1_Stripe82 95th percentiles
i = 25.149,
i2 = 25.149,
z = 24.875,
y = 23.712,
)
self.k_ext = dict(g = 0.17,
r = 0.10,
r2 = 0.10,
i = 0.08,
i2 = 0.08,
z = 0.06,
y = 0.058,
)
# Sky has already been calibrated out, and Psf is included in the CALEXP file,
# so no external calib files!
self.sefn = None
self.psffn = None
self.skyfn = None
self.merged_psffn = None
self.merged_skyfn = None
self.old_merged_skyfns = []
self.old_merged_psffns = []
self.old_single_psffn = None
self.old_single_skyfn = None
@classmethod
def get_nominal_pixscale(cls):
return 0.168
def get_extension_list(self, debug=False):
return [1,]
def calibration_good(self, primhdr):
return True
def has_astrometric_calibration(self, ccd):
return True
'''
def get_psfex_unmerged_filename(self):
basefn = os.path.basename(self.fn_base)
basedir = os.path.dirname(self.fn_base)
base = basefn.split('.')[0]
fn = base + '-psfex.fits'
fn = os.path.join(self.calibdir, 'psfex-single', basedir, base, fn)
return fn
def get_splinesky_unmerged_filename(self):
basefn = os.path.basename(self.fn_base)
basedir = os.path.dirname(self.fn_base)
base = basefn.split('.')[0]
fn = base + '-splinesky.fits'
fn = os.path.join(self.calibdir, 'sky-single', basedir, base, fn)
return fn
'''
def compute_filenames(self):
self.dqfn = self.imgfn
self.wtfn = self.imgfn
def get_expnum(self, primhdr):
return primhdr['EXPID']
def get_ccdname(self, primhdr, hdr):
return primhdr['DETSER'].strip().upper()
def get_airmass(self, primhdr, imghdr, ra, dec):
return primhdr['BORE-AIRMASS']
def get_gain(self, primhdr, hdr):
return np.mean([primhdr['T_GAIN%i' % i] for i in [1,2,3,4]])
def get_fwhm(self, primhdr, imghdr):
fwhm = primhdr['SEEING']
if fwhm == 0.0:
psf = self.read_psf_model(0., 0., pixPsf=True)
fwhm = psf.fwhm
            return fwhm
        # convert from arcsec to pixels (hard-coded pixscale here)
        fwhm /= HscImage.get_nominal_pixscale()
        return fwhm
def get_propid(self, primhdr):
return primhdr['PROP-ID']
def get_camera(self, primhdr):
cam = super().get_camera(primhdr)
if cam == 'hyper suprime-cam':
cam = 'hsc'
return cam
def get_wcs(self, hdr=None):
from astrometry.util.util import Sip
if hdr is None:
hdr = self.read_image_header()
wcs = Sip(hdr)
# Correction: ccd ra,dec offsets from zeropoints/CCDs file
dra,ddec = self.dradec
# debug('Applying astrometric zeropoint:', (dra,ddec))
r,d = wcs.get_crval()
wcs.set_crval((r + dra / np.cos(np.deg2rad(d)), d + ddec))
wcs.version = ''
wcs.plver = ''
#phdr = self.read_image_primary_header()
#wcs.plver = phdr.get('PLVER', '').strip()
return wcs
def read_sky_model(self, **kwargs):
from tractor import ConstantSky
sky = ConstantSky(0.)
return sky
def read_psf_model(self, x0, y0,
gaussPsf=False, pixPsf=False, hybridPsf=False,
normalizePsf=False, old_calibs_ok=False,
psf_sigma=1., w=0, h=0):
assert(gaussPsf or pixPsf or hybridPsf)
if gaussPsf:
from tractor import GaussianMixturePSF
v = psf_sigma**2
psf = GaussianMixturePSF(1., 0., 0., v, v, 0.)
psf.version = '0'
psf.plver = ''
return psf
# spatially varying pixelized PsfEx
from tractor import PsfExModel
from astrometry.util.fits import fits_table
from tractor import PixelizedPsfEx
from legacypipe.image import NormalizedPixelizedPsfEx
fn = self.imgfn
# PsfEx model information is spread across two BINTABLE hdus,
# each with AR_NAME='PsfexPsf' and no other easily recognized
# headers.
F = fitsio.FITS(fn)
TT = []
for i in range(1, len(F)):
hdr = F[i].read_header()
if hdr.get('AR_NAME') == 'PsfexPsf':
T = fits_table(fn, hdu=i)
assert(len(T) == 1)
TT.append(T)
assert(len(TT) == 2)
T1,T2 = TT
T1.rename('_pixstep', 'pixstep')
T2.rename('_comp', 'comp')
T2.rename('_size', 'size')
T2.rename('_context_first', 'context_first')
T2.rename('_context_second', 'context_second')
t1 = T1[0]
t2 = T2[0]
psfex = PsfExModel()
psfex.sampling = t1.pixstep
degree = psfex.degree = t2.degree
# PSF distortion bases are polynomials of x,y
psfex.x0, psfex.y0 = t2.context_first
psfex.xscale, psfex.yscale = t2.context_second
# number of terms in polynomial
        ne = (degree + 1) * (degree + 2) // 2
size = t2.size
assert(size[2] == ne)
ims = t2.comp.reshape(list(reversed(size)))
ims = ims.astype(np.float32)
assert(len(ims.shape) == 3)
assert(ims.shape[0] == ne)
psfex.psfbases = ims
bh, bw = psfex.psfbases[0].shape
psfex.radius = (bh + 1) / 2.
# We don't have a FWHM measurement, so hack up a measurement on the first
# PSF basis image.
import photutils
from scipy.interpolate import interp1d
psf0 = psfex.psfbases[0,:,:]
cx = bw//2
cy = bh//2
sb = []
rads = np.arange(0, 20.1, 0.5)
for rad1,rad2 in zip(rads, rads[1:]):
aper = photutils.CircularAnnulus((cx, cy), max(1e-3, rad1), rad2)
p = photutils.aperture_photometry(psf0, aper)
f = p.field('aperture_sum')[0]
f /= (np.pi * (rad2**2 - rad1**2))
sb.append(f)
f = interp1d(sb, rads[:-1])
mx = psf0.max()
hwhm = f(psf0.max() / 2.)
fwhm = hwhm * 2. * psfex.sampling
psfex.fwhm = fwhm
print('Measured PsfEx FWHM', fwhm)
if normalizePsf:
psf = NormalizedPixelizedPsfEx(None, psfex=psfex)
else:
psf = PixelizedPsfEx(None, psfex=psfex)
psf.version = ''
psf.plver = ''
psf.procdate = ''
psf.plprocid = ''
psf.datasum = ''
psf.fwhm = fwhm
psf.header = None
psf.shift(x0, y0)
if hybridPsf:
from tractor.psf import HybridPixelizedPSF, NCircularGaussianPSF
psf = HybridPixelizedPSF(psf, cx=w/2., cy=h/2.,
gauss=NCircularGaussianPSF([psf.fwhm / 2.35], [1.]))
return psf
def colorterm_ps1_to_observed(self, ps1stars, band):
"""ps1stars: ps1.median 2D array of median mag for each band"""
from legacypipe.ps1cat import ps1_to_hsc
return ps1_to_hsc(ps1stars, band)
def read_image(self, header=False, **kwargs):
img = super().read_image(header=header, **kwargs)
if header:
img,hdr = img
img[np.logical_not(np.isfinite(img))] = 0.
if header:
img = img,hdr
return img
def remap_dq(self, dq, header):
return remap_hsc_bitmask(dq, header)
def get_zeropoint(self, primhdr, hdr):
flux = primhdr['FLUXMAG0']
zpt = 2.5 * np.log10(flux / self.exptime)
return zpt
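    # Worked example (hypothetical header values): with FLUXMAG0 = 1e9 counts for a
    # zero-magnitude source and a 200 s exposure, zpt = 2.5 * log10(1e9 / 200) ~= 16.75.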
def estimate_sky(self, img, invvar, dq, primhdr, imghdr):
return 0., primhdr['SKYLEVEL'], primhdr['SKYSIGMA']
def read_invvar(self, dq=None, **kwargs):
# HSC has a VARIANCE map (not a weight map)
v = self._read_fits(self.wtfn, self.wt_hdu, **kwargs)
iv = 1./v
iv[v==0] = 0.
        iv[np.logical_not(np.isfinite(iv))] = 0.
        return iv
import numpy as np
import openmdao.api as om
class VortexMesh(om.ExplicitComponent):
"""
Compute the vortex mesh based on the deformed aerodynamic mesh.
Parameters
----------
def_mesh[nx, ny, 3] : numpy array
We have a mesh for each lifting surface in the problem.
That is, if we have both a wing and a tail surface, we will have both
`wing_def_mesh` and `tail_def_mesh` as inputs.
height_agl : scalar
If ground effect is turned on, this input defines the height above
the groud plane (defined from the origin 0,0,0)
alpha : scalar
If ground effect is turned on, this input defines the angular
rotation of the ground plane
Returns
-------
vortex_mesh[nx, ny, 3] : numpy array
The actual aerodynamic mesh used in VLM calculations, where we look
at the rings of the panels instead of the panels themselves. That is,
this mesh coincides with the quarter-chord panel line, except for the
final row, where it lines up with the trailing edge.
"""
def initialize(self):
self.options.declare('surfaces', types=list)
def setup(self):
surfaces = self.options['surfaces']
# Because the vortex_mesh always comes from the deformed mesh in the
# same way, the Jacobian is fully linear and can be set here instead
# of doing compute_partials.
# We do have to account for symmetry here to create a ghost mesh
# by mirroring the symmetric mesh.
any_ground_effect = False
for surface in surfaces:
mesh=surface['mesh']
nx = mesh.shape[0]
ny = mesh.shape[1]
name = surface['name']
mesh_name = '{}_def_mesh'.format(name)
vortex_mesh_name = '{}_vortex_mesh'.format(name)
self.add_input(mesh_name, shape=(nx, ny, 3), units='m')
ground_effect = surface.get('groundplane', False)
if ground_effect:
if not any_ground_effect:
# only need to add the extra inputs once
any_ground_effect = True
self._cached_constant_partial_vals = dict()
self.add_input('height_agl', val=8000.0, units='m')
self.add_input('alpha', val=0.*np.pi/180, units='rad')
if surface['symmetry']:
if ground_effect:
self.add_output(vortex_mesh_name, shape=(2*nx, ny*2-1, 3), units='m')
# these are cheaper to just do with CS
self.declare_partials(vortex_mesh_name, ['alpha', 'height_agl'], method='cs')
mesh_indices = np.arange(nx * ny * 3).reshape((nx, ny, 3))
vor_indices = np.arange(2*nx * (2*ny-1) * 3).reshape((2*nx, (2*ny-1), 3))
quadrant_1_indices = vor_indices[:nx,:ny,:]
quadrant_2_indices = vor_indices[:nx,ny:,:]
quadrant_3_indices = vor_indices[nx:,:ny,:]
quadrant_4_indices = vor_indices[nx:,ny:,:]
else:
# no groundplane
self.add_output(vortex_mesh_name, shape=(nx, ny*2-1, 3), units='m')
mesh_indices = np.arange(nx * ny * 3).reshape((nx, ny, 3))
vor_indices = np.arange(nx * (2*ny-1) * 3).reshape((nx, (2*ny-1), 3))
quadrant_1_indices = vor_indices[:nx,:ny,:]
quadrant_2_indices = vor_indices[:nx,ny:,:]
# quadrant 1 is just the baseline mesh
rows = np.tile(quadrant_1_indices[:-1, :, :].flatten(), 2)
rows = np.hstack((rows, quadrant_1_indices[-1, :, :].flatten()))
cols = np.concatenate([
mesh_indices[:-1, :, :].flatten(),
mesh_indices[1: , :, :].flatten(),
mesh_indices[-1 , :, :].flatten(),
])
data = np.concatenate([
0.75 * np.ones((nx-1) * ny * 3),
0.25 * np.ones((nx-1) * ny * 3),
                    np.ones(ny * 3),  # back row
                ])
# quadrant 2 is the reflection of the baseline across the midline
# need to build these piecewise xyz because of the midline reflection
for dim3 in range(3):
rows = np.hstack((rows, np.tile(quadrant_2_indices[:-1, :, dim3].flatten(), 2)))
rows = np.hstack((rows, quadrant_2_indices[-1 , :, dim3].flatten()))
cols = np.concatenate([cols,
mesh_indices[:-1, :-1, dim3][:,::-1].flatten(),
mesh_indices[1: , :-1, dim3][:,::-1].flatten(),
mesh_indices[-1 , :-1, dim3][::-1].flatten(),
])
data = np.concatenate([
data,
0.75 * np.ones((nx-1) * (ny - 1)),
0.25 * np.ones((nx-1) * (ny - 1)),
np.ones(ny-1),
-0.75 * np.ones((nx-1) * (ny - 1)),
-0.25 * np.ones((nx-1) * (ny - 1)),
-np.ones(ny-1),
0.75 * np.ones((nx-1) * (ny - 1)),
0.25 * np.ones((nx-1) * (ny - 1)),
np.ones(ny-1),
])
if ground_effect:
# these reflections (across the groundplane) are more complex because of the alpha rotation
# which means that the x and z points of the reflected mesh depend on BOTH the x and z points of the initial mesh
# y only depends on y as usual
# third quadrant dependencies (x on x, y on y, z on z, x on z, z on x)
list_of_deps = [(0,0),(1,1),(2,2),(0,2),(2,0)]
for dep_of, dep_on in list_of_deps:
rows = np.hstack((rows, np.tile(quadrant_3_indices[:-1, :, dep_of].flatten(), 2)))
rows = np.hstack((rows, quadrant_3_indices[-1 , :, dep_of].flatten()))
cols = np.concatenate([cols,
mesh_indices[:-1, :, dep_on].flatten(),
mesh_indices[1: , :, dep_on].flatten(),
mesh_indices[-1 , :, dep_on].flatten(),
])
# fourth quadrant dependencies (x on x, y on y, z on z, x on z, z on x)
for dep_of, dep_on in list_of_deps:
rows = np.hstack((rows, np.tile(quadrant_4_indices[:-1, :, dep_of].flatten(), 2)))
rows = np.hstack((rows, quadrant_4_indices[-1 , :, dep_of].flatten()))
cols = np.concatenate([cols,
mesh_indices[:-1, :-1, dep_on][:,::-1].flatten(),
mesh_indices[1: , :-1, dep_on][:,::-1].flatten(),
mesh_indices[-1 , :-1, dep_on][::-1].flatten(),
])
# can't declare constant partials because these depend on alpha (and h?)
self.declare_partials(vortex_mesh_name, mesh_name, rows=rows, cols=cols)
self._cached_constant_partial_vals[name] = data.copy()
else:
# no groundplane, constant partial values
self.declare_partials(vortex_mesh_name, mesh_name, val=data, rows=rows, cols=cols)
else:
if ground_effect:
raise ValueError('Ground effect is not supported without symmetry turned on')
self.add_output(vortex_mesh_name, shape=(nx, ny, 3), units='m')
mesh_indices = np.arange(nx * ny * 3).reshape(
(nx, ny, 3))
rows = np.tile(mesh_indices[:(nx-1), :, :].flatten(), 2)
rows = np.hstack((rows, mesh_indices[-1 , :, :].flatten()))
cols = np.concatenate([
mesh_indices[:-1, :, :].flatten(),
mesh_indices[1: , :, :].flatten(),
mesh_indices[-1 , :, :].flatten(),
])
data = np.concatenate([
0.75 * np.ones((nx-1) * ny * 3),
0.25 * np.ones((nx-1) * ny * 3),
np.ones(ny * 3), # back row
])
self.declare_partials(vortex_mesh_name, mesh_name, val=data, rows=rows, cols=cols)
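    # Sketch of the construction performed in compute() below: each chordwise row of ring
    # corner points is a 3/4 : 1/4 blend of two consecutive mesh rows, and the trailing-edge
    # row stays coincident with the mesh,
    #
    #   vortex[i]      = 0.75 * mesh[i] + 0.25 * mesh[i + 1]   for i < nx - 1
    #   vortex[nx - 1] = mesh[nx - 1]
    #
    # which is exactly the pattern encoded in the sparse Jacobians declared above.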
def compute(self, inputs, outputs):
surfaces = self.options['surfaces']
for surface in surfaces:
nx = surface['mesh'].shape[0]
ny = surface['mesh'].shape[1]
name = surface['name']
ground_effect = surface.get('groundplane', False)
mesh_name = '{}_def_mesh'.format(name)
vortex_mesh_name = '{}_vortex_mesh'.format(name)
if not ground_effect:
if surface['symmetry']:
mesh = np.zeros((nx, ny*2-1, 3), dtype=type(inputs[mesh_name][0, 0, 0]))
mesh[:, :ny, :] = inputs[mesh_name]
# indices are numbered from tip to centerline
# reflection is all but midpoint in rev order
mesh[:, ny:, :] = inputs[mesh_name][:, :-1, :][:, ::-1, :]
mesh[:, ny:, 1] *= -1.
else:
mesh = inputs[mesh_name]
# all but the last station are moved to the quarterchord point
outputs[vortex_mesh_name][:-1, :, :] = 0.75 * mesh[:-1, :, :] + 0.25 * mesh[1:, :, :]
# the last one is coincident
outputs[vortex_mesh_name][-1, :, :] = mesh[-1, :, :]
else:
# symmetric in y plus ground plane using the first dimension
mesh = np.zeros((2*nx, ny*2-1, 3), dtype=type(inputs[mesh_name][0, 0, 0]))
# regular image
mesh[:nx, :ny, :] = inputs[mesh_name]
# indices are numbered from tip to centerline
# reflection is all but midpoint in rev order
mesh[:nx, ny:, :] = inputs[mesh_name][:, :-1, :][:, ::-1, :]
mesh[:nx, ny:, 1] *= -1.
# alpha = 5*np.pi/180
alpha = inputs['alpha'][0]
plane_normal = np.array([np.sin(alpha), 0.0, -np.cos(alpha)]).reshape((1,1,3))
plane_point = np.zeros((1,1,3)) + plane_normal * inputs['height_agl']
# reflect about the ground plane
# plane is defined parallel to the free stream and height_agl from the origin 0 0 0
v = mesh[:nx,:,:] - plane_point
temp = np.inner(v,plane_normal).squeeze()[:,:,np.newaxis]
v_par = temp*plane_normal
mesh[nx:,:,:] = mesh[:nx,:,:] - 2*v_par
outputs[vortex_mesh_name][:nx-1, :, :] = 0.75 * mesh[:nx-1, :, :] + 0.25 * mesh[1:nx, :, :]
outputs[vortex_mesh_name][nx-1, :, :] = mesh[nx-1, :, :]
outputs[vortex_mesh_name][nx:-1, :, :] = 0.75 * mesh[nx:-1, :, :] + 0.25 * mesh[nx+1:, :, :]
outputs[vortex_mesh_name][-1, :, :] = mesh[-1, :, :]
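    # Derivation of the ground-image partial-derivative constants used in compute_partials()
    # below. With the plane normal n = (sin(alpha), 0, -cos(alpha)), a reflected point is
    # p' = p - 2 * ((p - p0) . n) * n, so
    #   dx'/dx = 1 - 2 sin^2(alpha)                 (x_on_x_const)
    #   dz'/dz = 1 - 2 cos^2(alpha)                 (z_on_z_const)
    #   dx'/dz = dz'/dx = 2 sin(alpha) cos(alpha)   (x_on_z_const, z_on_x_const)
    # and dy'/dy = 1, since the reflection leaves y untouched.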
def compute_partials(self, inputs, J):
surfaces = self.options['surfaces']
for surface in surfaces:
mesh=surface['mesh']
nx = mesh.shape[0]
ny = mesh.shape[1]
name = surface['name']
ground_effect = surface.get('groundplane', False)
mesh_name = '{}_def_mesh'.format(name)
vortex_mesh_name = '{}_vortex_mesh'.format(name)
if not ground_effect:
# if ground effect is not enabled the derivatives are constant
                # and this method need not be called
pass
else:
data = self._cached_constant_partial_vals[name]
# we've already figured out the partials for quadrants 1 and 2
# quandrants 3 and 4 are the ground plane reflections which
# depend on angle of attack so they need to be computed each time
# first comes quadrant 3
# x on x, y on y, z on z, x on z, z on x is the order
alpha = inputs['alpha']
x_on_x_const = 1 - 2*np.sin(alpha)**2
z_on_z_const = 1 - 2*np.cos(alpha)**2
x_on_z_const = 2*np.sin(alpha)*np.cos(alpha)
z_on_x_const = 2*np.sin(alpha)*np.cos(alpha)
data = np.concatenate([
data,
# x on x
x_on_x_const * 0.75 * np.ones((nx-1) * ny),
x_on_x_const * 0.25 * np.ones((nx-1) * ny),
x_on_x_const * np.ones(ny),
# y on y
0.75 * np.ones((nx-1) * ny),
0.25 * np.ones((nx-1) * ny),
np.ones(ny),
# z on z
z_on_z_const * 0.75 * np.ones((nx-1) * ny),
z_on_z_const * 0.25 * np.ones((nx-1) * ny),
z_on_z_const * np.ones(ny),
# x on z
x_on_z_const * 0.75 * np.ones((nx-1) * ny),
x_on_z_const * 0.25 * np.ones((nx-1) * ny),
x_on_z_const * np.ones(ny),
# z on x
z_on_x_const * 0.75 * np.ones((nx-1) * ny),
z_on_x_const * 0.25 * np.ones((nx-1) * ny),
z_on_x_const * np.ones(ny),
])
# now quadrant 4 with different dims and reflected y coords
data = np.concatenate([
data,
# x on x
                    x_on_x_const * 0.75 * np.ones((nx-1) * (ny - 1)),
                    x_on_x_const * 0.25 * np.ones((nx-1) * (ny - 1)),
                    x_on_x_const * np.ones(ny-1),
                    # y on y (mirrored across the symmetry plane, as in quadrant 2)
                    -0.75 * np.ones((nx-1) * (ny - 1)),
                    -0.25 * np.ones((nx-1) * (ny - 1)),
                    -np.ones(ny-1),
                    # z on z
                    z_on_z_const * 0.75 * np.ones((nx-1) * (ny - 1)),
                    z_on_z_const * 0.25 * np.ones((nx-1) * (ny - 1)),
                    z_on_z_const * np.ones(ny-1),
                    # x on z
                    x_on_z_const * 0.75 * np.ones((nx-1) * (ny - 1)),
                    x_on_z_const * 0.25 * np.ones((nx-1) * (ny - 1)),
                    x_on_z_const * np.ones(ny-1),
                    # z on x
                    z_on_x_const * 0.75 * np.ones((nx-1) * (ny - 1)),
                    z_on_x_const * 0.25 * np.ones((nx-1) * (ny - 1)),
                    z_on_x_const * np.ones(ny-1),
                ])
                J[vortex_mesh_name, mesh_name] = data
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
import math
import seaborn as sns
import Calculation
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import GenNorm
import Terminal
import AxesFrame
from scipy.interpolate import UnivariateSpline
def draw_compare_error_fix_point(ax, time : 'int', point_number : 'int', terminalA : 'Terminal.CartesianPoint', terminalB : 'Terminal.CartesianPoint',
target : 'Terminal.CartesianPoint', step : 'int' = "1"): # 1, same time, diff mpoint
x = np.linspace(0, time+5, 100)
y = np.linspace(-5, 5, 200)
step_count = 1
# former_exp_x = 0
# former_exp_y = 0
# former_opt_x = 0
# former_opt_y = 0
former_exp = np.array([0,0])
former_opt = np.array([0,0])
exp_sum = 0
opt_sum = 0
for _ in range(int(time/step)): #20/1
exp_dis = 0
opt_dis = 0
round_count = 0
for __ in range(step_count):
round_count = __ + 1
setA = GenNorm.gen_normal_point(point_number, terminalA, target)
setB = GenNorm.gen_normal_point(point_number, terminalB, target)
exp_dis += (Calculation.get_distance_from_origin_by_set(Calculation.get_ideal_coord_by_set(setA)) + Calculation.get_distance_from_origin_by_set(Calculation.get_ideal_coord_by_set(setB)))/2
op_set = Calculation.get_optimized_target(terminalA,
terminalB,
Calculation.get_ideal_coord_by_set(
Calculation.get_modified_coord_by_nor_set_and_terminal(setA,
terminalA)),
Calculation.get_ideal_coord_by_set(
Calculation.get_modified_coord_by_nor_set_and_terminal(setB,
terminalB))
)
opt_dis += Calculation.get_distance_from_origin_by_set(op_set[2])
step_count += step
exp_dis /= round_count
opt_dis /= round_count
exp_sum += exp_dis
opt_sum += opt_dis
if _ == 0 :
ax.scatter(_*step, exp_dis, alpha=0.45, c = "b", label = "exp_err")
ax.scatter(_*step, opt_dis, alpha=0.45, c = "r", label = "opt_err", marker = "^")
else:
ax.scatter(_*step, exp_dis, alpha=0.45, c = "b", s = [60])
ax.scatter(_*step, opt_dis, alpha=0.45, c = "r", s = [60], marker = "^")
ax.plot(np.array([former_exp[0], _*step]), np.array([former_exp[1], exp_dis]), c="b", ls = "--")
ax.plot(np.array([former_opt[0], _*step]), np.array([former_opt[1], opt_dis]), c="r")
former_exp = np.array([_*step, exp_dis])
former_opt = np.array([_*step, opt_dis])
ax.axhline(y = exp_sum/(time/step), c='b', ls='--', lw=2,
alpha=0.7,
label="avr_exp_err")
ax.axhline(y = opt_sum/(time/step), c='r', ls='-', lw=2,
alpha=0.7,
label="avr_opt_err")
ax.xaxis.grid(True, which='major', linestyle=(0, (8, 4)))
ax.yaxis.grid(True, which='major', linestyle=(0, (8, 4)))
#ax.set_xlabel("Measure Round", fontsize=12)
ax.set_ylabel("Error Distance", fontsize=12)
ax.set_title("round: " + str(time) + " point: " + str(point_number) + " step: " + str(step))
ax.legend()
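# Usage sketch (hypothetical points; Terminal.CartesianPoint is assumed to be constructed
# from x, y coordinates as suggested by the type hints above):
#
#   fig, ax = plt.subplots()
#   draw_compare_error_fix_point(ax, time=20, point_number=50,
#                                terminalA=Terminal.CartesianPoint(0, 0),
#                                terminalB=Terminal.CartesianPoint(10, 0),
#                                target=Terminal.CartesianPoint(5, 8), step=1)
#   plt.show()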
def draw_compare_error_fix_time(ax, time : 'int', point_number : 'int', terminalA : 'Terminal.CartesianPoint', terminalB : 'Terminal.CartesianPoint',
target : 'Terminal.CartesianPoint', step : 'int' = "1"):
x = np.linspace(0, time+5, 100)
y = np.linspace(-5, 5, 200)
step_count = 1
# former_exp_x = 0
# former_exp_y = 0
# former_opt_x = 0
# former_opt_y = 0
former_exp = np.array([0,0])
former_opt = np.array([0,0])
exp_sum = 0
opt_sum = 0
for _ in range(int(point_number/step)): #20/1
exp_dis = 0
opt_dis = 0
#round_count = 0
for __ in range(time):
#round_count = __ + 1
setA = GenNorm.gen_normal_point(_+1, terminalA, target)
setB = GenNorm.gen_normal_point(_+1, terminalB, target)
exp_dis += (Calculation.get_distance_from_origin_by_set(Calculation.get_ideal_coord_by_set(setA)) + Calculation.get_distance_from_origin_by_set(Calculation.get_ideal_coord_by_set(setB)))/2
op_set = Calculation.get_optimized_target(terminalA,
terminalB,
Calculation.get_ideal_coord_by_set(
Calculation.get_modified_coord_by_nor_set_and_terminal(setA,
terminalA)),
Calculation.get_ideal_coord_by_set(
Calculation.get_modified_coord_by_nor_set_and_terminal(setB,
terminalB))
)
opt_dis += Calculation.get_distance_from_origin_by_set(op_set[2])
step_count += step
exp_dis /= time
opt_dis /= time
exp_sum += exp_dis
opt_sum += opt_dis
if _ == 0 :
ax.scatter(_*step, exp_dis, alpha=0.45, c = "b", label = "exp_err")
ax.scatter(_*step, opt_dis, alpha=0.45, c = "r", label = "opt_err", marker = "^")
else:
ax.scatter(_*step, exp_dis, alpha=0.45, c = "b", s = [60])
ax.scatter(_*step, opt_dis, alpha=0.45, c = "r", s = [60],marker = "^")
ax.plot(np.array([former_exp[0], _*step]), np.array([former_exp[1], exp_dis]), c="b", ls = "--")
ax.plot(np.array([former_opt[0], _*step]), np.array([former_opt[1], opt_dis]), c="r")
former_exp = np.array([_*step, exp_dis])
former_opt = np.array([_*step, opt_dis])
#ax.axhline(y = exp_sum/(time/step), c='b', ls='--', lw=2,alpha=0.7,label="avr_exp_err")
#ax.axhline(y = opt_sum/(time/step), c='r', ls='-', lw=2,alpha=0.7,label="avr_opt_err")
ax.xaxis.grid(True, which='major', linestyle=(0, (8, 4)))
ax.yaxis.grid(True, which='major', linestyle=(0, (8, 4)))
#ax.set_xlabel("Measure Number", fontsize=12)
ax.set_ylabel("Error Distance", fontsize=12)
ax.set_title("round: " + str(time) + " point: " + str(point_number) + " step: " + str(step))
ax.legend()
def draw_compare_per_fix_point(ax, time : 'int', point_number : 'int', terminalA : 'Terminal.CartesianPoint', terminalB : 'Terminal.CartesianPoint',
target : 'Terminal.CartesianPoint', step : 'int' = "1"): # 1, same time, diff mpoint
x = np.linspace(0, time+1, 100)
y = np.linspace(-5, 5, 200)
step_count = 1
# former_exp_x = 0
# former_exp_y = 0
# former_opt_x = 0
# former_opt_y = 0
former_exp = np.array([0,0])
former_opt = np.array([0,0])
exp_sum = 0
opt_sum = 0
re_sum = 0
for _ in range(int(time/step)): #20/1
exp_dis = 0
opt_dis = 0
round_count = 0
for __ in range(step_count):
round_count = __ + 1
setA = GenNorm.gen_normal_point(point_number, terminalA, target)
setB = GenNorm.gen_normal_point(point_number, terminalB, target)
exp_dis += (Calculation.get_distance_from_origin_by_set(Calculation.get_ideal_coord_by_set(setA)) + Calculation.get_distance_from_origin_by_set(Calculation.get_ideal_coord_by_set(setB)))/2
op_set = Calculation.get_optimized_target(terminalA,
terminalB,
Calculation.get_ideal_coord_by_set(
Calculation.get_modified_coord_by_nor_set_and_terminal(setA,
terminalA)),
Calculation.get_ideal_coord_by_set(
Calculation.get_modified_coord_by_nor_set_and_terminal(setB,
terminalB))
)
opt_dis += Calculation.get_distance_from_origin_by_set(op_set[2])
step_count += step
exp_dis /= round_count
opt_dis /= round_count
exp_sum += exp_dis
opt_sum += opt_dis
re = -3*((opt_dis - exp_dis) / exp_dis)*100
re_sum += re
if re > 0 :
ax.scatter(_ * step, re, alpha=0.45, s=[60], c="g")
ax.plot(np.array([former_exp[0], _ * step]), np.array([former_exp[1], re]), c="g", ls="--")
else :
ax.scatter(_ * step, re, alpha=0.45, s=[60], c="r")
            ax.plot(np.array([former_exp[0], _ * step]), np.array([former_exp[1], re]), c="r", ls="--")
        former_exp = np.array([_ * step, re])
import unittest
import numpy as np
from brainbox.plot_base import (DefaultPlot, ImagePlot, ScatterPlot, ProbePlot, LinePlot,
scatter_xyc_plot, arrange_channels2banks)
class TestPlotBase(unittest.TestCase):
def setUp(self):
"""
Test Basic functionality of the plot class method
"""
self.x = np.arange(100, dtype=float)
self.y = np.arange(0, 1000, 10, dtype=float) + 500
self.z = np.arange(100, dtype=float) - 500
self.c = np.arange(100, dtype=float) + 1000
self.img = np.random.rand(100, 100)
def test_default(self):
data = {'x': self.x, 'y': self.y, 'z': self.z, 'c': self.c}
plot_default = DefaultPlot('default', data)
# Check the default max min lims computations
plot_default.set_xlim()
assert(plot_default.xlim == (0, 99))
plot_default.set_ylim()
assert(plot_default.ylim == (500, 1490))
plot_default.set_zlim()
assert(plot_default.zlim == (-500, -401))
plot_default.set_clim()
assert(plot_default.clim == (1000, 1099))
# Check it deals with nans properly (i.e ignores them)
plot_default.data['x'][0:5] = np.nan
plot_default.set_xlim()
assert(plot_default.xlim == (5, 99))
# Check that when you specify range that is added
plot_default.set_xlim((0, 150))
assert(plot_default.xlim == (0, 150))
plot_default.set_clim((1050, 1100))
assert(plot_default.clim == (1050, 1100))
# Test the _set_default method
out = plot_default._set_default(val=None, default=2)
assert(out == 2)
out = plot_default._set_default(val=5, default=2)
assert(out == 5)
# Test instantiation of titles and labels etc
assert(not plot_default.labels.title)
assert(not plot_default.labels.xlabel)
assert(not plot_default.labels.ylabel)
plot_default.set_labels(title='mytitle', xlabel='myxlabel')
assert(plot_default.labels.title == 'mytitle')
assert(plot_default.labels.xlabel == 'myxlabel')
assert(not plot_default.labels.ylabel)
# Test adding vertical with default options
assert(len(plot_default.vlines) == 0)
plot_default.add_lines(10, 'v')
assert(len(plot_default.vlines) == 1)
assert(plot_default.vlines[0].lim == plot_default.ylim)
assert(plot_default.vlines[0].style == '--')
assert(plot_default.vlines[0].width == 3)
assert(plot_default.vlines[0].color == 'k')
# Test adding horizontal line with specified options
plot_default.add_lines(10, 'h', lim=(40, 80), style='-', width=8, color='g')
assert(len(plot_default.hlines) == 1)
assert(plot_default.hlines[0].lim == (40, 80))
assert(plot_default.hlines[0].style == '-')
assert(plot_default.hlines[0].width == 8)
assert(plot_default.hlines[0].color == 'g')
# Test conversion to dict
plot_dict = plot_default.convert2dict()
keys_to_expect = ['data', 'plot_type', 'hlines', 'vlines', 'labels', 'xlim', 'ylim',
'zlim', 'clim']
assert(all(key in plot_dict for key in keys_to_expect))
def test_image(self):
# Instantiation without specifying x and y
plot_image = ImagePlot(self.img)
assert(plot_image.plot_type == 'image')
assert(np.all(plot_image.data.c == self.img))
assert(np.all(plot_image.data.x == np.arange(self.img.shape[0])))
        assert(np.all(plot_image.data.y == np.arange(self.img.shape[1])))
import torch
import numpy as np
import random
import _pickle as pk
import config as default_config
from torch.utils.data import Dataset, DataLoader
from pytorch_metric_learning import samplers
import math
class SIMSDataset(Dataset):
def __init__(self, type, use_similarity=False, simi_return_mono=False, config=default_config):
raw_data_path = config.SIMS.path.raw_data_path
with open(raw_data_path, 'rb') as f:
self.data = pk.load(f)[type]
self.simi_return_mono = simi_return_mono
self.data['raw_text'] = np.array(self.data['raw_text'])
self.data['id'] = np.array(self.data['id'])
self.size = len(self.data['raw_text'])
self.data['index'] = torch.tensor(range(self.size))
self.vision_fea_size = self.data['vision'][0].shape
self.audio_fea_size = self.data['audio'][0].shape
self.scaled_embedding_averaged = False
# self.__normalize()
self.__gen_mask()
if type == 'train' and use_similarity:
self.__scale()
self.__gen_cos_matrix()
self.use_similarity = use_similarity
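    # Usage sketch (the raw pickle path comes from config.SIMS.path.raw_data_path; the batch
    # size is illustrative only):
    #
    #   train_set = SIMSDataset('train', use_similarity=False)
    #   loader = DataLoader(train_set, batch_size=32, shuffle=True)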
def __gen_mask(self):
vision_tmp = torch.sum(torch.tensor(self.data['vision']), dim=-1)
vision_mask = (vision_tmp == 0)
for i in range(self.size):
vision_mask[i][0] = False
vision_mask = torch.cat((vision_mask[:, 0:1], vision_mask), dim=-1)
self.data['vision_padding_mask'] = vision_mask
audio_tmp = torch.sum(torch.tensor(self.data['audio']), dim=-1)
audio_mask = (audio_tmp == 0)
for i in range(self.size):
audio_mask[i][0] = False
audio_mask = torch.cat((audio_mask[:, 0:1], audio_mask), dim=-1)
self.data['audio_padding_mask'] = audio_mask
# self.data['vision'] = self.data['vision'][:, 1:, :]
# self.data['audio'] = self.data['audio'][:, 1:, :]
def __pad(self):
PAD = torch.zeros(self.data['vision'].shape[0], 1, self.data['vision'].shape[2])
self.data['vision'] = np.concatenate((self.data['vision'], PAD), axis=1)
Ones = torch.ones(self.data['vision'].shape[0], self.data['vision'].shape[2])
for i in range(len(self.data['vision'])):
self.data['vision'][i, self.data['vision_lengths'], :] = Ones
PAD = torch.zeros(self.data['audio'].shape[0], 1, self.data['audio'].shape[2])
self.data['audio'] = np.concatenate((self.data['audio'], PAD), axis=1)
Ones = torch.ones(self.data['audio'].shape[0], self.data['audio'].shape[2])
for i in range(len(self.data['audio'])):
self.data['audio'][i, self.data['audio_lengths'], :] = Ones
def __normalize(self):
# (num_examples,max_len,feature_dim) -> (max_len, num_examples, feature_dim)
self.data['vision'] = np.transpose(self.data['vision'], (1, 0, 2))
self.data['audio'] = np.transpose(self.data['audio'], (1, 0, 2))
# for visual and audio modality, we average across time
# here the original data has shape (max_len, num_examples, feature_dim)
# after averaging they become (1, num_examples, feature_dim)
self.data['vision'] = np.mean(self.data['vision'], axis=0, keepdims=True)
self.data['audio'] = np.mean(self.data['audio'], axis=0, keepdims=True)
# remove possible NaN values
self.data['vision'][self.data['vision'] != self.data['vision']] = 0
self.data['audio'][self.data['audio'] != self.data['audio']] = 0
self.data['vision'] = np.transpose(self.data['vision'], (1, 0, 2))
self.data['audio'] = np.transpose(self.data['audio'], (1, 0, 2))
def __scale(self):
self.scaled_audio = self.data['audio'].copy()
self.scaled_vision = self.data['vision'].copy()
self.scaled_text = self.data['text'].copy()
for i in range(self.audio_fea_size[-1]):
max_num = np.max(self.data['audio'][:, :, i])
min_num = np.min(self.data['audio'][:, :, i])
self.scaled_audio[:, :, i] = (self.data['audio'][:, :, i] - min_num) / (max_num - min_num) * 2 - 1
for i in range(self.vision_fea_size[-1]):
max_num = np.max(self.data['vision'][:, :, i])
            min_num = np.min(self.data['vision'][:, :, i])
            self.scaled_vision[:, :, i] = (self.data['vision'][:, :, i] - min_num) / (max_num - min_num) * 2 - 1
from __future__ import annotations
import itertools
import logging
import math
import re
from collections import Counter
from itertools import combinations, starmap
from typing import Dict, Iterable, List, TextIO, Tuple
import networkx as nx
import numpy as np
import numpy.typing as npt
from networkx.drawing.nx_pydot import write_dot
from ..cli import run_with_file_argument
from ..io_utils import read_line
logger = logging.getLogger(__name__)
HEADER_PATTERN = re.compile(r"^\-\-\-\sscanner\s\d+\s\-\-\-$")
def read_beacons(input: TextIO) -> Iterable[npt.NDArray]:
while True:
header = read_line(input)
if not header:
break
beacons: List[Tuple[int, int, int]] = []
assert HEADER_PATTERN.match(header) is not None
while line := read_line(input):
x, y, z = map(int, line.split(","))
beacons.append((x, y, z))
yield np.array(beacons)
def distance(a: npt.NDArray[int], b: npt.NDArray[int]) -> float:
dist: float = np.linalg.norm(a - b)
return dist
def get_edges(beacons: npt.NDArray[int]) -> Dict[float, Tuple[int, int]]:
enumerated_beacons = enumerate(beacons)
result: Dict[float, Tuple[int, int]] = {}
for (a_idx, a_beacon), (b_idx, b_beacon) in combinations(enumerated_beacons, 2):
dist = distance(a_beacon, b_beacon)
assert dist not in result
result[dist] = a_idx, b_idx
return result
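# Pairwise distances are invariant under the unknown rotation and translation between two
# scanners, so they act as fingerprints: an edge length seen by both scanners very likely
# joins the same pair of beacons. For example, beacons at (0,0,0), (1,2,2) and (0,0,5)
# yield {3.0: (0, 1), 5.0: (0, 2), 3.74...: (1, 2)} (last value approximate).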
def resolve_scanner(
source_beacons: npt.NDArray[int], target_beacons: npt.NDArray[int]
) -> Tuple[npt.NDArray[int], npt.NDArray[int]]:
# find common edges
source_edges = get_edges(source_beacons)
target_edges = get_edges(target_beacons)
common_edges = set(source_edges) & set(target_edges)
# Now pick 2 nodes at random, then one more and find their equivalents
first_edge, second_edge, *_ = common_edges
source_node_a, source_node_b = source_edges[first_edge]
other_source_nodes = source_edges[
second_edge
] # at least one is guaranteed to be neither a nor b
source_node_c, *_ = set(other_source_nodes) - {source_node_a, source_node_b}
source_a_to_b = distance(
source_beacons[source_node_a], source_beacons[source_node_b]
)
source_a_to_c = distance(
source_beacons[source_node_a], source_beacons[source_node_c]
)
source_b_to_c = distance(
source_beacons[source_node_b], source_beacons[source_node_c]
)
target_nodes_a_or_b = target_edges[first_edge]
target_nodes_c_or_d = target_edges[second_edge]
assert (
distance(
target_beacons[target_nodes_a_or_b[0]],
target_beacons[target_nodes_a_or_b[1]],
)
== source_a_to_b
)
# Figure out which nodes are which (map A, B, C from source to target)
if (
distance(
target_beacons[target_nodes_a_or_b[0]],
target_beacons[target_nodes_c_or_d[0]],
)
== source_a_to_c
):
target_node_a, target_node_b = target_nodes_a_or_b
target_node_c, target_node_d = target_nodes_c_or_d
elif (
distance(
target_beacons[target_nodes_a_or_b[1]],
target_beacons[target_nodes_c_or_d[0]],
)
== source_a_to_c
):
target_node_b, target_node_a = target_nodes_a_or_b
target_node_c, target_node_d = target_nodes_c_or_d
elif (
distance(
target_beacons[target_nodes_a_or_b[0]],
target_beacons[target_nodes_c_or_d[1]],
)
== source_a_to_c
):
target_node_a, target_node_b = target_nodes_a_or_b
target_node_d, target_node_c = target_nodes_c_or_d
else:
assert (
distance(
target_beacons[target_nodes_a_or_b[1]],
target_beacons[target_nodes_c_or_d[1]],
)
== source_a_to_c
)
target_node_b, target_node_a = target_nodes_a_or_b
target_node_d, target_node_c = target_nodes_c_or_d
# make sure that our triangle is correct
assert (
distance(target_beacons[target_node_a], target_beacons[target_node_b])
== source_a_to_b
)
assert (
distance(target_beacons[target_node_a], target_beacons[target_node_c])
== source_a_to_c
)
assert (
distance(target_beacons[target_node_b], target_beacons[target_node_c])
== source_b_to_c
)
# now figure out the coords transformation
source_a_coords = source_beacons[source_node_a]
target_a_coords = target_beacons[target_node_a]
source_b_coords = source_beacons[source_node_b]
target_b_coords = target_beacons[target_node_b]
# analyze how the coords change for a know pair of mirrored
source_vector = source_a_coords - source_b_coords
target_vector = target_a_coords - target_b_coords
# to execute the naive approach we need the translation to be unique on all axes
abs_source_vector = np.abs(source_vector)
assert len(np.unique(abs_source_vector)) == 3
abs_target_vector = np.abs(target_vector)
assert len(np.unique(abs_target_vector)) == 3
# the absolute differences should match
assert set(abs_source_vector) == set(abs_target_vector)
# now we just need to figure out which axis is which and then the scanners position
rotation_matrix = np.zeros((3, 3), dtype=int)
for source_axis, abs_source_value in enumerate(abs_source_vector):
(target_axis,) = np.where(abs_target_vector == abs_source_value)
        is_negated = np.sign(source_vector[source_axis]) != np.sign(target_vector[target_axis])
        rotation_matrix[source_axis, target_axis] = -1 if is_negated else 1
from common.caching import read_input_dir, cached
from common.dataio import get_aps_data_hdf5, get_passenger_clusters
import numpy as np
import skimage.transform
import glob
import os
import tqdm
import h5py
import pickle
import imageio
import skimage.measure
import cv2
SEGMENTATION_COLORS = np.array([[255, 0, 0], [255, 0, 255], [0, 0, 255]])
def _get_mask(image, color):
mask = np.all(image[..., :3] == color, axis=-1)
mask = np.stack(np.split(mask, 16, axis=1), axis=-1)
return mask
@cached(get_aps_data_hdf5, version=2, subdir='ssd')
def get_threat_heatmaps(mode):
if not os.path.exists('done'):
names, labels, x = get_aps_data_hdf5(mode)
f = h5py.File('data.hdf5', 'w')
th = f.create_dataset('th', x.shape + (3,))
with read_input_dir('hand_labeling/threat_segmentation/base'):
for i, (name, label, data) in tqdm.tqdm(enumerate(zip(names, labels, x)), total=len(x)):
files = glob.glob(name + '*')
assert files, 'missing hand segmentation for %s' % name
image = imageio.imread(files[0])
masks = [_get_mask(image, SEGMENTATION_COLORS[ci]) for ci in range(3)]
with read_input_dir('hand_labeling/threat_segmentation/revision_v0'):
for revision in glob.glob(name + '*'):
rlabel = int(revision.split('_')[1].split('.')[0])
rci = [i+1 for i in range(17) if label[i]].index(rlabel)
rimage = imageio.imread(revision)
masks[rci] = _get_mask(rimage, SEGMENTATION_COLORS[0])
th[i] = np.stack(masks, axis=-1)
open('done', 'w').close()
else:
f = h5py.File('data.hdf5', 'r')
th = f['th']
return th
@cached(get_threat_heatmaps, version=8, subdir='ssd', cloud_cache=True)
def get_augmented_threat_heatmaps(mode):
if not os.path.exists('done'):
th_in = get_threat_heatmaps(mode)
f = h5py.File('data.hdf5', 'w')
th = f.create_dataset('th', (len(th_in), 16, 660, 512, 6))
def segmentation_mask(masks):
ret = np.zeros((16, 660, 512, 2))
for i in range(16):
for j in range(3):
cur = masks[..., i, j]
if not cur.any():
continue
ret[i, ..., 0] += cur / np.max(cur)
ret[i, ..., 1] += cur / np.sum(cur)
return ret
def com_mask(masks):
ret = np.zeros((16, 660, 512, 2))
for i in range(16):
for j in range(3):
cur = masks[..., i, j]
if not cur.any():
continue
M = skimage.measure.moments(cur.astype('double'))
xb, yb = M[0, 1]/M[0, 0], M[1, 0]/M[0, 0]
cov = np.array([[16, 0], [0, 16]])
covinv = np.linalg.inv(cov)
mean = np.array([xb, yb])
gx, gy = np.meshgrid(np.arange(512), np.arange(660))
g = np.reshape(np.stack([gy, gx], axis=-1), (-1, 2))
g = np.exp(-0.5*np.sum((g-mean).dot(covinv)*(g-mean), axis=1))
g = np.reshape(g, (660, 512))
ret[i, ..., 0] += g / np.max(g)
ret[i, ..., 1] += g / np.sum(g)
return ret
def distance_mask(masks):
ret = np.zeros((16, 660, 512, 2))
for i in range(16):
for j in range(3):
cur = (masks[..., i, j]*255).astype('uint8')
if not cur.any():
continue
g = cv2.distanceTransform(cur, cv2.DIST_L2, cv2.DIST_MASK_PRECISE)
ret[i, ..., 0] += g / np.max(g)
ret[i, ..., 1] += g / np.sum(g)
return ret
mean = np.zeros(6)
for i, data in enumerate(tqdm.tqdm(th_in)):
th[i, ..., 0:2] = segmentation_mask(data)
th[i, ..., 2:4] = com_mask(data)
th[i, ..., 4:6] = distance_mask(data)
mean += np.mean(th[i], axis=(0, 1, 2)) / len(th)
        np.save('mean.npy', mean)
from abc import ABC, abstractmethod
import scipy
import numpy as np
class Server(ABC):
def __init__(self, server_model, merged_update):
self.model = server_model
self.merged_update = merged_update
self.total_weight = 0
@abstractmethod
def train_model(self, my_round, num_syncs, clients_per_group,
sampler, batch_size, base_dist):
"""Aggregate clients' models after each iteration. If
num_syncs synchronizations are reached, middle servers'
models are then aggregated at the top server.
Args:
my_round: The current training round, used for learning rate
decay.
num_syncs: Number of client - middle server synchronizations
in each round before sending to the top server.
clients_per_group: Number of clients to select in
each synchronization.
sampler: Sample method, could be "random", "brute",
"probability", "bayesian", "ga" (namely genetic algorithm),
and "gbp-cs" (namely gradient-based binary permutation
client selection).
batch_size: Number of samples in a batch data.
base_dist: Real data distribution, usually global_dist.
Returns:
update: The trained model after num_syncs synchronizations.
"""
return None
def merge_updates(self, weight, update):
"""Aggregate updates based on their weights.
Args:
weight: Weight for this update.
update: The trained model.
"""
merged_update_ = list(self.merged_update.get_params())
current_update_ = list(update)
num_params = len(merged_update_)
self.total_weight += weight
for p in range(num_params):
merged_update_[p].set_data(
merged_update_[p].data() +
(weight * current_update_[p].data()))
def update_model(self):
"""Update self.model with averaged merged update."""
merged_update_ = list(self.merged_update.get_params())
num_params = len(merged_update_)
for p in range(num_params):
merged_update_[p].set_data(
merged_update_[p].data() / self.total_weight)
self.model.set_params(self.merged_update.get_params())
self.total_weight = 0
self.merged_update.reset_zero()
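    # Numeric sketch of merge_updates/update_model with a hypothetical scalar "parameter":
    # merging value 1.0 with weight 10 and value 4.0 with weight 30 accumulates
    # 1.0 * 10 + 4.0 * 30 = 130 with total_weight = 40, so update_model sets the parameter
    # to 130 / 40 = 3.25, i.e. the weighted average of the merged updates.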
@abstractmethod
def test_model(self, set_to_use):
"""Test self.model on all clients.
Args:
set_to_use: Dataset to test on, either "train" or "test".
Returns:
metrics: Dict of metrics returned by the model.
"""
return None
def save_model(self, log_dir):
"""Save self.model to specified directory.
Args:
log_dir: Directory to save model file.
"""
self.model.save(log_dir)
class TopServer(Server):
def __init__(self, server_model, merged_update, servers):
self.middle_servers = []
self.register_middle_servers(servers)
super(TopServer, self).__init__(server_model, merged_update)
def register_middle_servers(self, servers):
"""Register middle servers.
Args:
servers: Middle servers to be registered.
"""
if type(servers) == MiddleServer:
servers = [servers]
self.middle_servers.extend(servers)
def train_model(self, my_round, num_syncs, clients_per_group,
sampler, batch_size, base_dist):
"""Call middle servers to train their models and aggregate
their updates."""
for s in self.middle_servers:
s.set_model(self.model)
update = s.train_model(
my_round, num_syncs, clients_per_group, sampler, batch_size, base_dist)
self.merge_updates(clients_per_group, update)
self.update_model()
def test_model(self, set_to_use="test"):
"""Call middle servers to test their models."""
metrics = {}
for middle_server in self.middle_servers:
middle_server.set_model(self.model)
s_metrics = middle_server.test_model(set_to_use)
metrics.update(s_metrics)
return metrics
class MiddleServer(Server):
def __init__(self, server_id, server_model, merged_update, clients_in_group):
self.server_id = server_id
self.clients = []
self.register_clients(clients_in_group)
super(MiddleServer, self).__init__(server_model, merged_update)
def register_clients(self, clients):
"""Register clients of this middle server.
Args:
clients: Clients to be registered.
"""
if type(clients) is not list:
clients = [clients]
self.clients.extend(clients)
def select_clients(self, my_round, clients_per_group, sampler="random",
batch_size=32, base_dist=None, display=False,
metrics_dir="metrics", rand_per_group=2):
"""Randomly select clients_per_group clients for this round."""
online_clients = self.online(self.clients)
num_clients = len(online_clients)
num_sample_clients = min(clients_per_group, num_clients) \
- rand_per_group
# Randomly select part of num_clients clients
np.random.seed(my_round)
rand_clients_idx = np.random.choice(
range(num_clients), rand_per_group, replace=False)
rand_clients = np.take(online_clients, rand_clients_idx).tolist()
# Select rest clients to meet approximate i.i.d. dist
sample_clients = []
rest_clients = np.delete(online_clients, rand_clients_idx).tolist()
if sampler == "random":
sample_clients = self.random_sampling(
rest_clients, num_sample_clients, my_round, base_dist, rand_clients)
elif sampler == "probability":
sample_clients = self.probability_sampling(
rest_clients, num_sample_clients, my_round, base_dist, rand_clients)
elif sampler == "brute":
sample_clients = self.brute_sampling(
rest_clients, num_sample_clients, base_dist, rand_clients)
elif sampler == "bayesian":
sample_clients = self.bayesian_sampling(
rest_clients, num_sample_clients, my_round, base_dist, rand_clients)
elif sampler == "ga":
sample_clients = self.genetic_sampling(
rest_clients, num_sample_clients, my_round, base_dist, rand_clients)
elif sampler == "gbp-cs":
sample_clients = self.gbp_cs_sampling(
rest_clients, num_sample_clients, batch_size, base_dist, rand_clients)
selected_clients = rand_clients + sample_clients
# Measure the distance of base distribution and mean distribution
distance = self.get_dist_distance(selected_clients, base_dist)
print("Dist Distance on Middle Server %i:"
% self.server_id, distance, flush=True)
# Visualize distributions if needed
if display:
from metrics.visualization_utils import plot_clients_dist
plot_clients_dist(clients=selected_clients,
global_dist=base_dist,
draw_mean=True,
metrics_dir=metrics_dir)
return selected_clients
def random_sampling(self, clients, num_clients, my_round, base_dist=None,
exist_clients=[], num_iter=1):
"""Randomly sample num_clients clients from given clients.
Args:
clients: List of clients to be sampled.
num_clients: Number of clients to sample.
my_round: The current training round, used as random seed.
base_dist: Real data distribution, usually global_dist.
exist_clients: List of existing clients.
num_iter: Number of iterations for sampling.
Returns:
rand_clients: List of randomly sampled clients.
"""
np.random.seed(my_round)
rand_clients_ = []
if num_iter == 1:
rand_clients_ = np.random.choice(
clients, num_clients, replace=False).tolist()
elif num_iter > 1:
min_distance_ = 1
rand_clients_ = []
while num_iter > 0:
rand_clients_tmp_ = np.random.choice(
clients, num_clients, replace=False).tolist()
all_clients_ = exist_clients + rand_clients_tmp_
distance_ = self.get_dist_distance(all_clients_, base_dist)
if distance_ < min_distance_:
min_distance_ = distance_
rand_clients_[:] = rand_clients_tmp_
num_iter -= 1
return rand_clients_
def probability_sampling(self, clients, num_clients, my_round, base_dist,
exist_clients=[], num_iter=100):
"""Randomly sample num_clients clients from given clients, according
to real-time learning probability.
Args:
clients: List of clients to be sampled.
num_clients: Number of clients to sample.
my_round: The current training round, used as random seed.
base_dist: Real data distribution, usually global_dist.
exist_clients: List of existing clients.
num_iter: Number of iterations for sampling.
Returns:
rand_clients: List of sampled clients.
"""
assert num_iter > 1, "Invalid num_iter=%s (num_iter>1)" % num_iter
np.random.seed(my_round)
min_distance_ = 1
rand_clients_ = []
prob_ = np.array([1. / len(clients)] * len(clients))
while num_iter > 0:
rand_clients_idx_ = np.random.choice(
range(len(clients)), num_clients, p=prob_, replace=False)
            rand_clients_tmp_ = np.take(clients, rand_clients_idx_).tolist()
#!/usr/bin/env python
"""
Uses Gaussian Mixture Models to separate members of clusters from non members. A series of data reductions are conducted on the supplied dataset to prepare it for Gaussian Mixture Model processing. Subordinate functions take care of clean up operations.
"""
# imports
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import math
from sklearn.preprocessing import MinMaxScaler
from sklearn.mixture import GaussianMixture
__author__ = "<NAME>"
__copyright__ = "Copyright 2021, The University of Texas at Austin"
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "MIT"
__version__ = "3.1.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
# globals
ATTR1 = "dist_frm_cent"
ATTR2 = "probability"
ATTR3 = "type"
def compute_gmm(df, plist, center_x, center_y, dist_est, verbose=0):
"""
Parent function handling all the subordinate functions that compute individual pieces of the GMM process. A series of data reductions are performed before the data set is deemed to be usable for GMM processing\n
Args:
► `df` (pandas.core.frame.DataFrame): The pandas dataframe that contains the dataset\n
► `plist` (list): list of attributes to be used to form a subset of the `df` dataframe for GMM processing\n
► `center_x` (float): estimated x coordinate of the center of the studying cluster\n
    ► `center_y` (float): estimated y coordinate of the center of the studying cluster\n
    ► `dist_est` (float): distance estimate (in parsecs) of how far the cluster is located\n
► `verbose` (int, optional): Verbosity for the algorithm. Must be in the range [0,2]. Defaults to `0`\n
`0` → No plots or debug statements, data returned post processeing\n
`1` → plots displayed and data returned post processing\n
`2` → plots displayed, debug statements printed, and data returned\n
Returns:\n
    ► df (pandas.core.frame.DataFrame): the same dataframe `df` after the Gaussian Mixture Model has finished processing, along with the preparatory reductions enforced on the dataframe before the processing\n
Raises:\n
    `KeyError` → plist contains a parameter not present in the dataframe `df`\n
`ValueError` → `center_x < 0` or `center_x >= 360`\n
`ValueError` → `center_y < -90` or `center_y > 90`\n
`ValueError` → `dist_est < 0`\n
`ValueError` → `verbosity != 0` or `1` or `2`\n
"""
#scope constants
acceptable_verbose = [0, 1, 2]
max_slope = 1.392
min_slope = 0.719
max_intercept = -84.5
min_intercept = 9.37
try:
for elem in plist:
df[elem]
except KeyError:
print("plist contains paramters that are not present in the dataframe. Please provide a valid plist")
if center_x < 0 or center_x >= 360:
raise ValueError(f"center_x must be in the range 0 <= center_x < 360.\nValue provided: {center_x}")
if center_y < -90 or center_y > 90:
raise ValueError(f"center_y must be in the range -90 <= center_y <= 90.\n Value provided: {center_y}")
if dist_est < 0:
raise ValueError(f"Distance cannot be negative.\nValue provided: {dist_est}")
if verbose not in acceptable_verbose:
raise ValueError(f"Verbosity can only be 0, 1, or 2.\nValue provided: {verbose}")
# linear interpolation for GMM bounds
min_dist = min_slope * dist_est + min_intercept
max_dist = max_slope * dist_est + max_intercept
# obtain distance information
df = __get_distance(df, plist[0], plist[1], center_x, center_y)
plist.append(ATTR1)
# make required dataset
test_frame = df[plist]
test_frame = test_frame[test_frame[ATTR1] <= 1] # distance from center <= 1 deg
test_frame = test_frame[1000 / test_frame[plist[-2]] <= max_dist] # max distance bound
test_frame = test_frame[1000 / test_frame[plist[-2]] >= min_dist] # min distance bound
test_frame.drop([ATTR1], axis="columns", inplace=True)
# GMM
test_frame = __fit_gmm(test_frame, verbose)
plist.pop() # removing distance from center for final test set
test_ra = np.array(test_frame["ra"])
df = df[df["ra"].isin(test_ra)]
return df
def __get_distance(df, param1, param2, center_x, center_y):
"""
    Private function that calculates the Euclidean distance of every data point to the supplied center_x and center_y and appends it to the supplied dataframe to be used later\n
Args:
► `df` (pandas.core.frame.DataFrame): the pandas dataframe containing the data \n
► `param1` (str): attribute storing the equivalent x coordinate of the dataset\n
► `param2` (str): attribute storing the equivalent y coordinate of the dataset\n
► `center_x` (float): estimated x coordinate of the center of the studying cluster\n
    ► `center_y` (float): estimated y coordinate of the center of the studying cluster\n
Returns:\n
    ► df (pandas.core.frame.DataFrame): the same dataframe `df` with the Euclidean distance of each point to the supplied center x and y coordinates appended\n
"""
distance = list()
x = np.array(df[param1])
    y = np.array(df[param2])
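    # Hedged completion sketch, following the docstring above: Euclidean distance of
    # every point to the supplied cluster centre, appended under the ATTR1 column.
    for xi, yi in zip(x, y):
        distance.append(math.sqrt((xi - center_x) ** 2 + (yi - center_y) ** 2))
    df[ATTR1] = distance
    return df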
# This file is a part of the HiRISE DTM Importer for Blender
#
# Copyright (C) 2017 Arizona Board of Regents on behalf of the Planetary Image
# Research Laboratory, Lunar and Planetary Laboratory at the University of
# Arizona.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""Triangulation algorithms"""
import numpy as np
class Triangulate:
"""
A triangulation algorithm for creating a mesh from a DTM raster.
I have been re-writing parts of the Blender HiRISE DTM importer in an
effort to cull its dependencies on external packages. Originally, the
add-on relied on SciPy's Delaunay triangulation (really a wrapper for
Qhull's Delaunay triangulation) to triangulate a mesh from a HiRISE DTM.
This re-write is much better suited to the problem domain. The SciPy
Delaunay triangulation creates a mesh from any arbitrary point cloud and,
while robust, doesn't care about the fact that our HiRISE DTMs are
regularly gridded rasters. This triangulation algorithm is less robust
but much faster. Credit is due to <NAME> for his work on the previous
Blender HiRISE DTM importer --- this triangulation algorithm largely
models the one in his add-on with a few changes (namely interfacing
with NumPy's API).
Overview
----------
Suppose we have a DTM:
.. code::
- - - - - - - - X X - - - - -
- - - - - - X X X X X - - - -
- - - - X X X X X X X X - - -
- - X X X X X X X X X X X - -
X X X X X X X X X X X X X X -
- X X X X X X X X X X X X X X
- - X X X X X X X X X X X - -
- - - X X X X X X X X - - - -
- - - - X X X X X - - - - - -
- - - - - X X - - - - - - - -
where 'X' represents valid values and '-' represents invalid values.
Valid values should become vertices in the resulting mesh, invalid
values should be ignored.
Our end goal is to supply Blender with:
1. an (n x 3) list of vertices
2. an (m x 3) list of faces.
A vertex is a 3-tuple that we get from the DTM raster array. The
z-coordinate is whatever elevation value is in the DTM and the xy-
coordinates are the image indices multiplied by the resolution of the
DTM (e.g. if the DTM is at 5m/px, the first vertex is at (0m, 0m,
z_00) and the vertex to the right of it is at (5m, 0m, z_01)).
A face is a 3-tuple (because we're using triangles) where each element
is an index of a vertex in the vertices list. Computing the faces is
tricky because we want to leverage the orthogonal structure of the DTM
raster for computational efficiency but we also need to reference
vertex indices in our faces, which don't observe any regular
structure.
We take two rows at a time from the DTM raster and track the *raster
row* indices as well as well as the *vertex* indices. Raster row
indices are the distance of a pixel in the raster from the left-most
(valid *or* invalid) pixel of the row. The first vertex is index 0 and
corresponds to the upperleft-most valid pixel in the DTM raster.
Vertex indices increase to the right and then down.
For example, the first two rows:
.. code::
- - - - - - - - X X - - - - -
- - - - - - X X X X X - - - -
in vertex indices:
.. code::
- - - - - - - - 0 1 - - - - -
- - - - - - 2 3 4 5 6 - - - -
and in raster row indices:
.. code::
- - - - - - - - 9 10 - - - - -
- - - - - - 7 8 9 10 11 - - - -
To simplify, we will only add valid square regions to our mesh. So,
for these first two rows the only region that will be added to our
mesh is the quadrilateral formed by vertices 0, 1, 4 and 5. We
further divide this area into 2 triangles and add the vertices to the
face list in CCW order (i.e. t1: (4, 1, 0), t2: (4, 5, 1)).
After the triangulation between two rows is completed, the bottom
row is cached as the top row and the next row in the DTM raster is
read as the new bottom row. This process continues until the entire
raster has been triangulated.
Todo
---------
* It should be pretty trivial to add support for triangular
regions (i.e. in the example above, also adding the triangles
formed by (3, 4, 0) and (5, 6, 1)).
"""
def __init__(self, array):
self.array = array
self.faces = self._triangulate()
def _triangulate(self):
"""Triangulate a mesh from a topography array."""
# Allocate memory for the triangles array
max_tris = (self.array.shape[0] - 1) * (self.array.shape[1] - 1) * 2
tris = np.zeros((max_tris, 3), dtype=int)
ntri = 0
# We initialize a vertex counter at 0
prev_vtx_start = 0
# We don't care about the values in the array, just whether or not
# they are valid.
prev = ~np.isnan(self.array[0])
# We can sum this boolean array to count the number of valid entries
prev_num_valid = prev.sum()
# TODO: Probably a more clear (and faster) function than argmax for
# getting the first Truth-y value in a 1d array.
prev_img_start = np.argmax(prev)
# Start quadrangulation
for i in range(1, self.array.shape[0]):
# Fetch this row, get our bearings in image *and* vertex space
curr = ~np.isnan(self.array[i])
curr_vtx_start = prev_vtx_start + prev_num_valid
curr_img_start = np.argmax(curr)
curr_num_valid = curr.sum()
# Find the overlap between this row and the previous one
            overlap = np.logical_and(prev, curr)
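            # Hedged sketch of the remaining quadrangulation (simplified, and an
            # assumption rather than the original code): assuming each row's valid
            # pixels form one contiguous run, emit two CCW triangles per overlapping
            # quad, then roll the bottom row over to become the next top row.
            overlap_cols = np.nonzero(overlap)[0]
            for c0 in overlap_cols[:-1]:
                if not (overlap[c0] and overlap[c0 + 1]):
                    continue
                top_left = prev_vtx_start + (c0 - prev_img_start)
                bot_left = curr_vtx_start + (c0 - curr_img_start)
                tris[ntri] = (bot_left, top_left + 1, top_left)
                tris[ntri + 1] = (bot_left, bot_left + 1, top_left + 1)
                ntri += 2
            prev = curr
            prev_vtx_start = curr_vtx_start
            prev_img_start = curr_img_start
            prev_num_valid = curr_num_valid
        return tris[:ntri]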
"""
This script implements a deformation network that regresses per-point offsets from instance to canonical
"""
from __future__ import print_function
import argparse
import os
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
from datasets import PartDataset
from pointnet import DeformNet
import torch.nn.functional as F
from pytorch3d.loss.chamfer import chamfer_distance as criterion
import open3d as o3d
import sys
from tensorboardX import SummaryWriter
import time
parser = argparse.ArgumentParser()
parser.add_argument('--batchSize', type=int, default=32, help='input batch size')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)
parser.add_argument('--nepoch', type=int, default=50, help='number of epochs to train for')
parser.add_argument('--outf', type=str, default='ae_deform', help='output folder')
parser.add_argument('--model', type=str, default = '', help='model path')
parser.add_argument('--num_points', type=int, default = 4096, help='number of points')
parser.add_argument('--dataset', type=str, required=True, help='dataset root')
parser.add_argument('--latent_size', type=int, default=100, help='bottleneck size')
parser.add_argument('--class_choice', type=str, required=True, help='class choice')
parser.add_argument('--dont_save_model', action='store_true', help='save model progress')
parser.add_argument('--test', action='store_true', help='use test set')
parser.add_argument('--viz', action='store_true', help='visualization')
parser.add_argument('--no_rot', action='store_true', help='no rotation on points', default=False)
opt = parser.parse_args()
print (opt)
def rand_rotation_matrix(deflection=1.0, randnums=None):
"""
Creates a random rotation matrix.
    deflection: the magnitude of the rotation. For 0, no rotation; for 1, completely random
rotation. Small deflection => small perturbation.
randnums: 3 random numbers in the range [0, 1]. If `None`, they will be auto-generated.
"""
# from http://www.realtimerendering.com/resources/GraphicsGems/gemsiii/rand_rotation.c
if randnums is None:
randnums = np.random.uniform(size=(3,))
theta, phi, z = randnums
theta = theta * 2.0*deflection*np.pi # Rotation about the pole (Z).
phi = phi * 2.0*np.pi # For direction of pole deflection.
z = z * 2.0*deflection # For magnitude of pole deflection.
# Compute a vector V used for distributing points over the sphere
# via the reflection I - V Transpose(V). This formulation of V
# will guarantee that if x[1] and x[2] are uniformly distributed,
# the reflected points will be uniform on the sphere. Note that V
# has length sqrt(2) to eliminate the 2 in the Householder matrix.
r = np.sqrt(z)
Vx, Vy, Vz = V = (
np.sin(phi) * r,
np.cos(phi) * r,
np.sqrt(2.0 - z)
)
st = np.sin(theta)
ct = np.cos(theta)
    R = np.array(((ct, st, 0), (-st, ct, 0), (0, 0, 1)))
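    # Hedged completion (the standard final step of the referenced Graphics Gems
    # routine): apply the Householder reflection built from V to the z-rotation.
    M = (np.outer(V, V) - np.eye(3)).dot(R)
    return M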
from functools import partial
import numpy as np
from scipy.optimize import minimize, NonlinearConstraint, Bounds
from project_1 import save_to_excel, calculate_c, critical_buckling, node_distance, slenderness_ratio, \
stress_calculator, optimize_v1
def parameter_calculation(x0, E=1, W=0, D=0, xi=None, uniformCrossSection=True, remove_edge=None):
if xi is None:
if uniformCrossSection:
node1_x, node2_y, node3_y, cross_section_width = x0
else:
node1_x, node2_y, node3_y, cross_section_width = x0[0], x0[1], x0[2], x0[3:]
area = np.power(cross_section_width, 2)
else:
node1_x, node2_x, node2_y, node3_y = x0
area = xi ** 2
nodeCords = np.array([[node1_x, 0.0],
[0, node2_y],
[0, node3_y],
[-node1_x, 0.0],
])
# Nodal Coordinates
elemNodes = np.array([[0, 1], [0, 2], [1, 2], [3, 1], [3, 2]]) # Element connectivity: near node and far node
if remove_edge is not None:
elemNodes = np.delete(elemNodes, remove_edge, axis=0)
area = np.delete(area, remove_edge, axis=0)
    modE = np.full((elemNodes.shape[0], 1), E)
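    # Hedged completion sketch (assumption): the source row is cut off here; presumably
    # the assembled truss description is handed back for the downstream routines
    # imported above (stress_calculator, critical_buckling, ...) to consume.
    return nodeCords, elemNodes, modE, area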
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from copy import deepcopy
from batchgenerators.augmentations.utils import resize_segmentation
from configuration import default_num_threads, RESAMPLING_SEPARATE_Z_ANISO_THRESHOLD
from skimage.transform import resize
from scipy.ndimage.interpolation import map_coordinates
import numpy as np
from batchgenerators.utilities.file_and_folder_operations import *
from multiprocessing.pool import Pool
import SimpleITK as sitk
import shutil
def create_nonzero_mask(data):
from scipy.ndimage import binary_fill_holes
assert len(data.shape) == 4 or len(
data.shape) == 3, "data must have shape (C, X, Y, Z) or shape (C, X, Y)"
nonzero_mask = np.zeros(data.shape[1:], dtype=bool)
for c in range(data.shape[0]):
this_mask = data[c] != 0
nonzero_mask = nonzero_mask | this_mask
nonzero_mask = binary_fill_holes(nonzero_mask)
return nonzero_mask
def get_bbox_from_mask(mask, outside_value=0):
mask_voxel_coords = np.where(mask != outside_value)
minzidx = int(np.min(mask_voxel_coords[0]))
maxzidx = int(np.max(mask_voxel_coords[0])) + 1
minxidx = int(np.min(mask_voxel_coords[1]))
maxxidx = int(np.max(mask_voxel_coords[1])) + 1
minyidx = int(np.min(mask_voxel_coords[2]))
maxyidx = int(np.max(mask_voxel_coords[2])) + 1
return [[minzidx, maxzidx], [minxidx, maxxidx], [minyidx, maxyidx]]
def crop_to_bbox(image, bbox):
assert len(image.shape) == 3, "only supports 3d images"
resizer = (slice(bbox[0][0], bbox[0][1]), slice(
bbox[1][0], bbox[1][1]), slice(bbox[2][0], bbox[2][1]))
return image[resizer]
def get_case_identifier(case):
case_identifier = case[0].split("/")[-1].split(".nii.gz")[0][:-5]
return case_identifier
def get_case_identifier_from_npz(case):
case_identifier = case.split("/")[-1][:-4]
return case_identifier
def load_case_from_list_of_files(data_files, seg_file=None):
assert isinstance(data_files, list) or isinstance(
data_files, tuple), "case must be either a list or a tuple"
properties = OrderedDict()
data_itk = [sitk.ReadImage(f) for f in data_files]
properties["original_size_of_raw_data"] = np.array(data_itk[0].GetSize())[
[2, 1, 0]]
properties["original_spacing"] = np.array(
data_itk[0].GetSpacing())[[2, 1, 0]]
properties["list_of_data_files"] = data_files
properties["seg_file"] = seg_file
properties["itk_origin"] = data_itk[0].GetOrigin()
properties["itk_spacing"] = data_itk[0].GetSpacing()
properties["itk_direction"] = data_itk[0].GetDirection()
data_npy = np.vstack([sitk.GetArrayFromImage(d)[None] for d in data_itk])
if seg_file is not None:
seg_itk = sitk.ReadImage(seg_file)
seg_npy = sitk.GetArrayFromImage(seg_itk)[None].astype(np.float32)
else:
seg_npy = None
return data_npy.astype(np.float32), seg_npy, properties
def crop_to_nonzero(data, seg=None, nonzero_label=-1):
"""
:param data:
:param seg:
:param nonzero_label: this will be written into the segmentation map
:return:
"""
nonzero_mask = create_nonzero_mask(data)
bbox = get_bbox_from_mask(nonzero_mask, 0)
cropped_data = []
for c in range(data.shape[0]):
cropped = crop_to_bbox(data[c], bbox)
cropped_data.append(cropped[None])
data = np.vstack(cropped_data)
if seg is not None:
cropped_seg = []
for c in range(seg.shape[0]):
cropped = crop_to_bbox(seg[c], bbox)
cropped_seg.append(cropped[None])
seg = np.vstack(cropped_seg)
nonzero_mask = crop_to_bbox(nonzero_mask, bbox)[None]
if seg is not None:
seg[(seg == 0) & (nonzero_mask == 0)] = nonzero_label
else:
nonzero_mask = nonzero_mask.astype(int)
nonzero_mask[nonzero_mask == 0] = nonzero_label
nonzero_mask[nonzero_mask > 0] = 0
seg = nonzero_mask
return data, seg, bbox
def get_patient_identifiers_from_cropped_files(folder):
return [i.split("/")[-1][:-4] for i in subfiles(folder, join=True, suffix=".npz")]
class ImageCropper(object):
def __init__(self, num_threads, output_folder=None):
"""
This one finds a mask of nonzero elements (must be nonzero in all modalities) and crops the image to that mask.
In the case of BRaTS and ISLES data this results in a significant reduction in image size
:param num_threads:
        :param output_folder: where to store the cropped data
:param list_of_files:
"""
self.output_folder = output_folder
self.num_threads = num_threads
if self.output_folder is not None:
maybe_mkdir_p(self.output_folder)
@staticmethod
def crop(data, properties, seg=None):
shape_before = data.shape
data, seg, bbox = crop_to_nonzero(data, seg, nonzero_label=-1)
shape_after = data.shape
print("before crop:", shape_before, "after crop:", shape_after, "spacing:",
np.array(properties["original_spacing"]), "\n")
properties["crop_bbox"] = bbox
properties['classes'] = np.unique(seg)
seg[seg < -1] = 0
properties["size_after_cropping"] = data[0].shape
return data, seg, properties
@staticmethod
def crop_from_list_of_files(data_files, seg_file=None):
data, seg, properties = load_case_from_list_of_files(
data_files, seg_file)
return ImageCropper.crop(data, properties, seg)
def load_crop_save(self, case, case_identifier, overwrite_existing=False):
try:
print(case_identifier)
if overwrite_existing \
or (not os.path.isfile(os.path.join(self.output_folder, "%s.npz" % case_identifier))
or not os.path.isfile(os.path.join(self.output_folder, "%s.pkl" % case_identifier))):
data, seg, properties = self.crop_from_list_of_files(
case[:-1], case[-1])
all_data = np.vstack((data, seg))
np.savez_compressed(os.path.join(
self.output_folder, "%s.npz" % case_identifier), data=all_data)
with open(os.path.join(self.output_folder, "%s.pkl" % case_identifier), 'wb') as f:
pickle.dump(properties, f)
except Exception as e:
print("Exception in", case_identifier, ":")
print(e)
raise e
def get_list_of_cropped_files(self):
return subfiles(self.output_folder, join=True, suffix=".npz")
def get_patient_identifiers_from_cropped_files(self):
return [i.split("/")[-1][:-4] for i in self.get_list_of_cropped_files()]
def run_cropping(self, list_of_files, overwrite_existing=False, output_folder=None):
"""
        also copies the ground truth nifti segmentations into the preprocessed folder so that we can use them for evaluation
on the cluster
:param list_of_files: list of list of files [[PATIENTID_TIMESTEP_0000.nii.gz], [PATIENTID_TIMESTEP_0000.nii.gz]]
:param overwrite_existing:
:param output_folder:
:return:
"""
if output_folder is not None:
self.output_folder = output_folder
output_folder_gt = os.path.join(self.output_folder, "gt_segmentations")
maybe_mkdir_p(output_folder_gt)
for j, case in enumerate(list_of_files):
if case[-1] is not None:
shutil.copy(case[-1], output_folder_gt)
list_of_args = []
for j, case in enumerate(list_of_files):
case_identifier = get_case_identifier(case)
list_of_args.append((case, case_identifier, overwrite_existing))
p = Pool(self.num_threads)
p.starmap(self.load_crop_save, list_of_args)
p.close()
p.join()
def load_properties(self, case_identifier):
with open(os.path.join(self.output_folder, "%s.pkl" % case_identifier), 'rb') as f:
properties = pickle.load(f)
return properties
def save_properties(self, case_identifier, properties):
with open(os.path.join(self.output_folder, "%s.pkl" % case_identifier), 'wb') as f:
pickle.dump(properties, f)
def get_do_separate_z(spacing, anisotropy_threshold=RESAMPLING_SEPARATE_Z_ANISO_THRESHOLD):
do_separate_z = (np.max(spacing) / np.min(spacing)) > anisotropy_threshold
return do_separate_z
def get_lowres_axis(new_spacing):
axis = np.where(max(new_spacing) / np.array(new_spacing)
== 1)[0] # find which axis is anisotropic
return axis
def resample_patient(data, seg, original_spacing, target_spacing, order_data=3, order_seg=0, force_separate_z=False,
order_z_data=0, order_z_seg=0,
separate_z_anisotropy_threshold=RESAMPLING_SEPARATE_Z_ANISO_THRESHOLD):
"""
:param data:
:param seg:
:param original_spacing:
:param target_spacing:
:param order_data:
:param order_seg:
:param force_separate_z: if None then we dynamically decide how to resample along z, if True/False then always
/never resample along z separately
:param order_z_seg: only applies if do_separate_z is True
:param order_z_data: only applies if do_separate_z is True
:param separate_z_anisotropy_threshold: if max_spacing > separate_z_anisotropy_threshold * min_spacing (per axis)
then resample along lowres axis with order_z_data/order_z_seg instead of order_data/order_seg
:return:
"""
assert not ((data is None) and (seg is None))
if data is not None:
assert len(data.shape) == 4, "data must be c x y z"
if seg is not None:
assert len(seg.shape) == 4, "seg must be c x y z"
if data is not None:
shape = np.array(data[0].shape)
else:
shape = np.array(seg[0].shape)
new_shape = np.round(((np.array(original_spacing) /
np.array(target_spacing)).astype(float) * shape)).astype(int)
if force_separate_z is not None:
do_separate_z = force_separate_z
if force_separate_z:
axis = get_lowres_axis(original_spacing)
else:
axis = None
else:
if get_do_separate_z(original_spacing, separate_z_anisotropy_threshold):
do_separate_z = True
axis = get_lowres_axis(original_spacing)
elif get_do_separate_z(target_spacing, separate_z_anisotropy_threshold):
do_separate_z = True
axis = get_lowres_axis(target_spacing)
else:
do_separate_z = False
axis = None
if axis is not None:
if len(axis) == 3:
# every axis has the spacing, this should never happen, why is this code here?
do_separate_z = False
elif len(axis) == 2:
# this happens for spacings like (0.24, 1.25, 1.25) for example. In that case we do not want to resample
# separately in the out of plane axis
do_separate_z = False
else:
pass
if data is not None:
data_reshaped = resample_data_or_seg(data, new_shape, False, axis, order_data, do_separate_z,
order_z=order_z_data)
else:
data_reshaped = None
if seg is not None:
seg_reshaped = resample_data_or_seg(
seg, new_shape, True, axis, order_seg, do_separate_z, order_z=order_z_seg)
else:
seg_reshaped = None
return data_reshaped, seg_reshaped
def resample_data_or_seg(data, new_shape, is_seg, axis=None, order=3, do_separate_z=False, order_z=0):
"""
separate_z=True will resample with order 0 along z
:param data:
:param new_shape:
:param is_seg:
:param axis:
:param order:
:param do_separate_z:
:param order_z: only applies if do_separate_z is True
:return:
"""
assert len(data.shape) == 4, "data must be (c, x, y, z)"
if is_seg:
resize_fn = resize_segmentation
kwargs = OrderedDict()
else:
resize_fn = resize
kwargs = {'mode': 'edge', 'anti_aliasing': False}
dtype_data = data.dtype
shape = np.array(data[0].shape)
new_shape = np.array(new_shape)
if np.any(shape != new_shape):
data = data.astype(float)
if do_separate_z:
print("separate z, order in z is",
order_z, "order inplane is", order)
assert len(axis) == 1, "only one anisotropic axis supported"
axis = axis[0]
if axis == 0:
new_shape_2d = new_shape[1:]
elif axis == 1:
new_shape_2d = new_shape[[0, 2]]
else:
new_shape_2d = new_shape[:-1]
reshaped_final_data = []
for c in range(data.shape[0]):
reshaped_data = []
for slice_id in range(shape[axis]):
if axis == 0:
reshaped_data.append(
resize_fn(data[c, slice_id], new_shape_2d, order, **kwargs))
elif axis == 1:
reshaped_data.append(
resize_fn(data[c, :, slice_id], new_shape_2d, order, **kwargs))
else:
reshaped_data.append(resize_fn(data[c, :, :, slice_id], new_shape_2d, order,
**kwargs))
reshaped_data = np.stack(reshaped_data, axis)
if shape[axis] != new_shape[axis]:
# The following few lines are blatantly copied and modified from sklearn's resize()
rows, cols, dim = new_shape[0], new_shape[1], new_shape[2]
orig_rows, orig_cols, orig_dim = reshaped_data.shape
row_scale = float(orig_rows) / rows
col_scale = float(orig_cols) / cols
dim_scale = float(orig_dim) / dim
map_rows, map_cols, map_dims = np.mgrid[:rows, :cols, :dim]
map_rows = row_scale * (map_rows + 0.5) - 0.5
map_cols = col_scale * (map_cols + 0.5) - 0.5
map_dims = dim_scale * (map_dims + 0.5) - 0.5
coord_map = np.array([map_rows, map_cols, map_dims])
if not is_seg or order_z == 0:
reshaped_final_data.append(map_coordinates(reshaped_data, coord_map, order=order_z,
mode='nearest')[None])
else:
unique_labels = np.unique(reshaped_data)
reshaped = np.zeros(new_shape, dtype=dtype_data)
for i, cl in enumerate(unique_labels):
reshaped_multihot = np.round(
map_coordinates((reshaped_data == cl).astype(float), coord_map, order=order_z,
mode='nearest'))
reshaped[reshaped_multihot > 0.5] = cl
reshaped_final_data.append(reshaped[None])
else:
reshaped_final_data.append(reshaped_data[None])
reshaped_final_data = np.vstack(reshaped_final_data)
else:
print("no separate z, order", order)
reshaped = []
for c in range(data.shape[0]):
reshaped.append(
resize_fn(data[c], new_shape, order, **kwargs)[None])
reshaped_final_data = np.vstack(reshaped)
return reshaped_final_data.astype(dtype_data)
else:
print("no resampling necessary")
return data
class GenericPreprocessor(object):
def __init__(self, normalization_scheme_per_modality, use_nonzero_mask, transpose_forward: (tuple, list), intensityproperties=None):
"""
:param normalization_scheme_per_modality: dict {0:'nonCT'}
:param use_nonzero_mask: {0:False}
:param intensityproperties:
"""
self.transpose_forward = transpose_forward
self.intensityproperties = intensityproperties
self.normalization_scheme_per_modality = normalization_scheme_per_modality
self.use_nonzero_mask = use_nonzero_mask
self.resample_separate_z_anisotropy_threshold = RESAMPLING_SEPARATE_Z_ANISO_THRESHOLD
@staticmethod
def load_cropped(cropped_output_dir, case_identifier):
all_data = np.load(os.path.join(cropped_output_dir,
"%s.npz" % case_identifier))['data']
data = all_data[:-1].astype(np.float32)
seg = all_data[-1:]
with open(os.path.join(cropped_output_dir, "%s.pkl" % case_identifier), 'rb') as f:
properties = pickle.load(f)
return data, seg, properties
def resample_and_normalize(self, data, target_spacing, properties, seg=None, force_separate_z=None):
"""
data and seg must already have been transposed by transpose_forward. properties are the un-transposed values
(spacing etc)
:param data:
:param target_spacing:
:param properties:
:param seg:
:param force_separate_z:
:return:
"""
# target_spacing is already transposed, properties["original_spacing"] is not so we need to transpose it!
# data, seg are already transposed. Double check this using the properties
original_spacing_transposed = np.array(properties["original_spacing"])[
self.transpose_forward]
before = {
'spacing': properties["original_spacing"],
'spacing_transposed': original_spacing_transposed,
'data.shape (data is transposed)': data.shape
}
# remove nans
data[np.isnan(data)] = 0
data, seg = resample_patient(data, seg, np.array(original_spacing_transposed), target_spacing, 3, 1,
force_separate_z=force_separate_z, order_z_data=0, order_z_seg=0,
separate_z_anisotropy_threshold=self.resample_separate_z_anisotropy_threshold)
after = {
'spacing': target_spacing,
'data.shape (data is resampled)': data.shape
}
print("before:", before, "\nafter: ", after, "\n")
if seg is not None: # hippocampus 243 has one voxel with -2 as label. wtf?
seg[seg < -1] = 0
properties["size_after_resampling"] = data[0].shape
properties["spacing_after_resampling"] = target_spacing
use_nonzero_mask = self.use_nonzero_mask
assert len(self.normalization_scheme_per_modality) == len(data), "self.normalization_scheme_per_modality " \
"must have as many entries as data has " \
"modalities"
assert len(self.use_nonzero_mask) == len(data), "self.use_nonzero_mask must have as many entries as data" \
" has modalities"
for c in range(len(data)):
scheme = self.normalization_scheme_per_modality[c]
if scheme == "CT":
# clip to lb and ub from train data foreground and use foreground mn and sd from training data
assert self.intensityproperties is not None, "ERROR: if there is a CT then we need intensity properties"
mean_intensity = self.intensityproperties[c]['mean']
std_intensity = self.intensityproperties[c]['sd']
lower_bound = self.intensityproperties[c]['percentile_00_5']
upper_bound = self.intensityproperties[c]['percentile_99_5']
data[c] = np.clip(data[c], lower_bound, upper_bound)
data[c] = (data[c] - mean_intensity) / std_intensity
if use_nonzero_mask[c]:
data[c][seg[-1] < 0] = 0
elif scheme == "CT2":
                # clip to lb and ub from train data foreground, use mn and sd from each case for normalization
assert self.intensityproperties is not None, "ERROR: if there is a CT then we need intensity properties"
lower_bound = self.intensityproperties[c]['percentile_00_5']
upper_bound = self.intensityproperties[c]['percentile_99_5']
mask = (data[c] > lower_bound) & (data[c] < upper_bound)
data[c] = np.clip(data[c], lower_bound, upper_bound)
mn = data[c][mask].mean()
sd = data[c][mask].std()
data[c] = (data[c] - mn) / sd
if use_nonzero_mask[c]:
data[c][seg[-1] < 0] = 0
elif scheme == 'noNorm':
pass
else:
if use_nonzero_mask[c]:
mask = seg[-1] >= 0
data[c][mask] = (data[c][mask] - data[c]
[mask].mean()) / (data[c][mask].std() + 1e-8)
data[c][mask == 0] = 0
else:
mn = data[c].mean()
std = data[c].std()
# print(data[c].shape, data[c].dtype, mn, std)
data[c] = (data[c] - mn) / (std + 1e-8)
return data, seg, properties
def preprocess_test_case(self, data_files, target_spacing, seg_file=None, force_separate_z=None):
data, seg, properties = ImageCropper.crop_from_list_of_files(
data_files, seg_file)
data = data.transpose((0, *[i + 1 for i in self.transpose_forward]))
seg = seg.transpose((0, *[i + 1 for i in self.transpose_forward]))
data, seg, properties = self.resample_and_normalize(data, target_spacing, properties, seg,
force_separate_z=force_separate_z)
return data.astype(np.float32), seg, properties
def _run_internal(self, target_spacing, case_identifier, output_folder_stage, cropped_output_dir, force_separate_z,
all_classes):
data, seg, properties = self.load_cropped(
cropped_output_dir, case_identifier)
data = data.transpose((0, *[i + 1 for i in self.transpose_forward]))
seg = seg.transpose((0, *[i + 1 for i in self.transpose_forward]))
data, seg, properties = self.resample_and_normalize(data, target_spacing,
properties, seg, force_separate_z)
all_data = np.vstack((data, seg)).astype(np.float32)
# we need to find out where the classes are and sample some random locations
# let's do 10.000 samples per class
# seed this for reproducibility!
num_samples = 10000
# at least 1% of the class voxels need to be selected, otherwise it may be too sparse
min_percent_coverage = 0.01
rndst = np.random.RandomState(1234)
class_locs = {}
for c in all_classes:
all_locs = np.argwhere(all_data[-1] == c)
if len(all_locs) == 0:
class_locs[c] = []
continue
target_num_samples = min(num_samples, len(all_locs))
target_num_samples = max(target_num_samples, int(
np.ceil(len(all_locs) * min_percent_coverage)))
selected = all_locs[rndst.choice(
len(all_locs), target_num_samples, replace=False)]
class_locs[c] = selected
print(c, target_num_samples)
properties['class_locations'] = class_locs
print("saving: ", os.path.join(
output_folder_stage, "%s.npz" % case_identifier))
np.savez_compressed(os.path.join(output_folder_stage, "%s.npz" % case_identifier),
data=all_data.astype(np.float32))
with open(os.path.join(output_folder_stage, "%s.pkl" % case_identifier), 'wb') as f:
pickle.dump(properties, f)
def run(self, target_spacings, input_folder_with_cropped_npz, output_folder, data_identifier,
num_threads=default_num_threads, force_separate_z=None):
"""
:param target_spacings: list of lists [[1.25, 1.25, 5]]
:param input_folder_with_cropped_npz: dim: c, x, y, z | npz_file['data'] np.savez_compressed(fname.npz, data=arr)
:param output_folder:
:param num_threads:
:param force_separate_z: None
:return:
"""
print("Initializing to run preprocessing")
print("npz folder:", input_folder_with_cropped_npz)
print("output_folder:", output_folder)
list_of_cropped_npz_files = subfiles(
input_folder_with_cropped_npz, True, None, ".npz", True)
maybe_mkdir_p(output_folder)
num_stages = len(target_spacings)
if not isinstance(num_threads, (list, tuple, np.ndarray)):
num_threads = [num_threads] * num_stages
assert len(num_threads) == num_stages
# we need to know which classes are present in this dataset so that we can precompute where these classes are
# located. This is needed for oversampling foreground
all_classes = load_pickle(
join(input_folder_with_cropped_npz, 'dataset_properties.pkl'))['all_classes']
for i in range(num_stages):
all_args = []
output_folder_stage = os.path.join(
output_folder, data_identifier + "_stage%d" % i)
maybe_mkdir_p(output_folder_stage)
spacing = target_spacings[i]
for j, case in enumerate(list_of_cropped_npz_files):
case_identifier = get_case_identifier_from_npz(case)
args = spacing, case_identifier, output_folder_stage, input_folder_with_cropped_npz, force_separate_z, all_classes
all_args.append(args)
p = Pool(num_threads[i])
p.starmap(self._run_internal, all_args)
p.close()
p.join()
class Preprocessor3DDifferentResampling(GenericPreprocessor):
def resample_and_normalize(self, data, target_spacing, properties, seg=None, force_separate_z=None):
"""
data and seg must already have been transposed by transpose_forward. properties are the un-transposed values
(spacing etc)
:param data:
:param target_spacing:
:param properties:
:param seg:
:param force_separate_z:
:return:
"""
# target_spacing is already transposed, properties["original_spacing"] is not so we need to transpose it!
# data, seg are already transposed. Double check this using the properties
original_spacing_transposed = np.array(properties["original_spacing"])[
self.transpose_forward]
before = {
'spacing': properties["original_spacing"],
'spacing_transposed': original_spacing_transposed,
'data.shape (data is transposed)': data.shape
}
# remove nans
data[np.isnan(data)] = 0
data, seg = resample_patient(data, seg, np.array(original_spacing_transposed), target_spacing, 3, 1,
force_separate_z=force_separate_z, order_z_data=3, order_z_seg=1,
separate_z_anisotropy_threshold=self.resample_separate_z_anisotropy_threshold)
after = {
'spacing': target_spacing,
'data.shape (data is resampled)': data.shape
}
print("before:", before, "\nafter: ", after, "\n")
if seg is not None: # hippocampus 243 has one voxel with -2 as label. wtf?
seg[seg < -1] = 0
properties["size_after_resampling"] = data[0].shape
properties["spacing_after_resampling"] = target_spacing
use_nonzero_mask = self.use_nonzero_mask
assert len(self.normalization_scheme_per_modality) == len(data), "self.normalization_scheme_per_modality " \
"must have as many entries as data has " \
"modalities"
assert len(self.use_nonzero_mask) == len(data), "self.use_nonzero_mask must have as many entries as data" \
" has modalities"
for c in range(len(data)):
scheme = self.normalization_scheme_per_modality[c]
if scheme == "CT":
# clip to lb and ub from train data foreground and use foreground mn and sd from training data
assert self.intensityproperties is not None, "ERROR: if there is a CT then we need intensity properties"
mean_intensity = self.intensityproperties[c]['mean']
std_intensity = self.intensityproperties[c]['sd']
lower_bound = self.intensityproperties[c]['percentile_00_5']
upper_bound = self.intensityproperties[c]['percentile_99_5']
data[c] = np.clip(data[c], lower_bound, upper_bound)
data[c] = (data[c] - mean_intensity) / std_intensity
if use_nonzero_mask[c]:
data[c][seg[-1] < 0] = 0
elif scheme == "CT2":
                # clip to lb and ub from train data foreground, use mn and sd from each case for normalization
assert self.intensityproperties is not None, "ERROR: if there is a CT then we need intensity properties"
lower_bound = self.intensityproperties[c]['percentile_00_5']
upper_bound = self.intensityproperties[c]['percentile_99_5']
mask = (data[c] > lower_bound) & (data[c] < upper_bound)
data[c] = np.clip(data[c], lower_bound, upper_bound)
mn = data[c][mask].mean()
sd = data[c][mask].std()
data[c] = (data[c] - mn) / sd
if use_nonzero_mask[c]:
data[c][seg[-1] < 0] = 0
elif scheme == 'noNorm':
pass
else:
if use_nonzero_mask[c]:
mask = seg[-1] >= 0
else:
mask = np.ones(seg.shape[1:], dtype=bool)
data[c][mask] = (data[c][mask] - data[c]
[mask].mean()) / (data[c][mask].std() + 1e-8)
data[c][mask == 0] = 0
return data, seg, properties
class Preprocessor3DBetterResampling(GenericPreprocessor):
"""
This preprocessor always uses force_separate_z=False. It does resampling to the target spacing with third
order spline for data (just like GenericPreprocessor) and seg (unlike GenericPreprocessor). It never does separate
resampling in z.
"""
def resample_and_normalize(self, data, target_spacing, properties, seg=None, force_separate_z=False):
"""
data and seg must already have been transposed by transpose_forward. properties are the un-transposed values
(spacing etc)
:param data:
:param target_spacing:
:param properties:
:param seg:
:param force_separate_z:
:return:
"""
if force_separate_z is not False:
print("WARNING: Preprocessor3DBetterResampling always uses force_separate_z=False. "
"You specified %s. Your choice is overwritten" % str(force_separate_z))
force_separate_z = False
# be safe
assert force_separate_z is False
# target_spacing is already transposed, properties["original_spacing"] is not so we need to transpose it!
# data, seg are already transposed. Double check this using the properties
original_spacing_transposed = np.array(properties["original_spacing"])[
self.transpose_forward]
before = {
'spacing': properties["original_spacing"],
'spacing_transposed': original_spacing_transposed,
'data.shape (data is transposed)': data.shape
}
# remove nans
data[np.isnan(data)] = 0
data, seg = resample_patient(data, seg, np.array(original_spacing_transposed), target_spacing, 3, 3,
force_separate_z=force_separate_z, order_z_data=99999, order_z_seg=99999,
separate_z_anisotropy_threshold=self.resample_separate_z_anisotropy_threshold)
after = {
'spacing': target_spacing,
'data.shape (data is resampled)': data.shape
}
print("before:", before, "\nafter: ", after, "\n")
if seg is not None: # hippocampus 243 has one voxel with -2 as label. wtf?
seg[seg < -1] = 0
properties["size_after_resampling"] = data[0].shape
properties["spacing_after_resampling"] = target_spacing
use_nonzero_mask = self.use_nonzero_mask
assert len(self.normalization_scheme_per_modality) == len(data), "self.normalization_scheme_per_modality " \
"must have as many entries as data has " \
"modalities"
assert len(self.use_nonzero_mask) == len(data), "self.use_nonzero_mask must have as many entries as data" \
" has modalities"
for c in range(len(data)):
scheme = self.normalization_scheme_per_modality[c]
if scheme == "CT":
# clip to lb and ub from train data foreground and use foreground mn and sd from training data
assert self.intensityproperties is not None, "ERROR: if there is a CT then we need intensity properties"
mean_intensity = self.intensityproperties[c]['mean']
std_intensity = self.intensityproperties[c]['sd']
lower_bound = self.intensityproperties[c]['percentile_00_5']
upper_bound = self.intensityproperties[c]['percentile_99_5']
data[c] = np.clip(data[c], lower_bound, upper_bound)
data[c] = (data[c] - mean_intensity) / std_intensity
if use_nonzero_mask[c]:
data[c][seg[-1] < 0] = 0
elif scheme == "CT2":
                # clip to lb and ub from train data foreground, use mn and sd from each case for normalization
assert self.intensityproperties is not None, "ERROR: if there is a CT then we need intensity properties"
lower_bound = self.intensityproperties[c]['percentile_00_5']
upper_bound = self.intensityproperties[c]['percentile_99_5']
mask = (data[c] > lower_bound) & (data[c] < upper_bound)
data[c] = np.clip(data[c], lower_bound, upper_bound)
mn = data[c][mask].mean()
sd = data[c][mask].std()
data[c] = (data[c] - mn) / sd
if use_nonzero_mask[c]:
data[c][seg[-1] < 0] = 0
elif scheme == 'noNorm':
pass
else:
if use_nonzero_mask[c]:
mask = seg[-1] >= 0
else:
mask = np.ones(seg.shape[1:], dtype=bool)
data[c][mask] = (data[c][mask] - data[c]
[mask].mean()) / (data[c][mask].std() + 1e-8)
data[c][mask == 0] = 0
return data, seg, properties
class PreprocessorFor2D(GenericPreprocessor):
def __init__(self, normalization_scheme_per_modality, use_nonzero_mask, transpose_forward: (tuple, list), intensityproperties=None):
super(PreprocessorFor2D, self).__init__(normalization_scheme_per_modality, use_nonzero_mask,
transpose_forward, intensityproperties)
def run(self, target_spacings, input_folder_with_cropped_npz, output_folder, data_identifier,
num_threads=default_num_threads, force_separate_z=None):
print("Initializing to run preprocessing")
print("npz folder:", input_folder_with_cropped_npz)
print("output_folder:", output_folder)
list_of_cropped_npz_files = subfiles(
input_folder_with_cropped_npz, True, None, ".npz", True)
assert len(list_of_cropped_npz_files) != 0, "set list of files first"
maybe_mkdir_p(output_folder)
all_args = []
num_stages = len(target_spacings)
# we need to know which classes are present in this dataset so that we can precompute where these classes are
# located. This is needed for oversampling foreground
all_classes = load_pickle(
join(input_folder_with_cropped_npz, 'dataset_properties.pkl'))['all_classes']
for i in range(num_stages):
output_folder_stage = os.path.join(
output_folder, data_identifier + "_stage%d" % i)
maybe_mkdir_p(output_folder_stage)
spacing = target_spacings[i]
for j, case in enumerate(list_of_cropped_npz_files):
case_identifier = get_case_identifier_from_npz(case)
args = spacing, case_identifier, output_folder_stage, input_folder_with_cropped_npz, force_separate_z, all_classes
all_args.append(args)
p = Pool(num_threads)
p.starmap(self._run_internal, all_args)
p.close()
p.join()
def resample_and_normalize(self, data, target_spacing, properties, seg=None, force_separate_z=None):
original_spacing_transposed = np.array(properties["original_spacing"])[
self.transpose_forward]
before = {
'spacing': properties["original_spacing"],
'spacing_transposed': original_spacing_transposed,
'data.shape (data is transposed)': data.shape
}
target_spacing[0] = original_spacing_transposed[0]
        data, seg = resample_patient(data, seg, np.array(original_spacing_transposed),
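                                     # Hedged completion: the remaining arguments and the
                                     # rest of this method are assumptions; they presumably
                                     # mirror GenericPreprocessor.resample_and_normalize above.
                                     target_spacing, 3, 1,
                                     force_separate_z=force_separate_z, order_z_data=0, order_z_seg=0,
                                     separate_z_anisotropy_threshold=self.resample_separate_z_anisotropy_threshold)
        # Hedged: the per-modality normalisation (CT/CT2/noNorm/z-score) and the
        # "size_after_resampling"/"spacing_after_resampling" bookkeeping are assumed to
        # follow the parent implementation and are not reproduced here.
        return data, seg, properties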
#---------------------------------
# NAME || AM ||
# <NAME> || 432 ||
# <NAME> || 440 ||
#---------------------------------
# Biomedical Data Analysis
# Written in Python 3.6
import sys
import os
from data_parser import Data_Parser
import heartpy as hp
import math
import numpy as np
import numpy.matlib
from matplotlib import pyplot as plt
from sklearn.model_selection import KFold
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, f1_score
from sklearn.preprocessing import StandardScaler
import collections
electrocardiogram_sample_rate = 300.0
def reduce_dataset(dataset, labels, number):
data_ordering = np.random.permutation(dataset.shape[0])
dataset = dataset[data_ordering]
labels = labels[data_ordering]
return dataset[ : number], labels[ : number]
#RR intervals returned in ms, ((t2 - t1) / sample rate) * 1000.0
def create_RR_intervals_and_measures(dataset, labels):
temp_labels = list()
RR_intervals = list()
measures = list()
for index, heart_signal in enumerate(dataset):
try:
#plot_RR_Peaks(heart_signal)
working_data, measure = hp.process(heart_signal, sample_rate = electrocardiogram_sample_rate)
dict_counter = collections.Counter(working_data['binary_peaklist'])
rejected_threshold = dict_counter[0] / (dict_counter[0] + dict_counter[1])
            # unpacking the dictionary values
measure = [*measure.values()]
if (True in np.isnan(np.array(measure)) or rejected_threshold >= 0.15): continue
measures.append(measure)
RR_intervals.append(working_data['RR_list'])
temp_labels.append(labels[index])
except:
#plotTimeSerie(heart_signal)
continue
return np.asarray(RR_intervals), np.asarray(measures), np.asarray(temp_labels)
def create_histogram(RR_intervals, number_of_bins):
RR_histograms = list()
for RR_inter in RR_intervals:
        histogram = np.histogram(RR_inter, number_of_bins)
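        # Hedged completion sketch: keep the (normalised) bin counts as the feature
        # vector for this recording; the normalisation step is an assumption.
        RR_histograms.append(histogram[0] / np.sum(histogram[0]))
    return np.asarray(RR_histograms)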
# This module contains everything that has to do with a population.
#
# This includes the following:
# - relations between people
# - ages
import numpy
import random
EXAMPLE_AGE_GROUPS = [10 * i for i in range(10)]
EXAMPLE_AGE_D = [0.07, 0.08, 0.1, 0.1, 0.1, 0.2, 0.12, 0.1, 0.08, 0.05]
# https://www.businessinsider.com/coronavirus-death-age-older-people-higher-risk-2020-2?op=1&r=US&IR=T
EXAMPLE_DEATH_D = [
0.002, 0.002, 0.002, 0.004, 0.013, 0.036, 0.08, 0.148, 0.148, 0.148, 0.148
]
EXAMPLE_CONTAGIOUS_T = 14
EXAMPLE_SICK_T = 14
# https://en.wikipedia.org/wiki/Coronavirus_disease_2019#Prognosis
EXAMPLE_DEATH_T = 8
class Population:
def __init__(self,
size=1000,
contagiousness=0.0003,
age_groups=EXAMPLE_AGE_GROUPS,
age_dist=EXAMPLE_AGE_D,
lethality_dist=EXAMPLE_DEATH_D,
time_to_kill=EXAMPLE_DEATH_T,
time_while_contagious=EXAMPLE_CONTAGIOUS_T,
time_while_sick=EXAMPLE_SICK_T,
initial=1):
self.size = size
self.age_groups = numpy.array(age_groups)
        self.age_dist = numpy.array(age_dist)
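        # Hedged continuation sketch (assumptions throughout): store the remaining
        # epidemiological parameters and draw an age for every person from age_dist.
        self.lethality_dist = numpy.array(lethality_dist)
        self.contagiousness = contagiousness
        self.time_to_kill = time_to_kill
        self.time_while_contagious = time_while_contagious
        self.time_while_sick = time_while_sick
        self.ages = numpy.random.choice(self.age_groups, size=size, p=self.age_dist)
        # `initial` people start out infected; everyone else is susceptible.
        self.infected = set(random.sample(range(size), initial))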
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.ticker as plticker
def plot_image(value_fn):
# Values over all initial dealer scores and player scores
# Given the policy, the value should be near zero everywhere except when the player has a score of 21
dealer_scores = np.arange(1, 11, 1) # 10
player_scores = np.arange(11, 22, 1) # 11
V = np.zeros(shape=(len(dealer_scores), len(player_scores)))
for d_idx, dealer_score in enumerate(dealer_scores):
for p_idx, player_score in enumerate(player_scores):
value = value_fn(dealer_score, player_score)
V[d_idx][p_idx] = value
fig, ax = plt.subplots()
ax.imshow(V)
ax.set_ylabel("Dealer initial showing")
ax.yaxis.set_ticklabels(np.arange(0, 11, 1))
ax.yaxis.set_major_locator(plticker.MultipleLocator(base=1.0))
ax.set_xlabel("Player sum")
ax.xaxis.set_ticklabels(np.arange(10, 22, 1))
ax.xaxis.set_major_locator(plticker.MultipleLocator(base=1.0))
plt.show()
def plot(value_fn):
# Values over all initial dealer scores and player scores
# Given the policy, the value should be near zero everywhere except when the player has a score of 21
dealer_scores = np.arange(1, 11, 1) # 10
    player_scores = np.arange(11, 22, 1)
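    # Hedged completion sketch: build the value grid as in plot_image above and render
    # it as a 3D surface (axes3d is imported at the top of this module for this purpose).
    V = np.zeros(shape=(len(dealer_scores), len(player_scores)))
    for d_idx, dealer_score in enumerate(dealer_scores):
        for p_idx, player_score in enumerate(player_scores):
            V[d_idx][p_idx] = value_fn(dealer_score, player_score)
    X, Y = np.meshgrid(player_scores, dealer_scores)
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.plot_surface(X, Y, V)
    ax.set_xlabel("Player sum")
    ax.set_ylabel("Dealer initial showing")
    ax.set_zlabel("Value")
    plt.show()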
"""
Monitors interface with widgets to surface process variable information. They are
initialized using a lume-model variable and a controller used to access values over
EPICs.
"""
from datetime import datetime
import time
import logging
import numpy as np
from typing import List, Dict, Tuple
from lume_epics.client.controller import Controller
from lume_model.variables import ImageVariable, ScalarVariable
logger = logging.getLogger(__name__)
class PVImage:
"""
Monitor for updating and formatting image data.
Attributes:
variable (ImageVariable): Image process variable to be displayed.
controller (Controller): Controller object for accessing process variable.
pvname (str): Name of the process variable to access.
axis_units (str): Units associated with the image axes.
axis_labels (str): Labels associated with the image axes.
"""
def __init__(self, variable: ImageVariable, controller: Controller,) -> None:
"""Initialize monitor for an image variable.
Args:
variable (ImageVariable): Image process variable to be displayed.
controller (Controller): Controller object for accessing process variable.
"""
self.units = None
# check if units has been set
if "units" in variable.__fields_set__:
self.units = variable.units.split(":")
self.varname = variable.name
self.controller = controller
self.axis_labels = variable.axis_labels
self.axis_units = variable.axis_units
def poll(self) -> Dict[str, list]:
"""Collects image data and builds image data dictionary.
"""
return self.controller.get_image(self.varname)
class PVTimeSeries:
"""
Monitor for time series variables.
Attributes:
time (np.ndarray): Array of times sampled.
data (np.ndarray): Array of sampled data.
variable (ScalarVariable): Variable monitored for time series.
controller (Controller): Controller object for accessing process variable.
units (str): Units associated with the variable
varname (str): Name of the model variable to access
"""
def __init__(self, variable: ScalarVariable, controller: Controller,) -> None:
"""Initializes monitor attributes.
Args:
variable (ScalarVariable): Variable to monitor for time series
controller (Controller): Controller object for accessing process variable.
"""
self.varname = variable.name
self.tstart = time.time()
        self.time = np.array([])
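        # Hedged continuation sketch: mirror PVImage's attribute handling. The scalar
        # accessor used in poll() below is an assumption, not a documented API.
        self.data = np.array([])
        self.controller = controller
        self.units = None
        if "units" in variable.__fields_set__:
            self.units = variable.units

    def poll(self) -> Tuple[np.ndarray, np.ndarray]:
        """Sample the variable and return the time series collected so far (sketch)."""
        t = time.time() - self.tstart
        value = self.controller.get_value(self.varname)  # hypothetical accessor name
        self.time = np.append(self.time, t)
        self.data = np.append(self.data, value)
        return self.time, self.data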
# Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy as np
import pennylane as qml
import pennylane.tape
from pennylane import numpy as pnp
"""Defines the device used for all tests"""
dev = qml.device("default.qubit", wires=4)
"""Defines circuits to be used in queueing/output tests"""
with pennylane.tape.QuantumTape() as tape1:
qml.PauliX(0)
H1 = qml.Hamiltonian([1.5], [qml.PauliZ(0) @ qml.PauliZ(1)])
qml.expval(H1)
with pennylane.tape.QuantumTape() as tape2:
qml.Hadamard(0)
qml.Hadamard(1)
qml.PauliZ(1)
qml.PauliX(2)
H2 = qml.Hamiltonian(
[1, 3, -2, 1, 1],
[
qml.PauliX(0) @ qml.PauliZ(2),
qml.PauliZ(2),
qml.PauliX(0),
qml.PauliX(2),
qml.PauliZ(0) @ qml.PauliX(1),
],
)
qml.expval(H2)
H3 = 1.5 * qml.PauliZ(0) @ qml.PauliZ(1) + 0.3 * qml.PauliX(1)
with qml.tape.QuantumTape() as tape3:
qml.PauliX(0)
qml.expval(H3)
H4 = (
qml.PauliX(0) @ qml.PauliZ(2)
+ 3 * qml.PauliZ(2)
- 2 * qml.PauliX(0)
+ qml.PauliZ(2)
+ qml.PauliZ(2)
)
H4 += qml.PauliZ(0) @ qml.PauliX(1) @ qml.PauliY(2)
with qml.tape.QuantumTape() as tape4:
qml.Hadamard(0)
qml.Hadamard(1)
qml.PauliZ(1)
qml.PauliX(2)
qml.expval(H4)
TAPES = [tape1, tape2, tape3, tape4]
OUTPUTS = [-1.5, -6, -1.5, -8]
class TestHamiltonianExpval:
"""Tests for the hamiltonian_expand transform"""
@pytest.mark.parametrize(("tape", "output"), zip(TAPES, OUTPUTS))
def test_hamiltonians(self, tape, output):
"""Tests that the hamiltonian_expand transform returns the correct value"""
tapes, fn = qml.transforms.hamiltonian_expand(tape)
results = dev.batch_execute(tapes)
expval = fn(results)
assert np.isclose(output, expval)
@pytest.mark.parametrize(("tape", "output"), zip(TAPES, OUTPUTS))
def test_hamiltonians_no_grouping(self, tape, output):
"""Tests that the hamiltonian_expand transform returns the correct value
if we switch grouping off"""
tapes, fn = qml.transforms.hamiltonian_expand(tape, group=False)
results = dev.batch_execute(tapes)
expval = fn(results)
assert np.isclose(output, expval)
def test_grouping_is_used(self):
"""Test that the grouping in a Hamiltonian is used"""
H = qml.Hamiltonian(
[1.0, 2.0, 3.0], [qml.PauliZ(0), qml.PauliX(1), qml.PauliX(0)], grouping_type="qwc"
)
assert H.grouping_indices is not None
with qml.tape.QuantumTape() as tape:
qml.Hadamard(wires=0)
qml.CNOT(wires=[0, 1])
qml.PauliX(wires=2)
qml.expval(H)
tapes, fn = qml.transforms.hamiltonian_expand(tape, group=False)
assert len(tapes) == 2
def test_number_of_tapes(self):
"""Tests that the the correct number of tapes is produced"""
H = qml.Hamiltonian([1.0, 2.0, 3.0], [qml.PauliZ(0), qml.PauliX(1), qml.PauliX(0)])
with qml.tape.QuantumTape() as tape:
qml.Hadamard(wires=0)
qml.CNOT(wires=[0, 1])
qml.PauliX(wires=2)
qml.expval(H)
tapes, fn = qml.transforms.hamiltonian_expand(tape, group=False)
assert len(tapes) == 3
tapes, fn = qml.transforms.hamiltonian_expand(tape, group=True)
assert len(tapes) == 2
def test_hamiltonian_error(self):
with pennylane.tape.QuantumTape() as tape:
qml.expval(qml.PauliZ(0))
with pytest.raises(ValueError, match=r"Passed tape must end in"):
tapes, fn = qml.transforms.hamiltonian_expand(tape)
@pytest.mark.autograd
def test_hamiltonian_dif_autograd(self, tol):
"""Tests that the hamiltonian_expand tape transform is differentiable with the Autograd interface"""
H = qml.Hamiltonian(
[-0.2, 0.5, 1], [qml.PauliX(1), qml.PauliZ(1) @ qml.PauliY(2), qml.PauliZ(0)]
)
var = pnp.array([0.1, 0.67, 0.3, 0.4, -0.5, 0.7, -0.2, 0.5, 1.0], requires_grad=True)
output = 0.42294409781940356
output2 = [
9.68883500e-02,
-2.90832724e-01,
-1.04448033e-01,
-1.94289029e-09,
3.50307411e-01,
-3.41123470e-01,
0.0,
-0.43657,
0.64123,
]
with qml.tape.QuantumTape() as tape:
for i in range(2):
                qml.RX(np.array(0)
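                       # Hedged completion: the wire labels and remaining gates are
                       # assumptions; the source row is cut off at this point.
                       , wires=0)
                qml.RX(np.array(0), wires=1)
                qml.RX(np.array(0), wires=2)
                qml.CNOT(wires=[0, 1])
                qml.CNOT(wires=[1, 2])
                qml.CNOT(wires=[2, 0])
            qml.expval(H)
        # Hedged: the hamiltonian_expand call and the autograd gradient comparison
        # against `output`/`output2` are not part of this excerpt and are omitted.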
import numpy as np
from keras import backend as K
from keras import Input, Model
from keras.layers import Conv3D, MaxPooling3D, UpSampling3D, Activation, Conv3DTranspose, Concatenate, BatchNormalization
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, EarlyStopping
import tensorflow as tf
global ch_axis
K.set_image_dim_ordering('th') # channels first
if K.image_dim_ordering() == 'th':
    ch_axis = 1
elif K.image_dim_ordering() == 'tf':
    ch_axis = -1
def unet_model_3d(input_shape, first_channel_num=8, depth = 3, pool_size=(2, 2, 2), n_labels=1,
initial_learning_rate=0.00001, use_conv_transpose=True, bn_flag = False):
    if K.image_dim_ordering() == 'th':
inputs = Input(shape=(1, input_shape[0], input_shape[1], input_shape[2]))
    elif K.image_dim_ordering() == 'tf':
inputs = Input(shape=(input_shape[0], input_shape[1], input_shape[2], 1))
conv1_d = conv3d(first_channel_num, (3, 3, 3), bn_flag)(inputs)
conv1_d = conv3d(2*first_channel_num, (3, 3, 3), bn_flag)(conv1_d)
pool1 = MaxPooling3D(pool_size=pool_size)(conv1_d)
conv2_d = conv3d(2*first_channel_num, (3, 3, 3), bn_flag)(pool1)
conv2_d = conv3d(4*first_channel_num, (3, 3, 3), bn_flag)(conv2_d)
pool2 = MaxPooling3D(pool_size=pool_size)(conv2_d)
conv3_d = conv3d(4*first_channel_num, (3, 3, 3), bn_flag)(pool2)
conv3_d = conv3d(8*first_channel_num, (3, 3, 3), bn_flag)(conv3_d)
#################################################################################################################
if depth >= 4: # depth 4, 5
pool3 = MaxPooling3D(pool_size=pool_size)(conv3_d)
conv4_d = conv3d(8*first_channel_num, (3, 3, 3), bn_flag)(pool3)
conv4_d = conv3d(16*first_channel_num, (3, 3, 3), bn_flag)(conv4_d)
################################################################################################################
if depth == 5: # depth 5
pool4 = MaxPooling3D(pool_size=pool_size)(conv4_d)
conv5_d = conv3d(16*first_channel_num, (3, 3, 3), bn_flag)(pool4)
conv5_d = conv3d(32*first_channel_num, (3, 3, 3), bn_flag)(conv5_d)
conv5_u = conv5_d
up4 = upsampling_convtranspose(conv5_u, 32 * first_channel_num, pool_size, use_conv_transpose)
up4 = Concatenate(axis=ch_axis)([up4, conv4_d])
conv4_u = conv3d(16*first_channel_num, (3, 3, 3), bn_flag)(up4)
conv4_u = conv3d(16*first_channel_num, (3, 3, 3), bn_flag)(conv4_u)
elif depth == 4: # depth 4
conv4_u = conv4_d
    ################################################################################################################
    if depth >= 4:  # depth 4, 5: decode through the conv4 level
        up3 = upsampling_convtranspose(conv4_u, 16 * first_channel_num, pool_size, use_conv_transpose)
        up3 = Concatenate(axis=ch_axis)([up3, conv3_d])
        conv3_u = conv3d(8*first_channel_num, (3, 3, 3), bn_flag)(up3)
        conv3_u = conv3d(8*first_channel_num, (3, 3, 3), bn_flag)(conv3_u)
    #################################################################################################################
    else:  # depth 3: no conv4 level, decode directly from the conv3 features
        conv3_u = conv3_d
up2 = upsampling_convtranspose(conv3_u, 8*first_channel_num, pool_size, use_conv_transpose)
up2 = Concatenate(axis=ch_axis)([up2, conv2_d])
conv2_u = conv3d(4*first_channel_num, (3, 3, 3), bn_flag)(up2)
conv2_u = conv3d(4*first_channel_num, (3, 3, 3), bn_flag)(conv2_u)
up1 = upsampling_convtranspose(conv2_u, 4*first_channel_num, pool_size, use_conv_transpose)
up1 = Concatenate(axis=ch_axis)([up1, conv1_d])
conv1_u = conv3d(2*first_channel_num, (3, 3, 3), bn_flag)(up1)
conv1_u = conv3d(2*first_channel_num, (3, 3, 3), bn_flag)(conv1_u)
output = Conv3D(n_labels, (1, 1, 1), activation=None)(conv1_u)
output = Activation('sigmoid')(output)
model = Model(inputs=inputs, outputs=output)
model.compile(optimizer=Adam(lr=initial_learning_rate), loss=dice_coef_loss, metrics=[dice_coef])
return model
def dice_coef_np(y_true, y_pred):
y_true_f = y_true.flatten()
y_pred_f = y_pred.flatten()
    intersection = np.sum(y_true_f * y_pred_f)
    # standard Dice coefficient; a smoothing term could be added to avoid division by zero
    return 2. * intersection / (np.sum(y_true_f) + np.sum(y_pred_f))
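# Usage sketch (illustrative only): build a depth-3 network for 64^3 patches and check the
# numpy Dice helper on random masks.  This assumes the conv3d / upsampling_convtranspose and
# dice_coef_loss / dice_coef helpers referenced above are defined elsewhere in this module.
if __name__ == "__main__":
    model = unet_model_3d((64, 64, 64), first_channel_num=8, depth=3)
    model.summary()
    a = np.random.randint(0, 2, size=(64, 64, 64)).astype(np.float32)
    b = np.random.randint(0, 2, size=(64, 64, 64)).astype(np.float32)
    print("dice on random masks:", dice_coef_np(a, b))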
import importlib
import pytest
import autoray as ar
# find backends to tests
BACKENDS = ['numpy']
for lib in ['cupy', 'dask', 'tensorflow', 'torch', 'mars', 'jax', 'sparse']:
if importlib.util.find_spec(lib):
BACKENDS.append(pytest.param(lib))
if lib == 'jax':
import os
from jax.config import config
config.update("jax_enable_x64", True)
os.environ['XLA_PYTHON_CLIENT_ALLOCATOR'] = 'platform'
else:
BACKENDS.append(pytest.param(
lib,
marks=pytest.mark.skipif(True, reason=f"No {lib}.")
))
JAX_RANDOM_KEY = None
def gen_rand(shape, backend, dtype='float64'):
if backend == 'jax':
from jax import random as jrandom
global JAX_RANDOM_KEY
if JAX_RANDOM_KEY is None:
JAX_RANDOM_KEY = jrandom.PRNGKey(42)
JAX_RANDOM_KEY, subkey = jrandom.split(JAX_RANDOM_KEY)
return jrandom.uniform(subkey, shape=shape, dtype=dtype)
elif backend == 'sparse':
return ar.do('random.uniform', size=shape, like=backend,
density=0.5, format='coo', fill_value=0)
x = ar.do('random.uniform', size=shape, like=backend)
x = ar.astype(x, ar.to_backend_dtype(dtype, backend))
assert ar.get_dtype_name(x) == dtype
return x
@pytest.mark.parametrize('backend', BACKENDS)
@pytest.mark.parametrize('fn', ['sqrt', 'exp', 'sum'])
def test_basic(backend, fn):
x = gen_rand((2, 3, 4), backend)
y = ar.do(fn, x)
    if (backend == 'sparse') and (fn == 'sum'):
pytest.xfail("Sparse 'sum' outputs dense.")
assert ar.infer_backend(x) == ar.infer_backend(y) == backend
@pytest.mark.parametrize('backend', BACKENDS)
@pytest.mark.parametrize('fn,args', [
(ar.conj, []),
(ar.transpose, []),
(ar.real, []),
(ar.imag, []),
(ar.reshape, [(5, 3)]),
])
def test_attribute_prefs(backend, fn, args):
    if (backend == 'torch') and fn in (ar.real, ar.imag):
pytest.xfail("Pytorch doesn't support complex numbers yet...")
x = gen_rand((3, 5), backend)
y = fn(x, *args)
assert ar.infer_backend(x) == ar.infer_backend(y) == backend
def modified_gram_schmidt(X):
Q = []
for j in range(0, X.shape[0]):
q = X[j, :]
for i in range(0, j):
rij = ar.do('tensordot', ar.do('conj', Q[i]), q, 1)
q = q - rij * Q[i]
rjj = ar.do('linalg.norm', q, 2)
Q.append(q / rjj)
return ar.do('stack', Q, axis=0, like=X)
@pytest.mark.parametrize('backend', BACKENDS)
def test_mgs(backend):
if backend == 'sparse':
pytest.xfail("Sparse doesn't support linear algebra yet...")
x = gen_rand((3, 5), backend)
Ux = modified_gram_schmidt(x)
y = ar.do('sum', Ux @ ar.dag(Ux))
assert ar.to_numpy(y) == pytest.approx(3)
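# Quick sanity check outside the test suite (sketch): the same backend-agnostic routine
# also runs directly on, e.g., a torch tensor:
#   x = gen_rand((3, 5), 'torch')
#   Q = modified_gram_schmidt(x)
#   ar.to_numpy(ar.do('sum', Q @ ar.dag(Q)))   # ~= 3.0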
def modified_gram_schmidt_np_mimic(X):
from autoray import numpy as np
print(np)
Q = []
for j in range(0, X.shape[0]):
q = X[j, :]
for i in range(0, j):
rij = np.tensordot(np.conj(Q[i]), q, 1)
q = q - rij * Q[i]
        rjj = np.linalg.norm(q, 2)
        Q.append(q / rjj)
    return np.stack(Q, axis=0)
from .path_alg import solve_path
import numpy as np
import numpy.linalg as LA
from .misc_functions import unpenalized
r"""
Problem : min ||Ab - y||^2 + lambda ||b||1 with C.b= 0
Dimensions : A : m*d ; y : m ; b : d ; C : k*d
The first function compute a solution of a Lasso problem for a given lambda.
The parameters are lam (lambda/lambdamax, in [0,1]) and pb, which has to be a 'problem_LS type'
which is defined bellow in order to contain all the important parameters of the problem.
"""
tol = 1e-5
def Classo_R1(pb, lam):
pb_type = pb.type # can be 'Path-Alg', 'P-PDS' , 'PF-PDS' or 'DR'
if lam < 1e-5:
return unpenalized(pb.matrix)
# ODE
# here we compute the path algo until our lambda, and just take the last beta
if pb_type == "Path-Alg":
BETA = solve_path(pb.matrix, lam, False, 0, "R1")[0]
return BETA[-1]
regpath = pb.regpath
if not regpath:
        pb.compute_param()  # compute costly matrix products like A^T.A only once when doing path computation with warm starts
(m, d, k), (A, C, y) = pb.dim, pb.matrix
lamb = lam * pb.lambdamax
Anorm = pb.Anorm
tol = pb.tol * LA.norm(y) / Anorm # tolerance rescaled
Proj = proj_c(C, d)
AtA = pb.AtA
Aty = pb.Aty
# Save some matrix products already computed in problem.compute_param()
gamma, tau = pb.gam / (2 * pb.AtAnorm), pb.tauN
w, zerod = lamb * gamma * pb.weights, np.zeros(
d
) # two vectors usefull to compute the prox of f(b) = sum(wi |bi|)
if pb_type == "PF-PDS": # y1 --> S ; p1 --> p . ; p2 --> y2
(x, v) = pb.init
for i in range(pb.N):
S = x - gamma * (AtA.dot(x) - Aty) * 2 - (C.T).dot(v)
p = prox(S, w, zerod)
y2 = v + tau * C.dot(x)
v = v + tau * C.dot(p)
eps = p - gamma * (AtA.dot(p) - Aty) * 2 - C.T.dot(y2) - S
x = x + eps
if i % 10 == 2 and LA.norm(eps) < tol:
if regpath:
return (x, (x, v))
else:
return x
if LA.norm(x) + LA.norm(p) + LA.norm(v) > 1e6:
raise ValueError("The algorithm of PF-PDS diverges")
raise ValueError(
"The algorithm of PF-PDS did not converge after %i iterations " % pb.N
)
if pb_type == "P-PDS":
xbar, x, v = pb.init
for i in range(pb.N):
grad = AtA.dot(x) - Aty
v = v + tau * C.dot(xbar)
s = x - 2 * gamma * grad - (C.T).dot(v)
p = prox(s, w, zerod)
nw_x = Proj.dot(p)
eps = nw_x - x
xbar = p + eps
if i % 10 == 2 and LA.norm(eps) < tol:
if regpath:
return (x, (xbar, x, v))
else:
return x
x = nw_x
if LA.norm(x) > 1e10:
raise ValueError("The algorithm of P-PDS diverges")
raise ValueError(
"The algorithm of P-PDS did not converge after %i iterations " % pb.N
)
else: # "DR":
gamma = gamma / (2 * lam)
w = w / (2 * lam)
mu, ls, c, root = pb.mu, [], pb.c, 0.0
Q1, Q2 = QQ(2 * gamma / (mu - 1), A, AtA = pb.AtA, AAt = pb.AAt)
QA, qy = Q1.dot(A), Q1.dot(y)
qy_mult = qy * (mu - 1)
b, xbar, x = pb.init
for i in range(pb.N):
xbar = xbar + mu * (prox(2 * b - xbar, w, zerod) - b)
x = x + mu * (Proj.dot(2 * b - x) - b)
nv_b = (2 - mu) * b
nv_b = nv_b + qy_mult + Q2.dot(x + xbar - 2 * nv_b)
if i % 2 == 1 and LA.norm(b - nv_b) < tol:
if regpath:
return (b, (b, xbar, x))
else:
return b
b = nv_b
raise ValueError(
"The algorithm of <NAME> did not converge after %i iterations "
% pb.N
)
"""
This function compute the the solution for a given path of lam :
by calling the function 'algo' for each lambda with warm start,
or with the method ODE, by computing the whole path
thanks to the ODE that rules Beta and the subgradient s,
and then to evaluate it in the given finite path.
"""
def pathlasso_R1(pb, path, n_active = False):
n, d, k = pb.dim
BETA, tol = [], pb.tol
if pb.type == "Path-Alg":
beta, sp_path = solve_path(pb.matrix, path[-1], n_active, 0, "R1")
# in the method ODE, we only compute the solution for breaking points. We can stop here if return_sp_path = True
# else, we do a little manipulation to interpolated the value of beta between those points, as we know beta is affine between those breaking points.
# if return_sp_path:
# return beta, sp_path
sp_path.append(path[-1]), beta.append(beta[-1])
i = 0
for lam in path:
while lam < sp_path[i + 1]:
i += 1
teta = (sp_path[i] - lam) / (sp_path[i] - sp_path[i + 1])
BETA.append(beta[i] * (1 - teta) + beta[i + 1] * teta)
return BETA
# Now we are in the case where we have to do warm starts.
save_init = pb.init
pb.regpath = True
pb.compute_param()
if type(n_active) == int and n_active > 0:
n_act = n_active
else:
n_act = d + 1
for lam in path:
X = Classo_R1(pb, lam)
beta, init = X[0], X[1]
BETA.append(beta)
pb.init = init
p = sum([(abs(beta[i]) > 1e-5) for i in range(len(beta))])
if p >= n_act or type(init) == str:
pb.init = save_init
BETA.extend([BETA[-1]] * (len(path) - len(BETA)))
pb.regpath = False
return BETA
pb.init = save_init
pb.regpath = False
return BETA
"""
Class of problem : we define a type, which will contain as attributes all the parameters we need for a given problem.
"""
class problem_R1:
def __init__(self, data, algo):
self.N = 500000
self.matrix, self.dim = data, (
data[0].shape[0],
data[0].shape[1],
data[1].shape[0],
)
(m, d, k) = self.dim
if algo == "P-PDS":
self.init = np.zeros(d), np.zeros(d), np.zeros(k)
elif algo == "PF-PDS":
self.init = np.zeros(d), np.zeros(k)
else:
self.init = np.zeros(d), np.zeros(d), np.zeros(d)
self.tol = tol
self.weights = np.ones(d)
self.regpath = False
self.name = algo + " LS"
self.type = algo # type of algorithm used
self.mu = 1.95
self.Aty = (self.matrix[0].T).dot(self.matrix[2])
self.lambdamax = 2 * LA.norm(self.Aty, np.infty)
self.gam = 1.0
self.tau = 0.5 # equation for the convergence of 'PF-PDS' and LS algorithms : gam + tau < 1
if algo == "DR":
self.gam = self.dim[1]
self.AtA = None
self.AAt = None
    # method of the problem class used to compute the expensive matrix products only once (especially useful for warm starts)
def compute_param(self):
(A, C, y) = self.matrix
m, d, k = self.dim
self.Anorm = LA.norm(A, "fro")
self.AtA = (A.T).dot(A)
self.c = d ** 2 / np.trace(
self.AtA
) # parameter for Concomitant problem : the matrix is scaled as c*A^2
self.Cnorm = LA.norm(C, 2) ** 2 + 1e-5
self.tauN = self.tau / self.Cnorm
self.AtAnorm = LA.norm(self.AtA, 2)
if self.type == "DR":
self.AAt = A.dot(A.T)
"""
Functions used in the algorithms, modules needed :
import numpy as np
import numpy.linalg as LA
from .../class_of_problem import problem
"""
# compute the prox of the function : f(b)= sum (wi * |bi| )
def prox(b, w, zeros):
return np.minimum(b + w, zeros) + np.maximum(b - w, zeros)
# Compute I - C^t (C.C^t)^-1 . C : the projection on Ker(C)
def proj_c(M, d):
k = len(M)
    return np.eye(d) - LA.multi_dot([M.T, np.linalg.inv(M.dot(M.T) + 1e-4 * np.eye(k)), M])
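# Usage sketch (illustrative, not part of the package): solve the constrained Lasso on random
# data with the path algorithm.  Shapes follow the docstring at the top of this file; the
# zero-sum constraint C = ones((1, d)) is only an example.
if __name__ == "__main__":
    m, d, k = 30, 10, 1
    rng = np.random.RandomState(0)
    A, C, y = rng.randn(m, d), np.ones((k, d)), rng.randn(m)
    pb = problem_R1((A, C, y), "Path-Alg")
    beta = Classo_R1(pb, 0.5)  # lam is given as a fraction of lambdamax
    betas = pathlasso_R1(pb, np.linspace(0.95, 0.05, 10))
    print(np.shape(beta), len(betas))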
###this code is a mess
import pandas as pd
import numpy as np
from numpy import newaxis
import matplotlib.pyplot as plt
import tensorflow as tf
import datetime as dt
from sklearn import preprocessing
from sklearn.ensemble import AdaBoostRegressor
from sklearn.svm import SVR
from sklearn.neighbors import RadiusNeighborsRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.neural_network import MLPRegressor
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.models import Sequential
def build_model(layers): #keras LSTM network
model = Sequential()
model.add(LSTM(
input_shape=(layers[1], layers[0]),
output_dim=layers[1],
return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(
layers[2],
return_sequences=False))
model.add(Dropout(0.2))
model.add(Dense(
output_dim=layers[3]))
model.add(Activation("linear"))
#start = time.time()
model.compile(loss="mse", optimizer="rmsprop")
#print("> Compilation Time : ", time.time() - start)
return model
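# Example (sketch): `layers` is [input_dim, window_size, hidden_units, output_dim], so a
# univariate model with a look-back window of 1 and 100 hidden LSTM units would be
#   model = build_model([1, 1, 100, 1])
#   model.fit(train_X, train_y, batch_size=1, epochs=50, validation_split=0.2)
# where train_X is shaped (samples, window_size, input_dim); names here are placeholders.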
# def predict_point_by_point(model, data):
# #Predict each timestep given the last sequence of true data, in effect only predicting 1 step ahead each time
# predicted = model.predict(data)
# predicted = np.reshape(predicted, (predicted.size,))
# return predicted
#
# def predict_sequence_full(model, data, window_size):
# #Shift the window by 1 new prediction each time, re-run predictions on new window
# curr_frame = data[0]
# predicted = []
# for i in range(len(data)):
# predicted.append(model.predict(curr_frame[newaxis,:,:])[0,0])
# curr_frame = curr_frame[1:]
# curr_frame = np.insert(curr_frame, [window_size-1], predicted[-1], axis=0)
# return predicted
#
# def predict_sequences_multiple(model, data, window_size, prediction_len):
# #Predict sequence of 50 steps before shifting prediction run forward by 50 steps
# prediction_seqs = []
# for i in range(int(len(data)/prediction_len)):
# curr_frame = data[i*prediction_len]
# predicted = []
# for j in range(prediction_len):
# predicted.append(model.predict(curr_frame[newaxis,:,:])[0,0])
# curr_frame = curr_frame[1:]
# curr_frame = np.insert(curr_frame, window_size-1, predicted[-1], axis=0)
# prediction_seqs.append(predicted)
# return prediction_seqs
#
def create_dataset(dataset, look_back=1):
dataX, dataY = [], []
for i in range(len(dataset)-look_back):
a = dataset[i:(i+look_back), 0]
dataX.append(a)
dataY.append(dataset[i + look_back, 0])
return np.array(dataX), np.array(dataY)
back_fat=pd.read_excel('tables.xls', sheet_name=3, header=None) #preprocess input table
back_fat.dropna(inplace=True)
back_fat.columns=['x','y']
b2=back_fat.loc[(back_fat['x']>='2014-10-31')].copy(deep=True) #split data before and after the 2014 crisis
b1=back_fat.loc[(back_fat['x']<'2014-10-31')].copy(deep=True)
#back_fat=back_fat.loc[(back_fat['x']>='2014-10-31')].copy(deep=True)
scaler=preprocessing.MinMaxScaler() #normalize data
t1=b1['y'].values
t1=t1.reshape(-1,1)
t1=scaler.fit_transform(t1)
t1.reshape(-1)
b1['y']=t1
t2=b2['y'].values
t2=t2.reshape(-1,1)
t2=scaler.fit_transform(t2)
t2.reshape(-1)
b2['y']=t2
test=[b1,b2]
back_fat=pd.concat(test)
# EMA=0.0
# gamma=1
# smoothe=back_fat['y'].values
# smoothe=smoothe.astype(np.float64)
# for x in np.nditer(smoothe, op_flags=['readwrite']):
# EMA=gamma*x+(1-gamma)*EMA
# x[...]=EMA
#
# back_fat['y']=smoothe
#back_fat['date']=back_fat['date'].apply(lambda x: x.toordinal())
#new_back=back_fat.loc[(back_fat['x']>='2014-10-31')&(back_fat['x']<='2018-01-13')].copy(deep=True)
new_back=back_fat.loc[(back_fat['x']<='2018-08-21')].copy(deep=True) #split into testing and training datasets
back_predict=back_fat.loc[back_fat['x']>='2018-08-28'].copy(deep=True)
new_back['x']=new_back['x'].apply(lambda x: x.toordinal()) #convert date
back_predict['x']=back_predict['x'].apply(lambda x: x.toordinal())
backarr=back_predict['x'].values
backarr=backarr.astype(np.float64)
for elem in np.nditer(backarr, op_flags=['readwrite']):
elem[...]=elem-733000 #just to make the graphs a bit more readable
#elem[...] = elem / 10000
backarr=backarr.reshape(-1,1)
#backarr=np.divide(backarr, 10000)
backarr2=new_back['x'].values
backarr2=backarr2.astype(np.float64)
for elem in np.nditer(backarr2, op_flags=['readwrite']):
elem[...]=elem-733000
#elem[...]=elem/10000
backarr2=backarr2.reshape(-1,1)
#backarr2=np.divide(backarr2, 10000)
outarr=new_back['y'].values
outarr=outarr.astype(np.float64)
outarr=outarr.reshape(-1,1)
outarr2=back_predict['y'].values
outarr2=outarr2.astype(np.float64)
outarr2=outarr2.reshape(-1,1)
# plt.plot(back_fat['x'], back_fat['y'])
# plt.plot(back_fat['x'], smoothe)
# plt.show()
#print(backarr.shape[0])
#train=create_dataset(backarr2)
#train1,train2=create_dataset(outarr)
#test1,test2=create_dataset(outarr2)
#train1 = np.reshape(train1, (train1.shape[0], 1, train1.shape[1]))
#test1=np.reshape(test1, (test1.shape[0], 1, test1.shape[1]))
#backarr2=np.reshape(backarr2, (backarr2.shape[0], 1, backarr2.shape[1]))
#backarr=np.reshape(backarr, (backarr.shape[0], 1, backarr.shape[1]))
#model= build_model([1,1,100,1])
#model.fit(backarr2, outarr, batch_size=1, epochs=50, validation_split=0.2, verbose=0)
#predicted = predict_sequence_full(model, test, np.size(backarr))
#predict=predict_sequences_multiple(model, backarr, 2, backarr.shape[0])
# backarr=backarr.reshape(-1,1)
# plt.plot(backarr,outarr2)
# plt.plot(backarr,predict)
# plt.show()
# backarr2=backarr2.reshape(-1,1)
#backarr=backarr.reshape(-1,1)
#print(predict.shape," " ,outarr2.shape, " ", train1.shape, train2.shape, backarr2.shape)
#classificator=MLPRegressor(hidden_layer_sizes=(200, ), activation='relu', solver='lbfgs', alpha=0.0001,
#batch_size='auto', learning_rate='adaptive', learning_rate_init=0.001, power_t=0.5, max_iter=200,
#shuffle=True, random_state=None, tol=0.0001, verbose=False, warm_start=False, momentum=0.9, nesterovs_momentum=True, early_stopping=False, validation_fraction=0.1, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
regressor=KernelRidge(alpha=0.005, kernel='rbf', gamma=0.00025, degree=3, coef0=1, kernel_params=None) #sklearn NN models
regressor2=SVR(C=1e3,epsilon=0.01,gamma=0.001,tol=1e-4)
#classificator.fit(backarr2,outarr)
#print(classificator.score(backarr, back_predict['y']))
#print(classificator.score(backarr2, new_back['y']))
# out=[]
# scores=[]
# for x,y in zip(backarr, outarr2):
# x=x.reshape(1, -1)
# backarr2=backarr2.reshape(-1, 1)
# regressor2.fit(backarr2, outarr)
# mid=regressor2.predict(x)
# #mid2=classificator.score(x, y)
# #print (x, y)
# backarr2=np.append(backarr2, x)
# outarr=np.append(outarr, y)
# out=np.append(out, mid)
# #scores=np.append(scores,mid2)
regressor.fit(backarr2,outarr)
out=regressor.predict(backarr2)
regressor2.fit(backarr2,outarr)
out2=regressor2.predict(backarr2)
res=regressor.score(backarr2,outarr)
res2=regressor2.score(backarr2,outarr)
time1,time2=[],[]
for i in range(dt.date(2008,2,25).toordinal(),dt.date(2014,2,3).toordinal(),7): #make an array of mondays
time1=np.append(time1,i)
for i in range(dt.date(2014,10,27).toordinal(),dt.date(2018,8,21).toordinal(),7): time2=np.append(time2,i)
for i in np.nditer(time1, op_flags=['readwrite']): i[...]=i-733000
for i in np.nditer(time2, op_flags=['readwrite']): i[...]=i-733000
time1=time1.reshape(-1,1)
time2=time2.reshape(-1,1)
interpol=regressor.predict(time1) #interpolate the original data that has gaps in it to fill the said gaps
interpol2=regressor.predict(time2)
time = np.concatenate((time1, time2))
from __future__ import division
from chainer.dataset import dataset_mixin
from abc import ABCMeta, abstractmethod
import os
from PIL import Image
from PIL import ImageOps
import numpy
import six
import skimage.filters
from skimage.color import rgb2lab
from skimage import exposure
import cv2
import time
import typing
from comicolorization.utility import color
"""
Just a little bit modification
@see https://github.com/pfnet/chainer/blob/master/chainer/datasets/image_dataset.py
"""
@six.add_metaclass(ABCMeta)
class InputOutputDatsetInterface(object):
@abstractmethod
def get_input_luminance_range(self):
pass
@abstractmethod
def get_input_range(self):
pass
@abstractmethod
def get_output_range(self):
pass
class PILImageDatasetBase(dataset_mixin.DatasetMixin):
def __init__(self, paths, resize=None, random_crop_size=None, random_flip=False, test=False, root='.'):
"""
:param resize: if it is not None, resize image
:param random_crop_size: if it is not None, random crop image after resize
:param random_flip: if it is True, random flip image right left
"""
if isinstance(paths, six.string_types):
with open(paths) as paths_file:
paths = [path.strip() for path in paths_file]
self._paths = paths
self._root = root
self._resize = resize
self._crop_size = random_crop_size
self._flip = random_flip
self._test = test
def __len__(self):
return len(self._paths)
def get_example(self, i):
# type: (any) -> typing.Tuple[str, Image]
path = os.path.join(self._root, self._paths[i])
image = Image.open(path)
if self._resize is not None:
image = image.resize(self._resize)
if self._crop_size is not None:
width, height = image.size
if self._test is True:
top = int((height - self._crop_size[1]) / 2)
left = int((width - self._crop_size[0]) / 2)
bottom = top + self._crop_size[1]
right = left + self._crop_size[0]
else:
top = numpy.random.randint(height - self._crop_size[1] + 1)
left = numpy.random.randint(width - self._crop_size[0] + 1)
bottom = top + self._crop_size[1]
right = left + self._crop_size[0]
image = image.crop((left, top, right, bottom))
if self._flip:
if numpy.random.randint(2) == 1:
image = image.transpose(Image.FLIP_LEFT_RIGHT)
return path, image
class PILImageDataset(PILImageDatasetBase):
def get_example(self, i):
# type: (any) -> Image
return super(PILImageDataset, self).get_example(i)[1]
class ColorMonoImageDataset(dataset_mixin.DatasetMixin, InputOutputDatsetInterface):
def __init__(self, base, dtype=numpy.float32):
# type: (PILImageDataset, any) -> any
self._dtype = dtype
self.base = base
def __len__(self):
return len(self.base)
def get_example(self, i):
# type: (any) -> typing.Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]
"""
:return: (RGB array [0~255], gray array [0~255], RGB array [0~255])
"""
image = self.base[i]
rgb_image_data = numpy.asarray(image, dtype=self._dtype).transpose(2, 0, 1)[:3, :, :]
gray_image = ImageOps.grayscale(image)
gray_image_data = numpy.asarray(gray_image, dtype=self._dtype)[:, :, numpy.newaxis].transpose(2, 0, 1)
return rgb_image_data, gray_image_data, rgb_image_data
def get_input_luminance_range(self):
raise NotImplementedError
def get_input_range(self):
return (0, 255)
def get_output_range(self):
return (0, 255), (0, 255), (0, 255)
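# Usage sketch (illustrative; the path and sizes below are placeholders):
#   base = PILImageDataset(["images/page_000.png"], resize=(256, 256),
#                          random_crop_size=(224, 224), random_flip=True)
#   rgb, gray, rgb_again = ColorMonoImageDataset(base)[0]   # arrays in [0, 255]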
class LabImageDataset(dataset_mixin.DatasetMixin, InputOutputDatsetInterface):
def __init__(self, base):
# type: (ColorMonoImageDataset) -> None
self.base = base
def __len__(self):
return len(self.base)
def get_example(self, i):
# type: (any) -> typing.Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]
rgb_image_data, gray_image_data, _ = self.base[i]
dtype = rgb_image_data.dtype
image_data = rgb_image_data.transpose(1, 2, 0) / 255
lab_image_data = rgb2lab(image_data).transpose(2, 0, 1).astype(dtype)
        luminous_image_data = numpy.expand_dims(lab_image_data[0], axis=0)
from .simulate_1D import simulate
import numpy as np
import _pickle as cPickle
from collections import namedtuple
import os
from tqdm import tqdm
import pandas as pd
import h5py
import json
from scipy.stats import poisson
import copy
from replication.tools import load_ori_position, load_lengths_and_centro
class ensembleSim:
def __init__(self, Nsim, Nori, Ndiff, lengths,
p_on, p_off, only_one, all_same_ori=True,
dt_speed=1,
fork_speed=1,
gindin=True,
p_v=1,
l_ori=[], cut=10, random=False, one_minute=False,
positions=None, ramp=None,
max_ramp=None, ramp_type="linear", strengths=[], hdf5_file=None,
D_Ndiff="pulse", fsd="uniform", variance_fs=2):
self.Nsim = Nsim
self.Nori = Nori
self.Ndiff = Ndiff
self.lengths = lengths
        if type(lengths) == str:
            raise TypeError("lengths must be a list of ints, got the string %s" % lengths)
        if lengths and type(lengths[0]) == list:
            raise TypeError("lengths must be a flat list of ints, got %s" % lengths)
assert(type(gindin) == bool)
assert(type(only_one) == bool)
self.p_on = p_on
self.p_off = p_off
self.only_one = only_one
self.all_same_ori = all_same_ori
self.dt_speed = dt_speed
self.fork_speed = fork_speed
self.gindin = gindin
self.p_v = p_v
self.cut = cut
self.l_ori = l_ori
self.random = random
self.one_minute = one_minute
self.positions = positions
self.ramp = ramp
self.max_ramp = max_ramp
self.ramp_type = ramp_type
self.strengths = strengths
self.hdf5_file = None
self.D_Ndiff = D_Ndiff
self.fsd = fsd
self.variance_fs = variance_fs
def add_precomputed(self, name, file_hdf5="None", precision=None, two=False):
qt = getattr(self, name)()
with h5py.File(file_hdf5, 'a') as myfile:
quant = myfile.get("analysis")
if myfile.get("analysis") is None:
quant = myfile.create_group("analysis")
if quant.get(name) is not None:
print(name, "Allready computed")
return
# print(quant.get(name))
# print(type(qt[0]))
if qt != [] and type(qt) in [tuple, list] and type(qt[0]) in[list, np.ndarray]:
prop = quant.create_group(name)
if precision:
prop.create_dataset("precision", data=precision)
maxi = None
if two:
maxi = 2
for i in range(len(qt[:maxi])):
if precision:
prop.create_dataset(str(i), data=list(
map(lambda x: int(x * precision), qt[i])))
else:
prop.create_dataset(str(i), data=np.array(qt[i]))
else:
prop = quant.create_dataset(name, data=qt)
def show_parameters(self, show_ori=True):
P = ["Nsim", "Nori", "Ndiff", "lengths", "p_on", "p_off",
"only_one", "all_same_ori", "dt_speed",
"fork_speed", "gindin", "p_v", "cut", "l_ori", "ramp", "max_ramp"]
for parameter in P:
if (parameter == "l_ori" or parameter == "Nori") and not show_ori:
print(parameter, self.nori)
continue
if hasattr(self, parameter):
print(parameter, getattr(self, parameter))
else:
print(parameter, "Not defined")
def data(self):
return [self.aIts,
self.aFts,
self.aFds,
self.aRps,
self.aDNAs,
self.raDNAs,
self.aUnrs,
self.aFree_origins]
def n3Dsim(self):
v = self.try_load_property("n3Dsim")
if v is not None:
return v
return len(self.aIts)
def load_data(self, data):
self.aIts, self.aFts, self.aFds, self.aRps, self.aDNAs, self.raDNAs, self.aUnrs, self.aFree_origins = data
unr = np.sum(np.array(self.aUnrs), axis=1)
self.anIts = self.aIts * unr
def remove_correlations(self):
del self.aIODs
del self.aIRTDs
del self.aTLs
def add_traj(self, N, run_length=10000):
old_nsim = 0 + self.Nsim
self.Nsim = N
self.run_all(init=False)
self.Nsim = old_nsim + N
def run_all(self, run_length=200, load_from_file=None, correlation=True, skip=[], single=False, init=True, orip=False):
if init:
self.aIts = []
self.aIfs = []
self.aFts = []
self.aFds = []
self.aRps = []
self.aDNAs = []
self.raDNAs = []
self.aUnrs = []
self.aFree_origins = []
self.aFree_Diff_bis = []
self.anIts = []
self.aFree_Diff = []
self.aFiring_Position = []
self.aIODs = []
self.aIRTDs = []
self.aTLs = []
self.record_diffusing = []
self.orip = []
self.aPol = []
self.fork_speeds = []
self.lft_forks = []
found = 0
for sim in tqdm(range(self.Nsim)):
ori = self.Nori
if self.l_ori != []:
ori = self.l_ori
# check dimension of position
positions = self.positions
if self.positions and type(self.positions[0][0]) is list:
positions = self.positions[sim]
strengths = self.strengths
if self.strengths and type(self.strengths[0][0]) is list:
strengths = self.strengths[sim]
Nd = self.Ndiff
max_ramp = self.max_ramp
if self.D_Ndiff == "poisson":
Nd = poisson.rvs(size=1, mu=self.Ndiff)[0]
max_ramp = Nd
if load_from_file is None:
S = simulate(ori,
Nd,
self.lengths,
self.p_on,
self.p_off,
self.only_one,
dt_speed=self.dt_speed,
fork_speed=self.fork_speed,
gindin=self.gindin,
p_v=self.p_v,
random=self.random,
positions=positions,
ramp=self.ramp,
max_ramp=max_ramp,
ramp_type=self.ramp_type,
strengths=strengths,
fsd=self.fsd,
variance_fs=self.variance_fs
)
S.simulate(run_length)
found += 1
self.record_diffusing.append(S.record_diffusing)
else:
# print("Sim", sim)
if sim in skip:
# print("skip", skip)
continue
# print(sim)
Simu = namedtuple("Simu", ["polys", "oris", "Ndiff_libre_t", "record_diffusing"])
troot = "%s%i/" % (load_from_file, sim + 1)
if single:
troot = load_from_file
file_to_open = troot + "polymer_timing.dat"
try:
if os.path.exists(file_to_open):
with open(file_to_open, "rb") as f:
polys = cPickle.load(f)
oris = [np.array(p.origins) - p.start for p in polys]
Ndiff_libre_t = []
if os.path.exists(troot + "Ndiff_libre_t.dat"):
with open(troot + "Ndiff_libre_t.dat", "rb") as f:
Ndiff_libre_t = cPickle.load(f)
record_diffusing = []
if os.path.exists(troot + "record_diffusing.dat"):
with open(troot + "record_diffusing.dat", "rb") as f:
record_diffusing = cPickle.load(f)
self.record_diffusing.append(record_diffusing)
S = Simu(polys, oris, Ndiff_libre_t, record_diffusing)
found += 1
else:
print(file_to_open, "does not exist")
continue
except EOFError:
print("Not all files in %i readable" % sim)
if found == 1 and self.all_same_ori:
self.l_ori = S.oris
unfinished = False
self.aRps.append([])
for poly in S.polys:
if self.one_minute:
dt = 1
else:
dt = self.dt_speed
if not hasattr(poly, "dt"):
poly.dt = self.dt_speed
poly.max_fs = self.fork_speed
try:
self.aRps[-1].append(poly.get_replication_profile())
if np.any(self.aRps[-1][0] == 0):
print(self.aRps[-1])
raise TypeError
except TypeError:
unfinished = True
print("Sim %i not finished" % sim)
break
if unfinished:
self.aRps.pop(-1)
continue
self.aIts.append([])
self.aIfs.append([])
self.anIts.append([])
self.aFts.append([])
self.aFds.append([])
self.aDNAs.append([])
self.raDNAs.append([])
self.aUnrs.append([])
self.aFree_Diff.append([])
self.aFree_origins.append([])
self.aFree_Diff_bis.append([])
self.aFiring_Position.append([])
self.aIODs.append([])
self.aIRTDs.append([])
self.aTLs.append([])
self.aPol.append([])
self.fork_speeds.append([])
self.lft_forks.append([])
for poly in S.polys:
if orip:
p = poly.get_ori_position()
p.sort()
self.orip.append(p)
print(p)
dt = self.dte # if self.one_minute == 1
# Cut == 0 because we removed them from all the chromosomes
ft, it = poly.get_firing_time_It(cut=0, normed=False, dt=dt)
fd = poly.get_fork_density(cut=0, normed=False, dt=dt) # Normed afteward
self.aIts[-1].append(it)
self.aFts[-1].append(ft)
self.aFds[-1].append(fd)
dnat, _, pol = poly.get_DNA_with_time(dt=dt, polarity=True)
self.raDNAs[-1].append(dnat)
self.aPol[-1].append(pol)
if correlation:
iods, irtds, tls = poly.get_correlations(dt=dt, thresh=0.99)
self.aIODs[-1].append(iods)
self.aIRTDs[-1].append(irtds)
self.aTLs[-1].append(tls)
fsp, lft = poly.get_speeds_lifetime()
self.fork_speeds[-1].extend(fsp)
self.lft_forks[-1].extend(lft)
# if hasattr(poly, "fork_speeds"):
# self.fork_speeds[-1].extend(poly.fork_speeds)
"""
All the following line to be able to compute No(t-1)
"""
# print(self.aUnrs[-1][-1])
# .append(poly.get_DNA_with_time(fork_speed=self.fork_speed)[0])
# print(self.raDNAs[-1][-1][-1])
Free_o = poly.get_free_origins_time(normed=False, dt=dt).tolist()
assert (Free_o[-1] == 0)
self.aFree_origins[-1].append(np.array([len(poly.origins)] + Free_o[:-1]))
# self.aFree_origins[-1].append(Free_o)
# print(self.aFree_origins[-1])
# assert(1 == 0)
"""
len_poly = poly.end + 1 - poly.start
assert(self.raDNAs[-1][-1][-1] == len_poly)
self.raDNAs[-1][-1] = self.raDNAs[-1][-1].tolist()
self.raDNAs[-1][-1].pop(0)
self.raDNAs[-1][-1].append(len_poly)
self.raDNAs[-1][-1] = np.array(self.raDNAs[-1][-1])
# print(self.raDNAs[-1][-1])
# self.aUnrs[-1][-1] = self.aUnrs[-1][-1]
"""
len_poly = poly.end + 1 - poly.start
self.aUnrs[-1].append(len_poly - self.raDNAs[-1][-1])
ftime, firing_position = poly.get_dist_between_activated_origins(dt=dt)
self.aFiring_Position[-1].append(firing_position)
# print (norm.shape,self.aUnrs[-1][-1].shape)
# raise
# print(it)
DNA_time = np.sum(np.array(self.raDNAs[-1]), axis=0) / np.sum(self.lengths)
try:
for t in range(len(DNA_time)):
tp = int(round(t * dt / self.dt_speed, 0))
if tp > len(S.Ndiff_libre_t) - 1:
break
self.aFree_Diff_bis[-1].append(S.Ndiff_libre_t[tp])
except:
# Not available in 3D
pass
"""
try:
self.aFree_Diff[-1] = S.get_free()
# print(self.aFree_Diff[-1])
except:
pass"""
bins = 100
for poly in S.polys:
self.aIfs[-1].append(poly.get_firing_at_fraction(DNA_time=DNA_time,
cut=0, bins=bins))
self.aIfs[-1] = np.sum(np.array(self.aIfs[-1]), axis=0) / \
(np.array(np.arange(0, 1, 1 / bins) + 1 / 100.) * self.length)[::-1]
# print (np.array(np.arange(0,1,1/bins) * np.sum(self.lengths))[::-1])
unr = np.sum(np.array(self.aUnrs[-1]), axis=0)
unr[unr == 0] = np.nan
self.anIts[-1] = np.sum(np.array(self.aIts[-1]), axis=0)
self.aIts[-1] = np.sum(np.array(self.aIts[-1]), axis=0) / unr
self.aFds[-1] = np.sum(np.array(self.aFds[-1]), axis=0) / self.length
self.aFree_origins[-1] = np.sum(np.array(self.aFree_origins[-1]), axis=0)
# print(self.raDNAs)
self.aDNAs[-1] = 1 + np.sum(np.array(self.raDNAs[-1]), axis=0) / self.length
return S
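    # Usage sketch (illustrative; parameter values below are placeholders, not calibrated):
    #   E = ensembleSim(Nsim=5, Nori=100, Ndiff=50, lengths=[1000],
    #                   p_on=0.1, p_off=0.2, only_one=True)
    #   E.run_all(run_length=2000)
    #   x, dna, err, _ = E.get_quant("aDNAs")   # mean replicated DNA content vs time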
def get_what(self, what, fraction=[0, 1], max_track_length=None):
"""return an array which contain a concatenation by sim
for each sim it is an array which contain a list of the given quantity for evey time step
IOD, IRTD, or TL
"""
def recompute(what, tl, max_track_length):
res = []
for ich, ch in enumerate(what):
res.append([])
for ipos, spos in enumerate(ch):
# Go throug time
# print(spos)
# print(spos,)
if type(spos) is not list:
spos = [] + spos.tolist()
else:
spos = [] + spos
if spos == []:
res[-1].append([])
continue
spos.insert(0, 0)
pos = np.cumsum(spos)
# print(tl[ich][ipos])
keep = np.array(tl[ich][ipos]) < max_track_length
kpos = pos[np.array(keep, np.bool)]
pos = kpos[1:] - kpos[:-1]
res[-1].append(pos)
"""
if np.any(keep == False):
print(pos.shape, keep.shape, pos[keep].shape)
print(len(res[-1][-1]), len(ch[ipos]))
# print(spos, pos, keep, tl[ich][ipos])
print(res[-1][-1])
raise"""
# return
return np.array(res).T
iod3 = []
for sim in range(self.Nsim):
def get_by_time(what=what):
# print(sim)
iods = np.array(getattr(self, "a" + what + "s")[sim])
if max_track_length is not None:
tl = np.array(getattr(self, "aTLs")[sim])
tl = tl.T
iods = iods.T
iods2 = []
fraction_time = np.array(self.raDNAs[sim]).copy()
for ichl, chl in enumerate(self.lengths):
# Normalise to 1 by dividing by chromosome length
fraction_time[ichl] /= chl
to_keep = iods
if max_track_length is not None:
# print(tl[ich].shape)
to_keep = recompute(iods.T, tl.T, max_track_length)
# print(fraction_time.shape)
for ich, (ch_what, ch_fraction) in enumerate(zip(to_keep, fraction_time.T)):
# We go throug time and
# By chromosomes select where they match the selected fraction:
select = (ch_fraction >= fraction[0]) * (ch_fraction <= fraction[1])
# print(select)
# return
if np.sum(select) >= 2:
iods2.append(np.concatenate(ch_what[select]))
if np.sum(select) == 1:
# print(ch_what)
iods2.append(np.array(ch_what[select][0]))
"""
print(iods2[-1])
print(iods2[-2])
print(np.concatenate([[], []]).shape)
print(np.array([]).shape)
return"""
if np.sum(select) == 0:
iods2.append(np.array([]))
return iods2
iod3 += get_by_time()
return iod3
def get_cum_sum_hist(self, what, bins=100, fraction=[0, 1], max_track_length=None):
"""Cumulative histogram in a combing like fashion
as the time steps are all used and added together"""
if what != "ori":
data = self.get_what(what, fraction=fraction, max_track_length=max_track_length)
elif what == "ori":
data = [np.array(io)[1:] - np.array(io)[:-1] for io in self.l_ori]
m = []
for i in data:
m += i.tolist() # np.mean(i) for i in iod3 if i != [] ]
self.m = m
y, x = np.histogram(m, bins=bins, normed=True)
# hist(m,bins=100,normed=True,cumulative=-1,histtype='step')
y = np.array([0] + np.cumsum(y).tolist())
y /= y[-1]
# print(y[0], y[-1])
y = 1 - y
# plot( 5*(x[1:]/2+x[:-1]/2),y)
return x, y
def get_quant(self, name, shift=0, n_rep=None, cut=0):
if shift != 0:
print("You should not use it")
prop = getattr(self, name)
# print(prop)
times = self.get_times_replication(n_rep=n_rep)
# print(times)
# print(maxl)
if -1 in times:
maxl = int(max(map(len, prop)))
else:
maxl = int(max(times / self.dte))
if name == "aIfs":
maxl = len(prop[0])
normed_prop = np.zeros((len(prop[:n_rep]), maxl))
# print("Nan")
normed_prop += np.nan
for iIt, It in enumerate(prop[:n_rep]):
# print(len(It), maxl)
normed_prop[iIt, :min(len(It), maxl)] = np.array(It[:min(len(It), maxl)])
if cut != 0 and name in ["anIts", "aFds"]:
# Remove last cut:
# print("Before", normed_prop[iIt])
# print("la")
removed = 0
if cut != 0:
for i in range(1, len(normed_prop[iIt])):
while removed != cut and normed_prop[iIt][-i] > 0:
# print(i)
normed_prop[iIt][-i] = -1
removed += 1
if removed == cut:
normed_prop[iIt][-i:] = np.nan
break
# print("After", normed_prop[iIt])
if shift != 0:
normed_prop[iIt, len(It):] = It[-1]
self.all = normed_prop
x = np.arange(maxl)
if n_rep:
y = np.nanmean(normed_prop[:n_rep], axis=0)
err = np.std(normed_prop[:n_rep], axis=0)
else:
y = np.nanmean(normed_prop, axis=0)
err = np.std(normed_prop, axis=0)
return x * self.dte, y, err, normed_prop
def get_time(self, n_rep=None):
times = self.get_times_replication(n_rep=n_rep)
# print(times)
# print(maxl)
maxl = int(max(times / self.dte))
return np.arange(maxl) * self.dte
def get_times_replication(self, finished=True, n_rep=None):
v = self.try_load_property("get_times_replication")
if v is not None:
return v
times = []
for rep in self.aRps[:n_rep]:
times.append(-1)
for c in rep:
if finished and np.sum(np.equal(c, None)) != 0:
times[-1] = -1
break
else:
times[-1] = max(times[-1], max(np.array(c)[~np.equal(c, None)]))
# print(self.dte)
return np.array(times) # * self.dte
@property
def nori(self):
nori = 1.0 * np.sum(list(map(len, self.l_ori)))
if nori == 0:
print("Warning, no origins ")
return nori
@property
def length(self):
return np.sum(self.lengths)
@property
def dte(self):
if self.one_minute:
return 1
else:
return self.dt_speed
def try_load_property(self, name):
# print(name)
if hasattr(self, "hdf5_file") and self.hdf5_file is not None:
with h5py.File(self.hdf5_file, 'r') as myfile:
quant = myfile.get("analysis")
if quant is not None:
prop = quant.get(name)
# print(prop, hasattr(prop, "shape"))
if hasattr(prop, "shape"):
return prop.value
# print(prop, dir(prop))
if prop is not None:
return [prop[str(i)].value for i in range(len(prop))]
return None
def get_dist_between_activated_origins(self, time=None):
"""Time in minutes"""
v = self.try_load_property("get_dist_between_activated_origins")
if v is not None:
return v
Dist = []
if time is None:
time = 1e8
else:
time = time # / self.dte
# print(time)
for fps in self.aFiring_Position:
for fp in fps:
fired = fp[::, 0] <= time
dist = fp[fired][::, 1]
dist = dist[1:] - dist[:-1]
Dist.extend(dist)
return Dist
def get_time_at_fraction(self, frac=1, bead=True):
dna = frac + 1
x, DNA = self.DNAs()[:2]
# print(DNA)
for iid, d in enumerate(DNA):
if d >= dna:
return x[iid]
return x[-1]
def Mean_replication_time(self, n_intervals=6):
v = self.try_load_property("Mean_replication_time")
if v is not None:
return v
def get_times_at_fraction(nsim, time, n_interval=6):
fracs = np.arange(0, 1.01, 1 / n_interval)
idna = 0
dna = fracs[idna] + 1
DNA = self.aDNAs[nsim]
times = []
# print(DNA)
for iid, d in enumerate(DNA):
if d >= dna:
# print(dna)
times.append(time[iid])
idna += 1
dna = fracs[idna] + 1
if dna >= 2:
times.append(time[-1])
break
return times
rep = []
cp = []
time = self.get_time()
#time, _, _, _ = self.get_quant("aDNAs")
for il, l in enumerate(self.lengths):
rep.append(np.zeros((n_intervals, l)))
Nsim = len(self.aRps)
for sim in range(Nsim):
intervals = get_times_at_fraction(sim, time)
#print("int", intervals, len(time))
# print(self.aRps[sim][il])
for iinte, (end, start) in enumerate(zip(intervals[1:], intervals[:-1])):
pos = (self.aRps[sim][il] <
end) & (self.aRps[sim][il] > start)
# print(pos)
rep[-1][iinte, pos] += 1
cp.append(copy.deepcopy(rep[-1]))
cp[-1] = cp[-1] / np.sum(cp[-1], axis=0)
tmp = np.zeros_like(cp[-1])
for i in range(1, n_intervals + 1):
tmp[i - 1, ::] = i
toc = cp[-1] * tmp * 6 / 5 - 1 / 5
mcp = np.mean(toc, axis=0)
std = np.mean((toc - mcp)**2, axis=0)**0.5
cp[-1] = [mcp, std]
return rep, cp
def It_Mean_field_origins(self, n_rep=None):
v = self.try_load_property("It_Mean_field_origins")
if v is not None:
return v
x, y = self.Free_Diff_bis(n_rep=n_rep)[:2]
x, y1 = self.Free_origins(n_rep=n_rep)[:2]
x, DNA = self.DNAs(n_rep=n_rep)[:2]
Unr = (2 - DNA) * self.length
return x, y * y1 / Unr * self.p_on * self.p_v / self.dt_speed
def It_Mean_field_simplified(self, n_rep=None):
v = self.try_load_property("It_Mean_field_simplified")
if v is not None:
return v
x, y = self.Free_Diff_bis(n_rep=n_rep)[:2]
# print(self.nori, self.length)
return x, y * self.nori / self.length * self.p_on * self.p_v / self.dt_speed
def get_rep_profile(self, allp=True):
v = self.try_load_property("get_rep_profile")
if v is not None:
return v
rep = []
repall = []
for il, l in enumerate(self.lengths):
rep.append(np.zeros(l))
repall.append([])
Nsim = len(self.aRps)
for sim in range(Nsim):
rep[il] += np.array(self.aRps[sim][il]) / Nsim
repall[-1].append(np.array(self.aRps[sim][il]))
if allp:
return rep, repall
return rep
def get_mean_copie(self, time):
copie = []
std_copie = []
rep_t = self.get_times_replication()
for il, l in enumerate(self.lengths):
# print(l)
Nsim = len(self.aRps) - rep_t.tolist().count(-1)
copie.append(np.ones((Nsim, l)))
for sim, time_rep in enumerate(rep_t):
if time_rep != -1:
# print("th")
copie[il][sim, np.array(self.aRps[sim][il] * self.dte) < time] = 2
sim += 1
std_copie.append(np.std(copie[il], axis=0))
copie[il] = np.mean(copie[il], axis=0)
return copie, std_copie
def Its(self, n_rep=None, recompute=False, cut=0):
v = self.try_load_property("Its")
if v is not None:
# print("Pre")
return v
if cut != 0 and recompute is False:
print("Warning Its does not consider cut")
elif cut != 0 and recompute is True:
print("Cut Its considered")
if recompute:
NF = self.get_quant("anIts", n_rep=n_rep, cut=cut)[3]
self.tUNrs = np.sum(np.array(self.aUnrs), axis=1)
x, _, _, Unr = self.get_quant("tUNrs", n_rep=n_rep)
Unr[Unr == 0] = np.nan
            y = np.nanmean(NF / Unr, axis=0)