| prompt (stringlengths 19-879k) | completion (stringlengths 3-53.8k) | api (stringlengths 8-59) |
|---|---|---|
'''
Diagnosing Colorectal Polyps in the Wild with Capsule Networks (D-Caps)
Original Paper by <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>
Paper published at ISBI 2020: arXiv version (https://arxiv.org/abs/2001.03305)
Code written by: <NAME>
If you use significant portions of this code or the ideas from our paper, please cite it :)
If you have any questions, please email me at <EMAIL>.
This file handles everything data related: Loading the data, splitting it, etc.
'''
from __future__ import print_function
from collections import Counter
import os
from glob import glob
import csv
import cv2
from sklearn.model_selection import StratifiedKFold
import numpy as np
from sklearn.model_selection import train_test_split
from skimage.transform import resize
from tqdm import tqdm
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.ioff()
from utils import safe_mkdir
debug = False
def load_data(root, exp_name, exp=0, split=0, k_folds=4, val_split=0.1):
# Main functionality for loading and splitting the data
def _load_data():
with open(os.path.join(root, 'split_lists', exp_name, 'train_split_' + str(split) + '.csv'), 'r') as f:
reader = csv.reader(f)
training_list = list(reader)
with open(os.path.join(root, 'split_lists', exp_name, 'test_split_' + str(split) + '.csv'), 'r') as f:
reader = csv.reader(f)
test_list = list(reader)
X, y = np.hsplit(np.asarray(training_list), 2)
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=val_split, random_state=12, stratify=y)
new_train_list, val_list = np.hstack((X_train, y_train)), np.hstack((X_val, y_val))
return new_train_list, val_list, np.asarray(test_list)
# If the split list files are not found, create them via split_data() and retry the load.
try:
new_training_list, validation_list, testing_list = _load_data()
return new_training_list, validation_list, testing_list
except (IOError, OSError):
# Create the training and test splits if not found
split_data(root, exp_name, exp_num=exp, num_splits=k_folds)
try:
new_training_list, validation_list, testing_list = _load_data()
return new_training_list, validation_list, testing_list
except Exception as e:
print(e)
print('Failed to load data, see load_data in load_polyp_data.py')
exit(1)
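# --- Editor's illustration (not part of the original D-Caps code) ---
# A minimal sketch of how load_data() is typically driven, assuming a dataset root laid
# out as <root>/Images/<class>/<patient> and an experiment name such as 'HPvsTA'.
# The path and experiment name below are placeholders, not project defaults.
def _example_load_data_usage(data_root='/path/to/polyp_data', experiment='HPvsTA'):
    train_list, val_list, test_list = load_data(root=data_root, exp_name=experiment,
                                                exp=0, split=0, k_folds=4, val_split=0.1)
    print('Patients per split - train: {}, val: {}, test: {}'.format(
        len(train_list), len(val_list), len(test_list)))
    return train_list, val_list, test_list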
def split_data_for_flow(root, out_dir, exp_name, resize_option, resize_shape, train_list, val_list, test_list):
def _load_imgs(data_list, phase):
data_info_list = []
for patient_num_label in tqdm(data_list, desc=phase):
files = []
for ext in ('*.jpg', '*.JPG', '*.tif', '*.tiff', '*.png', '*.PNG'):
files.extend(sorted(glob(os.path.join(root, patient_num_label[0], ext).replace('\\', '/'))))
if not files:
print('WARNING: No images found in {}. Ensure the dataset root and the split lists '
'point to valid image directories.'.format(os.path.join(root, patient_num_label[0])))
for f in files:
img = cv2.imread(f)
try:
img = img.astype(np.float32)
except AttributeError:  # cv2.imread returns None for unreadable files
print('Unable to load image: {}. Please check the file for corruption.'.format(f))
continue
data_info_list.append([f, patient_num_label[1], img.shape[0], img.shape[1]])
# Balance sample amounts
np_data_list = np.asarray(data_info_list)
if phase == 'Load_train':
n_classes = len(np.unique(np_data_list[:,1]))
max_samples = 0
split_np_list = []
for n in range(n_classes):
split_np_list.append(np_data_list[np_data_list[:, 1] == '{}'.format(n)])
amt = len(split_np_list[n])
if amt > max_samples:
max_samples = amt
out_list = np.empty((n_classes * max_samples,5), dtype='|S255')
for n in range(n_classes):
res_lis = np.resize(split_np_list[n],(max_samples,4))
out_list[n*max_samples:(n+1)*max_samples,:] = np.hstack((res_lis, np.expand_dims(res_lis[:,0],-1)))
counts = Counter(out_list[:, 4])
for s, num in tqdm(counts.items(), desc='Renaming duplicate images'):
if num > 1: # ignore strings that only appear once
for suffix in range(1, num + 1): # suffix starts at 1 and increases by 1 each time
out_list[out_list[:, 4].tolist().index(s), 4] = \
'{}_{}.{}'.format(s.decode('utf-8').replace('\\', '/')[:-4],
suffix, s.decode('utf-8').replace('\\', '/')[-3:]) # replace each appearance of s
return out_list
else:
return np.hstack((np_data_list, np.expand_dims(np_data_list[:,0],-1)))
def _compute_out_shape(height_list, width_list):
if resize_option == 'resize_max':
out_shape = [np.max(height_list), np.max(width_list)]
elif resize_option == 'resize_min' or resize_option == 'crop_min':
out_shape = [np.min(height_list), np.min(width_list)]
elif resize_option == 'resize_avg':
out_shape = [int(np.mean(height_list)), int(np.mean(width_list))]
elif resize_option == 'resize_std':
out_shape = resize_shape
else:
raise NotImplementedError(
'Error: Encountered resize choice which is not implemented in load_polyp_data.py.')
if resize_shape[0] is not None:
out_shape[0] = resize_shape[0]
if resize_shape[1] is not None:
out_shape[1] = resize_shape[1]
return out_shape[0], out_shape[1]
def _random_crop(img, crop_shape, mask=None):
assert img.shape[2] == 3
height, width = img.shape[0], img.shape[1]
dy, dx = crop_shape
if dy is None:
dy = height
if dx is None:
dx = width
x = np.random.randint(0, width - dx + 1)
y = np.random.randint(0, height - dy + 1)
if mask is not None:
return [img[y:(y+dy), x:(x+dx), :], mask[y:(y+dy), x:(x+dx)]]
else:
return [img[y:(y + dy), x:(x + dx), :]]
def _save_imgs(lst, phase, hei, wid):
class_list = exp_name.split('vs')
class_map = dict()
for k, v in enumerate(class_list):
class_map[k] = '{}_{}'.format(k,v)
try:
safe_mkdir(os.path.join(out_dir, phase, class_map[k]))
except:
pass
for i, f in enumerate(tqdm(lst, desc='Creating {} images'.format(phase))):
img_out_name = os.path.join(out_dir, phase, class_map[int(f[1])],
'{}_{}.jpg'.format(os.path.basename(os.path.dirname(f[4])),
os.path.basename(f[4])[:-4])).replace('\\', '/')
if not os.path.isfile(img_out_name):
try:
im = cv2.imread(f[0].decode('utf-8'))
except AttributeError:
im = cv2.imread(f[0])
try:
im = im.astype(np.float32)
except AttributeError:  # cv2.imread returns None for unreadable files
print('Unable to load image: {}. Please check the file for corruption.'.format(f[0]))
continue
if im.shape[0] != hei or im.shape[1] != wid:
if resize_option == 'crop_min':
out_im = _random_crop(im, (hei,wid))[0]  # _random_crop returns a list; take the cropped image
else:
out_im = resize(im, (hei,wid), mode='reflect', preserve_range=True)
else:
out_im = im
cv2.imwrite(img_out_name, out_im)
def _compute_num_images():
n_train = len(glob(os.path.join(out_dir, 'train', '*', '*.jpg')))
n_val = len(glob(os.path.join(out_dir, 'val', '*', '*.jpg')))
n_test = len(glob(os.path.join(out_dir, 'test', '*', '*.jpg')))
return n_train, n_val, n_test
train_info_array = np.asarray(_load_imgs(train_list, 'Load_train'))
val_info_array = np.asarray(_load_imgs(val_list, 'Load_val'))
test_info_array = np.asarray(_load_imgs(test_list, 'Load_test'))
train_height, train_width = _compute_out_shape(train_info_array[:,2].astype(int), train_info_array[:,3].astype(int))
val_height, val_width = _compute_out_shape(val_info_array[:,2].astype(int), val_info_array[:,3].astype(int))
test_height, test_width = _compute_out_shape(test_info_array[:,2].astype(int), test_info_array[:,3].astype(int))
_save_imgs(train_info_array, 'train', train_height, train_width)
_save_imgs(val_info_array, 'val', val_height, val_width)
_save_imgs(test_info_array, 'test', test_height, test_width)
num_train, num_val, num_test = _compute_num_images()
return num_train, [train_height, train_width], num_val, [val_height, val_width], \
num_test, [test_height, test_width]
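# --- Editor's sketch (not in the original file) ---
# Toy illustration of the class-balancing trick used in _load_imgs above: np.resize
# tiles the rows of a minority class until it matches the size of the largest class.
# The arrays below are made-up placeholders.
def _balance_by_tiling_demo():
    class_a = np.array([['a1.jpg', '0'], ['a2.jpg', '0'], ['a3.jpg', '0']])
    class_b = np.array([['b1.jpg', '1']])  # minority class
    max_samples = max(len(class_a), len(class_b))
    tiled_b = np.resize(class_b, (max_samples, 2))  # b1.jpg repeated until sizes match
    return np.vstack((class_a, tiled_b))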
def split_data(root_path, exp_name, exp_num, num_splits=4):
patient_list = []
patient_list.extend(sorted(glob(os.path.join(root_path,'Images','*', '*'))))
assert len(patient_list) != 0, 'Unable to find any files in {}'.format(os.path.join(root_path,'Images','*','*'))
label_list = []
for patient_num in patient_list:
img_type = os.path.basename(os.path.dirname(patient_num))
if img_type == 'Normal':
label_list.append(0)
elif img_type == 'HP' or img_type == 'Hyperplastic':
label_list.append(1)
elif img_type == 'Serrated' or img_type == 'SSA':
label_list.append(2)
elif img_type == 'TA' or img_type == 'TVA' or img_type == 'Adenoma':
label_list.append(3)
elif img_type == 'Cancer':
label_list.append(4)
elif img_type == 'NewAdenomas':
label_list.append(5)
pass # This is a holdout testing set. Do not add it to training, val, or testing; it is only used after cross-validation is complete.
else:
raise Exception('Encountered unknown image type: {}'.format(img_type))
outdir = os.path.join(root_path, 'split_lists', exp_name)
try:
safe_mkdir(outdir)
except:
pass
patient_list = np.asarray(patient_list)
label_list = np.asarray(label_list)
if exp_num == 0:
to_delete = np.append(np.argwhere(label_list==0), np.append(np.argwhere(label_list==2), np.append(np.argwhere(label_list==4), np.argwhere(label_list==5))))
final_img_list = np.delete(patient_list, to_delete)
final_label_list = np.delete(label_list, to_delete)
final_label_list[final_label_list==1] = 0
final_label_list[final_label_list==3] = 1
elif exp_num == 1:
to_delete = np.append(np.argwhere(label_list==0), np.append(np.argwhere(label_list==4), np.argwhere(label_list==5)))
final_img_list = np.delete(patient_list, to_delete)
final_label_list = np.delete(label_list, to_delete)
final_label_list[final_label_list==1] = 0
final_label_list[final_label_list==2] = 1
final_label_list[final_label_list==3] = 1
elif exp_num == 2:
to_delete = np.append(np.argwhere(label_list==0), np.append(np.argwhere(label_list==3), np.append(np.argwhere(label_list==4), np.argwhere(label_list==5))))
final_img_list = | np.delete(patient_list, to_delete) | numpy.delete |
import numpy as np
import matplotlib.pyplot as plt
import scipy as sp
from scipy import stats
import pandas as pd
###############################################################################################################
###############################################################################################################
def reg_corr_plot():
class LinearRegression:
def __init__(self, beta1, beta2, error_scale, data_size):
self.beta1 = beta1
self.beta2 = beta2
self.error_scale = error_scale
self.x = np.random.randint(1, data_size, data_size)
self.y = self.beta1 + self.beta2*self.x + self.error_scale*np.random.randn(data_size)
def x_y_cor(self):
return np.corrcoef(self.x, self.y)[0, 1]
fig, ax = plt.subplots(nrows = 2, ncols = 4,figsize=(24, 12))
beta1, beta2, error_scale,data_size = 2, .05, 1, 100
lrg1 = LinearRegression(beta1, beta2, error_scale, data_size)
ax[0, 0].scatter(lrg1.x, lrg1.y)
ax[0, 0].plot(lrg1.x, beta1 + beta2*lrg1.x, color = '#FA954D', alpha = .7)
ax[0, 0].set_title(r'$Y={}+{}X+{}u$'.format(beta1, beta2, error_scale))
ax[0, 0].annotate(r'$\rho={:.4}$'.format(lrg1.x_y_cor()), xy=(0.1, 0.9), xycoords='axes fraction')
beta1, beta2, error_scale,data_size = 2, -.6, 1, 100
lrg2 = LinearRegression(beta1, beta2, error_scale, data_size)
ax[0, 1].scatter(lrg2.x, lrg2.y)
ax[0, 1].plot(lrg2.x, beta1 + beta2*lrg2.x, color = '#FA954D', alpha = .7)
ax[0, 1].set_title(r'$Y={}+{}X+{}u$'.format(beta1, beta2, error_scale))
ax[0, 1].annotate(r'$\rho={:.4}$'.format(lrg2.x_y_cor()), xy=(0.1, 0.9), xycoords='axes fraction')
beta1, beta2, error_scale,data_size = 2, 1, 1, 100
lrg3 = LinearRegression(beta1, beta2, error_scale, data_size)
ax[0, 2].scatter(lrg3.x, lrg3.y)
ax[0, 2].plot(lrg3.x, beta1 + beta2 * lrg3.x, color = '#FA954D', alpha = .7)
ax[0, 2].set_title(r'$Y={}+{}X+{}u$'.format(beta1, beta2, error_scale))
ax[0, 2].annotate(r'$\rho={:.4}$'.format(lrg3.x_y_cor()), xy=(0.1, 0.9), xycoords='axes fraction')
beta1, beta2, error_scale,data_size = 2, 3, 1, 100
lrg4 = LinearRegression(beta1, beta2, error_scale, data_size)
ax[0, 3].scatter(lrg4.x, lrg4.y)
ax[0, 3].plot(lrg4.x, beta1 + beta2 * lrg4.x, color = '#FA954D', alpha = .7)
ax[0, 3].set_title(r'$Y={}+{}X+{}u$'.format(beta1, beta2, error_scale))
ax[0, 3].annotate(r'$\rho={:.4}$'.format(lrg4.x_y_cor()), xy=(0.1, 0.9), xycoords='axes fraction')
beta1, beta2, error_scale,data_size = 2, 3, 3, 100
lrg5 = LinearRegression(beta1, beta2, error_scale, data_size)
ax[1, 0].scatter(lrg5.x, lrg5.y)
ax[1, 0].plot(lrg5.x, beta1 + beta2 * lrg5.x, color = '#FA954D', alpha = .7)
ax[1, 0].set_title(r'$Y={}+{}X+{}u$'.format(beta1, beta2, error_scale))
ax[1, 0].annotate(r'$\rho={:.4}$'.format(lrg5.x_y_cor()), xy=(0.1, 0.9), xycoords='axes fraction')
beta1, beta2, error_scale,data_size = 2, 3, 10, 100
lrg6 = LinearRegression(beta1, beta2, error_scale, data_size)
ax[1, 1].scatter(lrg6.x, lrg6.y)
ax[1, 1].plot(lrg6.x, beta1 + beta2 * lrg6.x, color = '#FA954D', alpha = .7)
ax[1, 1].set_title(r'$Y={}+{}X+{}u$'.format(beta1, beta2, error_scale))
ax[1, 1].annotate(r'$\rho={:.4}$'.format(lrg6.x_y_cor()), xy=(0.1, 0.9), xycoords='axes fraction')
beta1, beta2, error_scale,data_size = 2, 3, 20, 100
lrg7 = LinearRegression(beta1, beta2, error_scale, data_size)
ax[1, 2].scatter(lrg7.x, lrg7.y)
ax[1, 2].plot(lrg7.x, beta1 + beta2 * lrg7.x, color = '#FA954D', alpha = .7)
ax[1, 2].set_title(r'$Y={}+{}X+{}u$'.format(beta1, beta2, error_scale))
ax[1, 2].annotate(r'$\rho={:.4}$'.format(lrg7.x_y_cor()), xy=(0.1, 0.9), xycoords='axes fraction')
beta1, beta2, error_scale,data_size = 2, 3, 50, 100
lrg8 = LinearRegression(beta1, beta2, error_scale, data_size)
ax[1, 3].scatter(lrg8.x, lrg8.y)
ax[1, 3].plot(lrg8.x, beta1 + beta2 * lrg8.x, color = '#FA954D', alpha = .7)
ax[1, 3].set_title(r'$Y={}+{}X+{}u$'.format(beta1, beta2, error_scale))
ax[1, 3].annotate(r'$\rho={:.4}$'.format(lrg8.x_y_cor()), xy=(0.1, 0.9), xycoords='axes fraction')
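# --- Editor's sketch (not in the original code) ---
# The eight panels above differ only in (beta1, beta2, error_scale), so the same figure
# can be generated by looping over a list of parameter tuples; values mirror the ones used above.
def reg_corr_plot_compact():
    configs = [(2, .05, 1), (2, -.6, 1), (2, 1, 1), (2, 3, 1),
               (2, 3, 3), (2, 3, 10), (2, 3, 20), (2, 3, 50)]
    fig, axes = plt.subplots(nrows=2, ncols=4, figsize=(24, 12))
    for ax, (b1, b2, scale) in zip(axes.ravel(), configs):
        x = np.random.randint(1, 100, 100)
        y = b1 + b2 * x + scale * np.random.randn(100)
        ax.scatter(x, y)
        ax.plot(x, b1 + b2 * x, color='#FA954D', alpha=.7)
        ax.set_title(r'$Y={}+{}X+{}u$'.format(b1, b2, scale))
        ax.annotate(r'$\rho={:.4}$'.format(np.corrcoef(x, y)[0, 1]),
                    xy=(0.1, 0.9), xycoords='axes fraction')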
###############################################################################################################
###############################################################################################################
def central_limit_theorem_plot():
fig, ax = plt.subplots(4, 3, figsize = (20, 20))
########################################################################################
x = np.linspace(2, 8, 100)
a = 2 # range of uniform distribution
b = 8
unif_pdf = np.ones(len(x)) * 1/(b-a)
ax[0, 0].plot(x, unif_pdf, lw = 3, color = 'r')
ax[0, 0].plot([x[0],x[0]],[0, 1/(b-a)], lw = 3, color = 'r', alpha = .9) # vertical line
ax[0, 0].plot([x[-1],x[-1]],[0, 1/(b-a)], lw = 3, color = 'r', alpha = .9)
ax[0, 0].fill_between(x, 1/(b-a), 0, alpha = .5, color = 'r')
ax[0, 0].set_xlim([1, 9])
ax[0, 0].set_ylim([0, .4])
ax[0, 0].set_title('Uniform Distribution', size = 18)
ax[0, 0].set_ylabel('Population Distribution', size = 12)
########################################################################################
ss = 2 #sample size
unif_sample_mean = np.zeros(1000)
for i in range(1000):
unif_sample = np.random.uniform(a, b, ss)  # sample from the same U(a, b) population plotted above
unif_sample_mean[i] = np.mean(unif_sample)
ax[1, 0].hist(unif_sample_mean, bins = 20, color = 'r', alpha = .5)
ax[1, 0].set_ylabel('Sample Distribution: $n = 2$', size = 12)
########################################################################################
ss = 10 #sample size
unif_sample_mean = | np.zeros(1000) | numpy.zeros |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright ยฉ 2017 <NAME> <<EMAIL>>
#
# Distributed under terms of the MIT license.
import os
import time
import math
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.collections
import matplotlib.colors
import fiona
import shapely
import shapely.geometry
import shapely.ops
import pandas as pd
import numpy as np
from pysal.esda.mapclassify import Equal_Interval, Fisher_Jenks, Maximum_Breaks, Natural_Breaks, Quantiles, Percentiles
from .BasemapUtils import BasemapWrapper, PolygonPatchesWrapper, getBounds, getShapefileColumn, DEFAULT_CACHE_LOCATION
def getUSMercatorBounds():
lats = (24.39, 49.38) #southern point, northern point
lons = (-124.85, -66.89) #western point, eastern point
return lats, lons
def showCmap(cmap):
fig,ax = plt.subplots(1,1,figsize=(5,3))
norm = matplotlib.colors.Normalize(vmin=0, vmax=2)
scalarMap = matplotlib.cm.ScalarMappable(norm=norm, cmap=cmap)
cbaxes = fig.add_axes([0, -0.1, 1.0, 0.1], frameon=False)
colorbar = matplotlib.colorbar.ColorbarBase(
cbaxes,
cmap=cmap,
norm=norm,
orientation='horizontal'
)
colorbar.outline.set_visible(True)
colorbar.outline.set_linewidth(0.5)
colorbar.set_ticks([0,1,2])
colorbar.set_ticklabels(["Small","Medium","Large"])
colorbar.ax.tick_params(labelsize=12,labelcolor='k',direction='inout',width=2,length=6)
color = scalarMap.to_rgba(1)
img = np.zeros((10,10,3), dtype=float)
img[:,:,0] += color[0]
img[:,:,1] += color[1]
img[:,:,2] += color[2]
ax.imshow(img)
plt.show()
plt.close()
def discretizeCmap(n, base="Reds"):
'''Creates a cmap with n colors sampled from the given base cmap
'''
cmap = matplotlib.cm.get_cmap(base, n)
cmaplist = [cmap(i) for i in range(cmap.N)]
# We can customize the colors of the discrete cmap
#cmaplist[0] = (0.0, 0.0, 1.0, 1.0)
#cmaplist[-1] = (1.0, 1.0, 1.0, 1.0)
cmap = cmap.from_list('Custom cmap', cmaplist, cmap.N)
return cmap
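# --- Editor's sketch (not in the original module) ---
# Example of pairing discretizeCmap with a BoundaryNorm so that n class breaks
# (e.g., produced by one of the pysal classifiers imported above) map onto the n colors.
# The break values here are arbitrary placeholders.
def _discrete_cmap_demo(breaks=(0, 10, 20, 50, 100)):
    n = len(breaks) - 1
    cmap = discretizeCmap(n, base="Reds")
    norm = matplotlib.colors.BoundaryNorm(boundaries=list(breaks), ncolors=n)
    return cmap, norm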
def getLogTickLabels(minVal, maxVal, positive=True):
ticks = []
tickLabels = []
if minVal == 0:
bottomLog = 0
ticks.append(0)
tickLabels.append("$0$")
else:
bottomLog = int(math.floor( | np.log10(minVal) | numpy.log10 |
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import logging
import os
import time
from unittest import TestCase
import numpy as np
import pytest
import torch
import pyro
import pyro.distributions as dist
import pyro.optim as optim
from pyro.distributions.testing import fakes
from pyro.infer import SVI, TraceGraph_ELBO
from pyro.poutine import Trace
from tests.common import assert_equal
logger = logging.getLogger(__name__)
def param_mse(name, target):
return torch.sum(torch.pow(target - pyro.param(name), 2.0)).detach().cpu().item()
class GaussianChain(TestCase):
# chain of normals with known covariances and latent means
def setUp(self):
self.loc0 = torch.tensor([0.2])
self.data = torch.tensor([-0.1, 0.03, 0.20, 0.10])
self.n_data = self.data.size(0)
self.sum_data = self.data.sum()
def setup_chain(self, N):
self.N = N # number of latent variables in the chain
lambdas = [1.5 * (k + 1) / N for k in range(N + 1)]
self.lambdas = list(map(lambda x: torch.tensor([x]), lambdas))
self.lambda_tilde_posts = [self.lambdas[0]]
for k in range(1, self.N):
lambda_tilde_k = (self.lambdas[k] * self.lambda_tilde_posts[k - 1]) /\
(self.lambdas[k] + self.lambda_tilde_posts[k - 1])
self.lambda_tilde_posts.append(lambda_tilde_k)
self.lambda_posts = [None] # this is never used (just a way of shifting the indexing by 1)
for k in range(1, self.N):
lambda_k = self.lambdas[k] + self.lambda_tilde_posts[k - 1]
self.lambda_posts.append(lambda_k)
lambda_N_post = (self.n_data * torch.tensor(1.0).expand_as(self.lambdas[N]) * self.lambdas[N]) +\
self.lambda_tilde_posts[N - 1]
self.lambda_posts.append(lambda_N_post)
self.target_kappas = [None]
self.target_kappas.extend([self.lambdas[k] / self.lambda_posts[k] for k in range(1, self.N)])
self.target_mus = [None]
self.target_mus.extend([self.loc0 * self.lambda_tilde_posts[k - 1] / self.lambda_posts[k]
for k in range(1, self.N)])
target_loc_N = self.sum_data * self.lambdas[N] / lambda_N_post +\
self.loc0 * self.lambda_tilde_posts[N - 1] / lambda_N_post
self.target_mus.append(target_loc_N)
self.which_nodes_reparam = self.setup_reparam_mask(N)
# controls which nodes are reparameterized
def setup_reparam_mask(self, N):
while True:
mask = torch.bernoulli(0.30 * torch.ones(N))
if torch.sum(mask) < 0.40 * N and torch.sum(mask) > 0.5:
return mask
def model(self, reparameterized, difficulty=0.0):
next_mean = self.loc0
for k in range(1, self.N + 1):
latent_dist = dist.Normal(next_mean, torch.pow(self.lambdas[k - 1], -0.5))
loc_latent = pyro.sample("loc_latent_%d" % k, latent_dist)
next_mean = loc_latent
loc_N = next_mean
with pyro.plate("data", self.data.size(0)):
pyro.sample("obs", dist.Normal(loc_N,
torch.pow(self.lambdas[self.N], -0.5)), obs=self.data)
return loc_N
def guide(self, reparameterized, difficulty=0.0):
previous_sample = None
for k in reversed(range(1, self.N + 1)):
loc_q = pyro.param("loc_q_%d" % k, self.target_mus[k].detach() + difficulty * (0.1 * torch.randn(1) - 0.53))
log_sig_q = pyro.param("log_sig_q_%d" % k, -0.5 * torch.log(self.lambda_posts[k]).data +
difficulty * (0.1 * torch.randn(1) - 0.53))
sig_q = torch.exp(log_sig_q)
kappa_q = None
if k != self.N:
kappa_q = pyro.param("kappa_q_%d" % k, self.target_kappas[k].data +
difficulty * (0.1 * torch.randn(1) - 0.53))
mean_function = loc_q if k == self.N else kappa_q * previous_sample + loc_q
node_flagged = True if self.which_nodes_reparam[k - 1] == 1.0 else False
Normal = dist.Normal if reparameterized or node_flagged else fakes.NonreparameterizedNormal
loc_latent = pyro.sample("loc_latent_%d" % k, Normal(mean_function, sig_q),
infer=dict(baseline=dict(use_decaying_avg_baseline=True)))
previous_sample = loc_latent
return previous_sample
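# --- Editor's note (not part of the original test) ---
# The lambda_tilde recursion in setup_chain combines precisions the way variances add:
# e.g. precisions 0.5 and 1.0 give a marginal variance of 1/0.5 + 1/1.0 = 3, i.e. a
# combined precision of 1/3, which is exactly (0.5 * 1.0) / (0.5 + 1.0).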
@pytest.mark.stage("integration", "integration_batch_1")
@pytest.mark.init(rng_seed=0)
class GaussianChainTests(GaussianChain):
def test_elbo_reparameterized_N_is_3(self):
self.setup_chain(3)
self.do_elbo_test(True, 1100, 0.0058, 0.03, difficulty=1.0)
def test_elbo_reparameterized_N_is_8(self):
self.setup_chain(8)
self.do_elbo_test(True, 1100, 0.0059, 0.03, difficulty=1.0)
@pytest.mark.skipif("CI" in os.environ and os.environ["CI"] == "true",
reason="Skip slow test in travis.")
def test_elbo_reparameterized_N_is_17(self):
self.setup_chain(17)
self.do_elbo_test(True, 2700, 0.0044, 0.03, difficulty=1.0)
def test_elbo_nonreparameterized_N_is_3(self):
self.setup_chain(3)
self.do_elbo_test(False, 1700, 0.0049, 0.04, difficulty=0.6)
def test_elbo_nonreparameterized_N_is_5(self):
self.setup_chain(5)
self.do_elbo_test(False, 1000, 0.0061, 0.06, difficulty=0.6)
@pytest.mark.skipif("CI" in os.environ and os.environ["CI"] == "true",
reason="Skip slow test in travis.")
def test_elbo_nonreparameterized_N_is_7(self):
self.setup_chain(7)
self.do_elbo_test(False, 1800, 0.0035, 0.05, difficulty=0.6)
def do_elbo_test(self, reparameterized, n_steps, lr, prec, difficulty=1.0):
n_repa_nodes = torch.sum(self.which_nodes_reparam) if not reparameterized else self.N
logger.info(" - - - - - DO GAUSSIAN %d-CHAIN ELBO TEST [reparameterized = %s; %d/%d] - - - - - " %
(self.N, reparameterized, n_repa_nodes, self.N))
if self.N < 0:
def array_to_string(y):
return str(map(lambda x: "%.3f" % x.detach().cpu().numpy()[0], y))
logger.debug("lambdas: " + array_to_string(self.lambdas))
logger.debug("target_mus: " + array_to_string(self.target_mus[1:]))
logger.debug("target_kappas: " + array_to_string(self.target_kappas[1:]))
logger.debug("lambda_posts: " + array_to_string(self.lambda_posts[1:]))
logger.debug("lambda_tilde_posts: " + array_to_string(self.lambda_tilde_posts))
pyro.clear_param_store()
adam = optim.Adam({"lr": lr, "betas": (0.95, 0.999)})
elbo = TraceGraph_ELBO()
loss_and_grads = elbo.loss_and_grads
# loss_and_grads = elbo.jit_loss_and_grads # This fails.
svi = SVI(self.model, self.guide, adam, loss=elbo.loss, loss_and_grads=loss_and_grads)
for step in range(n_steps):
t0 = time.time()
svi.step(reparameterized=reparameterized, difficulty=difficulty)
if step % 5000 == 0 or step == n_steps - 1:
kappa_errors, log_sig_errors, loc_errors = [], [], []
for k in range(1, self.N + 1):
if k != self.N:
kappa_error = param_mse("kappa_q_%d" % k, self.target_kappas[k])
kappa_errors.append(kappa_error)
loc_errors.append(param_mse("loc_q_%d" % k, self.target_mus[k]))
log_sig_error = param_mse("log_sig_q_%d" % k, -0.5 * torch.log(self.lambda_posts[k]))
log_sig_errors.append(log_sig_error)
max_errors = (np.max(loc_errors), np.max(log_sig_errors), np.max(kappa_errors))
min_errors = (np.min(loc_errors), np.min(log_sig_errors), np.min(kappa_errors))
mean_errors = (np.mean(loc_errors), | np.mean(log_sig_errors) | numpy.mean |
"""
Unit tests for trust-region iterative subproblem.
To run it in its simplest form::
nosetests test_optimize.py
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.optimize._trustregion_exact import (
estimate_smallest_singular_value,
singular_leading_submatrix,
IterativeSubproblem)
from scipy.linalg import (svd, get_lapack_funcs, det, qr, norm)
from numpy.testing import (assert_array_equal,
assert_equal, assert_array_almost_equal)
def random_entry(n, min_eig, max_eig, case):
# Generate random matrix
rand = np.random.uniform(-1, 1, (n, n))
# QR decomposition
Q, _, _ = qr(rand, pivoting=True)
# Generate random eigenvalues
eigvalues = np.random.uniform(min_eig, max_eig, n)
eigvalues = np.sort(eigvalues)[::-1]
# Generate matrix
Qaux = np.multiply(eigvalues, Q)
A = np.dot(Qaux, Q.T)
# Generate the gradient vector according to
# the case being tested.
if case == 'hard':
g = np.zeros(n)
g[:-1] = np.random.uniform(-1, 1, n-1)
g = np.dot(Q, g)
elif case == 'jac_equal_zero':
g = np.zeros(n)
else:
g = np.random.uniform(-1, 1, n)
return A, g
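# --- Editor's sketch (not in the original test file) ---
# Sanity check of the construction above: A = Q.diag(eigvalues).Q^T, so its spectrum
# should lie inside [min_eig, max_eig] (up to floating point error).
def _check_random_entry_spectrum(n=5, min_eig=1.0, max_eig=10.0):
    A, _ = random_entry(n, min_eig, max_eig, case='easy')
    eigs = np.linalg.eigvalsh(A)
    assert eigs.min() >= min_eig - 1e-8 and eigs.max() <= max_eig + 1e-8
    return eigs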
class TestEstimateSmallestSingularValue(object):
def test_for_ill_conditioned_matrix(self):
# Ill-conditioned triangular matrix
C = np.array([[1, 2, 3, 4],
[0, 0.05, 60, 7],
[0, 0, 0.8, 9],
[0, 0, 0, 10]])
# Get svd decomposition
U, s, Vt = svd(C)
# Get smallest singular value and correspondent right singular vector.
smin_svd = s[-1]
zmin_svd = Vt[-1, :]
# Estimate smallest singular value
smin, zmin = estimate_smallest_singular_value(C)
# Check the estimation
assert_array_almost_equal(smin, smin_svd, decimal=8)
assert_array_almost_equal(abs(zmin), abs(zmin_svd), decimal=8)
class TestSingularLeadingSubmatrix(object):
def test_for_already_singular_leading_submatrix(self):
# Define test matrix A.
# Note that the leading 2x2 submatrix is singular.
A = np.array([[1, 2, 3],
[2, 4, 5],
[3, 5, 6]])
# Get Cholesky from lapack functions
cholesky, = get_lapack_funcs(('potrf',), (A,))
# Compute Cholesky Decomposition
c, k = cholesky(A, lower=False, overwrite_a=False, clean=True)
delta, v = singular_leading_submatrix(A, c, k)
A[k-1, k-1] += delta
# Check if the leading submatrix is singular.
assert_array_almost_equal(det(A[:k, :k]), 0)
# Check if `v` fulfills the specified properties
quadratic_term = np.dot(v, np.dot(A, v))
assert_array_almost_equal(quadratic_term, 0)
def test_for_symmetric_indefinite_matrix(self):
# Define test matrix A.
# Note that the leading 5x5 submatrix is indefinite.
A = np.asarray([[1, 2, 3, 7, 8],
[2, 5, 5, 9, 0],
[3, 5, 11, 1, 2],
[7, 9, 1, 7, 5],
[8, 0, 2, 5, 8]])
# Get Cholesky from lapack functions
cholesky, = get_lapack_funcs(('potrf',), (A,))
# Compute Cholesky Decomposition
c, k = cholesky(A, lower=False, overwrite_a=False, clean=True)
delta, v = singular_leading_submatrix(A, c, k)
A[k-1, k-1] += delta
# Check if the leading submatrix is singular.
assert_array_almost_equal(det(A[:k, :k]), 0)
# Check if `v` fulfills the specified properties
quadratic_term = np.dot(v, np.dot(A, v))
assert_array_almost_equal(quadratic_term, 0)
def test_for_first_element_equal_to_zero(self):
# Define test matrix A.
# Note that the leading 2x2 submatrix is singular.
A = np.array([[0, 3, 11],
[3, 12, 5],
[11, 5, 6]])
# Get Cholesky from lapack functions
cholesky, = get_lapack_funcs(('potrf',), (A,))
# Compute Cholesky Decomposition
c, k = cholesky(A, lower=False, overwrite_a=False, clean=True)
delta, v = singular_leading_submatrix(A, c, k)
A[k-1, k-1] += delta
# Check if the leading submatrix is singular
assert_array_almost_equal(det(A[:k, :k]), 0)
# Check if `v` fulfills the specified properties
quadratic_term = np.dot(v, np.dot(A, v))
assert_array_almost_equal(quadratic_term, 0)
class TestIterativeSubproblem(object):
def test_for_the_easy_case(self):
# `H` is chosen such that `g` is not orthogonal to the
# eigenvector associated with the smallest eigenvalue `s`.
H = [[10, 2, 3, 4],
[2, 1, 7, 1],
[3, 7, 1, 7],
[4, 1, 7, 2]]
g = [1, 1, 1, 1]
# Trust Radius
trust_radius = 1
# Solve Subproblem
subprob = IterativeSubproblem(x=0,
fun=lambda x: 0,
jac=lambda x: np.array(g),
hess=lambda x: np.array(H),
k_easy=1e-10,
k_hard=1e-10)
p, hits_boundary = subprob.solve(trust_radius)
assert_array_almost_equal(p, [0.00393332, -0.55260862,
0.67065477, -0.49480341])
assert_array_almost_equal(hits_boundary, True)
def test_for_the_hard_case(self):
# `H` is chosen such that `g` is orthogonal to the
# eigenvector associated with the smallest eigenvalue `s`.
H = [[10, 2, 3, 4],
[2, 1, 7, 1],
[3, 7, 1, 7],
[4, 1, 7, 2]]
g = [6.4852641521327437, 1, 1, 1]
s = -8.2151519874416614
# Trust Radius
trust_radius = 1
# Solve Subproblem
subprob = IterativeSubproblem(x=0,
fun=lambda x: 0,
jac=lambda x: np.array(g),
hess=lambda x: np.array(H),
k_easy=1e-10,
k_hard=1e-10)
p, hits_boundary = subprob.solve(trust_radius)
assert_array_almost_equal(-s, subprob.lambda_current)
def test_for_interior_convergence(self):
H = [[1.812159, 0.82687265, 0.21838879, -0.52487006, 0.25436988],
[0.82687265, 2.66380283, 0.31508988, -0.40144163, 0.08811588],
[0.21838879, 0.31508988, 2.38020726, -0.3166346, 0.27363867],
[-0.52487006, -0.40144163, -0.3166346, 1.61927182, -0.42140166],
[0.25436988, 0.08811588, 0.27363867, -0.42140166, 1.33243101]]
g = [0.75798952, 0.01421945, 0.33847612, 0.83725004, -0.47909534]
# Solve Subproblem
subprob = IterativeSubproblem(x=0,
fun=lambda x: 0,
jac=lambda x: np.array(g),
hess=lambda x: np.array(H))
p, hits_boundary = subprob.solve(1.1)
assert_array_almost_equal(p, [-0.68585435, 0.1222621, -0.22090999,
-0.67005053, 0.31586769])
assert_array_almost_equal(hits_boundary, False)
assert_array_almost_equal(subprob.lambda_current, 0)
assert_array_almost_equal(subprob.niter, 1)
def test_for_jac_equal_zero(self):
H = [[0.88547534, 2.90692271, 0.98440885, -0.78911503, -0.28035809],
[2.90692271, -0.04618819, 0.32867263, -0.83737945, 0.17116396],
[0.98440885, 0.32867263, -0.87355957, -0.06521957, -1.43030957],
[-0.78911503, -0.83737945, -0.06521957, -1.645709, -0.33887298],
[-0.28035809, 0.17116396, -1.43030957, -0.33887298, -1.68586978]]
g = [0, 0, 0, 0, 0]
# Solve Subproblem
subprob = IterativeSubproblem(x=0,
fun=lambda x: 0,
jac=lambda x: np.array(g),
hess=lambda x: np.array(H),
k_easy=1e-10,
k_hard=1e-10)
p, hits_boundary = subprob.solve(1.1)
assert_array_almost_equal(p, [0.06910534, -0.01432721,
-0.65311947, -0.23815972,
-0.84954934])
assert_array_almost_equal(hits_boundary, True)
def test_for_jac_very_close_to_zero(self):
H = [[0.88547534, 2.90692271, 0.98440885, -0.78911503, -0.28035809],
[2.90692271, -0.04618819, 0.32867263, -0.83737945, 0.17116396],
[0.98440885, 0.32867263, -0.87355957, -0.06521957, -1.43030957],
[-0.78911503, -0.83737945, -0.06521957, -1.645709, -0.33887298],
[-0.28035809, 0.17116396, -1.43030957, -0.33887298, -1.68586978]]
g = [0, 0, 0, 0, 1e-15]
# Solve Subproblem
subprob = IterativeSubproblem(x=0,
fun=lambda x: 0,
jac=lambda x: np.array(g),
hess=lambda x: np.array(H),
k_easy=1e-10,
k_hard=1e-10)
p, hits_boundary = subprob.solve(1.1)
assert_array_almost_equal(p, [0.06910534, -0.01432721,
-0.65311947, -0.23815972,
-0.84954934])
assert_array_almost_equal(hits_boundary, True)
def test_for_random_entries(self):
# Seed
np.random.seed(1)
# Dimension
n = 5
for case in ('easy', 'hard', 'jac_equal_zero'):
eig_limits = [(-20, -15),
(-10, -5),
(-10, 0),
(-5, 5),
(-10, 10),
(0, 10),
(5, 10),
(15, 20)]
for min_eig, max_eig in eig_limits:
# Generate random symmetric matrix H with
# eigenvalues between min_eig and max_eig.
H, g = random_entry(n, min_eig, max_eig, case)
# Trust radius
trust_radius_list = [0.1, 0.3, 0.6, 0.8, 1, 1.2, 3.3, 5.5, 10]
for trust_radius in trust_radius_list:
# Solve subproblem with very high accuracy
subprob_ac = IterativeSubproblem(0,
lambda x: 0,
lambda x: g,
lambda x: H,
k_easy=1e-10,
k_hard=1e-10)
p_ac, hits_boundary_ac = subprob_ac.solve(trust_radius)
# Compute objective function value
J_ac = 1/2*np.dot(p_ac, np.dot(H, p_ac))+np.dot(g, p_ac)
stop_criteria = [(0.1, 2),
(0.5, 1.1),
(0.9, 1.01)]
for k_opt, k_trf in stop_criteria:
# k_easy and k_hard computed as a function
# of k_opt and k_trf, following
# <NAME>., <NAME>., & <NAME>. (2000).
# "Trust region methods". Siam. p. 197.
k_easy = min(k_trf-1,
1-np.sqrt(k_opt))
k_hard = 1-k_opt
# Solve subproblem
subprob = IterativeSubproblem(0,
lambda x: 0,
lambda x: g,
lambda x: H,
k_easy=k_easy,
k_hard=k_hard)
p, hits_boundary = subprob.solve(trust_radius)
# Compute objective function value
J = 1/2*np.dot(p, np.dot(H, p))+ | np.dot(g, p) | numpy.dot |
# -*- coding: utf-8 -*-
# @Author: <NAME>
# @Date: 2020-04-20 17:22:06
# @Last Modified by: <NAME>
# @Last Modified time: 2020-05-27 16:29:46
import sys
import numpy as np
import matplotlib.pyplot as plt
from scipy.fftpack import dct
from scipy.io import wavfile
import tensorflow as tf
import edison.mfcc.mfcc_utils as mfu
import edison.mcu.mcu_util as mcu
# Settings
from config import *
cache_dir += '/mfcc_mcu/'
fname = cache_dir+'mel_constants.h'
######################################################################
# functions
######################################################################
def melMtxToUnspares(mel_mtx):
"""
Convert the sparse mel weight matrix into a compact ("unsparsed") form: for each output
mel bin keep only the contiguous run of nonzero weights, plus its start index and length.
melMtxCompact, melCompFStarts, melCompFCount = melMtxToUnspares(mel_mtx)
"""
melMtxCompact = []
melCompFStarts = []
melCompFCount = []
# Loop over columns; each column expresses one output mel bin as a linear
# combination of input frequency bins.
for ncol in range(mel_mtx.shape[1]):
vec = mel_mtx[:,ncol]
first_frq = vec.nonzero()[0][0]
nonzero_cnt = np.count_nonzero(vec)
vec_snippet = vec[first_frq:first_frq+nonzero_cnt]
melMtxCompact += vec_snippet.tolist()
melCompFStarts.append(first_frq)
melCompFCount.append(nonzero_cnt)
# if ncol == 0:
# print(vec)
# print(first_frq)
# print(nonzero_cnt)
# print(vec_snippet)
melMtxCompact = np.array(melMtxCompact, dtype='int16')
melCompFStarts = np.array(melCompFStarts, dtype='int16')
melCompFCount = np.array(melCompFCount, dtype='int16')
assert melCompFCount.sum() == melMtxCompact.size
print('Reduced Mel matrix from %d elements to %d (-%.1f%%)' %
(mel_mtx.size, melMtxCompact.size, 100-100.0/mel_mtx.size*melMtxCompact.size) )
return melMtxCompact, melCompFStarts, melCompFCount
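# --- Editor's sketch (not part of the original file) ---
# Reference implementation of how the compact form produced above is intended to be
# consumed (e.g., on the MCU side): for output mel bin n, dot the spectrum slice starting
# at melCompFStarts[n] with the next melCompFCount[n] entries of melMtxCompact.
def _apply_compact_mel(spectrum, melMtxCompact, melCompFStarts, melCompFCount):
    out = np.zeros(len(melCompFCount))
    offset = 0
    for n, (start, count) in enumerate(zip(melCompFStarts, melCompFCount)):
        out[n] = np.dot(spectrum[start:start + count],
                        melMtxCompact[offset:offset + count])
        offset += count
    return out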
def calcCConstants():
"""
Calculate the constants used by the C implementation.
"""
# calculate mel matrix
mel_mtx = mfu.gen_mel_weight_matrix(num_mel_bins=num_mel_bins, num_spectrogram_bins=num_spectrogram_bins, sample_rate=sample_rate, \
lower_edge_hertz=lower_edge_hertz, upper_edge_hertz=upper_edge_hertz)
print(type(mel_mtx))
print('mel matrix shape: %s' % (str(mel_mtx.shape)))
print('mel matrix bounds: min %f max %f' % (np.amin(mel_mtx), np.amax(mel_mtx)))
print('mel matrix nonzero elements : %d/%d' % (np.count_nonzero(mel_mtx), np.prod(mel_mtx.shape)) )
# Mel matrix is very sparse, could optimize this..
mel_mtx_s16 = np.array(mel_mtx_scale*mel_mtx, dtype='int16')
print('discrete mel matrix shape: %s' % (str(mel_mtx_s16.shape)))
print('discrete mel matrix bounds: min %f max %f' % (np.amin(mel_mtx_s16), np.amax(mel_mtx_s16)))
print('discrete mel matrix nonzero elements : %d/%d' % (np.count_nonzero(mel_mtx_s16), np.prod(mel_mtx_s16.shape)) )
# write mel matrix
mel_str = 'const int16_t melMtx[%d][%d] = \n' % (mel_mtx_s16.shape[0],mel_mtx_s16.shape[1])
mel_str += mcu.mtxToC(mel_mtx_s16, prepad=4)
mel_str += ';\n'
# calculate LUT for ln(x) for x in [0,32766]
log_lut = np.array(np.log(np.linspace(1e-6,32766,32767)), dtype='int16')
log_lug_str = 'const q15_t logLutq15[%d] = \n' % (32767)
log_lug_str += mcu.vecToC(log_lut, prepad = 4)
log_lug_str += ';\n'
# calculate scale vector for fast DCT by Makhoul1980
N = num_mel_bins
k = np.arange(N)
factors = 2 * np.exp(-1j*np.pi*k/(2*N))
factors_s16_real = np.array(mel_twiddle_scale*factors.real, dtype='int16')
factors_s16_imag = np.array(mel_twiddle_scale*factors.imag, dtype='int16')
factors_s16 = np.empty((factors_s16_real.size + factors_s16_imag.size,), dtype='int16')
factors_s16[0::2] = factors_s16_real
factors_s16[1::2] = factors_s16_imag
twiddle_factors_str = 'const q15_t dctTwiddleFactorsq15[%d] = \n' % (2*N)
twiddle_factors_str += mcu.vecToC(factors_s16, prepad = 4)
twiddle_factors_str += ';\n'
# unsparse method
melMtxCompact, melCompFStarts, melCompFCount = melMtxToUnspares(mel_mtx_s16)
mtxcomp_str = 'const q15_t melMtxCompact[%d] = \n' % (melMtxCompact.size)
mtxcomp_str += mcu.vecToC(melMtxCompact, prepad = 4)
mtxcomp_str += ';\n'
mtxcompfstart_str = 'const q15_t melCompFStarts[%d] = \n' % (melCompFStarts.size)
mtxcompfstart_str += mcu.vecToC(melCompFStarts, prepad = 4)
mtxcompfstart_str += ';\n'
mtxcompfcount_str = 'const q15_t melCompFCount[%d] = \n' % (melCompFCount.size)
mtxcompfcount_str += mcu.vecToC(melCompFCount, prepad = 4)
mtxcompfcount_str += ';\n'
f = open(fname, 'w')
f.write('#define MEL_SAMPLE_SIZE %5d\n' % sample_size)
f.write('#define MEL_N_MEL_BINS %5d\n' % num_mel_bins)
f.write('#define MEL_N_SPECTROGRAM_BINS %5d\n' % num_spectrogram_bins)
f.write('#define MEL_SAMPLE_RATE %5d\n' % sample_rate)
f.write('#define MEL_LOWER_EDGE_HZ %05.3f\n' % lower_edge_hertz)
f.write('#define MEL_UPPER_EDGE_HZ %05.3f\n' % upper_edge_hertz)
f.write('#define MEL_MTX_SCALE %5d\n\n' % mel_mtx_scale)
f.write('#define MEL_MTX_ROWS %5d\n' % mel_mtx_s16.shape[0])
f.write('#define MEL_MTX_COLS %5d\n' % mel_mtx_s16.shape[1])
f.write(mel_str)
f.write('#define MEL_LOG_LUT_SIZE %5d\n' % log_lut.shape[0])
f.write(log_lug_str)
f.write('#define MEL_DCT_TWIDDLE_SIZE %5d\n' % factors_s16.shape[0])
f.write(twiddle_factors_str)
f.write('\n\n// Compact mel matrix\n')
f.write(mtxcomp_str)
f.write(mtxcompfstart_str)
f.write(mtxcompfcount_str)
f.close()
######################################################################
# plottery
######################################################################
def plotCompare():
plt.style.use('seaborn-bright')
t = np.linspace(0, sample_size/fs, num=sample_size)
f = np.linspace(0.0, fs/2.0, sample_size//2)
f2 = np.linspace(-fs/2, fs/2, sample_size)
fmel = np.linspace(0,num_mel_bins,num_mel_bins)
fig = plt.figure(constrained_layout=True)
gs = fig.add_gridspec(5, 2)
ax = fig.add_subplot(gs[0, :])
ax.plot(t, y, label='y')
ax.grid(True)
ax.legend()
ax.set_title('input')
ax = fig.add_subplot(gs[1, 0])
n = host_fft.shape[0]
ax.plot(f2, np.real(np.concatenate((host_fft[-n//2:], host_fft[0:n//2]))), label='real')
ax.plot(f2, np.imag(np.concatenate((host_fft[-n//2:], host_fft[0:n//2]))), label='imag')
ax.grid(True)
ax.legend()
ax.set_title('host FFT')
ax = fig.add_subplot(gs[1, 1])
real_part = mcu_fft[0::2]
ax.plot(f2, np.concatenate((real_part[-n//2:], real_part[0:n//2])), label='real')
imag_part = mcu_fft[1::2]
ax.plot(f2, np.concatenate((imag_part[-n//2:], imag_part[0:n//2])), label='imag')
ax.grid(True)
ax.legend()
ax.set_title('MCU FFT')
ax = fig.add_subplot(gs[2, 0])
ax.plot(f2[-mel_mtx.shape[0]:], mel_mtx*(host_spec.max()/mel_mtx.max()), 'k', alpha=0.2, label='')
ax.plot(f2, np.concatenate((host_spec[-n//2:], host_spec[0:n//2])), label='y')
ax.grid(True)
ax.legend()
ax.set_title('host spectrum')
ax = fig.add_subplot(gs[2, 1])
ax.plot(f2, | np.concatenate((mcu_spec[-n//2:], mcu_spec[0:n//2])) | numpy.concatenate |
import unittest
import mock
import numpy as np
from smqtk.algorithms.nn_index.hash_index.sklearn_balltree import \
SkLearnBallTreeHashIndex
from smqtk.representation.data_element.memory_element import DataMemoryElement
from smqtk.utils.bits import int_to_bit_vector_large
class TestBallTreeHashIndex (unittest.TestCase):
def test_is_usable(self):
# Should always be true because major dependency (sklearn) is a package
# requirement.
self.assertTrue(SkLearnBallTreeHashIndex.is_usable())
def test_default_configuration(self):
c = SkLearnBallTreeHashIndex.get_default_config()
self.assertEqual(len(c), 3)
self.assertIsInstance(c['cache_element'], dict)
self.assertIsNone(c['cache_element']['type'])
self.assertEqual(c['leaf_size'], 40)
self.assertIsNone(c['random_seed'])
def test_init_without_cache(self):
i = SkLearnBallTreeHashIndex(cache_element=None, leaf_size=52,
random_seed=42)
self.assertIsNone(i.cache_element)
self.assertEqual(i.leaf_size, 52)
self.assertEqual(i.random_seed, 42)
self.assertIsNone(i.bt)
def test_init_with_empty_cache(self):
empty_cache = DataMemoryElement()
i = SkLearnBallTreeHashIndex(cache_element=empty_cache,
leaf_size=52,
random_seed=42)
self.assertEqual(i.cache_element, empty_cache)
self.assertEqual(i.leaf_size, 52)
self.assertEqual(i.random_seed, 42)
self.assertIsNone(i.bt)
def test_get_config(self):
bt = SkLearnBallTreeHashIndex()
bt_c = bt.get_config()
self.assertEqual(len(bt_c), 3)
self.assertIn('cache_element', bt_c)
self.assertIn('leaf_size', bt_c)
self.assertIn('random_seed', bt_c)
self.assertIsInstance(bt_c['cache_element'], dict)
self.assertIsNone(bt_c['cache_element']['type'])
def test_init_consistency(self):
# Test that constructing an instance with a configuration yields the
# same config via ``get_config``.
# - Default config should be a valid configuration for this impl.
c = SkLearnBallTreeHashIndex.get_default_config()
self.assertEqual(
SkLearnBallTreeHashIndex.from_config(c).get_config(),
c
)
# With non-null cache element
c['cache_element']['type'] = 'DataMemoryElement'
self.assertEqual(
SkLearnBallTreeHashIndex.from_config(c).get_config(),
c
)
def test_build_index_no_input(self):
bt = SkLearnBallTreeHashIndex(random_seed=0)
self.assertRaises(
ValueError,
bt.build_index, []
)
def test_build_index(self):
bt = SkLearnBallTreeHashIndex(random_seed=0)
# Make 1000 random bit vectors of length 256
m = np.random.randint(0, 2, 1000 * 256).reshape(1000, 256)
bt.build_index(m)
# deterministically sort index of built and source data to determine
# that an index was built.
self.assertIsNotNone(bt.bt)
np.testing.assert_array_almost_equal(
sorted(np.array(bt.bt.data).tolist()),
sorted(m.tolist())
)
def test_update_index_no_input(self):
bt = SkLearnBallTreeHashIndex(random_seed=0)
self.assertRaises(
ValueError,
bt.update_index, []
)
def test_update_index_new_index(self):
# Virtually the same as `test_build_index` but using update_index.
bt = SkLearnBallTreeHashIndex(random_seed=0)
# Make 1000 random bit vectors of length 256
m = np.random.randint(0, 2, 1000 * 256).reshape(1000, 256).astype(bool)
bt.update_index(m)
# deterministically sort index of built and source data to determine
# that an index was built.
self.assertIsNotNone(bt.bt)
np.testing.assert_array_almost_equal(
sorted(np.array(bt.bt.data).tolist()),
sorted(m.tolist())
)
def test_update_index_additive(self):
# Test updating an existing index, i.e. rebuilding using the union of
# previous and new data.
bt = SkLearnBallTreeHashIndex(random_seed=0)
# Make 1000 random bit vectors of length 256
m1 = np.random.randint(0, 2, 1000 * 256).reshape(1000, 256)\
.astype(bool)
m2 = np.random.randint(0, 2, 100 * 256).reshape(100, 256).astype(bool)
# Build initial index
bt.build_index(m1)
# Current model should only contain m1's data.
np.testing.assert_array_almost_equal(
sorted( | np.array(bt.bt.data) | numpy.array |
import pytest
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.testing as pdt
import networkx as nx
from mossspider import NetworkTMLE
@pytest.fixture
def sm_network():
"""Loads a small network for short test runs and checks of data set creations"""
G = nx.Graph()
G.add_nodes_from([(1, {'W': 1, 'A': 1, 'Y': 1, 'C': 1}),
(2, {'W': 0, 'A': 0, 'Y': 0, 'C': -1}),
(3, {'W': 0, 'A': 1, 'Y': 0, 'C': 5}),
(4, {'W': 0, 'A': 0, 'Y': 1, 'C': 0}),
(5, {'W': 1, 'A': 0, 'Y': 0, 'C': 0}),
(6, {'W': 1, 'A': 0, 'Y': 1, 'C': 0}),
(7, {'W': 0, 'A': 1, 'Y': 0, 'C': 10}),
(8, {'W': 0, 'A': 0, 'Y': 0, 'C': -5}),
(9, {'W': 1, 'A': 1, 'Y': 0, 'C': -5})])
G.add_edges_from([(1, 2), (1, 3), (1, 9),
(2, 3), (2, 6),
(3, 4),
(4, 7),
(5, 7), (5, 9)
])
return G
@pytest.fixture
def r_network():
"""Loads network from the R library tmlenet for comparison"""
df = pd.read_csv("tests/tmlenet_r_data.csv")
df['IDs'] = df['IDs'].str[1:].astype(int)
df['NETID_split'] = df['Net_str'].str.split()
G = nx.DiGraph()
G.add_nodes_from(df['IDs'])
for i, c in zip(df['IDs'], df['NETID_split']):
if type(c) is list:
for j in c:
G.add_edge(i, int(j[1:]))
# Adding attributes
for node in G.nodes():
G.nodes[node]['W'] = np.int(df.loc[df['IDs'] == node, 'W1'])
G.nodes[node]['A'] = np.int(df.loc[df['IDs'] == node, 'A'])
G.nodes[node]['Y'] = np.int(df.loc[df['IDs'] == node, 'Y'])
return G
class TestNetworkTMLE:
def test_error_node_ids(self):
G = nx.Graph()
G.add_nodes_from([(1, {'A': 1, 'Y': 1}), (2, {'A': 0, 'Y': 1}), ("N", {'A': 1, 'Y': 0}), (4, {'A': 0, 'Y': 0})])
with pytest.raises(ValueError):
NetworkTMLE(network=G, exposure='A', outcome='Y')
def test_error_self_loops(self):
G = nx.Graph()
G.add_nodes_from([(1, {'A': 1, 'Y': 1}), (2, {'A': 0, 'Y': 1}), (3, {'A': 1, 'Y': 0}), (4, {'A': 0, 'Y': 0})])
G.add_edges_from([(1, 1), (1, 2), (3, 4)])
with pytest.raises(ValueError):
NetworkTMLE(network=G, exposure='A', outcome='Y')
def test_error_nonbinary_a(self):
G = nx.Graph()
G.add_nodes_from([(1, {'A': 2, 'Y': 1}), (2, {'A': 5, 'Y': 1}), (3, {'A': 1, 'Y': 0}), (4, {'A': 0, 'Y': 0})])
with pytest.raises(ValueError):
NetworkTMLE(network=G, exposure='A', outcome='Y')
def test_error_degree_restrictions(self, r_network):
with pytest.raises(ValueError):
NetworkTMLE(network=r_network, exposure='A', outcome='Y', degree_restrict=2)
with pytest.raises(ValueError):
NetworkTMLE(network=r_network, exposure='A', outcome='Y', degree_restrict=[0, 1, 2])
with pytest.raises(ValueError):
NetworkTMLE(network=r_network, exposure='A', outcome='Y', degree_restrict=[2, 0])
def test_error_fit_gimodel(self, r_network):
tmle = NetworkTMLE(network=r_network, exposure='A', outcome='Y')
# tmle.exposure_model('W')
tmle.exposure_map_model('W', distribution=None)
tmle.outcome_model('A + W')
with pytest.raises(ValueError):
tmle.fit(p=0.0, samples=10)
def test_error_fit_gsmodel(self, r_network):
tmle = NetworkTMLE(network=r_network, exposure='A', outcome='Y')
tmle.exposure_model('W')
# tmle.exposure_map_model('W', distribution=None)
tmle.outcome_model('A + W')
with pytest.raises(ValueError):
tmle.fit(p=0.0, samples=10)
def test_error_gs_distributions(self, r_network):
tmle = NetworkTMLE(network=r_network, exposure='A', outcome='Y')
with pytest.raises(ValueError):
tmle.exposure_map_model('W', measure='mean', distribution=None)
with pytest.raises(ValueError):
tmle.exposure_map_model('W', measure='mean', distribution='multinomial')
def test_error_fit_qmodel(self, r_network):
tmle = NetworkTMLE(network=r_network, exposure='A', outcome='Y')
tmle.exposure_model('W')
tmle.exposure_map_model('W', distribution=None)
# tmle.outcome_model('A + W')
with pytest.raises(ValueError):
tmle.fit(p=0.0, samples=10)
def test_error_p_bound(self, r_network):
tmle = NetworkTMLE(network=r_network, exposure='A', outcome='Y')
tmle.exposure_model('W')
tmle.exposure_map_model('W', distribution=None)
tmle.outcome_model('A + W')
# For single 'p'
with pytest.raises(ValueError):
tmle.fit(p=1.5, samples=10)
# For multiple 'p'
with pytest.raises(ValueError):
tmle.fit(p=[0.1, 1.5, 0.1,
0.1, 0.1, 0.1,
0.1, 0.1, 0.1], samples=100)
def test_error_p_type(self, r_network):
tmle = NetworkTMLE(network=r_network, exposure='A', outcome='Y')
tmle.exposure_model('W')
tmle.exposure_map_model('W', distribution=None)
tmle.outcome_model('A + W')
with pytest.raises(ValueError):
tmle.fit(p=5, samples=10)
def test_error_summary(self, r_network):
tmle = NetworkTMLE(network=r_network, exposure='A', outcome='Y')
tmle.exposure_model('W')
tmle.exposure_map_model('W', distribution=None)
tmle.outcome_model('A + W')
with pytest.raises(ValueError):
tmle.summary()
def test_df_creation(self, sm_network):
columns = ["_original_id_", "W", "A", "Y", "A_sum", "A_mean", "W_sum", "W_mean", "degree"]
expected = pd.DataFrame([[1, 1, 1, 1, 2, 2/3, 1, 1/3, 3],
[2, 0, 0, 0, 2, 2/3, 2, 2/3, 3],
[3, 0, 1, 0, 1, 1/3, 1, 1/3, 3],
[4, 0, 0, 1, 2, 1, 0, 0, 2],
[5, 1, 0, 0, 2, 1, 1, 1/2, 2],
[6, 1, 0, 1, 0, 0, 0, 0, 1],
[7, 0, 1, 0, 0, 0, 1, 1/2, 2],
[8, 0, 0, 0, 0, 0, 0, 0, 0],
[9, 1, 1, 0, 1, 1/2, 2, 1, 2]],
columns=columns,
index=[0, 1, 2, 3, 4, 5, 6, 7, 8])
tmle = NetworkTMLE(network=sm_network, exposure='A', outcome='Y')
created = tmle.df
# Checking that expected is the same as the created
assert tmle._continuous_outcome is False
pdt.assert_frame_equal(expected,
created[columns],
check_dtype=False)
def test_df_creation_restricted(self, sm_network):
expected = pd.DataFrame([[1, 1, 1, 2, 2/3, 1, 1/3, 3],
[0, 0, 0, 2, 2/3, 2, 2/3, 3],
[0, 1, 0, 1, 1/3, 1, 1/3, 3],
[0, 0, 1, 2, 1, 0, 0, 2],
[1, 0, 0, 2, 1, 1, 1/2, 2],
[1, 0, 1, 0, 0, 0, 0, 1],
[0, 1, 0, 0, 0, 1, 1/2, 2],
[0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 1, 1/2, 2, 1, 2]],
columns=["W", "A", "Y", "A_sum", "A_mean", "W_sum", "W_mean", "degree"],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8])
expected_r = pd.DataFrame([[0, 0, 1, 2, 1, 0, 0, 2],
[1, 0, 0, 2, 1, 1, 1/2, 2],
[1, 0, 1, 0, 0, 0, 0, 1],
[0, 1, 0, 0, 0, 1, 1/2, 2],
[0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 1, 1/2, 2, 1, 2]],
columns=["W", "A", "Y", "A_sum", "A_mean", "W_sum", "W_mean", "degree"],
index=[3, 4, 5, 6, 7, 8])
tmle = NetworkTMLE(network=sm_network, exposure='A', outcome='Y', degree_restrict=[0, 2])
created = tmle.df
created_r = tmle.df_restricted
# Checking that expected is the same as the created
pdt.assert_frame_equal(expected,
created[["W", "A", "Y", "A_sum", "A_mean", "W_sum", "W_mean", "degree"]],
check_dtype=False)
pdt.assert_frame_equal(expected_r,
created_r[["W", "A", "Y", "A_sum", "A_mean", "W_sum", "W_mean", "degree"]],
check_dtype=False)
def test_restricted_number(self, sm_network):
tmle = NetworkTMLE(network=sm_network, exposure='A', outcome='Y', degree_restrict=[0, 2])
n_created = tmle.df.shape[0]
n_created_r = tmle.df_restricted.shape[0]
assert 6 == n_created_r
assert 3 == n_created - n_created_r
tmle = NetworkTMLE(network=sm_network, exposure='A', outcome='Y', degree_restrict=[1, 3])
n_created = tmle.df.shape[0]
n_created_r = tmle.df_restricted.shape[0]
assert 8 == n_created_r
assert 1 == n_created - n_created_r
def test_continuous_processing(self):
G = nx.Graph()
y_list = [1, -1, 5, 0, 0, 0, 10, -5]
G.add_nodes_from([(1, {'A': 0, 'Y': y_list[0]}), (2, {'A': 1, 'Y': y_list[1]}),
(3, {'A': 1, 'Y': y_list[2]}), (4, {'A': 0, 'Y': y_list[3]}),
(5, {'A': 1, 'Y': y_list[4]}), (6, {'A': 1, 'Y': y_list[5]}),
(7, {'A': 0, 'Y': y_list[6]}), (8, {'A': 0, 'Y': y_list[7]})])
tmle = NetworkTMLE(network=G, exposure='A', outcome='Y', continuous_bound=0.0001)
# Checking all flagged parts are correct
assert tmle._continuous_outcome is True
assert tmle._continuous_min_ == -5.0001
assert tmle._continuous_max_ == 10.0001
assert tmle._cb_ == 0.0001
# Checking that TMLE bounding works as intended
maximum = 10.0001
minimum = -5.0001
y_bound = (np.array(y_list) - minimum) / (maximum - minimum)
pdt.assert_series_equal(pd.Series(y_bound, index=[0, 1, 2, 3, 4, 5, 6, 7]),
tmle.df['Y'],
check_dtype=False, check_names=False)
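# --- Editor's note (not part of the original tests) ---
# Worked example of the bounding checked above: with continuous_bound=0.0001 the
# outcome range becomes [-5.0001, 10.0001], so Y=10 maps to
# (10 + 5.0001) / 15.0002 ~= 0.99999 and Y=-5 maps to ~0.0000067.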
def test_df_creation_continuous(self, sm_network):
expected = pd.DataFrame([[1, 1, 2, 1, 3],
[0, 0, 2, 2, 3],
[0, 1, 1, 1, 3],
[0, 0, 2, 0, 2],
[1, 0, 2, 1, 2],
[1, 0, 0, 0, 1],
[0, 1, 0, 1, 2],
[0, 0, 0, 0, 0],
[1, 1, 1, 2, 2]],
columns=["W", "A", "A_sum", "W_sum", "degree"],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8])
expected["C"] = [4.00001333e-01, 2.66669778e-01, 6.66664444e-01, 3.33335556e-01, 3.33335556e-01,
3.33335556e-01, 9.99993333e-01, 6.66657778e-06, 6.66657778e-06]
tmle = NetworkTMLE(network=sm_network, exposure='A', outcome='C', continuous_bound=0.0001)
created = tmle.df
# Checking that expected is the same as the created
assert tmle._continuous_outcome is True
pdt.assert_frame_equal(expected[["W", "A", "C", "A_sum", "W_sum", "degree"]],
created[["W", "A", "C", "A_sum", "W_sum", "degree"]],
check_dtype=False)
def test_no_consecutive_ids(self):
G = nx.Graph()
G.add_nodes_from([(1, {'W': 1, 'A': 1, 'Y': 1}), (2, {'W': 0, 'A': 0, 'Y': 0}),
(3, {'W': 0, 'A': 1, 'Y': 0}), (4, {'W': 0, 'A': 0, 'Y': 1}),
(5, {'W': 1, 'A': 0, 'Y': 0}), (7, {'W': 1, 'A': 0, 'Y': 1}),
(9, {'W': 0, 'A': 1, 'Y': 0}), (11, {'W': 0, 'A': 0, 'Y': 0}),
(12, {'W': 1, 'A': 1, 'Y': 0})])
G.add_edges_from([(1, 2), (1, 3), (1, 12), (2, 3), (2, 7),
(3, 4), (4, 9), (5, 9), (5, 12)])
expected = pd.DataFrame([[1, 1, 1, 1, 2, 2 / 3, 1, 1 / 3, 3],
[2, 0, 0, 0, 2, 2/3, 2, 2/3, 3],
[3, 0, 1, 0, 1, 1 / 3, 1, 1 / 3, 3],
[4, 0, 0, 1, 2, 1, 0, 0, 2],
[5, 1, 0, 0, 2, 1, 1, 1 / 2, 2],
[7, 1, 0, 1, 0, 0, 0, 0, 1],
[8, 0, 1, 0, 0, 0, 1, 1 / 2, 2],
[11, 0, 0, 0, 0, 0, 0, 0, 0],
[12, 1, 1, 0, 1, 1 / 2, 2, 1, 2]
],
columns=["_original_id_", "W", "A", "Y", "A_sum",
"A_mean", "W_sum", "W_mean", "degree"],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8])
tmle = NetworkTMLE(network=G, exposure='A', outcome='Y')
created = tmle.df.sort_values(by='_original_id_').reset_index()
pdt.assert_frame_equal(expected[["W", "A", "Y", "A_sum", "A_mean", "W_sum", "W_mean", "degree"]],
created[["W", "A", "Y", "A_sum", "A_mean", "W_sum", "W_mean", "degree"]],
check_dtype=False)
def test_df_creation_nonparametric(self, sm_network):
columns = ["_original_id_", "A", "A_map1", "A_map2", "A_map3"]
expected = pd.DataFrame([[1, 1, 0, 1, 1],
[2, 0, 1, 1, 0],
[3, 1, 1, 0, 0],
[4, 0, 1, 1, 0],
[5, 0, 1, 1, 0],
[6, 0, 0, 0, 0],
[7, 1, 0, 0, 0],
[8, 0, 0, 0, 0],
[9, 1, 1, 0, 0]],
columns=columns,
index=[0, 1, 2, 3, 4, 5, 6, 7, 8])
tmle = NetworkTMLE(network=sm_network, exposure='A', outcome='Y')
created = tmle.df.sort_values(by='_original_id_').reset_index()
# Checking that expected is the same as the created
pdt.assert_frame_equal(expected[columns], created[columns], check_dtype=False)
def test_summary_measures_creation(self, sm_network):
columns = ["_original_id_", "A_sum", "A_mean", "A_var", "W_sum", "W_mean", "W_var"]
neighbors_w = {1: np.array([0, 0, 1]), 2: np.array([0, 1, 1]), 3: np.array([0, 0, 1]), 4: np.array([0, 0]),
5: | np.array([0, 1]) | numpy.array |
from skimage.segmentation import quickshift, mark_boundaries
import numpy as np
import matplotlib.pyplot as plt
from IPython.core.display import display, HTML
import cv2
def show_vis_explanation(explanation_image, cmap=None):
"""
Display the explanation image with matplotlib (axes hidden).
Args:
explanation_image: image array to display.
cmap: optional matplotlib colormap.
Returns: None
"""
plt.imshow(explanation_image, cmap)
plt.axis("off")
plt.show()
# def is_jupyter():
# # ref: https://stackoverflow.com/a/39662359/4834515
# try:
# shell = get_ipython().__class__.__name__
# if shell == 'ZMQInteractiveShell':
# return True # Jupyter notebook or qtconsole
# elif shell == 'TerminalInteractiveShell':
# return False # Terminal running IPython
# else:
# return False # Other type (?)
# except NameError:
# return False # Probably standard Python interpreter
def explanation_to_vis(batched_image: np.ndarray, explanation: np.ndarray, style='grayscale') -> np.ndarray:
"""
Args:
batched_image: e.g., (1, height, width, 3).
explanation: should have the same width and height as image.
style: ['grayscale', 'heatmap', 'overlay_grayscale', 'overlay_heatmap', 'overlay_threshold'].
Returns:
"""
if len(batched_image.shape) == 4:
assert batched_image.shape[0] == 1, "For one image only"
batched_image = batched_image[0]
assert len(batched_image.shape) == 3
assert len(explanation.shape) == 2, f"image shape {batched_image.shape} vs " \
f"explanation {explanation.shape}"
image = batched_image
if style == 'grayscale':
# explanation has the same size as image, no need to scale.
# usually for gradient-based explanations w.r.t. the image.
return _grayscale(explanation)
elif style == 'heatmap':
# explanation's width and height are usually smaller than image.
# usually for CAM, GradCAM etc, which produce lower-resolution explanations.
return _heatmap(explanation, (image.shape[1], image.shape[0])) # image just for the shape.
elif style == 'overlay_grayscale':
return overlay_grayscale(image, explanation)
elif style == 'overlay_heatmap':
return overlay_heatmap(image, explanation)
elif style == 'overlay_threshold':
# usually for LIME etc, which originally shows positive and negative parts.
return overlay_heatmap(image, explanation)
else:
raise KeyError("Unknown visualization style.")
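# --- Editor's sketch (not in the original module) ---
# Typical call pattern assumed here: `batched_image` is a (1, H, W, 3) array and
# `explanation` an (H, W) saliency map; the style names are those handled above.
def _demo_show_overlay(batched_image, explanation):
    vis = explanation_to_vis(batched_image, explanation, style='overlay_heatmap')
    show_vis_explanation(vis)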
def _grayscale(explanation: np.ndarray, percentile=99) -> np.ndarray:
"""
Args:
explanation: numpy.ndarray, 2d.
percentile:
Returns: numpy.ndarray, uint8, same shape as explanation
"""
assert len(explanation.shape) == 2, f"{explanation.shape}. " \
"Currently support 2D explanation results for visualization. " \
"Reduce higher dimensions to 2D for visualization."
assert np.max(explanation) <= 1.0
assert isinstance(percentile, int)
assert 0 <= percentile <= 100
image_2d = explanation
vmax = | np.percentile(image_2d, percentile) | numpy.percentile |
import datetime as DT
import numpy as NP
import matplotlib.pyplot as PLT
import matplotlib.colors as PLTC
import scipy.constants as FCNST
from astropy.io import fits
from astropy.io import ascii
from astropy.table import Table
import progressbar as PGB
import antenna_array as AA
import geometry as GEOM
import sim_observe as SIM
import ipdb as PDB
LWA_reformatted_datafile_prefix = '/data3/t_nithyanandan/project_MOFF/data/samples/lwa_reformatted_data_test'
pol = 0
LWA_reformatted_datafile = LWA_reformatted_datafile_prefix + '.pol-{0:0d}.fits'.format(pol)
max_n_timestamps = None
hdulist = fits.open(LWA_reformatted_datafile)
extnames = [h.header['EXTNAME'] for h in hdulist]
lat = hdulist['PRIMARY'].header['latitude']
f0 = hdulist['PRIMARY'].header['center_freq']
nchan = hdulist['PRIMARY'].header['nchan']
dt = 1.0 / hdulist['PRIMARY'].header['sample_rate']
freqs = hdulist['freqs'].data
channel_width = freqs[1] - freqs[0]
f_center = f0
bchan = 63
echan = 963
max_antenna_radius = 75.0
antid = hdulist['Antenna Positions'].data['Antenna']
antpos = hdulist['Antenna Positions'].data['Position']
# antpos -= NP.mean(antpos, axis=0).reshape(1,-1)
core_ind = NP.logical_and((NP.abs(antpos[:,0]) < max_antenna_radius), (NP.abs(antpos[:,1]) < max_antenna_radius))
# core_ind = NP.logical_and((NP.abs(antpos[:,0]) <= NP.max(NP.abs(antpos[:,0]))), (NP.abs(antpos[:,1]) < NP.max(NP.abs(antpos[:,1]))))
ant_info = NP.hstack((antid[core_ind].reshape(-1,1), antpos[core_ind,:]))
n_antennas = ant_info.shape[0]
ants = []
for i in xrange(n_antennas):
ants += [AA.Antenna('{0:0d}'.format(int(ant_info[i,0])), lat, ant_info[i,1:], f0)]
aar = AA.AntennaArray()
for ant in ants:
aar = aar + ant
antpos_info = aar.antenna_positions()
timestamps = hdulist['TIMESTAMPS'].data['timestamp']
if max_n_timestamps is None:
max_n_timestamps = len(timestamps)
else:
max_n_timestamps = min(max_n_timestamps, len(timestamps))
timestamps = timestamps[:max_n_timestamps]
stand_cable_delays = NP.loadtxt('/data3/t_nithyanandan/project_MOFF/data/samples/cable_delays.txt', skiprows=1)
antennas = stand_cable_delays[:,0].astype(NP.int).astype(str)
cable_delays = stand_cable_delays[:,1]
# antenna_cable_delays_output = {}
progress = PGB.ProgressBar(widgets=[PGB.Percentage(), PGB.Bar(), PGB.ETA()], maxval=max_n_timestamps).start()
for i in xrange(max_n_timestamps):
timestamp = timestamps[i]
update_info = {}
update_info['antennas'] = []
update_info['antenna_array'] = {}
update_info['antenna_array']['timestamp'] = timestamp
for label in aar.antennas:
adict = {}
adict['label'] = label
adict['action'] = 'modify'
adict['timestamp'] = timestamp
if label in hdulist[timestamp].columns.names:
adict['t'] = NP.arange(nchan) * dt
Et_P1 = hdulist[timestamp].data[label]
adict['Et_P1'] = Et_P1[:,0] + 1j * Et_P1[:,1]
adict['flag_P1'] = False
adict['gridfunc_freq'] = 'scale'
adict['wtsinfo_P1'] = [{'orientation':0.0, 'lookup':'/data3/t_nithyanandan/project_MOFF/simulated/LWA/data/lookup/E_illumination_isotropic_radiators_lookup_zenith.txt'}]
adict['gridmethod'] = 'NN'
adict['distNN'] = 0.5 * FCNST.c / f0
adict['tol'] = 1.0e-6
adict['maxmatch'] = 1
adict['delaydict_P1'] = {}
adict['delaydict_P1']['pol'] = 'P1'
adict['delaydict_P1']['frequencies'] = hdulist['FREQUENCIES AND CABLE DELAYS'].data['frequency']
# adict['delaydict_P1']['delays'] = hdulist['FREQUENCIES AND CABLE DELAYS'].data[label]
adict['delaydict_P1']['delays'] = cable_delays[antennas == label]
adict['delaydict_P1']['fftshifted'] = True
else:
adict['flag_P1'] = True
update_info['antennas'] += [adict]
aar.update(update_info, verbose=True)
if i==0:
aar.grid()
aar.grid_convolve(pol='P1', method='NN', distNN=0.5*FCNST.c/f0, tol=1.0e-6, maxmatch=1)
holimg = AA.Image(antenna_array=aar, pol='P1')
holimg.imagr(pol='P1')
if i == 0:
# avg_img = NP.abs(holimg.holograph_P1)**2
tavg_img = NP.abs(holimg.holograph_P1)**2 - NP.nanmean(NP.abs(holimg.holograph_P1.reshape(-1,holimg.holograph_P1.shape[2]))**2, axis=0).reshape(1,1,-1)
else:
# avg_img += NP.abs(holimg.holograph_P1)**2
tavg_img += NP.abs(holimg.holograph_P1)**2 - NP.nanmean(NP.abs(holimg.holograph_P1.reshape(-1,holimg.holograph_P1.shape[2]))**2, axis=0).reshape(1,1,-1)
progress.update(i+1)
progress.finish()
tavg_img /= max_n_timestamps
favg_img = NP.sum(tavg_img[:,:,bchan:echan], axis=2)/(echan-bchan)
fig1 = PLT.figure(figsize=(12,12))
# fig1.clf()
ax11 = fig1.add_subplot(111, xlim=(NP.amin(holimg.lf_P1[:,0]), NP.amax(holimg.lf_P1[:,0])), ylim=(NP.amin(holimg.mf_P1[:,0]), NP.amax(holimg.mf_P1[:,0])))
# imgplot = ax11.imshow(NP.mean(NP.abs(holimg.holograph_P1)**2, axis=2), aspect='equal', extent=(NP.amin(holimg.lf_P1[:,0]), NP.amax(holimg.lf_P1[:,0]), NP.amin(holimg.mf_P1[:,0]), NP.amax(holimg.mf_P1[:,0])), origin='lower', norm=PLTC.LogNorm())
imgplot = ax11.imshow(favg_img, aspect='equal', extent=(NP.amin(holimg.lf_P1[:,0]), NP.amax(holimg.lf_P1[:,0]), NP.amin(holimg.mf_P1[:,0]), NP.amax(holimg.mf_P1[:,0])), origin='lower')
# l, = ax11.plot(skypos[:,0], skypos[:,1], 'o', mfc='none', mec='white', mew=1, ms=10)
PLT.grid(True,which='both',ls='-',color='g')
cbaxes = fig1.add_axes([0.1, 0.05, 0.8, 0.05])
cbar = fig1.colorbar(imgplot, cax=cbaxes, orientation='horizontal')
# PLT.colorbar(imgplot)
PLT.savefig('/data3/t_nithyanandan/project_MOFF/data/samples/figures/LWA_sample_image_{0:0d}_iterations.png'.format(max_n_timestamps), bbox_inches=0)
PLT.show()
#### For testing
timestamp = timestamps[-1]
Et = []
Ef = []
cabdel = []
pos = []
stand = []
for label in aar.antennas:
Et += [aar.antennas[label].pol.Et_P1[0]]
stand += [label]
pos += [(aar.antennas[label].location.x, aar.antennas[label].location.y, aar.antennas[label].location.z)]
# cabdel += [aar.antennas[]]
cabdel += [cable_delays[antennas == label]]
Ef += [aar.antennas[label].pol.Ef_P1[0]]
Et = NP.asarray(Et).ravel()
Ef = NP.asarray(Ef).ravel()
stand = NP.asarray(stand).ravel()
cabdel = NP.asarray(cabdel).ravel()
pos = NP.asarray(pos)
data = Table({'stand': NP.asarray(stand).astype(int).ravel(), 'x-position [m]': pos[:,0], 'y-position [m]': pos[:,1], 'z-position [m]': pos[:,2], 'cable-delay [ns]': NP.asarray(cabdel*1e9).ravel()})
#
# Copyright 2021 <NAME>
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import numpy as np
import json
from scipy.special import erfinv
def wavelength_filter2D(field, lamb, sigma, hipass=False):
nx = field.shape[0]
measure = nx**2
Lx = 1
    mu_init = np.sum(field)/measure  # mean of the input field
sigma_init = np.sqrt(np.sum((field - mu_init)**2)/measure)
print('sigma_init=',sigma_init)
qx = np.arange(0,nx, dtype=np.float64)
qx = np.where(qx <= nx//2, qx/Lx, (nx-qx)/Lx)
qx *= 2 * np.pi
qy = np.arange(0,nx//2 +1, dtype=np.float64)
qy*= 2*np.pi/Lx
q2 = (qx**2).reshape(-1,1) + (qy**2).reshape(1,-1)
filt = np.ones_like(q2)
q_s = 2*np.pi/lamb
if (hipass is True):
filt *= (q2 >= q_s ** 2)
else:
filt *= (q2 <= q_s ** 2)
h_qs = np.fft.irfftn( np.fft.rfftn(field) * filt, field.shape)
mu_filt = np.sum(h_qs)/measure
sigma_filt = np.sqrt(np.sum((h_qs - mu_filt)**2)/measure)
print('sigma_filt=',sigma_filt)
print('mu_filt=',mu_filt)
h_qs *= sigma/sigma_filt
mu_scaled = np.sum(h_qs)/measure
sigma_scaled = np.sqrt(np.sum((h_qs - mu_scaled)**2)/measure)
print('sigma_scaled=',sigma_scaled)
return h_qs
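# The sketch below shows one way to call wavelength_filter2D; the 256x256
# Gaussian random field is an assumption for illustration, not data from the
# original project.
def _demo_wavelength_filter2D():
    rng = np.random.default_rng(0)
    field = rng.standard_normal((256, 256))
    # low-pass: keep wavevectors with |q| <= 2*pi/lamb, rescaled to sigma = 1
    smooth = wavelength_filter2D(field, lamb=0.1, sigma=1.0)
    # high-pass: keep wavevectors with |q| >= 2*pi/lamb
    rough = wavelength_filter2D(field, lamb=0.1, sigma=1.0, hipass=True)
    return smooth, rough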
def smoothcutoff2D(field, minimum_val, k=10):
measure = np.array(field.shape).prod()
mu0 = np.sum(field)/measure
print('mu0', mu0)
print('cutval=', minimum_val-mu0)
cutfield = half_sigmoid(field-mu0, minimum_val-mu0, k=k)
mu_cutoff = np.sum(cutfield)/measure
sigma_cutoff = np.sqrt(np.sum((cutfield - mu_cutoff)**2)/measure)
print('sigma_cutoff=',sigma_cutoff)
print('minval_cutoff=',np.amin(cutfield)+mu0)
return cutfield + mu0
def half_sigmoid(f, cutoff, k=10):
x = np.asarray(f)
y = np.asarray(x+0.0)
y[np.asarray(x < 0)] = x[np.asarray(x < 0)]*abs(cutoff)/(
abs(cutoff)**k+np.abs(x[np.asarray(x < 0)])**k)**(1/k)
return y
def threshsymm(field, Vf):
measure = np.array(field.shape).prod()
mu = np.sum(field)/measure
sigma = np.sqrt(np.sum((field-mu)**2/measure))
thresh = 2**0.5*erfinv(2*Vf - 1)
thresh_scaled = thresh*sigma + mu
thresh_field = np.ones_like(field)
thresh_field[field < thresh_scaled] = -1
    print(np.sum(thresh_field))
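# Quick numerical sanity check (a sketch, not part of the original file) of the
# erfinv threshold used in threshsymm: for a standard normal field, the fraction
# of values below sqrt(2)*erfinv(2*Vf - 1) should be close to Vf.
def _check_threshold_fraction(Vf=0.3, n=10**6, seed=0):
    rng = np.random.default_rng(seed)
    x = rng.standard_normal(n)
    thresh = 2**0.5 * erfinv(2*Vf - 1)
    return np.mean(x < thresh)  # ~= Vf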
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from collections import namedtuple
import skeleton as skel
import skeleton_matching as skm
import robust_functions as rf
import helperfunctions as hf
import visualize as vis
Obs = namedtuple('Observation', 'g q')
def register_skeleton(S1, S2, corres, params):
"""
    This function computes the (non-rigid) registration params between the
    two skeletons by building the normal equations and solving the resulting
    non-linear least-squares problem iteratively.
Parameters
----------
S1, S2 : Skeleton Class
Two skeletons for which we compute the non-rigid registration params
corres : numpy array (Mx2)
correspondence between two skeleton nodes
params : Dictionary
num_iter : Maximum number of iterations for the optimization routine
default: 10
w_rot : Weight for the rotation matrix constraints
default: 100,
w_reg : Weight for regularization constraints
default: 100
w_corresp: Weight for correspondence constraints
default: 1
w_fix : Weight for fixed nodes
default: 1
fix_idx : list of fixed nodes
default : []
R_fix : list of rotation matrices for fixed nodes
default : [np.eye(3)]
t_fix : list of translation vectors for fixed nodes
default: [np.zeros((3,1))]
use_robust_kernel : Use robust kernels for optimization (recommended if corres has outliers)
default : True
robust_kernel_type : Choose a robust kernel type (huber, cauchy, geman-mcclure)
default: 'cauchy'
robust_kernel_param : scale/outlier parameter for robust kernel
default: 2
debug : show debug visualizations + info
default: False
Returns
-------
T12 : list of 4x4 numpy arrays
Affine transformation corresponding to each node in S1
"""
print('Computing registration params.')
# set default params if not provided
if 'num_iter' not in params:
params['num_iter'] = 10
if 'w_rot' not in params:
params['w_rot'] = 100
if 'w_reg' not in params:
params['w_reg'] = 100
if 'w_corresp' not in params:
params['w_corresp'] = 1
if 'w_fix' not in params:
params['w_fix'] = 1
    if 'fix_idx' not in params:
params['fix_idx'] = []
if 'use_robust_kernel' not in params:
params['use_robust_kernel'] = False
if 'robust_kernel_type' not in params:
params['robust_kernel_type'] = 'cauchy'
if 'robust_kernel_param' not in params:
params['robust_kernel_param'] = 2
if 'debug' not in params:
params['debug'] = False
# initialize normal equation
J_rot, r_rot, J_reg, r_reg, J_corresp, r_corresp, J_fix, r_fix = \
initialize_normal_equations(S1, corres, params)
# initialze solution
x = initialize_solution(S1, params)
# initialize weights
W_rot, W_reg, W_corresp, W_fix = initialize_weight_matrices(\
params['w_rot'], len(r_rot), params['w_reg'], len(r_reg), \
params['w_corresp'], len(r_corresp) , params['w_fix'], len(r_fix))
# # initialize variables in optimization
m = S1.XYZ.shape[0]
T12 = [None]*m
R = [None]*m
t = [None]*m
for j in range(m):
xj = x[12*j:12*(j+1)]
        R[j] = np.reshape(xj[0:9], (3,3), order='F')  # unpack column-major, matching initialize_solution
t[j] = xj[9:12]
# perform optimization
if params['debug']:
fh_debug = plt.figure()
E_prev = np.inf;
dx_prev = np.inf;
for i in range(params['num_iter']):
# counters used for different constraints
jk = 0
jc = 0
jf = 0
# compute jacobian and residual for each constraint types
for j in range(m):
# registration params for jth node
Rj = R[j]
tj = t[j]
# constraints from rotation matrix entries
Jj_rot, rj_rot = compute_rotation_matrix_constraints(Rj)
J_rot[6*j:6*(j+1), 12*j:12*(j+1)] = Jj_rot
r_rot[6*j:6*(j+1)] = rj_rot
# constraints from regularization term
ind = np.argwhere(S1.A[j,:]==1).flatten()
for k in range(np.sum(S1.A[j,:])):
# params
Rk = R[ind[k]]
tk = t[ind[k]]
Jj_reg, Jk_reg, r_jk_reg = compute_regularization_constraints(Rj, tj, Rk, tk)
# collect all constraints
nc = r_jk_reg.shape[0]
J_reg[nc*jk : nc*(jk+1), 12*j:12*(j+1)] = Jj_reg
J_reg[nc*jk : nc*(jk+1), ind[k]*12:12*(ind[k]+1)] = Jk_reg
r_reg[nc*jk : nc*(jk+1)] = r_jk_reg
                # increment counter for constraints from neighbouring nodes
jk = jk+1
# constraints from correspondences
if corres.shape[0] > 0:
ind_C = np.argwhere(corres[:,0] == j).flatten()
if len(ind_C) > 0:
# observations
Y = Obs(S1.XYZ[j,:].reshape(3,1), S2.XYZ[corres[ind_C,1],:].reshape(3,1))
# compute constraints
J_jc_corresp, r_jc_corresp = compute_corresp_constraints(Rj, tj, Y)
# collect all constraints
nc = r_jc_corresp.shape[0]
J_corresp[nc*jc:nc*(jc+1), 12*j:12*(j+1)] = J_jc_corresp
r_corresp[nc*jc:nc*(jc+1)] = r_jc_corresp
# increment counter for correspondence constraints
jc = jc + 1
# constraints from fixed nodes
if len(params['fix_idx']) > 0:
if j in params['fix_idx']:
ind_f = params['fix_idx'].index(j)
# observations
R_fix = params['R_fix'][ind_f]
t_fix = params['t_fix'][ind_f]
# compute fix node constraints
J_jf_fix, r_jf_fix = compute_fix_node_constraints(Rj, tj, R_fix, t_fix);
nc = r_jf_fix.shape[0]
J_fix[nc*jf: nc*(jf+1), 12*j:12*(j+1)] = J_jf_fix
r_fix[nc*jf:nc*(jf+1)] = r_jf_fix
# update counter
jf = jf + 1
# compute weights and residual using robust kernel
if params['use_robust_kernel']:
if params['robust_kernel_type'] == 'huber':
_, _, W_corresp = rf.loss_huber(r_corresp, params['robust_kernel_param'])
elif params['robust_kernel_type'] == 'cauchy':
_, _, W_corresp = rf.loss_cauchy(r_corresp, params['robust_kernel_param'])
elif params['robust_kernel_type'] == 'geman_mcclure':
_, _, W_corresp = rf.loss_geman_mcclure(r_corresp, params['robust_kernel_param'])
else:
                print('Robust kernel type not defined.\n')
W_corresp = params['w_corresp']*np.diag(W_corresp.flatten())
# collect all constraints
J = np.vstack((J_rot, J_reg, J_corresp, J_fix))
r = np.vstack((r_rot, r_reg, r_corresp, r_fix))
# construct weight matrix
W = combine_weight_matrices(W_rot, W_reg, W_corresp, W_fix)
# solve linear system
A = J.T @ W @ J
b = J.T @ W @ r
dx = -np.linalg.solve(A, b)
# Errors
E_rot = r_rot.T @ W_rot @ r_rot
E_reg = r_reg.T @ W_reg @ r_reg
E_corresp = r_corresp.T @ W_corresp @ r_corresp
E_fix = r_fix.T @ W_fix @ r_fix
E_total = E_rot + E_reg + E_corresp + E_fix
# print errors
if params['debug']:
print("Iteration # ", i)
print("E_total = ", E_total)
print("E_rot = ", E_rot)
print("E_reg = ", E_reg)
print("E_corresp = ", E_corresp)
print("E_fix = ", E_fix)
print("Rank(A) = ", np.linalg.matrix_rank(A))
# update current estimate
for j in range(m):
#params
dx_j = dx[12*j:12*(j+1)]
R[j] = R[j] + np.reshape(dx_j[0:9], (3, 3), order = 'F')
t[j] = t[j] + dx_j[9:12]
# collect and return transformation
for j in range(m):
T12[j] = hf.M(R[j], t[j])
# apply registration to skeleton for visualization
if params['debug']:
# compute registration error
S2_hat = apply_registration_params_to_skeleton(S1, T12)
vis.plot_skeleton(fh_debug, S1,'b');
vis.plot_skeleton(fh_debug, S2,'r');
vis.plot_skeleton(fh_debug, S2_hat,'k');
vis.plot_skeleton_correspondences(fh_debug, S2_hat, S2, corres)
plt.title("Iteration " + str(i))
# exit criteria
if np.abs(E_total - E_prev) < 1e-6 or np.abs(np.linalg.norm(dx) - np.linalg.norm(dx_prev)) < 1e-6:
print("Exiting optimization.")
print('Total error = ', E_total)
break
# update last solution
E_prev = E_total
dx_prev = dx
return T12
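# Usage sketch (illustrative, with hypothetical inputs): S1, S2 are Skeleton
# objects and corres comes from skeleton_matching, as in the imports above.
# The parameter values mirror the documented defaults.
#
#   params = {'num_iter': 10, 'w_rot': 100, 'w_reg': 100, 'w_corresp': 1,
#             'w_fix': 1, 'fix_idx': [], 'use_robust_kernel': True,
#             'robust_kernel_type': 'cauchy', 'robust_kernel_param': 2,
#             'debug': False}
#   T12 = register_skeleton(S1, S2, corres, params)
#   S2_hat = apply_registration_params_to_skeleton(S1, T12)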
def initialize_normal_equations(S, corres, params):
"""
    This function initializes the J and r matrices for the different types of constraints.
Parameters
----------
S : Skeleton Class
Contains points, adjacency matrix etc related to the skeleton graph.
corres : numpy array (Mx2)
correspondence between two skeleton nodes
params : Dictionary
see description in register_skeleton function
Returns
-------
J_rot : numpy array [6mx12m]
jacobian for rotation matrix error
r_rot : numpy array [6mx1]
residual for rotation matrix error
    J_reg : numpy array [12nKx12m]
        jacobian for regularization error
    r_reg : numpy array [12nKx1]
        residual for regularization error
J_corres : numpy array [3nCx12m]
jacobian for correspondence error
r_corres : numpy array [3nCx1]
residual for correspondence error
    J_fix : numpy array [12nFx12m]
        jacobian for fix nodes
    r_fix : numpy array [12nFx1]
residual for fix nodes
"""
# get sizes from input
m = S.XYZ.shape[0]
nK = 2*S.edge_count
nC = corres.shape[0]
nF = len(params['fix_idx'])
# constraints from individual rotation matrix
num_rot_cons = 6*m
J_rot = np.zeros((num_rot_cons, 12*m))
r_rot = np.zeros((num_rot_cons,1))
# constraints from regularization
num_reg_cons = 12*nK
J_reg = np.zeros((num_reg_cons,12*m))
r_reg = np.zeros((num_reg_cons,1))
# constraints from correspondences
num_corres_cons = 3*nC;
J_corres = np.zeros((num_corres_cons,12*m))
r_corres = np.zeros((num_corres_cons,1))
# constraints from fix nodes
num_fix_cons = 12*nF
J_fix = np.zeros((num_fix_cons,12*m))
r_fix = np.zeros((num_fix_cons,1))
return J_rot, r_rot, J_reg, r_reg, J_corres, r_corres, J_fix, r_fix
def initialize_solution(S, params):
"""
    This function initializes the solution either as the zero solution or
    from the provided initial transformation.
Parameters
----------
S : Skeleton Class
Only used for getting number of unknowns.
params : Dictionary
        R_init and t_init are used for initializing the solution if
        they are provided. If R_init is a list, a separate initial estimate is
        assumed for every node. Otherwise R_init should be 3x3 and t_init 3x1.
Returns
-------
x : numpy array [12mx1]
initial solution vector as expected by the optimization procedure.
"""
m = S.XYZ.shape[0]
x = np.zeros((12*m,1))
R = [None]*m
t = [None]*m
for j in range(m):
if 'R_init' in params and 't_init' in params:
if len(params['R_init']) == m:
R[j] = params['R_init'][j]
t[j] = params['t_init'][j]
else:
R[j] = params['R_init']
t[j] = params['t_init']
else:
# start from zero solution
R[j] = np.eye(3);
t[j] = np.zeros((3,1))
# rearrange in a column vector
x[12*j:12*(j+1)] = np.vstack((np.reshape(R[j], (9, 1),order='F'),t[j]))
return x
def initialize_weight_matrices(w_rot, n_rot, w_reg, n_reg, w_corresp, n_corresp, w_fix, n_fix):
"""
This function computes the weight matrices corresponding to each constraint
given the weights and number of constraints for each type.
"""
W_rot = np.diag(w_rot*np.ones(n_rot))
    W_reg = np.diag(w_reg*np.ones(n_reg))
    W_corresp = np.diag(w_corresp*np.ones(n_corresp))
    W_fix = np.diag(w_fix*np.ones(n_fix))
    return W_rot, W_reg, W_corresp, W_fix
# Implement Back-Propagation using only the "numpy" package.
# Without any automatic differentiation tools
# import module
import numpy as np
"""----------All functions----------"""
def softmax(z):
    # column-wise softmax of matrix z (classes along axis 0);
    # subtract the per-column max for numerical stability
    exp_z = np.exp(z - np.max(z, axis=0, keepdims=True))
    return exp_z / np.sum(exp_z, axis=0, keepdims=True)
def sigmoid(x):
return 1. / (1 + np.exp(-x))
def tanh(x):
return (np.exp(x) - np.exp(-x)) / (np.exp(x) + np.exp(-x))
def relu(x):
    return np.maximum(x, 0)
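# Backward-pass helpers (a sketch consistent with the activations above; these
# derivative functions are additions for illustration, not part of the original file).
def d_sigmoid(x):
    # derivative of sigmoid: s * (1 - s)
    s = sigmoid(x)
    return s * (1. - s)

def d_tanh(x):
    # derivative of tanh: 1 - tanh(x)^2
    return 1. - tanh(x) ** 2

def d_relu(x):
    # sub-gradient of ReLU: 1 for x > 0, else 0
    return (x > 0).astype(np.float64)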
import os
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import agents.utils as agent_utils
class FullOracleModel:
def __init__(self, env, input_shape, num_blocks, encoder_filters, encoder_filter_sizes, encoder_strides,
encoder_neurons, learning_rate, weight_decay, gamma, batch_norm=False):
self.env = env
self.input_shape = input_shape
self.num_blocks = num_blocks
self.encoder_filters = encoder_filters
self.encoder_filter_sizes = encoder_filter_sizes
self.encoder_strides = encoder_strides
self.encoder_neurons = encoder_neurons
self.learning_rate = learning_rate
self.weight_decay = weight_decay
self.gamma = gamma
self.batch_norm = batch_norm
def encode(self, states, batch_size=100):
assert states.shape[0]
num_steps = int(np.ceil(states.shape[0] / batch_size))
embeddings = []
for i in range(num_steps):
batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
embedding = self.session.run(self.state_block_t, feed_dict={
self.states_pl: states[batch_slice]
})
embeddings.append(embedding)
embeddings = np.concatenate(embeddings, axis=0)
return embeddings
def build(self):
self.build_placeholders_and_constants()
self.build_model()
self.build_training()
def build_placeholders_and_constants(self):
self.states_pl = tf.placeholder(tf.float32, shape=(None, *self.input_shape), name="states_pl")
self.actions_pl = tf.placeholder(tf.int32, shape=(None,), name="actions_pl")
self.rewards_pl = tf.placeholder(tf.float32, shape=(None,), name="rewards_pl")
self.dones_pl = tf.placeholder(tf.bool, shape=(None,), name="dones_pl")
self.next_states_pl = tf.placeholder(tf.float32, shape=(None, *self.input_shape), name="next_states_pl")
self.is_training_pl = tf.placeholder(tf.bool, shape=[], name="is_training_pl")
self.r_c = tf.constant(self.env.r, dtype=tf.float32)
self.p_c = tf.constant(self.env.p, dtype=tf.float32)
def build_model(self):
self.state_block_t = self.build_encoder(self.states_pl)
self.next_state_block_t = self.build_encoder(self.next_states_pl, share_weights=True)
def build_training(self):
r_t = tf.gather(self.r_c, self.actions_pl)
p_t = tf.gather(self.p_c, self.actions_pl)
dones_t = tf.cast(self.dones_pl, tf.float32)
self.reward_loss_t = tf.square(self.rewards_pl - tf.reduce_sum(self.state_block_t * r_t, axis=1))
self.transition_loss_t = tf.reduce_sum(
tf.square(tf.stop_gradient(self.next_state_block_t) - tf.matmul(tf.expand_dims(self.state_block_t, axis=1), p_t)[:, 0, :]),
axis=1
) * (1 - dones_t)
reg = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
if len(reg) > 0:
self.regularization_loss_t = tf.add_n(reg)
else:
self.regularization_loss_t = 0
self.loss_t = tf.reduce_mean(
(1 / 2) * (self.reward_loss_t + self.gamma * self.transition_loss_t), axis=0
) + self.regularization_loss_t
self.train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss_t)
if self.batch_norm:
self.update_op = tf.group(*tf.get_collection(tf.GraphKeys.UPDATE_OPS))
self.train_step = tf.group(self.train_step, self.update_op)
def build_encoder(self, input_t, share_weights=False):
x = tf.expand_dims(input_t, axis=-1)
with tf.variable_scope("encoder", reuse=share_weights):
for idx in range(len(self.encoder_filters)):
with tf.variable_scope("conv{:d}".format(idx + 1)):
x = tf.layers.conv2d(
x, self.encoder_filters[idx], self.encoder_filter_sizes[idx], self.encoder_strides[idx],
padding="SAME", activation=tf.nn.relu if not self.batch_norm else None,
kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
kernel_initializer=agent_utils.get_mrsa_initializer(),
use_bias=not self.batch_norm
)
if self.batch_norm and idx != len(self.encoder_filters) - 1:
x = tf.layers.batch_normalization(x, training=self.is_training_pl)
x = tf.nn.relu(x)
x = tf.layers.flatten(x)
if self.batch_norm:
x = tf.layers.batch_normalization(x, training=self.is_training_pl)
x = tf.nn.relu(x)
for idx, neurons in enumerate(self.encoder_neurons):
with tf.variable_scope("fc{:d}".format(idx + 1)):
x = tf.layers.dense(
x, neurons, activation=tf.nn.relu if not self.batch_norm else None,
kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
kernel_initializer=agent_utils.get_mrsa_initializer(),
use_bias=not self.batch_norm
)
if self.batch_norm:
x = tf.layers.batch_normalization(x, training=self.is_training_pl)
x = tf.nn.relu(x)
with tf.variable_scope("predict"):
x = tf.layers.dense(
x, self.num_blocks, activation=tf.nn.softmax,
kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
kernel_initializer=agent_utils.get_mrsa_initializer()
)
return x
def start_session(self, gpu_memory=None):
gpu_options = None
if gpu_memory is not None:
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory)
tf_config = tf.ConfigProto(gpu_options=gpu_options)
self.session = tf.Session(config=tf_config)
self.session.run(tf.global_variables_initializer())
def stop_session(self):
if self.session is not None:
self.session.close()
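# Illustrative NumPy sketch (not part of the original class) of the losses that
# FullOracleModel.build_training expresses in TensorFlow: the reward is modelled
# as a dot product between the block distribution and a per-action reward vector,
# and the next block distribution as a matrix product with a per-action
# transition matrix. All shapes and values below are assumptions for illustration.
def _numpy_full_oracle_losses_demo(num_blocks=3, num_actions=2, batch=4, seed=0):
    rng = np.random.default_rng(seed)
    s = rng.random((batch, num_blocks))            # state block distributions
    s_next = rng.random((batch, num_blocks))       # next-state block distributions
    a = rng.integers(0, num_actions, size=batch)   # actions
    rewards = rng.random(batch)
    dones = np.zeros(batch)
    r = rng.random((num_actions, num_blocks))                 # reward matrix
    p = rng.random((num_actions, num_blocks, num_blocks))     # transition matrices
    reward_loss = (rewards - np.sum(s * r[a], axis=1)) ** 2
    pred_next = np.einsum('bi,bij->bj', s, p[a])
    transition_loss = np.sum((s_next - pred_next) ** 2, axis=1) * (1 - dones)
    return reward_loss, transition_loss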
class PartialOracleModel:
ENCODER_NAMESPACE = "encoder"
TARGET_ENCODER_NAMESPACE = "target_encoder"
def __init__(self, input_shape, num_blocks, num_actions, encoder_filters, encoder_filter_sizes,
encoder_strides, encoder_neurons, learning_rate_encoder, learning_rate_model, weight_decay, gamma,
optimizer_encoder, optimizer_model, max_steps, batch_norm=True, target_network=False,
add_hand_state=False, add_entropy=False, entropy_from=10000, entropy_start=0.0, entropy_end=0.1,
ce_transitions=False):
self.input_shape = input_shape
self.num_blocks = num_blocks
self.num_actions = num_actions
self.encoder_filters = encoder_filters
self.encoder_filter_sizes = encoder_filter_sizes
self.encoder_strides = encoder_strides
self.encoder_neurons = encoder_neurons
self.learning_rate_encoder = learning_rate_encoder
self.learning_rate_model = learning_rate_model
self.weight_decay = weight_decay
self.gamma = gamma
self.optimizer_encoder = optimizer_encoder
self.optimizer_model = optimizer_model
self.max_steps = max_steps
self.batch_norm = batch_norm
self.target_network = target_network
self.add_hand_state = add_hand_state
self.add_entropy = add_entropy
self.entropy_from = entropy_from
self.entropy_start = entropy_start
self.entropy_end = entropy_end
self.ce_transitions = ce_transitions
self.hand_states_pl, self.next_hand_states_pl = None, None
def encode(self, states, batch_size=100, hand_states=None):
assert states.shape[0]
num_steps = int(np.ceil(states.shape[0] / batch_size))
embeddings = []
for i in range(num_steps):
batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
feed_dict = {
self.states_pl: states[batch_slice],
self.is_training_pl: False
}
if hand_states is not None:
feed_dict[self.hand_states_pl] = hand_states[batch_slice]
embedding = self.session.run(self.state_block_t, feed_dict=feed_dict)
embeddings.append(embedding)
embeddings = np.concatenate(embeddings, axis=0)
return embeddings
def build(self):
self.build_placeholders()
self.build_model()
self.build_training()
def build_placeholders(self):
self.states_pl = tf.placeholder(tf.float32, shape=(None, *self.input_shape), name="states_pl")
self.actions_pl = tf.placeholder(tf.int32, shape=(None,), name="actions_pl")
self.rewards_pl = tf.placeholder(tf.float32, shape=(None,), name="rewards_pl")
self.dones_pl = tf.placeholder(tf.bool, shape=(None,), name="dones_pl")
self.next_states_pl = tf.placeholder(tf.float32, shape=(None, *self.input_shape), name="next_states_pl")
self.is_training_pl = tf.placeholder(tf.bool, shape=[], name="is_training_pl")
if self.add_hand_state:
self.hand_states_pl = tf.placeholder(tf.float32, shape=(None, 1), name="hand_states_pl")
self.next_hand_states_pl = tf.placeholder(tf.float32, shape=(None, 1), name="next_hand_states_pl")
def build_model(self):
self.state_block_t = self.build_encoder(self.states_pl, hand_state=self.hand_states_pl)
if self.target_network:
self.next_state_block_t = self.build_encoder(
self.next_states_pl, share_weights=False, namespace=self.TARGET_ENCODER_NAMESPACE,
hand_state=self.next_hand_states_pl
)
self.build_target_update()
else:
self.next_state_block_t = self.build_encoder(
self.next_states_pl, share_weights=True, hand_state=self.next_hand_states_pl
)
self.r_t = tf.get_variable(
"reward_matrix", shape=(self.num_actions, self.num_blocks), dtype=tf.float32,
initializer=tf.random_uniform_initializer(minval=0, maxval=1, dtype=tf.float32)
)
self.p_t = tf.get_variable(
"transition_matrix", shape=(self.num_actions, self.num_blocks, self.num_blocks), dtype=tf.float32,
initializer=tf.random_uniform_initializer(minval=0, maxval=1, dtype=tf.float32)
)
def build_training(self):
self.global_step = tf.train.get_or_create_global_step()
r_t = tf.gather(self.r_t, self.actions_pl)
p_t = tf.gather(self.p_t, self.actions_pl)
dones_t = tf.cast(self.dones_pl, tf.float32)
self.reward_loss_t = (1 / 2) * tf.square(self.rewards_pl - tf.reduce_sum(self.state_block_t * r_t, axis=1))
if self.ce_transitions:
            # interpret the raw p_t entries as logits and convert them to probabilities
p_t = tf.nn.softmax(p_t, axis=-1)
# predict next state
next_state = tf.matmul(tf.expand_dims(self.state_block_t, axis=1), p_t)[:, 0, :]
# cross entropy between next state probs and predicted probs
self.transition_loss_t = - self.next_state_block_t * tf.log(next_state + 1e-7)
self.transition_loss_t = tf.reduce_sum(self.transition_loss_t, axis=-1)
self.transition_loss_t = self.transition_loss_t * (1 - dones_t)
else:
self.transition_loss_t = (1 / 2) * tf.reduce_sum(
tf.square(tf.stop_gradient(self.next_state_block_t) -
tf.matmul(tf.expand_dims(self.state_block_t, axis=1), p_t)[:, 0, :]),
axis=1
) * (1 - dones_t)
reg = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
if len(reg) > 0:
self.regularization_loss_t = tf.add_n(reg)
else:
self.regularization_loss_t = 0
self.loss_t = tf.reduce_mean(
self.reward_loss_t + self.gamma * self.transition_loss_t, axis=0
) + self.regularization_loss_t
if self.add_entropy:
plogs = self.state_block_t * tf.log(self.state_block_t + 1e-7)
self.entropy_loss_t = tf.reduce_mean(tf.reduce_sum(- plogs, axis=1), axis=0)
f = tf.maximum(0.0, tf.cast(self.global_step - self.entropy_from, tf.float32)) / \
(self.max_steps - self.entropy_from)
f = f * (self.entropy_end - self.entropy_start) + self.entropy_start
self.loss_t += f * self.entropy_loss_t
encoder_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.ENCODER_NAMESPACE)
model_variables = [self.r_t, self.p_t]
encoder_optimizer = agent_utils.get_optimizer(self.optimizer_encoder, self.learning_rate_encoder)
model_optimizer = agent_utils.get_optimizer(self.optimizer_model, self.learning_rate_model)
self.encoder_train_step = encoder_optimizer.minimize(
self.loss_t, global_step=self.global_step, var_list=encoder_variables
)
if self.batch_norm:
self.update_op = tf.group(*tf.get_collection(tf.GraphKeys.UPDATE_OPS))
self.encoder_train_step = tf.group(self.encoder_train_step, self.update_op)
self.model_train_step = model_optimizer.minimize(
self.loss_t, var_list=model_variables
)
self.train_step = tf.group(self.encoder_train_step, self.model_train_step)
def build_encoder(self, input_t, share_weights=False, namespace=ENCODER_NAMESPACE, hand_state=None):
x = tf.expand_dims(input_t, axis=-1)
with tf.variable_scope(namespace, reuse=share_weights):
for idx in range(len(self.encoder_filters)):
with tf.variable_scope("conv{:d}".format(idx + 1)):
x = tf.layers.conv2d(
x, self.encoder_filters[idx], self.encoder_filter_sizes[idx], self.encoder_strides[idx],
padding="SAME", activation=tf.nn.relu if not self.batch_norm else None,
kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
kernel_initializer=agent_utils.get_mrsa_initializer(),
use_bias=not self.batch_norm
)
if self.batch_norm and idx != len(self.encoder_filters) - 1:
x = tf.layers.batch_normalization(x, training=self.is_training_pl)
x = tf.nn.relu(x)
x = tf.layers.flatten(x)
if self.batch_norm:
x = tf.layers.batch_normalization(x, training=self.is_training_pl)
x = tf.nn.relu(x)
if hand_state is not None:
x = tf.concat([x, hand_state], axis=1)
for idx, neurons in enumerate(self.encoder_neurons):
with tf.variable_scope("fc{:d}".format(idx + 1)):
x = tf.layers.dense(
x, neurons, activation=tf.nn.relu if not self.batch_norm else None,
kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
kernel_initializer=agent_utils.get_mrsa_initializer(),
use_bias=not self.batch_norm
)
if self.batch_norm:
x = tf.layers.batch_normalization(x, training=self.is_training_pl)
x = tf.nn.relu(x)
with tf.variable_scope("predict"):
x = tf.layers.dense(
x, self.num_blocks, activation=tf.nn.softmax,
kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
kernel_initializer=agent_utils.get_mrsa_initializer()
)
return x
def build_target_update(self):
source_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.ENCODER_NAMESPACE)
target_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.TARGET_ENCODER_NAMESPACE)
assert len(source_vars) == len(target_vars) and len(source_vars) > 0
update_ops = []
for source_var, target_var in zip(source_vars, target_vars):
update_ops.append(tf.assign(target_var, source_var))
self.target_update_op = tf.group(*update_ops)
def start_session(self, gpu_memory=None):
gpu_options = None
if gpu_memory is not None:
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory)
tf_config = tf.ConfigProto(gpu_options=gpu_options)
self.session = tf.Session(config=tf_config)
self.session.run(tf.global_variables_initializer())
def stop_session(self):
if self.session is not None:
self.session.close()
class GumbelModel:
ENCODER_NAMESPACE = "encoder"
TARGET_ENCODER_NAMESPACE = "target_encoder"
def __init__(self, input_shape, num_blocks, num_actions, encoder_filters, encoder_filter_sizes,
encoder_strides, encoder_neurons, learning_rate_encoder, learning_rate_r, learning_rate_p,
weight_decay, gamma, optimizer_encoder, optimizer_model, max_steps, batch_norm=True,
target_network=True, gamma_schedule=None, straight_through=False, kl=False, kl_weight=1.0,
oracle_r=None, oracle_p=None, transitions_mse=False, correct_ce=False):
self.input_shape = input_shape
self.num_blocks = num_blocks
self.num_actions = num_actions
self.encoder_filters = encoder_filters
self.encoder_filter_sizes = encoder_filter_sizes
self.encoder_strides = encoder_strides
self.encoder_neurons = encoder_neurons
self.learning_rate_encoder = learning_rate_encoder
self.learning_rate_r = learning_rate_r
self.learning_rate_p = learning_rate_p
self.weight_decay = weight_decay
self.gamma = gamma
self.optimizer_encoder = optimizer_encoder
self.optimizer_model = optimizer_model
self.max_steps = max_steps
self.batch_norm = batch_norm
self.target_network = target_network
self.gamma_schedule = gamma_schedule
self.straight_through = straight_through
self.kl = kl
self.kl_weight = kl_weight
self.oracle_r = oracle_r
self.oracle_p = oracle_p
self.transitions_mse = transitions_mse
self.correct_ce = correct_ce
if self.gamma_schedule is not None:
assert len(self.gamma) == len(self.gamma_schedule) + 1
self.hand_states_pl, self.next_hand_states_pl = None, None
def encode(self, states, batch_size=100, hand_states=None):
assert states.shape[0]
num_steps = int(np.ceil(states.shape[0] / batch_size))
embeddings = []
for i in range(num_steps):
batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
feed_dict = {
self.states_pl: states[batch_slice],
self.is_training_pl: False
}
if hand_states is not None:
feed_dict[self.hand_states_pl] = hand_states[batch_slice]
embedding = self.session.run(self.state_block_samples_t, feed_dict=feed_dict)
embeddings.append(embedding)
embeddings = np.concatenate(embeddings, axis=0)
return embeddings
def build(self):
self.build_placeholders()
self.build_model()
self.build_training()
def build_placeholders(self):
self.states_pl = tf.placeholder(tf.float32, shape=(None, *self.input_shape), name="states_pl")
self.actions_pl = tf.placeholder(tf.int32, shape=(None,), name="actions_pl")
self.rewards_pl = tf.placeholder(tf.float32, shape=(None,), name="rewards_pl")
self.dones_pl = tf.placeholder(tf.bool, shape=(None,), name="dones_pl")
self.next_states_pl = tf.placeholder(tf.float32, shape=(None, *self.input_shape), name="next_states_pl")
self.is_training_pl = tf.placeholder(tf.bool, shape=[], name="is_training_pl")
self.hand_states_pl = tf.placeholder(tf.float32, shape=(None, 1), name="hand_states_pl")
self.next_hand_states_pl = tf.placeholder(tf.float32, shape=(None, 1), name="next_hand_states_pl")
self.temperature_pl = tf.placeholder(tf.float32, shape=[], name="temperature_pl")
def build_model(self):
# encode state block
self.state_block_logits_t = self.build_encoder(self.states_pl, hand_state=self.hand_states_pl)
agent_utils.summarize(self.state_block_logits_t, "state_block_logits_t")
self.state_block_cat_dist = tf.contrib.distributions.OneHotCategorical(
logits=self.state_block_logits_t
)
self.state_block_samples_t = tf.cast(self.state_block_cat_dist.sample(), tf.int32)
self.state_block_sg_dist = tf.contrib.distributions.RelaxedOneHotCategorical(
self.temperature_pl, logits=self.state_block_logits_t
)
self.state_block_sg_samples_t = self.state_block_sg_dist.sample()
agent_utils.summarize(self.state_block_sg_samples_t, "state_block_sg_samples_t")
if self.straight_through:
# hard sample
self.state_block_sg_samples_hard_t = \
tf.cast(tf.one_hot(tf.argmax(self.state_block_sg_samples_t, -1), self.num_blocks), tf.float32)
# fake gradients for the hard sample
self.state_block_sg_samples_t = \
tf.stop_gradient(self.state_block_sg_samples_hard_t - self.state_block_sg_samples_t) + \
self.state_block_sg_samples_t
agent_utils.summarize(self.state_block_sg_samples_hard_t, "state_block_sg_samples_hard_t")
# encode next state block
if self.target_network:
self.next_state_block_logits_t = self.build_encoder(
self.next_states_pl, share_weights=False, namespace=self.TARGET_ENCODER_NAMESPACE,
hand_state=self.next_hand_states_pl
)
self.build_target_update()
else:
self.next_state_block_logits_t = self.build_encoder(
self.next_states_pl, share_weights=True, hand_state=self.next_hand_states_pl
)
self.next_state_block_cat_dist = tf.contrib.distributions.OneHotCategorical(
logits=self.next_state_block_logits_t
)
self.next_state_block_samples_t = tf.cast(self.next_state_block_cat_dist.sample(), tf.float32)
self.r_v = tf.get_variable(
"reward_matrix", shape=(self.num_actions, self.num_blocks), dtype=tf.float32,
initializer=tf.random_uniform_initializer(minval=0, maxval=1, dtype=tf.float32)
)
self.r_t = self.r_v
self.p_v = tf.get_variable(
"transition_matrix", shape=(self.num_actions, self.num_blocks, self.num_blocks), dtype=tf.float32,
initializer=tf.random_uniform_initializer(minval=0, maxval=1, dtype=tf.float32)
)
if not self.transitions_mse:
self.p_t = tf.nn.softmax(self.p_v, axis=-1)
else:
self.p_t = self.p_v
def build_training(self):
# set up global step variable
self.global_step = tf.train.get_or_create_global_step()
# gather reward and transition matrices for each action
if self.oracle_r is not None:
r_t = tf.gather(self.oracle_r, self.actions_pl)
else:
r_t = tf.gather(self.r_t, self.actions_pl)
if self.oracle_p is not None:
p_t = tf.gather(self.oracle_p, self.actions_pl)
else:
p_t = tf.gather(self.p_t, self.actions_pl)
dones_t = tf.cast(self.dones_pl, tf.float32)
# reward loss
self.reward_loss_t = tf.square(self.rewards_pl - tf.reduce_sum(self.state_block_sg_samples_t * r_t, axis=1))
# transition loss
next_state = tf.matmul(tf.expand_dims(self.state_block_sg_samples_t, axis=1), p_t)[:, 0, :]
if self.transitions_mse:
self.transition_loss_t = tf.reduce_sum(
tf.square(tf.stop_gradient(self.next_state_block_samples_t) - next_state),
axis=1
) * (1 - dones_t)
else:
if self.correct_ce:
self.transition_loss_t = tf.reduce_sum(
- tf.stop_gradient(tf.nn.softmax(self.next_state_block_logits_t)) * tf.log(next_state + 1e-7),
axis=1
) * (1 - dones_t)
else:
self.transition_loss_t = tf.reduce_sum(
- tf.stop_gradient(self.next_state_block_samples_t) * tf.log(next_state + 1e-7),
axis=1
) * (1 - dones_t)
# weight decay regularizer
reg = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
if len(reg) > 0:
self.regularization_loss_t = tf.add_n(reg)
else:
self.regularization_loss_t = 0.0
# kl divergence regularizer
if self.kl:
prior_logits_t = tf.ones_like(self.state_block_logits_t) / self.num_blocks
prior_cat_dist = tf.contrib.distributions.OneHotCategorical(logits=prior_logits_t)
kl_divergence_t = tf.contrib.distributions.kl_divergence(self.state_block_cat_dist, prior_cat_dist)
self.kl_loss_t = tf.reduce_mean(kl_divergence_t)
else:
self.kl_loss_t = 0.0
# final loss
if self.gamma_schedule is not None:
gamma = tf.train.piecewise_constant(self.global_step, self.gamma_schedule, self.gamma)
else:
gamma = self.gamma
self.loss_t = tf.reduce_mean(
self.reward_loss_t + gamma * self.transition_loss_t, axis=0
) + self.regularization_loss_t + self.kl_weight * self.kl_loss_t
# encoder optimizer
encoder_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.ENCODER_NAMESPACE)
encoder_optimizer = agent_utils.get_optimizer(self.optimizer_encoder, self.learning_rate_encoder)
self.encoder_train_step = encoder_optimizer.minimize(
self.loss_t, global_step=self.global_step, var_list=encoder_variables
)
# add batch norm updates
if self.batch_norm:
self.update_op = tf.group(*tf.get_collection(tf.GraphKeys.UPDATE_OPS))
self.encoder_train_step = tf.group(self.encoder_train_step, self.update_op)
# model optimizer if not full oracle
if self.oracle_r is None or self.oracle_p is None:
r_optimizer = agent_utils.get_optimizer(self.optimizer_model, self.learning_rate_r)
r_step = r_optimizer.minimize(
self.reward_loss_t, var_list=[self.r_v]
)
p_optimizer = agent_utils.get_optimizer(self.optimizer_model, self.learning_rate_p)
p_step = p_optimizer.minimize(
self.transition_loss_t, var_list=[self.p_v]
)
self.model_train_step = tf.group(r_step, p_step)
self.train_step = tf.group(self.encoder_train_step, self.model_train_step)
else:
self.train_step = self.encoder_train_step
def build_encoder(self, input_t, share_weights=False, namespace=ENCODER_NAMESPACE, hand_state=None):
x = tf.expand_dims(input_t, axis=-1)
with tf.variable_scope(namespace, reuse=share_weights):
for idx in range(len(self.encoder_filters)):
with tf.variable_scope("conv{:d}".format(idx + 1)):
x = tf.layers.conv2d(
x, self.encoder_filters[idx], self.encoder_filter_sizes[idx], self.encoder_strides[idx],
padding="SAME", activation=tf.nn.relu if not self.batch_norm else None,
kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
kernel_initializer=agent_utils.get_mrsa_initializer(),
use_bias=not self.batch_norm
)
if self.batch_norm and idx != len(self.encoder_filters) - 1:
x = tf.layers.batch_normalization(x, training=self.is_training_pl)
x = tf.nn.relu(x)
x = tf.layers.flatten(x)
if self.batch_norm:
x = tf.layers.batch_normalization(x, training=self.is_training_pl)
x = tf.nn.relu(x)
if hand_state is not None:
x = tf.concat([x, hand_state], axis=1)
for idx, neurons in enumerate(self.encoder_neurons):
with tf.variable_scope("fc{:d}".format(idx + 1)):
x = tf.layers.dense(
x, neurons, activation=tf.nn.relu if not self.batch_norm else None,
kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
kernel_initializer=agent_utils.get_mrsa_initializer(),
use_bias=not self.batch_norm
)
if self.batch_norm:
x = tf.layers.batch_normalization(x, training=self.is_training_pl)
x = tf.nn.relu(x)
with tf.variable_scope("predict"):
x = tf.layers.dense(
x, self.num_blocks, activation=None,
kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
kernel_initializer=agent_utils.get_mrsa_initializer()
)
return x
def build_target_update(self):
source_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.ENCODER_NAMESPACE)
target_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.TARGET_ENCODER_NAMESPACE)
assert len(source_vars) == len(target_vars) and len(source_vars) > 0
update_ops = []
for source_var, target_var in zip(source_vars, target_vars):
update_ops.append(tf.assign(target_var, source_var))
self.target_update_op = tf.group(*update_ops)
def build_summaries(self):
# losses
tf.summary.scalar("loss", self.loss_t)
tf.summary.scalar("transition_loss", tf.reduce_mean(self.transition_loss_t))
tf.summary.scalar("reward_loss", tf.reduce_mean(self.reward_loss_t))
# logits grads
grad_t = tf.gradients(tf.reduce_mean(self.transition_loss_t), self.state_block_logits_t)
grad_r = tf.gradients(tf.reduce_mean(self.reward_loss_t), self.state_block_logits_t)
norm_grad_t = tf.norm(grad_t, ord=2, axis=-1)[0]
norm_grad_r = tf.norm(grad_r, ord=2, axis=-1)[0]
agent_utils.summarize(norm_grad_t, "logits_grad_t")
agent_utils.summarize(norm_grad_r, "logits_grad_r")
# samples grads
grad_t = tf.gradients(tf.reduce_mean(self.transition_loss_t), self.state_block_sg_samples_t)
grad_r = tf.gradients(tf.reduce_mean(self.reward_loss_t), self.state_block_sg_samples_t)
norm_grad_t = tf.norm(grad_t, ord=2, axis=-1)[0]
norm_grad_r = tf.norm(grad_r, ord=2, axis=-1)[0]
agent_utils.summarize(norm_grad_t, "sg_samples_grad_t")
agent_utils.summarize(norm_grad_r, "sg_samples_grad_r")
def start_session(self, gpu_memory=None):
gpu_options = None
if gpu_memory is not None:
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory)
tf_config = tf.ConfigProto(gpu_options=gpu_options)
self.session = tf.Session(config=tf_config)
self.session.run(tf.global_variables_initializer())
def stop_session(self):
if self.session is not None:
self.session.close()
class ExpectationModel:
ENCODER_NAMESPACE = "encoder"
TARGET_ENCODER_NAMESPACE = "target_encoder"
def __init__(self, input_shape, num_blocks, num_actions, encoder_filters, encoder_filter_sizes,
encoder_strides, encoder_neurons, learning_rate_encoder, learning_rate_r, learning_rate_t,
weight_decay, gamma, optimizer_encoder, optimizer_model, max_steps, batch_norm=True,
target_network=True, oracle_r=None, oracle_t=None, propagate_next_state=False,
z_transform=False, abs_z_transform=False, sigsoftmax=False, encoder_tau=1.0, model_tau=1.0,
no_tau_target_encoder=False, kl_penalty=False, kl_penalty_weight=0.01, small_r_init=False,
small_t_init=False):
if propagate_next_state:
assert not target_network
self.input_shape = input_shape
self.num_blocks = num_blocks
self.num_actions = num_actions
self.encoder_filters = encoder_filters
self.encoder_filter_sizes = encoder_filter_sizes
self.encoder_strides = encoder_strides
self.encoder_neurons = encoder_neurons
self.learning_rate_encoder = learning_rate_encoder
self.learning_rate_r = learning_rate_r
self.learning_rate_t = learning_rate_t
self.weight_decay = weight_decay
self.gamma = gamma
self.optimizer_encoder = optimizer_encoder
self.optimizer_model = optimizer_model
self.max_steps = max_steps
self.batch_norm = batch_norm
self.target_network = target_network
self.oracle_r = oracle_r
self.oracle_t = oracle_t
self.propagate_next_state = propagate_next_state
self.z_transform = z_transform
self.abs_z_transform = abs_z_transform
self.sigsoftmax = sigsoftmax
self.encoder_tau = encoder_tau
self.model_tau = model_tau
self.no_tau_target_encoder = no_tau_target_encoder
self.kl_penalty = kl_penalty
self.kl_penalty_weight = kl_penalty_weight
self.small_r_init = small_r_init
self.small_t_init = small_t_init
self.hand_states_pl, self.next_hand_states_pl = None, None
def encode(self, depths, hand_states, batch_size=100):
num_steps = int(np.ceil(depths.shape[0] / batch_size))
embeddings = []
for i in range(num_steps):
batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
feed_dict = {
self.states_pl: depths[batch_slice],
self.hand_states_pl: hand_states[batch_slice],
self.is_training_pl: False
}
embedding = self.session.run(self.state_softmax_t, feed_dict=feed_dict)
embeddings.append(embedding)
embeddings = np.concatenate(embeddings, axis=0)
return embeddings
def validate(self, depths, hand_states, actions, rewards, next_depths, next_hand_states, dones, batch_size=100):
num_steps = int(np.ceil(depths.shape[0] / batch_size))
losses = []
for i in range(num_steps):
batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
feed_dict = {
self.states_pl: depths[batch_slice],
self.hand_states_pl: hand_states[:, np.newaxis][batch_slice],
self.actions_pl: actions[batch_slice],
self.rewards_pl: rewards[batch_slice],
self.next_states_pl: next_depths[batch_slice],
self.next_hand_states_pl: next_hand_states[:, np.newaxis][batch_slice],
self.dones_pl: dones[batch_slice],
self.is_training_pl: False
}
l1, l2 = self.session.run([self.full_transition_loss_t, self.full_reward_loss_t], feed_dict=feed_dict)
losses.append(np.transpose(np.array([l1, l2]), axes=(1, 0)))
losses = np.concatenate(losses, axis=0)
return losses
def validate_and_encode(self, depths, hand_states, actions, rewards, next_depths, next_hand_states, dones,
batch_size=100):
num_steps = int(np.ceil(depths.shape[0] / batch_size))
losses = []
embeddings = []
for i in range(num_steps):
batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
feed_dict = {
self.states_pl: depths[batch_slice],
self.hand_states_pl: hand_states[:, np.newaxis][batch_slice],
self.actions_pl: actions[batch_slice],
self.rewards_pl: rewards[batch_slice],
self.next_states_pl: next_depths[batch_slice],
self.next_hand_states_pl: next_hand_states[:, np.newaxis][batch_slice],
self.dones_pl: dones[batch_slice],
self.is_training_pl: False
}
tmp_embeddings, l1, l2 = self.session.run([
self.state_softmax_t, self.full_transition_loss_t, self.full_reward_loss_t], feed_dict=feed_dict
)
losses.append(np.transpose(np.array([l1, l2]), axes=(1, 0)))
embeddings.append(tmp_embeddings)
losses = np.concatenate(losses, axis=0)
embeddings = np.concatenate(embeddings)
return losses, embeddings
def build(self):
self.build_placeholders()
self.build_model()
self.build_training()
def build_placeholders(self):
self.states_pl = tf.placeholder(tf.float32, shape=(None, *self.input_shape), name="states_pl")
self.actions_pl = tf.placeholder(tf.int32, shape=(None,), name="actions_pl")
self.rewards_pl = tf.placeholder(tf.float32, shape=(None,), name="rewards_pl")
self.dones_pl = tf.placeholder(tf.bool, shape=(None,), name="dones_pl")
self.next_states_pl = tf.placeholder(tf.float32, shape=(None, *self.input_shape), name="next_states_pl")
self.is_training_pl = tf.placeholder(tf.bool, shape=[], name="is_training_pl")
self.hand_states_pl = tf.placeholder(tf.float32, shape=(None, 1), name="hand_states_pl")
self.next_hand_states_pl = tf.placeholder(tf.float32, shape=(None, 1), name="next_hand_states_pl")
def build_model(self):
self.state_logits_t, self.state_softmax_t = self.build_encoder(
self.states_pl, self.hand_states_pl, self.encoder_tau
)
self.perplexity_t = tf.constant(2, dtype=tf.float32) ** (
- tf.reduce_mean(
tf.reduce_sum(
self.state_softmax_t * tf.log(self.state_softmax_t + 1e-7) /
tf.log(tf.constant(2, dtype=self.state_softmax_t.dtype)),
axis=1
),
axis=0
)
)
if self.no_tau_target_encoder:
target_tau = 1.0
else:
target_tau = self.encoder_tau
if self.target_network:
self.next_state_logits_t, self.next_state_softmax_t = self.build_encoder(
self.next_states_pl, self.next_hand_states_pl, target_tau, share_weights=False,
namespace=self.TARGET_ENCODER_NAMESPACE
)
self.build_target_update()
else:
self.next_state_logits_t, self.next_state_softmax_t = self.build_encoder(
self.next_states_pl, self.next_hand_states_pl, target_tau, share_weights=True
)
self.build_r_model()
self.build_t_model()
def build_r_model(self):
if self.small_r_init:
r_init = tf.random_normal_initializer(mean=0, stddev=0.1, dtype=tf.float32)
else:
r_init = tf.random_uniform_initializer(minval=0, maxval=1, dtype=tf.float32)
self.r_v = tf.get_variable(
"reward_matrix", shape=(self.num_actions, self.num_blocks), dtype=tf.float32,
initializer=r_init
)
self.r_t = self.r_v
def build_t_model(self):
if self.small_t_init:
t_init = tf.random_normal_initializer(mean=0, stddev=0.1, dtype=tf.float32)
else:
t_init = tf.random_uniform_initializer(minval=0, maxval=1, dtype=tf.float32)
self.t_v = tf.get_variable(
"transition_matrix", shape=(self.num_actions, self.num_blocks, self.num_blocks), dtype=tf.float32,
initializer=t_init
)
self.t_softmax_t = tf.nn.softmax(self.t_v / self.model_tau, axis=2)
self.t_logsoftmax_t = tf.nn.log_softmax(self.t_v / self.model_tau, axis=2)
def build_training(self):
# prep
self.global_step = tf.train.get_or_create_global_step()
self.gather_matrices()
self.dones_float_t = tf.cast(self.dones_pl, tf.float32)
# build losses
self.build_reward_loss()
self.build_transition_loss()
self.build_regularization_loss()
self.build_kl_penalty()
# build full loss
self.gamma_v = tf.Variable(initial_value=self.gamma, trainable=False)
self.loss_t = self.reward_loss_t + tf.stop_gradient(self.gamma_v) * self.transition_loss_t + \
self.regularization_loss_t + self.kl_penalty_weight_v * self.kl_penalty_t
# build training
self.build_encoder_training()
self.build_model_training()
# integrate training into a single op
if self.model_train_step is None:
self.train_step = self.encoder_train_step
else:
self.train_step = tf.group(self.encoder_train_step, self.model_train_step)
def gather_matrices(self):
if self.oracle_r is not None:
self.r_gather_t = tf.gather(self.oracle_r, self.actions_pl)
else:
self.r_gather_t = tf.gather(self.r_t, self.actions_pl)
if self.oracle_t is not None:
self.t_logsoftmax_gather_t = tf.gather(self.oracle_t, self.actions_pl)
else:
self.t_logsoftmax_gather_t = tf.gather(self.t_logsoftmax_t, self.actions_pl)
def build_reward_loss(self):
term1 = tf.square(self.rewards_pl[:, tf.newaxis] - self.r_gather_t)
term2 = term1 * self.state_softmax_t
self.full_reward_loss_t = (1 / 2) * tf.reduce_sum(term2, axis=1)
self.reward_loss_t = (1 / 2) * tf.reduce_mean(tf.reduce_sum(term2, axis=1), axis=0)
def build_transition_loss(self):
if self.propagate_next_state:
self.transition_term1 = self.state_softmax_t[:, :, tf.newaxis] * self.next_state_softmax_t[:, tf.newaxis, :]
else:
self.transition_term1 = self.state_softmax_t[:, :, tf.newaxis] * tf.stop_gradient(
self.next_state_softmax_t[:, tf.newaxis, :]
)
self.transition_term2 = self.transition_term1 * self.t_logsoftmax_gather_t
self.full_transition_loss_t = - tf.reduce_sum(self.transition_term2, axis=[1, 2]) * (1 - self.dones_float_t)
self.transition_loss_t = tf.reduce_sum(self.full_transition_loss_t, axis=0) / tf.reduce_max(
[1.0, tf.reduce_sum(1 - self.dones_float_t)]
)
def build_regularization_loss(self):
reg = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
if len(reg) > 0:
self.regularization_loss_t = tf.add_n(reg)
else:
self.regularization_loss_t = 0
def build_kl_penalty(self):
self.kl_penalty_weight_v = tf.Variable(self.kl_penalty_weight, trainable=False, dtype=tf.float32)
self.kl_penalty_weight_pl = tf.placeholder(tf.float32, shape=[], name="kl_penalty_weight_pl")
self.kl_penalty_weight_assign = tf.assign(self.kl_penalty_weight_v, self.kl_penalty_weight_pl)
if self.kl_penalty:
log_softmax = tf.nn.log_softmax(self.state_logits_t, axis=-1)
self.kl_penalty_t = tf.reduce_mean(tf.reduce_sum(self.state_softmax_t * log_softmax, axis=-1), axis=0)
else:
self.kl_penalty_t = 0.0
def build_encoder_training(self):
encoder_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.ENCODER_NAMESPACE)
encoder_optimizer = agent_utils.get_optimizer(self.optimizer_encoder, self.learning_rate_encoder)
self.encoder_train_step = encoder_optimizer.minimize(
self.loss_t, global_step=self.global_step, var_list=encoder_variables
)
if self.batch_norm:
self.update_op = tf.group(*tf.get_collection(tf.GraphKeys.UPDATE_OPS))
self.encoder_train_step = tf.group(self.encoder_train_step, self.update_op)
def build_model_training(self):
model_train_step = []
if self.oracle_r is None:
r_optimizer = agent_utils.get_optimizer(self.optimizer_model, self.learning_rate_r)
self.r_step = r_optimizer.minimize(
self.reward_loss_t, var_list=[self.r_v]
)
model_train_step.append(self.r_step)
if self.oracle_t is None:
t_optimizer = agent_utils.get_optimizer(self.optimizer_model, self.learning_rate_t)
self.t_step = t_optimizer.minimize(
self.transition_loss_t, var_list=[self.t_v]
)
model_train_step.append(self.t_step)
if len(model_train_step) > 0:
self.model_train_step = tf.group(*model_train_step)
else:
self.model_train_step = None
def build_encoder(self, depth_pl, hand_state_pl, tau, share_weights=False, namespace=ENCODER_NAMESPACE):
x = tf.expand_dims(depth_pl, axis=-1)
with tf.variable_scope(namespace, reuse=share_weights):
for idx in range(len(self.encoder_filters)):
with tf.variable_scope("conv{:d}".format(idx + 1)):
x = tf.layers.conv2d(
x, self.encoder_filters[idx], self.encoder_filter_sizes[idx], self.encoder_strides[idx],
padding="SAME", activation=tf.nn.relu if not self.batch_norm else None,
kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
kernel_initializer=agent_utils.get_mrsa_initializer(),
use_bias=not self.batch_norm
)
if self.batch_norm and idx != len(self.encoder_filters) - 1:
x = tf.layers.batch_normalization(x, training=self.is_training_pl)
x = tf.nn.relu(x)
x = tf.layers.flatten(x)
if self.batch_norm:
x = tf.layers.batch_normalization(x, training=self.is_training_pl)
x = tf.nn.relu(x)
x = tf.concat([x, hand_state_pl], axis=1)
for idx, neurons in enumerate(self.encoder_neurons):
with tf.variable_scope("fc{:d}".format(idx + 1)):
x = tf.layers.dense(
x, neurons, activation=tf.nn.relu if not self.batch_norm else None,
kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
kernel_initializer=agent_utils.get_mrsa_initializer(),
use_bias=not self.batch_norm
)
if self.batch_norm:
x = tf.layers.batch_normalization(x, training=self.is_training_pl)
x = tf.nn.relu(x)
with tf.variable_scope("predict"):
x = tf.layers.dense(
x, self.num_blocks, activation=None,
kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
kernel_initializer=agent_utils.get_mrsa_initializer()
)
if self.z_transform:
dist = x - tf.reduce_min(x, axis=1)[:, tf.newaxis]
dist = dist / tf.reduce_sum(dist, axis=1)[:, tf.newaxis]
elif self.abs_z_transform:
abs_x = tf.abs(x)
dist = abs_x / tf.reduce_sum(abs_x, axis=1)[:, tf.newaxis]
elif self.sigsoftmax:
e_x = tf.exp(x - tf.reduce_max(x, axis=1)[:, tf.newaxis])
sig_e_x = e_x * tf.nn.sigmoid(x)
dist = sig_e_x / tf.reduce_sum(sig_e_x, axis=1)[:, tf.newaxis]
else:
dist = tf.nn.softmax(x / tau)
return x, dist
def build_target_update(self):
source_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.ENCODER_NAMESPACE)
target_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.TARGET_ENCODER_NAMESPACE)
assert len(source_vars) == len(target_vars) and len(source_vars) > 0
update_ops = []
for source_var, target_var in zip(source_vars, target_vars):
update_ops.append(tf.assign(target_var, source_var))
self.target_update_op = tf.group(*update_ops)
def build_summaries(self):
# losses
tf.summary.scalar("loss", self.loss_t)
tf.summary.scalar("transition_loss", tf.reduce_mean(self.transition_loss_t))
tf.summary.scalar("reward_loss", tf.reduce_mean(self.reward_loss_t))
# logits and softmax
agent_utils.summarize(self.state_logits_t, "logits")
agent_utils.summarize(self.state_softmax_t, "softmax")
# gradients
self.grad_norms_d = dict()
for target, target_name in zip(
[self.loss_t, self.transition_loss_t, self.reward_loss_t], ["total_loss", "t_loss", "r_loss"]
):
for source, source_name in zip([self.state_logits_t, self.state_softmax_t], ["logits", "softmax"]):
                grads = tf.gradients(tf.reduce_mean(target), source)[0]
                grad_norms = tf.norm(grads, ord=1, axis=-1)
name = "state_{}_grad_{}".format(source_name, target_name)
self.grad_norms_d[name] = tf.reduce_mean(grad_norms)
agent_utils.summarize(grad_norms, name)
def set_gamma(self, value):
self.session.run(tf.assign(self.gamma_v, value))
def set_kl_penalty_weight(self, value):
self.session.run(self.kl_penalty_weight_assign, feed_dict={self.kl_penalty_weight_pl: value})
def start_session(self, gpu_memory=None):
gpu_options = None
if gpu_memory is not None:
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory)
tf_config = tf.ConfigProto(gpu_options=gpu_options)
self.session = tf.Session(config=tf_config)
self.session.run(tf.global_variables_initializer())
def stop_session(self):
if self.session is not None:
self.session.close()
def save_matrices_as_images(self, step, save_dir, ext="pdf"):
r, p = self.session.run([self.r_t, self.t_softmax_t])
r = np.reshape(r, (r.shape[0], -1))
p = np.reshape(p, (p.shape[0], -1))
r_path = os.path.join(save_dir, "r_{:d}.{}".format(step, ext))
p_path = os.path.join(save_dir, "p_{:d}.{}".format(step, ext))
plt.clf()
plt.imshow(r, vmin=-0.5, vmax=1.5)
plt.colorbar()
plt.savefig(r_path)
plt.clf()
plt.imshow(p, vmin=0, vmax=1)
plt.colorbar()
plt.savefig(p_path)
class ExpectationModelGaussian(ExpectationModel):
def __init__(self, input_shape, num_blocks, num_actions, encoder_filters, encoder_filter_sizes,
encoder_strides, encoder_neurons, learning_rate_encoder, learning_rate_r, learning_rate_t,
weight_decay, gamma, optimizer_encoder, optimizer_model, max_steps, batch_norm=True,
target_network=True, oracle_r=None, oracle_t=None, propagate_next_state=False,
z_transform=False, abs_z_transform=False, sigsoftmax=False, encoder_tau=1.0, model_tau=1.0,
no_tau_target_encoder=False, kl_penalty=False, kl_penalty_weight=0.01, small_r_init=False,
small_t_init=False):
super(ExpectationModelGaussian, self).__init__(
input_shape, num_blocks, num_actions, encoder_filters, encoder_filter_sizes, encoder_strides,
encoder_neurons, learning_rate_encoder, learning_rate_r, learning_rate_t, weight_decay, gamma,
optimizer_encoder, optimizer_model, max_steps, batch_norm=batch_norm, target_network=target_network,
oracle_r=oracle_r, oracle_t=oracle_t, propagate_next_state=propagate_next_state, z_transform=z_transform,
abs_z_transform=abs_z_transform, sigsoftmax=sigsoftmax, encoder_tau=encoder_tau, model_tau=model_tau,
no_tau_target_encoder=no_tau_target_encoder, kl_penalty=kl_penalty,
kl_penalty_weight=kl_penalty_weight, small_r_init=small_r_init, small_t_init=small_t_init
)
def build_model(self):
self.state_mu_t, self.state_sd_t, self.state_var_t, self.state_logits_t, self.state_softmax_t = \
self.build_encoder(
self.states_pl, self.hand_states_pl, self.encoder_tau, namespace=self.ENCODER_NAMESPACE
)
self.perplexity_t = tf.constant(2, dtype=tf.float32) ** (
- tf.reduce_mean(
tf.reduce_sum(
self.state_softmax_t * tf.log(self.state_softmax_t + 1e-7) /
tf.log(tf.constant(2, dtype=self.state_softmax_t.dtype)),
axis=1
),
axis=0
)
)
if self.no_tau_target_encoder:
target_tau = 1.0
else:
target_tau = self.encoder_tau
if self.target_network:
self.next_state_mu_t, self.next_state_sd_t, self.next_state_var_t, self.next_state_logits_t, \
self.next_state_softmax_t = \
self.build_encoder(
self.next_states_pl, self.next_hand_states_pl, target_tau, share_weights=False,
namespace=self.TARGET_ENCODER_NAMESPACE
)
self.build_target_update()
else:
self.next_state_mu_t, self.next_state_sd_t, self.next_state_var_t, self.next_state_logits_t, \
self.next_state_softmax_t = \
self.build_encoder(
self.next_states_pl, self.next_hand_states_pl, target_tau, share_weights=True,
namespace=self.ENCODER_NAMESPACE
)
self.build_r_model()
self.build_t_model()
def build_encoder(self, depth_pl, hand_state_pl, tau, share_weights=False, namespace=None):
x = tf.expand_dims(depth_pl, axis=-1)
with tf.variable_scope(namespace, reuse=share_weights):
for idx in range(len(self.encoder_filters)):
with tf.variable_scope("conv{:d}".format(idx + 1)):
x = tf.layers.conv2d(
x, self.encoder_filters[idx], self.encoder_filter_sizes[idx], self.encoder_strides[idx],
padding="SAME", activation=tf.nn.relu if not self.batch_norm else None,
kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
kernel_initializer=agent_utils.get_mrsa_initializer(),
use_bias=not self.batch_norm
)
if self.batch_norm and idx != len(self.encoder_filters) - 1:
x = tf.layers.batch_normalization(x, training=self.is_training_pl)
x = tf.nn.relu(x)
x = tf.layers.flatten(x)
if self.batch_norm:
x = tf.layers.batch_normalization(x, training=self.is_training_pl)
x = tf.nn.relu(x)
x = tf.concat([x, hand_state_pl], axis=1)
for idx, neurons in enumerate(self.encoder_neurons):
with tf.variable_scope("fc{:d}".format(idx + 1)):
x = tf.layers.dense(
x, neurons, activation=tf.nn.relu if not self.batch_norm else None,
kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
kernel_initializer=agent_utils.get_mrsa_initializer(),
use_bias=not self.batch_norm
)
if self.batch_norm:
x = tf.layers.batch_normalization(x, training=self.is_training_pl)
x = tf.nn.relu(x)
with tf.variable_scope("predict"):
mu_t = tf.layers.dense(
x, self.num_blocks, activation=None,
kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
kernel_initializer=agent_utils.get_mrsa_initializer()
)
log_var_t = tf.layers.dense(
x, self.num_blocks, activation=None,
kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
kernel_initializer=agent_utils.get_mrsa_initializer()
)
var_t = tf.exp(log_var_t)
sd_t = tf.sqrt(var_t)
noise_t = tf.random_normal(
shape=(tf.shape(mu_t)[0], self.num_blocks), mean=0, stddev=1.0
)
sample_t = mu_t + sd_t * noise_t
dist_t = tf.nn.softmax(sample_t / tau)
return mu_t, sd_t, var_t, sample_t, dist_t
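    # For the Gaussian encoder the penalty is the closed-form KL divergence between N(mu, var) and the
    # standard normal prior N(0, 1), i.e. 0.5 * (mu^2 + var - log(var) - 1), summed over latent dimensions
    # and averaged over the batch.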
def build_kl_penalty(self):
self.kl_penalty_weight_v = tf.Variable(self.kl_penalty_weight, trainable=False, dtype=tf.float32)
self.kl_penalty_weight_pl = tf.placeholder(tf.float32, shape=[], name="kl_penalty_weight_pl")
self.kl_penalty_weight_assign = tf.assign(self.kl_penalty_weight_v, self.kl_penalty_weight_pl)
if self.kl_penalty:
kl_divergence_t = 0.5 * (tf.square(self.state_mu_t) + self.state_var_t - tf.log(self.state_var_t) - 1.0)
self.kl_penalty_t = tf.reduce_mean(tf.reduce_sum(kl_divergence_t, axis=1), axis=0)
else:
self.kl_penalty_t = 0.0
class ExpectationModelGaussianWithQ(ExpectationModelGaussian):
def __init__(self, input_shape, num_blocks, num_actions, encoder_filters, encoder_filter_sizes,
encoder_strides, encoder_neurons, learning_rate_encoder, learning_rate_r, learning_rate_t, learning_rate_q,
weight_decay, reward_gamma, transition_gamma, optimizer_encoder, optimizer_model, max_steps, batch_norm=True,
target_network=True, oracle_r=None, oracle_t=None, propagate_next_state=False,
z_transform=False, abs_z_transform=False, sigsoftmax=False, encoder_tau=1.0, model_tau=1.0,
no_tau_target_encoder=False, kl_penalty=False, kl_penalty_weight=0.01, small_r_init=False,
small_t_init=False, small_q_init=False):
super(ExpectationModelGaussianWithQ, self).__init__(
input_shape, num_blocks, num_actions, encoder_filters, encoder_filter_sizes, encoder_strides,
encoder_neurons, learning_rate_encoder, learning_rate_r, learning_rate_t, weight_decay, transition_gamma,
optimizer_encoder, optimizer_model, max_steps, batch_norm=batch_norm, target_network=target_network,
oracle_r=oracle_r, oracle_t=oracle_t, propagate_next_state=propagate_next_state, z_transform=z_transform,
abs_z_transform=abs_z_transform, sigsoftmax=sigsoftmax, encoder_tau=encoder_tau, model_tau=model_tau,
no_tau_target_encoder=no_tau_target_encoder, kl_penalty=kl_penalty, kl_penalty_weight=kl_penalty_weight,
small_r_init=small_r_init, small_t_init=small_t_init
)
self.learning_rate_q = learning_rate_q
self.small_q_init = small_q_init
self.reward_gamma = reward_gamma
def validate(self, depths, hand_states, actions, rewards, q_values, next_depths, next_hand_states, dones,
batch_size=100):
num_steps = int(np.ceil(depths.shape[0] / batch_size))
losses = []
for i in range(num_steps):
batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
feed_dict = {
self.states_pl: depths[batch_slice],
self.hand_states_pl: hand_states[:, np.newaxis][batch_slice],
self.actions_pl: actions[batch_slice],
self.rewards_pl: rewards[batch_slice],
self.q_values_pl: q_values[batch_slice],
self.next_states_pl: next_depths[batch_slice],
self.next_hand_states_pl: next_hand_states[:, np.newaxis][batch_slice],
self.dones_pl: dones[batch_slice],
self.is_training_pl: False
}
l1, l2, l3 = self.session.run(
[self.full_q_loss_t, self.full_transition_loss_t, self.full_reward_loss_t], feed_dict=feed_dict
)
losses.append(np.transpose(np.array([l1, l2, l3]), axes=(1, 0)))
losses = np.concatenate(losses, axis=0)
return losses
def validate_and_encode(self, depths, hand_states, actions, rewards, q_values, next_depths, next_hand_states, dones,
batch_size=100):
num_steps = int(np.ceil(depths.shape[0] / batch_size))
losses = []
embeddings = []
for i in range(num_steps):
batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
feed_dict = {
self.states_pl: depths[batch_slice],
self.hand_states_pl: hand_states[:, np.newaxis][batch_slice],
self.actions_pl: actions[batch_slice],
self.rewards_pl: rewards[batch_slice],
self.q_values_pl: q_values[batch_slice],
self.next_states_pl: next_depths[batch_slice],
self.next_hand_states_pl: next_hand_states[:, np.newaxis][batch_slice],
self.dones_pl: dones[batch_slice],
self.is_training_pl: False
}
tmp_embeddings, l1, l2, l3 = self.session.run([
self.state_softmax_t, self.full_q_loss_t, self.full_transition_loss_t, self.full_reward_loss_t],
feed_dict=feed_dict
)
losses.append(np.transpose(np.array([l1, l2, l3]), axes=(1, 0)))
embeddings.append(tmp_embeddings)
losses = np.concatenate(losses, axis=0)
embeddings = np.concatenate(embeddings)
return losses, embeddings
def build_placeholders(self):
self.states_pl = tf.placeholder(tf.float32, shape=(None, *self.input_shape), name="states_pl")
self.actions_pl = tf.placeholder(tf.int32, shape=(None,), name="actions_pl")
self.rewards_pl = tf.placeholder(tf.float32, shape=(None,), name="rewards_pl")
self.q_values_pl = tf.placeholder(tf.float32, shape=(None, self.num_actions), name="q_values_pl")
self.dones_pl = tf.placeholder(tf.bool, shape=(None,), name="dones_pl")
self.next_states_pl = tf.placeholder(tf.float32, shape=(None, *self.input_shape), name="next_states_pl")
self.is_training_pl = tf.placeholder(tf.bool, shape=[], name="is_training_pl")
self.hand_states_pl = tf.placeholder(tf.float32, shape=(None, 1), name="hand_states_pl")
self.next_hand_states_pl = tf.placeholder(tf.float32, shape=(None, 1), name="next_hand_states_pl")
def build_model(self):
self.state_mu_t, self.state_sd_t, self.state_var_t, self.state_logits_t, self.state_softmax_t = \
self.build_encoder(
self.states_pl, self.hand_states_pl, self.encoder_tau, namespace=self.ENCODER_NAMESPACE
)
self.perplexity_t = tf.constant(2, dtype=tf.float32) ** (
- tf.reduce_mean(
tf.reduce_sum(
self.state_softmax_t * tf.log(self.state_softmax_t + 1e-7) /
tf.log(tf.constant(2, dtype=self.state_softmax_t.dtype)),
axis=1
),
axis=0
)
)
if self.no_tau_target_encoder:
target_tau = 1.0
else:
target_tau = self.encoder_tau
if self.target_network:
self.next_state_mu_t, self.next_state_sd_t, self.next_state_var_t, self.next_state_logits_t, \
self.next_state_softmax_t = \
self.build_encoder(
self.next_states_pl, self.next_hand_states_pl, target_tau, share_weights=False,
namespace=self.TARGET_ENCODER_NAMESPACE
)
self.build_target_update()
else:
self.next_state_mu_t, self.next_state_sd_t, self.next_state_var_t, self.next_state_logits_t, \
self.next_state_softmax_t = \
self.build_encoder(
self.next_states_pl, self.next_hand_states_pl, target_tau, share_weights=True,
namespace=self.ENCODER_NAMESPACE
)
self.build_r_model()
self.build_t_model()
self.build_q_model()
def build_q_model(self):
if self.small_q_init:
q_init = tf.random_normal_initializer(mean=0, stddev=0.1, dtype=tf.float32)
else:
q_init = tf.random_uniform_initializer(minval=0, maxval=1, dtype=tf.float32)
self.q_v = tf.get_variable(
"q_matrix", shape=(self.num_actions, self.num_blocks), dtype=tf.float32,
initializer=q_init
)
self.q_t = self.q_v
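    # The total encoder loss combines the Q-value loss, the reward loss scaled by reward_gamma, the
    # transition loss scaled by the (non-trainable, externally schedulable) gamma variable, weight decay,
    # and the KL penalty scaled by its own adjustable weight.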
def build_training(self):
# prep
self.global_step = tf.train.get_or_create_global_step()
self.gather_matrices()
self.dones_float_t = tf.cast(self.dones_pl, tf.float32)
# build losses
self.build_q_loss()
self.build_reward_loss()
self.build_transition_loss()
self.build_regularization_loss()
self.build_kl_penalty()
# build full loss
self.gamma_v = tf.Variable(initial_value=self.gamma, trainable=False)
self.loss_t = self.q_loss_t + self.reward_gamma * self.reward_loss_t + \
tf.stop_gradient(self.gamma_v) * self.transition_loss_t + \
self.regularization_loss_t + self.kl_penalty_weight_v * self.kl_penalty_t
# build training
self.build_encoder_training()
self.build_model_training()
self.build_q_model_training()
if self.model_train_step is None:
self.model_train_step = self.q_step
else:
self.model_train_step = tf.group(self.model_train_step, self.q_step)
# integrate training into a single op
self.train_step = tf.group(self.encoder_train_step, self.model_train_step)
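    # Q loss: for each sample, the squared errors between the target Q-values and the per-block Q estimates
    # q_t[a, s] are summed over actions and then weighted by the encoder's state distribution, i.e.
    # 0.5 * sum_s p(s|x) * sum_a (Q_target(a) - q_t[a, s])^2.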
def build_q_loss(self):
term1 = tf.square(self.q_values_pl[:, :, tf.newaxis] - self.q_t[tf.newaxis, :, :])
term1 = tf.reduce_sum(term1, axis=1)
term2 = term1 * self.state_softmax_t
self.full_q_loss_t = (1 / 2) * tf.reduce_sum(term2, axis=1)
self.q_loss_t = tf.reduce_mean(self.full_q_loss_t, axis=0)
def build_q_model_training(self):
q_optimizer = agent_utils.get_optimizer(self.optimizer_model, self.learning_rate_q)
self.q_step = q_optimizer.minimize(
self.q_loss_t, var_list=[self.q_v]
)
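# Continuous variant: the encoder outputs a Gaussian latent code (mean and variance) instead of a categorical
# distribution, and rewards / transitions are modelled linearly in that code. Minimal usage sketch
# (illustrative only; the constructor arguments shown below are hypothetical placeholder values):
#
#   model = ExpectationModelContinuous(
#       input_shape=(64, 64), num_blocks=32, num_actions=4,
#       encoder_filters=[32, 64], encoder_filter_sizes=[3, 3], encoder_strides=[2, 2],
#       encoder_neurons=[128], learning_rate_encoder=1e-4, learning_rate_r=1e-3, learning_rate_t=1e-3,
#       weight_decay=1e-5, gamma=1.0, optimizer_encoder="adam", optimizer_model="adam", max_steps=10000
#   )
#   model.build()
#   model.start_session()
#   embeddings = model.encode(depths, hand_states)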
class ExpectationModelContinuous:
ENCODER_NAMESPACE = "encoder"
TARGET_ENCODER_NAMESPACE = "target_encoder"
def __init__(self, input_shape, num_blocks, num_actions, encoder_filters, encoder_filter_sizes,
encoder_strides, encoder_neurons, learning_rate_encoder, learning_rate_r, learning_rate_t,
weight_decay, gamma, optimizer_encoder, optimizer_model, max_steps, batch_norm=True,
target_network=True, propagate_next_state=False, no_sample=False, softplus=False,
beta=0.0, zero_embedding_variance=False, old_bn_settings=False, bn_momentum=0.99):
if propagate_next_state:
assert not target_network
self.input_shape = input_shape
self.num_blocks = num_blocks
self.num_actions = num_actions
self.encoder_filters = encoder_filters
self.encoder_filter_sizes = encoder_filter_sizes
self.encoder_strides = encoder_strides
self.encoder_neurons = encoder_neurons
self.learning_rate_encoder = learning_rate_encoder
self.learning_rate_r = learning_rate_r
self.learning_rate_t = learning_rate_t
self.weight_decay = weight_decay
self.gamma = gamma
self.optimizer_encoder = optimizer_encoder
self.optimizer_model = optimizer_model
self.max_steps = max_steps
self.batch_norm = batch_norm
self.target_network = target_network
self.propagate_next_state = propagate_next_state
self.no_sample = no_sample
self.softplus = softplus
self.beta = beta
self.zero_embedding_variance = zero_embedding_variance
self.old_bn_settings = old_bn_settings
self.bn_momentum = bn_momentum
self.hand_states_pl, self.next_hand_states_pl = None, None
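    # encode/predict helpers run the graph in inference mode; with zero_sd=True the standard-deviation
    # tensor is overridden with zeros via the feed_dict, so the sampled embedding used by the reward and
    # transition predictions collapses to the Gaussian mean.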
def encode(self, depths, hand_states, batch_size=100, zero_sd=False):
num_steps = int(np.ceil(depths.shape[0] / batch_size))
embeddings = []
for i in range(num_steps):
batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
feed_dict = {
self.states_pl: depths[batch_slice],
self.hand_states_pl: hand_states[batch_slice],
self.is_training_pl: False
}
if zero_sd:
feed_dict[self.state_sd_t] = np.zeros(
(len(depths[batch_slice]), self.num_blocks), dtype=np.float32
)
embedding = self.session.run(self.state_mu_t, feed_dict=feed_dict)
embeddings.append(embedding)
embeddings = np.concatenate(embeddings, axis=0)
return embeddings
def predict_next_states(self, depths, hand_states, actions, batch_size=100, zero_sd=False):
num_steps = int(np.ceil(depths.shape[0] / batch_size))
next_states = []
for i in range(num_steps):
batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
feed_dict = {
self.states_pl: depths[batch_slice],
self.hand_states_pl: hand_states[batch_slice],
self.actions_pl: actions[batch_slice],
self.is_training_pl: False
}
if zero_sd:
feed_dict[self.state_sd_t] = np.zeros(
(len(depths[batch_slice]), self.num_blocks), dtype=np.float32
)
tmp_next_states = self.session.run(self.transformed_logits, feed_dict=feed_dict)
next_states.append(tmp_next_states)
next_states = np.concatenate(next_states, axis=0)
return next_states
def predict_rewards(self, depths, hand_states, actions, batch_size=100, zero_sd=False):
num_steps = int(np.ceil(depths.shape[0] / batch_size))
rewards = []
for i in range(num_steps):
batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
feed_dict = {
self.states_pl: depths[batch_slice],
self.hand_states_pl: hand_states[batch_slice],
self.actions_pl: actions[batch_slice],
self.is_training_pl: False
}
if zero_sd:
feed_dict[self.state_sd_t] = np.zeros(
(len(depths[batch_slice]), self.num_blocks), dtype=np.float32
)
tmp_rewards = self.session.run(self.reward_prediction_t, feed_dict=feed_dict)
rewards.append(tmp_rewards)
rewards = np.concatenate(rewards, axis=0)
return rewards
def validate(self, depths, hand_states, actions, rewards, next_depths, next_hand_states, dones, batch_size=100):
num_steps = int(np.ceil(depths.shape[0] / batch_size))
losses = []
for i in range(num_steps):
batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
feed_dict = {
self.states_pl: depths[batch_slice],
self.hand_states_pl: hand_states[:, np.newaxis][batch_slice],
self.actions_pl: actions[batch_slice],
self.rewards_pl: rewards[batch_slice],
self.next_states_pl: next_depths[batch_slice],
self.next_hand_states_pl: next_hand_states[:, np.newaxis][batch_slice],
self.dones_pl: dones[batch_slice],
self.is_training_pl: False
}
l1, l2 = self.session.run([self.full_transition_loss_t, self.full_reward_loss_t], feed_dict=feed_dict)
losses.append(np.transpose(np.array([l1, l2]), axes=(1, 0)))
losses = np.concatenate(losses, axis=0)
return losses
def validate_and_encode(self, depths, hand_states, actions, rewards, next_depths, next_hand_states, dones,
batch_size=100):
num_steps = int(np.ceil(depths.shape[0] / batch_size))
losses = []
embeddings = []
for i in range(num_steps):
batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
feed_dict = {
self.states_pl: depths[batch_slice],
self.hand_states_pl: hand_states[:, np.newaxis][batch_slice],
self.actions_pl: actions[batch_slice],
self.rewards_pl: rewards[batch_slice],
self.next_states_pl: next_depths[batch_slice],
self.next_hand_states_pl: next_hand_states[:, np.newaxis][batch_slice],
self.dones_pl: dones[batch_slice],
self.is_training_pl: False
}
tmp_embeddings, l1, l2 = self.session.run([
self.state_mu_t, self.full_transition_loss_t, self.full_reward_loss_t], feed_dict=feed_dict
)
losses.append(np.transpose(np.array([l1, l2]), axes=(1, 0)))
embeddings.append(tmp_embeddings)
losses = np.concatenate(losses, axis=0)
embeddings = np.concatenate(embeddings)
return losses, embeddings
def build(self):
self.build_placeholders()
self.build_model()
self.build_training()
self.saver = tf.train.Saver()
def build_placeholders(self):
self.states_pl = tf.placeholder(tf.float32, shape=(None, *self.input_shape), name="states_pl")
self.actions_pl = tf.placeholder(tf.int32, shape=(None,), name="actions_pl")
self.rewards_pl = tf.placeholder(tf.float32, shape=(None,), name="rewards_pl")
self.dones_pl = tf.placeholder(tf.bool, shape=(None,), name="dones_pl")
self.next_states_pl = tf.placeholder(tf.float32, shape=(None, *self.input_shape), name="next_states_pl")
self.is_training_pl = tf.placeholder(tf.bool, shape=[], name="is_training_pl")
self.hand_states_pl = tf.placeholder(tf.float32, shape=(None, 1), name="hand_states_pl")
self.next_hand_states_pl = tf.placeholder(tf.float32, shape=(None, 1), name="next_hand_states_pl")
def build_model(self):
self.state_mu_t, self.state_var_t, self.state_sd_t, self.state_sample_t = \
self.build_encoder(self.states_pl, self.hand_states_pl)
if self.target_network:
self.next_state_mu_t, self.next_state_var_t, self.next_state_sd_t, self.next_state_sample_t = self.build_encoder(
self.next_states_pl, self.next_hand_states_pl, share_weights=False,
namespace=self.TARGET_ENCODER_NAMESPACE
)
self.build_target_update()
else:
self.next_state_mu_t, self.next_state_var_t, self.next_state_sd_t, self.next_state_sample_t = self.build_encoder(
self.next_states_pl, self.next_hand_states_pl, share_weights=True
)
self.r_v = tf.get_variable(
"reward_matrix", shape=(self.num_actions, self.num_blocks), dtype=tf.float32,
initializer=tf.random_normal_initializer(mean=0, stddev=np.sqrt(2 / self.num_blocks), dtype=tf.float32)
)
self.t_v = tf.get_variable(
"transition_matrix", shape=(self.num_actions, self.num_blocks, self.num_blocks), dtype=tf.float32,
initializer=tf.random_normal_initializer(mean=0, stddev=np.sqrt(2 / self.num_blocks), dtype=tf.float32)
)
def build_training(self):
# create global training step variable
self.global_step = tf.train.get_or_create_global_step()
self.float_dones_t = tf.cast(self.dones_pl, tf.float32)
# gather appropriate transition matrices
self.gather_r_t = tf.gather(self.r_v, self.actions_pl)
self.gather_t_t = tf.gather(self.t_v, self.actions_pl)
# build losses
self.build_reward_loss()
self.build_transition_loss()
self.build_weight_decay_loss()
self.build_kl_loss()
# build the whole loss
self.gamma_v = tf.Variable(initial_value=self.gamma, trainable=False)
self.loss_t = self.reward_loss_t + tf.stop_gradient(self.gamma_v) * self.transition_loss_t + \
self.regularization_loss_t + self.beta * self.kl_loss_t
# build training
self.build_encoder_training()
self.build_r_model_training()
self.build_t_model_training()
self.model_train_step = tf.group(self.r_step, self.t_step)
self.train_step = tf.group(self.encoder_train_step, self.model_train_step)
def build_reward_loss(self):
self.reward_prediction_t = tf.reduce_sum(self.state_sample_t * self.gather_r_t, axis=1)
term1 = tf.square(
self.rewards_pl - self.reward_prediction_t
)
self.full_reward_loss_t = (1 / 2) * term1
self.reward_loss_t = tf.reduce_mean(self.full_reward_loss_t, axis=0)
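    # Transition loss: the next latent code is predicted linearly as z' = z * T_a (one matrix per action);
    # the squared error is masked for terminal transitions and normalized by the number of non-terminal
    # samples in the batch (with a floor of 1 to avoid division by zero).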
def build_transition_loss(self):
self.transformed_logits = tf.matmul(self.state_sample_t[:, tf.newaxis, :], self.gather_t_t)
self.transformed_logits = self.transformed_logits[:, 0, :]
if self.propagate_next_state:
term1 = tf.reduce_sum(tf.square(self.next_state_sample_t - self.transformed_logits), axis=1)
else:
term1 = tf.reduce_sum(tf.square(tf.stop_gradient(self.next_state_sample_t) - self.transformed_logits),
axis=1)
self.full_transition_loss_t = (1 / 2) * term1 * (1 - self.float_dones_t)
self.transition_loss_t = tf.reduce_sum(self.full_transition_loss_t, axis=0) / tf.reduce_max(
[1.0, tf.reduce_sum(1 - self.float_dones_t)])
def build_weight_decay_loss(self):
reg = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
if len(reg) > 0:
self.regularization_loss_t = tf.add_n(reg)
else:
self.regularization_loss_t = 0
def build_kl_loss(self):
self.kl_loss_t = 0.0
if self.beta is not None and self.beta > 0.0:
self.kl_divergence_t = 0.5 * (
tf.square(self.state_mu_t) + self.state_var_t - tf.log(self.state_var_t + 1e-5) - 1.0)
self.kl_loss_t = tf.reduce_mean(tf.reduce_sum(self.kl_divergence_t, axis=1))
def build_encoder_training(self):
encoder_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.ENCODER_NAMESPACE)
encoder_optimizer = agent_utils.get_optimizer(self.optimizer_encoder, self.learning_rate_encoder)
self.encoder_train_step = encoder_optimizer.minimize(
self.loss_t, global_step=self.global_step, var_list=encoder_variables
)
if self.batch_norm:
self.update_op = tf.group(*tf.get_collection(tf.GraphKeys.UPDATE_OPS))
self.encoder_train_step = tf.group(self.encoder_train_step, self.update_op)
def build_r_model_training(self):
r_optimizer = agent_utils.get_optimizer(self.optimizer_model, self.learning_rate_r)
self.r_step = r_optimizer.minimize(
self.reward_loss_t, var_list=[self.r_v]
)
def build_t_model_training(self):
t_optimizer = agent_utils.get_optimizer(self.optimizer_model, self.learning_rate_t)
self.t_step = t_optimizer.minimize(
self.transition_loss_t, var_list=[self.t_v]
)
def build_encoder(self, depth_pl, hand_state_pl, share_weights=False, namespace=ENCODER_NAMESPACE):
if len(depth_pl.shape) == 3:
x = tf.expand_dims(depth_pl, axis=-1)
elif len(depth_pl.shape) != 4:
raise ValueError("Weird depth shape?")
else:
x = depth_pl
with tf.variable_scope(namespace, reuse=share_weights):
for idx in range(len(self.encoder_filters)):
with tf.variable_scope("conv{:d}".format(idx + 1)):
x = tf.layers.conv2d(
x, self.encoder_filters[idx], self.encoder_filter_sizes[idx], self.encoder_strides[idx],
padding="SAME", activation=tf.nn.relu if not self.batch_norm else None,
kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
kernel_initializer=agent_utils.get_mrsa_initializer(),
use_bias=not self.batch_norm
)
if self.batch_norm and idx != len(self.encoder_filters) - 1:
if self.old_bn_settings:
x = tf.layers.batch_normalization(
x, training=self.is_training_pl, trainable=not share_weights, momentum=self.bn_momentum
)
else:
x = tf.layers.batch_normalization(
x, training=self.is_training_pl if not share_weights else False,
trainable=not share_weights, momentum=self.bn_momentum
)
x = tf.nn.relu(x)
x = tf.layers.flatten(x)
if self.batch_norm:
if self.old_bn_settings:
x = tf.layers.batch_normalization(
x, training=self.is_training_pl, trainable=not share_weights, momentum=self.bn_momentum
)
else:
x = tf.layers.batch_normalization(
x, training=self.is_training_pl if not share_weights else False,
trainable=not share_weights, momentum=self.bn_momentum
)
x = tf.nn.relu(x)
x = tf.concat([x, hand_state_pl], axis=1)
for idx, neurons in enumerate(self.encoder_neurons):
with tf.variable_scope("fc{:d}".format(idx + 1)):
x = tf.layers.dense(
x, neurons, activation=tf.nn.relu if not self.batch_norm else None,
kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
kernel_initializer=agent_utils.get_mrsa_initializer(),
use_bias=not self.batch_norm
)
if self.batch_norm:
if self.old_bn_settings:
x = tf.layers.batch_normalization(
x, training=self.is_training_pl, trainable=not share_weights, momentum=self.bn_momentum
)
else:
x = tf.layers.batch_normalization(
x, training=self.is_training_pl if not share_weights else False,
trainable=not share_weights, momentum=self.bn_momentum
)
x = tf.nn.relu(x)
with tf.variable_scope("predict"):
mu = tf.layers.dense(
x, self.num_blocks, activation=None,
kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
kernel_initializer=agent_utils.get_mrsa_initializer()
)
sigma = tf.layers.dense(
x, self.num_blocks, activation=None,
kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
kernel_initializer=agent_utils.get_mrsa_initializer()
)
                if self.no_sample:
                    sample = mu
                    var_t = None
                    sd = None  # defined here so the return below does not raise a NameError when sampling is disabled
else:
noise = tf.random_normal(
shape=(tf.shape(mu)[0], self.num_blocks), mean=0, stddev=1.0
)
if self.softplus:
                        # softplus head: the second dense output parameterizes the standard deviation directly (not a log-variance)
sd = tf.nn.softplus(sigma)
if self.zero_embedding_variance:
sd = sd * 0.0
sd_noise_t = noise * sd
sample = mu + sd_noise_t
var_t = tf.square(sd)
else:
var_t = tf.exp(sigma)
if self.zero_embedding_variance:
var_t = var_t * 0.0
sd = tf.sqrt(var_t)
sd_noise_t = noise * sd
sample = mu + sd_noise_t
return mu, var_t, sd, sample
def build_target_update(self):
source_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.ENCODER_NAMESPACE)
target_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.TARGET_ENCODER_NAMESPACE)
assert len(source_vars) == len(target_vars) and len(source_vars) > 0
update_ops = []
for source_var, target_var in zip(source_vars, target_vars):
update_ops.append(tf.assign(target_var, source_var))
self.target_update_op = tf.group(*update_ops)
def build_summaries(self):
# losses
tf.summary.scalar("loss", self.loss_t)
tf.summary.scalar("transition_loss", tf.reduce_mean(self.transition_loss_t))
tf.summary.scalar("reward_loss", tf.reduce_mean(self.reward_loss_t))
# logits and softmax
self.summarize(self.state_mu_t, "means")
self.summarize(self.state_var_t, "vars")
self.summarize(self.state_sample_t, "samples")
# matrices
self.summarize(self.r_v, "R")
self.summarize(self.t_v, "T")
def set_gamma(self, value):
self.session.run(tf.assign(self.gamma_v, value))
def start_session(self, gpu_memory=None):
gpu_options = None
if gpu_memory is not None:
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory)
tf_config = tf.ConfigProto(gpu_options=gpu_options)
self.session = tf.Session(config=tf_config)
self.session.run(tf.global_variables_initializer())
def stop_session(self):
if self.session is not None:
self.session.close()
def load(self, path):
self.saver.restore(self.session, path)
def save(self, path):
path_dir = os.path.dirname(path)
if len(path_dir) > 0 and not os.path.isdir(path_dir):
os.makedirs(path_dir)
self.saver.save(self.session, path)
def save_matrices_as_images(self, step, save_dir, ext="pdf"):
r, p = self.session.run([self.r_v, self.t_v])
r = np.reshape(r, (r.shape[0], -1))
p = np.reshape(p, (p.shape[0], -1))
r_path = os.path.join(save_dir, "r_{:d}.{}".format(step, ext))
p_path = os.path.join(save_dir, "p_{:d}.{}".format(step, ext))
plt.clf()
plt.imshow(r, vmin=-0.5, vmax=1.5)
plt.colorbar()
plt.savefig(r_path)
plt.clf()
plt.imshow(p, vmin=0, vmax=1)
plt.colorbar()
plt.savefig(p_path)
def summarize(self, var, name):
tf.summary.scalar(name + "_mean", tf.reduce_mean(var))
tf.summary.scalar(name + "_min", tf.reduce_min(var))
tf.summary.scalar(name + "_max", tf.reduce_max(var))
tf.summary.histogram(name + "_hist", var)
class ExpectationModelVQ(ExpectationModelContinuous):
def __init__(self, input_shape, num_embeddings, dimensionality, num_actions, encoder_filters, encoder_filter_sizes,
encoder_strides, encoder_neurons, learning_rate_encoder, learning_rate_r, learning_rate_t,
weight_decay, gamma_1, optimizer_encoder, optimizer_model,
max_steps, batch_norm=True, target_network=True, propagate_next_state=False, no_sample=False,
softplus=False, alpha=0.1, beta=0.0):
ExpectationModelContinuous.__init__(
self, input_shape, dimensionality, num_actions, encoder_filters, encoder_filter_sizes, encoder_strides,
encoder_neurons, learning_rate_encoder, learning_rate_r, learning_rate_t, weight_decay, gamma_1,
optimizer_encoder, optimizer_model, max_steps, batch_norm=batch_norm, target_network=target_network,
propagate_next_state=propagate_next_state, no_sample=no_sample, softplus=softplus, beta=beta
)
self.num_embeddings = num_embeddings
self.dimensionality = dimensionality
        self.alpha = alpha
def build_model(self):
self.state_mu_t = \
self.build_encoder(self.states_pl, self.hand_states_pl, namespace=self.ENCODER_NAMESPACE)
self.embeds = tf.get_variable(
"embeddings", [self.num_embeddings, self.dimensionality],
initializer=tf.truncated_normal_initializer(stddev=0.02)
)
self.state_sample_t, self.state_classes_t = self.quantize(self.state_mu_t)
if self.target_network:
self.next_state_mu_t = self.build_encoder(
self.next_states_pl, self.next_hand_states_pl, share_weights=False,
namespace=self.TARGET_ENCODER_NAMESPACE
)
self.build_target_update()
else:
self.next_state_mu_t = self.build_encoder(
self.next_states_pl, self.next_hand_states_pl, share_weights=True, namespace=self.ENCODER_NAMESPACE
)
self.next_state_sample_t, self.next_state_classes_t = self.quantize(self.next_state_mu_t)
self.r_v = tf.get_variable(
"reward_matrix", shape=(self.num_actions, self.dimensionality), dtype=tf.float32,
initializer=tf.random_normal_initializer(mean=0, stddev=np.sqrt(2 / self.dimensionality), dtype=tf.float32)
)
self.t_v = tf.get_variable(
"transition_matrix", shape=(self.num_actions, self.dimensionality, self.dimensionality), dtype=tf.float32,
initializer=tf.random_normal_initializer(mean=0, stddev=np.sqrt(2 / self.dimensionality), dtype=tf.float32)
)
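    # Vector quantization: each encoder output is snapped to its nearest codebook vector (Euclidean
    # distance), returning both the quantized embedding and the index of the selected code.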
def quantize(self, prediction):
diff = prediction[:, tf.newaxis, :] - self.embeds[tf.newaxis, :, :]
norm = tf.norm(diff, axis=2)
classes = tf.argmin(norm, axis=1)
return tf.gather(self.embeds, classes), classes
def build_training(self):
# create global training step variable
self.global_step = tf.train.get_or_create_global_step()
self.float_dones_t = tf.cast(self.dones_pl, tf.float32)
# gather appropriate transition matrices
self.gather_r_t = tf.gather(self.r_v, self.actions_pl)
self.gather_t_t = tf.gather(self.t_v, self.actions_pl)
# build losses
self.build_reward_loss()
self.build_transition_loss()
self.build_left_and_right_embedding_losses()
self.build_weight_decay_loss()
# build the whole loss
self.gamma_v = tf.Variable(initial_value=self.gamma, trainable=False)
self.main_loss_t = self.reward_loss_t + tf.stop_gradient(self.gamma_v) * self.transition_loss_t + \
self.regularization_loss_t
self.loss_t = self.main_loss_t + self.right_loss_t
# build training
self.build_encoder_and_embedding_training()
self.build_r_model_training()
self.build_t_model_training()
self.model_train_step = tf.group(self.r_step, self.t_step)
self.train_step = tf.group(self.encoder_train_step, self.model_train_step)
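    # VQ-style auxiliary losses: the "left" loss moves the codebook vectors towards the (stopped-gradient)
    # encoder outputs, while the "right" loss is a commitment term that pulls the encoder outputs towards
    # the (stopped-gradient) selected codes; only the commitment term is added to loss_t, and the codebook
    # is updated through the custom gradients in build_encoder_and_embedding_training.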
def build_left_and_right_embedding_losses(self):
self.left_loss_t = tf.reduce_mean(
tf.norm(tf.stop_gradient(self.state_mu_t) - self.state_sample_t, axis=1) ** 2
)
self.right_loss_t = tf.reduce_mean(
tf.norm(self.state_mu_t - tf.stop_gradient(self.state_sample_t), axis=1) ** 2
)
def build_reward_loss(self):
self.reward_prediction_t = tf.reduce_sum(self.state_sample_t * self.gather_r_t, axis=1)
term1 = tf.square(
self.rewards_pl - self.reward_prediction_t
)
self.full_reward_loss_t = (1 / 2) * term1
self.reward_loss_t = tf.reduce_mean(self.full_reward_loss_t, axis=0)
def build_transition_loss(self):
self.transformed_logits = tf.matmul(self.state_sample_t[:, tf.newaxis, :], self.gather_t_t)
self.transformed_logits = self.transformed_logits[:, 0, :]
if self.propagate_next_state:
term1 = tf.reduce_mean(tf.square(self.next_state_sample_t - self.transformed_logits), axis=1)
else:
term1 = tf.reduce_mean(tf.square(tf.stop_gradient(self.next_state_sample_t) - self.transformed_logits),
axis=1)
self.full_transition_loss_t = (1 / 2) * term1 * (1 - self.float_dones_t)
self.transition_loss_t = tf.reduce_sum(self.full_transition_loss_t, axis=0) / tf.reduce_max(
[1.0, tf.reduce_sum(1 - self.float_dones_t)])
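    # Straight-through style update: gradients of the main loss with respect to the quantized sample are
    # re-routed through the encoder mean (tf.gradients with grad_ys), the commitment gradient is added with
    # weight alpha, and the codebook embeddings are updated from the "left" loss, all in a single
    # apply_gradients call.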
def build_encoder_and_embedding_training(self):
encoder_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.ENCODER_NAMESPACE)
encoder_optimizer = agent_utils.get_optimizer(self.optimizer_encoder, self.learning_rate_encoder)
grad_z = tf.gradients(self.main_loss_t, self.state_sample_t)
        encoder_grads = [(tf.gradients(self.state_mu_t, var, grad_z)[0] + self.alpha *
                          tf.gradients(self.right_loss_t, var)[0], var) for var in encoder_variables]
embed_grads = list(zip(tf.gradients(self.left_loss_t, self.embeds), [self.embeds]))
self.encoder_train_step = encoder_optimizer.apply_gradients(
encoder_grads + embed_grads
)
if self.batch_norm:
self.update_op = tf.group(*tf.get_collection(tf.GraphKeys.UPDATE_OPS))
self.encoder_train_step = tf.group(self.encoder_train_step, self.update_op)
def build_encoder(self, depth_pl, hand_state_pl, share_weights=False, namespace=None):
x = tf.expand_dims(depth_pl, axis=-1)
with tf.variable_scope(namespace, reuse=share_weights):
for idx in range(len(self.encoder_filters)):
with tf.variable_scope("conv{:d}".format(idx + 1)):
x = tf.layers.conv2d(
x, self.encoder_filters[idx], self.encoder_filter_sizes[idx], self.encoder_strides[idx],
padding="SAME", activation=tf.nn.relu if not self.batch_norm else None,
kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
kernel_initializer=agent_utils.get_mrsa_initializer(),
use_bias=not self.batch_norm
)
if self.batch_norm and idx != len(self.encoder_filters) - 1:
x = tf.layers.batch_normalization(x, training=self.is_training_pl)
x = tf.nn.relu(x)
x = tf.layers.flatten(x)
if self.batch_norm:
x = tf.layers.batch_normalization(x, training=self.is_training_pl)
x = tf.nn.relu(x)
x = tf.concat([x, hand_state_pl], axis=1)
for idx, neurons in enumerate(self.encoder_neurons):
with tf.variable_scope("fc{:d}".format(idx + 1)):
x = tf.layers.dense(
x, neurons, activation=tf.nn.relu if not self.batch_norm else None,
kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
kernel_initializer=agent_utils.get_mrsa_initializer(),
use_bias=not self.batch_norm
)
if self.batch_norm:
x = tf.layers.batch_normalization(x, training=self.is_training_pl)
x = tf.nn.relu(x)
with tf.variable_scope("predict"):
mu = tf.layers.dense(
x, self.num_blocks, activation=None,
kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
kernel_initializer=agent_utils.get_mrsa_initializer()
)
return mu
class ExpectationModelContinuousNNTransitions(ExpectationModelContinuous):
def __init__(self, input_shape, num_blocks, num_actions, encoder_filters, encoder_filter_sizes,
encoder_strides, encoder_neurons, learning_rate_encoder, learning_rate_r, learning_rate_t,
transition_neurons, weight_decay, gamma_1, optimizer_encoder, optimizer_model,
max_steps, batch_norm=True, target_network=True, propagate_next_state=False, no_sample=False,
softplus=False, beta=0.0):
ExpectationModelContinuous.__init__(
self, input_shape, num_blocks, num_actions, encoder_filters, encoder_filter_sizes, encoder_strides,
encoder_neurons, learning_rate_encoder, learning_rate_r, learning_rate_t, weight_decay, gamma_1,
optimizer_encoder, optimizer_model, max_steps, batch_norm=batch_norm, target_network=target_network,
propagate_next_state=propagate_next_state, no_sample=no_sample, softplus=softplus, beta=beta
)
self.transition_neurons = transition_neurons
def build_model(self):
# TODO: throw out t_v; either accept mu_t or sample_t
self.state_mu_t, self.state_var_t, self.state_sd_t, self.state_sample_t = \
self.build_encoder(self.states_pl, self.hand_states_pl)
if self.target_network:
self.next_state_mu_t, self.next_state_var_t, self.next_state_sd_t, self.next_state_sample_t = self.build_encoder(
self.next_states_pl, self.next_hand_states_pl, share_weights=False,
namespace=self.TARGET_ENCODER_NAMESPACE
)
self.build_target_update()
else:
self.next_state_mu_t, self.next_state_var_t, self.next_state_sd_t, self.next_state_sample_t = self.build_encoder(
self.next_states_pl, self.next_hand_states_pl, share_weights=True
)
self.r_v = tf.get_variable(
"reward_matrix", shape=(self.num_actions, self.num_blocks), dtype=tf.float32,
initializer=tf.random_normal_initializer(mean=0, stddev=np.sqrt(2 / self.num_blocks), dtype=tf.float32)
)
self.t_v = tf.get_variable(
"transition_matrix", shape=(self.num_actions, self.num_blocks, self.num_blocks), dtype=tf.float32,
initializer=tf.random_normal_initializer(mean=0, stddev=np.sqrt(2 / self.num_blocks), dtype=tf.float32)
)
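    # MLP transition model over the latent embedding, intended (per the TODOs) to replace the linear
    # transition matrices t_v; as written it does not yet take the action as input and is not wired into
    # the loss built above.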
def build_transition_nn(self, embedding):
# TODO: needs to accept actions as well
x = embedding
with tf.variable_scope("transition"):
for idx, neurons in enumerate(self.transition_neurons):
with tf.variable_scope("fc{:d}".format(idx)):
if idx == len(self.transition_neurons) - 1:
x = tf.layers.dense(
x, neurons, activation=None,
kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
kernel_initializer=agent_utils.get_mrsa_initializer()
)
else:
x = tf.layers.dense(
x, neurons, activation=tf.nn.relu if not self.batch_norm else None,
kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
kernel_initializer=agent_utils.get_mrsa_initializer(),
use_bias=not self.batch_norm
)
if self.batch_norm:
x = tf.layers.batch_normalization(x, training=self.is_training_pl)
x = tf.nn.relu(x)
assert embedding.shape[1] == x.shape[1]
return x
class ExpectationModelContinuousWithQ(ExpectationModelContinuous):
def __init__(self, input_shape, num_blocks, num_actions, encoder_filters, encoder_filter_sizes,
encoder_strides, encoder_neurons, learning_rate_encoder, learning_rate_r, learning_rate_t,
learning_rate_q, weight_decay, gamma_1, gamma_2, optimizer_encoder, optimizer_model,
max_steps, batch_norm=True, target_network=True, propagate_next_state=False, no_sample=False,
softplus=False, beta=0.0, old_bn_settings=False, bn_momentum=0.99):
ExpectationModelContinuous.__init__(
self, input_shape, num_blocks, num_actions, encoder_filters, encoder_filter_sizes, encoder_strides,
encoder_neurons, learning_rate_encoder, learning_rate_r, learning_rate_t, weight_decay, gamma_1,
optimizer_encoder, optimizer_model, max_steps, batch_norm=batch_norm, target_network=target_network,
propagate_next_state=propagate_next_state, no_sample=no_sample, softplus=softplus, beta=beta,
old_bn_settings=old_bn_settings, bn_momentum=bn_momentum
)
self.gamma_2 = gamma_2
self.learning_rate_q = learning_rate_q
def validate(self, depths, hand_states, actions, rewards, q_values, next_depths, next_hand_states, dones,
batch_size=100):
num_steps = int(np.ceil(depths.shape[0] / batch_size))
losses = []
for i in range(num_steps):
batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
feed_dict = {
self.states_pl: depths[batch_slice],
self.hand_states_pl: hand_states[:, np.newaxis][batch_slice],
self.actions_pl: actions[batch_slice],
self.rewards_pl: rewards[batch_slice],
self.q_values_pl: q_values[batch_slice],
self.next_states_pl: next_depths[batch_slice],
self.next_hand_states_pl: next_hand_states[:, np.newaxis][batch_slice],
self.dones_pl: dones[batch_slice],
self.is_training_pl: False
}
l1, l2, l3 = self.session.run(
[self.full_q_loss_t, self.full_transition_loss_t, self.full_reward_loss_t], feed_dict=feed_dict
)
losses.append(np.transpose(np.array([l1, l2, l3]), axes=(1, 0)))
losses = np.concatenate(losses, axis=0)
return losses
def validate_and_encode(self, depths, hand_states, actions, rewards, q_values, next_depths, next_hand_states, dones,
batch_size=100):
num_steps = int(np.ceil(depths.shape[0] / batch_size))
losses = []
embeddings = []
for i in range(num_steps):
batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
feed_dict = {
self.states_pl: depths[batch_slice],
self.hand_states_pl: hand_states[:, np.newaxis][batch_slice],
self.actions_pl: actions[batch_slice],
self.rewards_pl: rewards[batch_slice],
self.q_values_pl: q_values[batch_slice],
self.next_states_pl: next_depths[batch_slice],
self.next_hand_states_pl: next_hand_states[:, np.newaxis][batch_slice],
self.dones_pl: dones[batch_slice],
self.is_training_pl: False
}
tmp_embeddings, l1, l2, l3 = self.session.run([
self.state_mu_t, self.full_q_loss_t, self.full_transition_loss_t, self.full_reward_loss_t],
feed_dict=feed_dict
)
losses.append(np.transpose(np.array([l1, l2, l3]), axes=(1, 0)))
embeddings.append(tmp_embeddings)
losses = np.concatenate(losses, axis=0)
embeddings = np.concatenate(embeddings)
return losses, embeddings
def build_placeholders(self):
self.states_pl = tf.placeholder(tf.float32, shape=(None, *self.input_shape), name="states_pl")
self.actions_pl = tf.placeholder(tf.int32, shape=(None,), name="actions_pl")
self.rewards_pl = tf.placeholder(tf.float32, shape=(None,), name="rewards_pl")
self.q_values_pl = tf.placeholder(tf.float32, shape=(None, self.num_actions), name="q_values_pl")
self.dones_pl = tf.placeholder(tf.bool, shape=(None,), name="dones_pl")
self.next_states_pl = tf.placeholder(tf.float32, shape=(None, *self.input_shape), name="next_states_pl")
self.is_training_pl = tf.placeholder(tf.bool, shape=[], name="is_training_pl")
self.hand_states_pl = tf.placeholder(tf.float32, shape=(None, 1), name="hand_states_pl")
self.next_hand_states_pl = tf.placeholder(tf.float32, shape=(None, 1), name="next_hand_states_pl")
def build_model(self):
self.build_encoders()
self.build_linear_models()
def build_encoders(self):
self.state_mu_t, self.state_var_t, self.state_sd_t, self.state_sample_t = \
self.build_encoder(self.states_pl, self.hand_states_pl)
if self.target_network:
self.next_state_mu_t, self.next_state_var_t, self.next_state_sd_t, self.next_state_sample_t = self.build_encoder(
self.next_states_pl, self.next_hand_states_pl, share_weights=False,
namespace=self.TARGET_ENCODER_NAMESPACE
)
self.build_target_update()
else:
self.next_state_mu_t, self.next_state_var_t, self.next_state_sd_t, self.next_state_sample_t = self.build_encoder(
self.next_states_pl, self.next_hand_states_pl, share_weights=True
)
def build_linear_models(self):
self.q_v = tf.get_variable(
"q_values_matrix", shape=(self.num_actions, self.num_blocks), dtype=tf.float32,
initializer=tf.random_normal_initializer(mean=0, stddev=np.sqrt(2 / self.num_blocks), dtype=tf.float32)
)
self.r_v = tf.get_variable(
"reward_matrix", shape=(self.num_actions, self.num_blocks), dtype=tf.float32,
initializer=tf.random_normal_initializer(mean=0, stddev=np.sqrt(2 / self.num_blocks), dtype=tf.float32)
)
self.t_v = tf.get_variable(
"transition_matrix", shape=(self.num_actions, self.num_blocks, self.num_blocks), dtype=tf.float32,
initializer=tf.random_normal_initializer(mean=0, stddev=np.sqrt(2 / self.num_blocks), dtype=tf.float32)
)
def build_training(self):
# create global training step variable
self.global_step = tf.train.get_or_create_global_step()
self.float_dones_t = tf.cast(self.dones_pl, tf.float32)
# gather appropriate transition matrices
self.gather_r_t = tf.gather(self.r_v, self.actions_pl)
self.gather_t_t = tf.gather(self.t_v, self.actions_pl)
# build losses
self.build_q_loss()
self.build_reward_loss()
self.build_transition_loss()
self.build_weight_decay_loss()
self.build_kl_loss()
# build the whole loss
self.gamma_v = tf.Variable(initial_value=self.gamma, trainable=False)
self.loss_t = self.q_loss_t + self.gamma_2 * self.reward_loss_t + \
tf.stop_gradient(self.gamma_v) * self.transition_loss_t + \
self.regularization_loss_t + self.beta * self.kl_loss_t
# build training
self.build_encoder_training()
self.build_q_model_training()
self.build_r_model_training()
self.build_t_model_training()
self.model_train_step = tf.group(self.q_step, self.r_step, self.t_step)
self.train_step = tf.group(self.encoder_train_step, self.model_train_step)
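    # Q loss for the continuous model: Q-values are predicted as a linear function of the sampled embedding
    # (one weight vector per action in q_v) and regressed onto the provided target Q-values with a squared
    # error averaged over actions.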
def build_q_loss(self):
self.q_prediction_t = tf.reduce_sum(self.state_sample_t[:, tf.newaxis, :] * self.q_v[tf.newaxis, :, :], axis=2)
term1 = tf.reduce_mean(tf.square(self.q_values_pl - self.q_prediction_t), axis=1)
self.full_q_loss_t = (1 / 2) * term1
self.q_loss_t = tf.reduce_mean(self.full_q_loss_t, axis=0)
def build_q_model_training(self):
q_optimizer = agent_utils.get_optimizer(self.optimizer_model, self.learning_rate_q)
self.q_step = q_optimizer.minimize(
self.q_loss_t, var_list=[self.q_v]
)
class ExpectationModelVQWithQ(ExpectationModelVQ):
def __init__(self, input_shape, num_blocks, dimensionality, num_actions, encoder_filters, encoder_filter_sizes,
encoder_strides, encoder_neurons, learning_rate_encoder, learning_rate_r, learning_rate_t,
learning_rate_q, weight_decay, gamma_1, gamma_2, optimizer_encoder, optimizer_model,
max_steps, batch_norm=True, target_network=True, propagate_next_state=False, no_sample=False,
softplus=False, alpha=0.1, beta=0.0):
ExpectationModelVQ.__init__(
self, input_shape, num_blocks, dimensionality, num_actions, encoder_filters, encoder_filter_sizes,
encoder_strides, encoder_neurons, learning_rate_encoder, learning_rate_r, learning_rate_t, weight_decay,
gamma_1, optimizer_encoder, optimizer_model, max_steps, batch_norm=batch_norm,
target_network=target_network, propagate_next_state=propagate_next_state, no_sample=no_sample,
softplus=softplus, alpha=alpha, beta=beta
)
self.gamma_2 = gamma_2
self.learning_rate_q = learning_rate_q
def validate(self, depths, hand_states, actions, rewards, q_values, next_depths, next_hand_states, dones,
batch_size=100):
num_steps = int(np.ceil(depths.shape[0] / batch_size))
losses = []
for i in range(num_steps):
batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
feed_dict = {
self.states_pl: depths[batch_slice],
self.hand_states_pl: hand_states[:, np.newaxis][batch_slice],
self.actions_pl: actions[batch_slice],
self.rewards_pl: rewards[batch_slice],
self.q_values_pl: q_values[batch_slice],
self.next_states_pl: next_depths[batch_slice],
self.next_hand_states_pl: next_hand_states[:, np.newaxis][batch_slice],
self.dones_pl: dones[batch_slice],
self.is_training_pl: False
}
l1, l2, l3 = self.session.run(
[self.full_q_loss_t, self.full_transition_loss_t, self.full_reward_loss_t], feed_dict=feed_dict
)
losses.append(np.transpose(np.array([l1, l2, l3]), axes=(1, 0)))
losses = np.concatenate(losses, axis=0)
return losses
def validate_and_encode(self, depths, hand_states, actions, rewards, q_values, next_depths, next_hand_states, dones,
batch_size=100):
num_steps = int(np.ceil(depths.shape[0] / batch_size))
losses = []
embeddings = []
for i in range(num_steps):
batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
feed_dict = {
self.states_pl: depths[batch_slice],
self.hand_states_pl: hand_states[:, np.newaxis][batch_slice],
self.actions_pl: actions[batch_slice],
self.rewards_pl: rewards[batch_slice],
self.q_values_pl: q_values[batch_slice],
self.next_states_pl: next_depths[batch_slice],
self.next_hand_states_pl: next_hand_states[:, np.newaxis][batch_slice],
self.dones_pl: dones[batch_slice],
self.is_training_pl: False
}
tmp_embeddings, l1, l2, l3 = self.session.run([
self.state_mu_t, self.full_q_loss_t, self.full_transition_loss_t, self.full_reward_loss_t],
feed_dict=feed_dict
)
losses.append(np.transpose(np.array([l1, l2, l3]), axes=(1, 0)))
embeddings.append(tmp_embeddings)
losses = np.concatenate(losses, axis=0)
embeddings = np.concatenate(embeddings)
return losses, embeddings
def build_placeholders(self):
self.states_pl = tf.placeholder(tf.float32, shape=(None, *self.input_shape), name="states_pl")
self.actions_pl = tf.placeholder(tf.int32, shape=(None,), name="actions_pl")
self.rewards_pl = tf.placeholder(tf.float32, shape=(None,), name="rewards_pl")
self.q_values_pl = tf.placeholder(tf.float32, shape=(None, self.num_actions), name="q_values_pl")
self.dones_pl = tf.placeholder(tf.bool, shape=(None,), name="dones_pl")
self.next_states_pl = tf.placeholder(tf.float32, shape=(None, *self.input_shape), name="next_states_pl")
self.is_training_pl = tf.placeholder(tf.bool, shape=[], name="is_training_pl")
self.hand_states_pl = tf.placeholder(tf.float32, shape=(None, 1), name="hand_states_pl")
self.next_hand_states_pl = tf.placeholder(tf.float32, shape=(None, 1), name="next_hand_states_pl")
def build_model(self):
self.state_mu_t = \
self.build_encoder(self.states_pl, self.hand_states_pl, namespace=self.ENCODER_NAMESPACE)
self.embeds = tf.get_variable(
"embeddings", [self.num_embeddings, self.dimensionality],
initializer=tf.truncated_normal_initializer(stddev=0.02)
)
self.state_sample_t, self.state_classes_t = self.quantize(self.state_mu_t)
if self.target_network:
self.next_state_mu_t = self.build_encoder(
self.next_states_pl, self.next_hand_states_pl, share_weights=False,
namespace=self.TARGET_ENCODER_NAMESPACE
)
self.build_target_update()
else:
self.next_state_mu_t = self.build_encoder(
self.next_states_pl, self.next_hand_states_pl, share_weights=True, namespace=self.ENCODER_NAMESPACE
)
self.next_state_sample_t, self.next_state_classes_t = self.quantize(self.next_state_mu_t)
self.q_v = tf.get_variable(
"q_values_matrix", shape=(self.num_actions, self.dimensionality), dtype=tf.float32,
initializer=tf.random_normal_initializer(mean=0, stddev=np.sqrt(2 / self.dimensionality), dtype=tf.float32)
)
self.r_v = tf.get_variable(
"reward_matrix", shape=(self.num_actions, self.dimensionality), dtype=tf.float32,
initializer=tf.random_normal_initializer(mean=0, stddev=np.sqrt(2 / self.dimensionality), dtype=tf.float32)
)
self.t_v = tf.get_variable(
"transition_matrix", shape=(self.num_actions, self.dimensionality, self.dimensionality), dtype=tf.float32,
initializer=tf.random_normal_initializer(mean=0, stddev=np.sqrt(2 / self.dimensionality), dtype=tf.float32)
)
def build_training(self):
# create global training step variable
self.global_step = tf.train.get_or_create_global_step()
self.float_dones_t = tf.cast(self.dones_pl, tf.float32)
# gather appropriate transition matrices
self.gather_r_t = tf.gather(self.r_v, self.actions_pl)
self.gather_t_t = tf.gather(self.t_v, self.actions_pl)
# build losses
self.build_q_loss()
self.build_reward_loss()
self.build_transition_loss()
self.build_left_and_right_embedding_losses()
self.build_weight_decay_loss()
# build the whole loss
self.gamma_v = tf.Variable(initial_value=self.gamma, trainable=False)
self.main_loss_t = self.q_loss_t + self.gamma_2 * self.reward_loss_t + \
tf.stop_gradient(self.gamma_v) * self.transition_loss_t + self.regularization_loss_t
self.loss_t = self.main_loss_t + self.right_loss_t
# build training
self.build_encoder_and_embedding_training()
self.build_q_model_training()
self.build_r_model_training()
self.build_t_model_training()
self.model_train_step = tf.group(self.q_step, self.r_step, self.t_step)
self.train_step = tf.group(self.encoder_train_step, self.model_train_step)
def build_q_loss(self):
self.q_prediction_t = tf.reduce_sum(self.state_sample_t[:, tf.newaxis, :] * self.q_v[tf.newaxis, :, :], axis=2)
term1 = tf.reduce_mean(tf.square(self.q_values_pl - self.q_prediction_t), axis=1)
self.full_q_loss_t = (1 / 2) * term1
self.q_loss_t = tf.reduce_mean(self.full_q_loss_t, axis=0)
def build_q_model_training(self):
q_optimizer = agent_utils.get_optimizer(self.optimizer_model, self.learning_rate_q)
self.q_step = q_optimizer.minimize(
self.q_loss_t, var_list=[self.q_v]
)
class ExpectationModelVQWithQNeural(ExpectationModelVQWithQ):
def __init__(self, input_shape, num_blocks, dimensionality, num_actions, encoder_filters, encoder_filter_sizes,
encoder_strides, encoder_neurons, q_neurons, learning_rate_encoder, learning_rate_r, learning_rate_t,
learning_rate_q, weight_decay, gamma_1, gamma_2, optimizer_encoder, optimizer_model,
max_steps, batch_norm=True, target_network=True, propagate_next_state=False, no_sample=False,
softplus=False, alpha=0.1, beta=0.0):
ExpectationModelVQWithQ.__init__(
self, input_shape, num_blocks, dimensionality, num_actions, encoder_filters, encoder_filter_sizes,
encoder_strides, encoder_neurons, learning_rate_encoder, learning_rate_r, learning_rate_t, learning_rate_q,
weight_decay, gamma_1, gamma_2, optimizer_encoder, optimizer_model, max_steps, batch_norm=batch_norm,
target_network=target_network, propagate_next_state=propagate_next_state, no_sample=no_sample,
softplus=softplus, alpha=alpha, beta=beta
)
self.q_neurons = q_neurons
def build_model(self):
self.state_mu_t = \
self.build_encoder(self.states_pl, self.hand_states_pl, namespace=self.ENCODER_NAMESPACE)
self.embeds = tf.get_variable(
"embeddings", [self.num_embeddings, self.dimensionality],
initializer=tf.truncated_normal_initializer(stddev=0.02)
)
self.state_sample_t, self.state_classes_t = self.quantize(self.state_mu_t)
if self.target_network:
self.next_state_mu_t = self.build_encoder(
self.next_states_pl, self.next_hand_states_pl, share_weights=False,
namespace=self.TARGET_ENCODER_NAMESPACE
)
self.build_target_update()
else:
self.next_state_mu_t = self.build_encoder(
self.next_states_pl, self.next_hand_states_pl, share_weights=True, namespace=self.ENCODER_NAMESPACE
)
self.next_state_sample_t, self.next_state_classes_t = self.quantize(self.next_state_mu_t)
self.r_v = tf.get_variable(
"reward_matrix", shape=(self.num_actions, self.dimensionality), dtype=tf.float32,
initializer=tf.random_normal_initializer(mean=0, stddev=np.sqrt(2 / self.dimensionality), dtype=tf.float32)
)
self.t_v = tf.get_variable(
"transition_matrix", shape=(self.num_actions, self.dimensionality, self.dimensionality), dtype=tf.float32,
initializer=tf.random_normal_initializer(mean=0, stddev=np.sqrt(2 / self.dimensionality), dtype=tf.float32)
)
def build_q_loss(self):
self.q_prediction_t = self.build_q_model(self.state_sample_t)
term1 = tf.reduce_mean(tf.square(self.q_values_pl - self.q_prediction_t), axis=1)
self.full_q_loss_t = (1 / 2) * term1
self.q_loss_t = tf.reduce_mean(self.full_q_loss_t, axis=0)
def build_q_model_training(self):
q_optimizer = agent_utils.get_optimizer(self.optimizer_model, self.learning_rate_q)
self.q_step = q_optimizer.minimize(
self.q_loss_t, var_list=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="q_model")
)
def build_q_model(self, embedding):
x = embedding
with tf.variable_scope("q_model"):
for idx, neurons in enumerate(self.q_neurons):
with tf.variable_scope("fc{:d}".format(idx)):
if idx == len(self.q_neurons) - 1:
x = tf.layers.dense(
x, neurons, activation=None,
kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
kernel_initializer=agent_utils.get_mrsa_initializer()
)
else:
x = tf.layers.dense(
x, neurons, activation=tf.nn.relu if not self.batch_norm else None,
kernel_regularizer=agent_utils.get_weight_regularizer(self.weight_decay),
kernel_initializer=agent_utils.get_mrsa_initializer(),
use_bias=not self.batch_norm
)
if self.batch_norm:
x = tf.layers.batch_normalization(x, training=self.is_training_pl)
x = tf.nn.relu(x)
return x
class ExpectationModelHierarchy:
L1_NAMESPACE = "l1"
L1_TARGET_NAMESPACE = "target_l1"
L2_NAMESPACE = "l2"
L2_TARGET_NAMESPACE = "target_l2"
def __init__(self, input_shape, l1_num_blocks, l2_num_blocks, num_actions, encoder_filters, encoder_filter_sizes,
encoder_strides, encoder_neurons, l1_learning_rate_encoder, l2_learning_rate_encoder, learning_rate_r, learning_rate_t,
weight_decay, gamma, optimizer_encoder, optimizer_model, max_steps, l2_hiddens, batch_norm=True,
target_network=True, propagate_next_state=False, no_sample=False):
if propagate_next_state:
assert not target_network
self.input_shape = input_shape
self.l1_num_blocks = l1_num_blocks
self.l2_num_blocks = l2_num_blocks
self.num_actions = num_actions
self.encoder_filters = encoder_filters
self.encoder_filter_sizes = encoder_filter_sizes
self.encoder_strides = encoder_strides
self.encoder_neurons = encoder_neurons
self.l1_learning_rate_encoder = l1_learning_rate_encoder
self.l2_learning_rate_encoder = l2_learning_rate_encoder
self.learning_rate_r = learning_rate_r
self.learning_rate_t = learning_rate_t
self.weight_decay = weight_decay
self.gamma = gamma
self.optimizer_encoder = optimizer_encoder
self.optimizer_model = optimizer_model
self.max_steps = max_steps
self.l2_hiddens = l2_hiddens
self.batch_norm = batch_norm
self.target_network = target_network
self.propagate_next_state = propagate_next_state
self.no_sample = no_sample
self.hand_states_pl, self.next_hand_states_pl = None, None
def encode(self, depths, hand_states, level, batch_size=100):
num_steps = int(np.ceil(depths.shape[0] / batch_size))
embeddings = []
if level == self.L1_NAMESPACE:
to_run = self.l1_state_mu_t
else:
to_run = self.l2_softmax_t
for i in range(num_steps):
batch_slice = np.index_exp[i * batch_size:(i + 1) * batch_size]
feed_dict = {
self.states_pl: depths[batch_slice],
self.hand_states_pl: hand_states[batch_slice],
self.is_training_pl: False
}
embedding = self.session.run(to_run, feed_dict=feed_dict)
embeddings.append(embedding)
embeddings = np.concatenate(embeddings, axis=0)
return embeddings
def validate(self, depths, hand_states, actions, rewards, next_depths, next_hand_states, dones, level,
batch_size=100):
num_steps = int( | np.ceil(depths.shape[0] / batch_size) | numpy.ceil |
"""
Created on Oct. 19, 2020
@author: Heng-Sheng (Hanson) Chang
"""
import os, sys
import numpy as np
from numba import njit, jit
from elastica._linalg import _batch_matvec, _batch_cross, _batch_norm, _batch_matrix_transpose
from elastica._calculus import quadrature_kernel, difference_kernel
from elastica.external_forces import NoForces
@njit(cache=True)
def inverse_rigidity_matrix(matrix):
inverse_matrix = np.zeros(matrix.shape)
for i in range(inverse_matrix.shape[2]):
inverse_matrix[:, :, i] = np.linalg.inv(matrix[:, :, i])
return inverse_matrix
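# Note (added): the explicit loop above is used because numba's @njit does not
# support batched np.linalg.inv (2-D inputs only).  Outside numba, an equivalent
# pure-numpy one-liner (illustrative sketch, not part of the original code) would be:
#   inverse = np.transpose(np.linalg.inv(np.transpose(matrix, (2, 0, 1))), (1, 2, 0))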
@njit(cache=True)
def strain_to_shear_and_curvature(strain):
n_elems = int((strain.size + 3) / 6)
shear = np.zeros((3, n_elems))
curvature = np.zeros((3, n_elems-1))
index = 0
for i in range(3):
shear[i, :] = strain[index:index+n_elems]
index += n_elems
for i in range(3):
curvature[i] = strain[index:index+n_elems-1]
index += (n_elems-1)
return shear, curvature
@njit(cache=True)
def shear_and_curvature_to_strain(shear, curvature):
strain = np.zeros(shear.size + curvature.size)
n_elems = shear.shape[1]
index = 0
for i in range(3):
strain[index:index+n_elems] = shear[i, :]
index += n_elems
for i in range(3):
strain[index:index+n_elems-1] = curvature[i]
index += (n_elems-1)
return strain
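# Illustrative round-trip check (assumed usage, not from the original code):
#   shear = np.zeros((3, n_elems)); curvature = np.zeros((3, n_elems - 1))
#   strain = shear_and_curvature_to_strain(shear, curvature)
#   new_shear, new_curvature = strain_to_shear_and_curvature(strain)
#   # new_shear.shape == (3, n_elems) and new_curvature.shape == (3, n_elems - 1)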
@njit(cache=True)
def _lab_to_material(directors, lab_vectors):
return _batch_matvec(directors, lab_vectors)
@njit(cache=True)
def _material_to_lab(directors, material_vectors):
blocksize = material_vectors.shape[1]
output_vector = np.zeros((3, blocksize))
for i in range(3):
for j in range(3):
for k in range(blocksize):
output_vector[i, k] += (
directors[j, i, k] * material_vectors[j, k]
)
return output_vector
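# Note (added): _material_to_lab applies the transpose of each director matrix
# (output[:, k] = directors[:, :, k].T @ material_vectors[:, k]), i.e. the inverse
# rotation when the directors are orthonormal frames, mirroring _lab_to_material
# which applies the director matrices directly via _batch_matvec.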
@njit(cache=True)
def average2D(vector_collection):
blocksize = vector_collection.shape[1]-1
output_vector = | np.zeros((3, blocksize)) | numpy.zeros |
# Copyright (C) 2021 <NAME>, <NAME>, and Politecnico di Milano. All rights reserved.
# Licensed under the Apache 2.0 License.
import sys
sys.path = ['.'] + sys.path
import numpy as np
import scipy.stats as stats
# import open bandit pipeline (obp)
from obp.ope.utils import find_optimal_lambda, estimate_lambda
from prettytable import PrettyTable
from scipy.stats import t
from scipy.optimize import minimize
SIGMA2_B = [1]
SIGMA2_E = [1.5, 1.9, 1.99, 1.999]
N = [10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000, 20000, 50000, 100000]
N = [10, 20, 50, 100, 200, 500, 1000]
N_ref = 10000000 #samples to compute all things
mu_b, mu_e = 0., 0.5
n_runs = 60
significance = 0.1
def f(x):
return 100*np.cos(2*np.pi*x)
def generate_dataset(n, mu, sigma2):
generated_samples = stats.norm.rvs(size=n, loc=mu, scale=np.sqrt(sigma2))
return generated_samples, f(generated_samples)
def compute_renyi_divergence(mu_b, sigma2_b, mu_e, sigma2_e, alpha=2):
var_star = alpha * sigma2_b + (1 - alpha) * sigma2_e
contextual_non_exp_Renyi = np.log(sigma2_b ** .5 / sigma2_e ** .5) + 1 / (2 * (alpha - 1)) * np.log(
sigma2_b / var_star) + alpha * (mu_e - mu_b) ** 2 / (2 * var_star)
non_exp_Renyi = np.mean(contextual_non_exp_Renyi)
exp_Renyi = np.exp(non_exp_Renyi)
return exp_Renyi
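# Note (added): for alpha=2 the returned value exp(D_2(target || behaviour)) equals
# the second moment E_b[(p_e(x)/p_b(x))^2] of the importance weights under the
# behaviour distribution, which is why it enters the lambda/threshold formulas below.
# Illustrative call (argument values are assumptions, not results from the paper):
#   d2 = compute_renyi_divergence(mu_b=0.0, sigma2_b=1.0, mu_e=0.5, sigma2_e=1.5)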
def welch_test(res):
res_mean = np.mean(res, axis=0)
res_std = np.var(res, axis=0) ** .5
n = res.shape[0]
confidence = .98
ll = []
for i in range(len(N)):
optimal = np.argmin(res_mean[i][:-1])
t_stat = -(res_mean[i][optimal] - res_mean[i]) / np.sqrt(res_std[i][optimal] ** 2 / n + res_std[i] ** 2 / n)
dof = (res_std[i][optimal] ** 2 / n + res_std[i] ** 2 / n) ** 2 / (res_std[i][optimal] ** 4 / (n ** 2 * (n - 1)) + res_std[i] ** 4 / (n ** 2 * (n - 1)))
dof = dof.astype(int)
c = t.ppf(confidence, dof)
ll.append(t_stat < c)
print(ll)
return np.array(ll).T
for sigma2_b in SIGMA2_B:
for sigma2_e in SIGMA2_E:
# On policy sampling for reference
X_on, f_on = generate_dataset(N_ref, mu_e, sigma2_e)
mu_f_e = np.mean(f_on)
sigma2_f_e = np.var(f_on)
d2_Renyi = compute_renyi_divergence(mu_b, sigma2_b, mu_e, sigma2_e)
print('Reference: ', mu_f_e, sigma2_f_e)
print('Renyi: ', d2_Renyi)
target_dist = stats.norm(mu_e, sigma2_e)
behav_dist = stats.norm(mu_b, sigma2_b)
res = np.zeros((n_runs, len(N), 8))
lambdas = np.zeros((n_runs, len(N), 3))
ess = np.zeros((n_runs, len(N), 8))
for run in range(n_runs):
X_all, f_all = generate_dataset(max(N), mu_b, sigma2_b)
X_on_all, f_on_all = generate_dataset(max(N), mu_e, sigma2_e)
pdf_e_all = target_dist.pdf(X_all)
pdf_b_all = behav_dist.pdf(X_all)
for i, n in enumerate(N):
X_n, f_n = X_all[:n], f_all[:n]
X_on_n, f_on_n = X_on_all[:n], f_on_all[:n]
pdf_e = pdf_e_all[:n]
pdf_b = pdf_b_all[:n]
iw = pdf_e / pdf_b
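                # (added comment) Weight corrections compared below: a multiplicative
                # shrinkage w / ((1 - lambda) + lambda * w) with three choices of
                # lambda (closed-form, find_optimal_lambda, estimate_lambda), hard
                # truncation of w at `threshold`, self-normalisation, and the
                # "optimistic shrinkage" lambda * w / (w**2 + lambda) whose lambda
                # minimises an estimated bias^2 + variance objective.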
lambda_optimal = np.sqrt(np.log(1 / significance) / (3 * d2_Renyi * n))
lambda_superoptimal = find_optimal_lambda(d2_Renyi, n, significance)
lambda_estimated, conv = estimate_lambda(iw, n, significance, return_info=True)
threshold = np.sqrt((3 * d2_Renyi * n)/(2 * np.log(1 / significance)))
iw_optimal = iw / ((1 - lambda_optimal) + lambda_optimal * iw)
iw_superoptimal = iw / ((1 - lambda_superoptimal) + lambda_superoptimal * iw)
iw_estimated = iw / ((1 - lambda_estimated) + lambda_estimated * iw)
                iw_truncated = np.clip(iw, 0, threshold)
iw_sn = iw / np.sum(iw) * n
def obj(lambda_):
shrinkage_weight = (lambda_ * iw) / (iw ** 2 + lambda_)
estimated_rewards_ = shrinkage_weight * f_n
variance = np.var(estimated_rewards_)
bias = np.sqrt(np.mean((iw - shrinkage_weight) ** 2)) * max(f_n)
return bias ** 2 + variance
lambda_opt = minimize(obj, x0=np.array([1]), bounds=[(0., np.inf)], method='Powell').x
iw_os = (lambda_opt * iw) / (iw ** 2 + lambda_opt)
ess[run, i, 0] = np.sum(iw) ** 2 / np.sum(iw ** 2)
ess[run, i, 4] = np.sum(iw_optimal) ** 2 / np.sum(iw_optimal ** 2)
ess[run, i, 5] = np.sum(iw_superoptimal) ** 2 / np.sum(iw_superoptimal ** 2)
ess[run, i, 6] = np.sum(iw_estimated) ** 2 / np.sum(iw_estimated ** 2)
                ess[run, i, 2] = np.sum(iw_truncated) ** 2 / np.sum(iw_truncated ** 2)
ess[run, i, 1] = np.sum(iw_sn) ** 2 / np.sum(iw_sn ** 2)
ess[run, i, 7] = n
ess[run, i, 3] = np.sum(iw_os) ** 2 / np.sum(iw_os ** 2)
f_iw = np.mean(iw * f_n)
f_iw_optimal = np.mean(iw_optimal * f_n)
f_iw_superoptimal = np.mean(iw_superoptimal * f_n)
f_iw_estimated = np.mean(iw_estimated * f_n)
                f_iw_truncated = np.mean(iw_truncated * f_n)
f_iw_wn = np.mean(iw_sn * f_n)
f_on_nn = np.mean(f_on_n)
f_iw_os = np.mean(iw_os * f_n)
error_iw = np.abs(f_iw - mu_f_e)
error_optimal = np.abs(f_iw_optimal - mu_f_e)
error_superoptimal = np.abs(f_iw_superoptimal - mu_f_e)
error_estimated = np.abs(f_iw_estimated - mu_f_e)
                error_truncated = np.abs(f_iw_truncated - mu_f_e)
error_wn = np.abs(f_iw_wn - mu_f_e)
error_on = np.abs(f_on_nn - mu_f_e)
error_os = np.abs(f_iw_os - mu_f_e)
res[run, i, 0] = error_iw
res[run, i, 1] = error_wn
                res[run, i, 2] = error_truncated
res[run, i, 3] = error_os
res[run, i, 4] = error_optimal
res[run, i, 5] = error_superoptimal
res[run, i, 6] = error_estimated
res[run, i, 7] = error_on
lambdas[run, i, 0] = lambda_optimal
lambdas[run, i, 1] = lambda_superoptimal
lambdas[run, i, 2] = lambda_estimated
test = welch_test(res)
res_mean = np.mean(res, axis=0)
res_std = np.var(res, axis=0) ** .5
res_low, res_high = t.interval(0.95, n_runs-1, loc=res_mean, scale=res_std / np.sqrt(n_runs))
lambdas_mean = np.mean(lambdas, axis=0)
lambdas_std = np.var(lambdas, axis=0) ** .5
lambdas_low, lambdas_high = t.interval(0.95, n_runs - 1, loc=lambdas_mean, scale=lambdas_std / | np.sqrt(n_runs) | numpy.sqrt |
import numpy as np
import cv2
from renderer import plot_sdf
SHAPE_PATH = '../shapes/shape/'
SHAPE_IMAGE_PATH = '../shapes/shape_images/'
TRAIN_DATA_PATH = '../datasets/train/'
VAL_DATA_PATH = '../datasets/val/'
SAMPLED_IMAGE_PATH = '../datasets/sampled_images/'
HEATMAP_PATH = '../results/true_heatmaps/'
CANVAS_SIZE = np.array([800, 800]) # Keep two dimensions the same
SHAPE_COLOR = (255, 255, 255)
POINT_COLOR = (127, 127, 127)
# The Shape and Circle classes are adapted from
# https://github.com/Oktosha/DeepSDF-explained/blob/master/deepSDF-explained.ipynb
class Shape:
def sdf(self, p):
pass
class Circle(Shape):
def __init__(self, c, r):
self.c = c
self.r = r
def set_c(self, c):
self.c = c
def set_r(self, r):
self.r = r
def sdf(self, p):
return np.linalg.norm(p - self.c) - self.r
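    # Illustrative sign convention (added): with Circle(np.array([0.5, 0.5]), 0.25),
    # sdf(np.array([0.5, 0.5])) == -0.25 (inside) and sdf(np.array([1.0, 0.5])) == 0.25 (outside).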
# The CircleSampler class is adapted from
# https://github.com/mintpancake/2d-sdf-net
class CircleSampler(object):
def __init__(self, circle_name, circle_path, circle_image_path, sampled_image_path, train_data_path, val_data_path,
split_ratio=0.8, show_image=False):
self.circle_name = circle_name
self.circle_path = circle_path
self.circle_image_path = circle_image_path
self.sampled_image_path = sampled_image_path
self.train_data_path = train_data_path
self.val_data_path = val_data_path
self.circle = Circle([0, 0], 0)
self.sampled_data = np.array([])
self.train_data = np.array([])
self.val_data = np.array([])
self.split_ratio = split_ratio
self.show_image = show_image
def run(self, show_image):
self.load()
self.sample()
self.save(show_image)
# load the coordinate of center and the radius of the circle for sampling
def load(self):
f = open(f'{self.circle_path}{self.circle_name}.txt', 'r')
line = f.readline()
x, y, radius = map(lambda n: np.double(n), line.strip('\n').split(' '))
center = np.array([x, y])
f.close()
self.circle.set_c(center)
self.circle.set_r(radius)
def sample(self, m=5000, n=2000, var=(0.025, 0.0025)):
"""
:param m: number of points sampled on the boundary
each boundary point generates 2 samples
:param n: number of points sampled uniformly in the canvas
:param var: two Gaussian variances used to transform boundary points
"""
# Do uniform sampling
# Use polar coordinate
r = np.random.uniform(0, 0.5, size=(n, 1))
t = np.random.uniform(0, 2 * np.pi, size=(n, 1))
# Transform to Cartesian coordinate
uniform_points = np.concatenate((0.5 + r * np.cos(t), 0.5 + r * np.sin(t)), axis=1)
# Do Gaussian sampling
t = np.random.uniform(0, 2 * np.pi, size=(m, 1))
direction = np.concatenate(( | np.cos(t) | numpy.cos |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
@author: <NAME>
@create time: 2020-09-25 11:20
@edit time: 2021-04-14 23:25
@file: /RL4Net-PA/root/anaconda3/envs/PA/lib/python3.7/site-packages/rl4net/envs/power_allocation/pa_env_v2.py
@desc: An environment for power allocation in d2d and BS het-nets.
Different from pa_env:
1. allow `recv` as a sorter
2. separate pos_seed / fading_seed (seed); for a given environment the positions
of the nodes are fixed, but the fading (which includes the Jakes channel model
and shadow fading) varies.
3. reset() now accepts a seed parameter to reset the random seed of the fading.
Path loss is 114.8 + 36.7*np.log10(d), following 3GPP TR 36.873, where d is
the transmitting distance and fc is 3.5GHz.
Bandwidth is 20MHz and AWGN power is -114dBm.
Assume BS power is lower than 46 dBm (about 40 W).
Assume minimum transmit power: 5dBm / maximum: 38dBm, for d2d devices.
FP algorithm, WMMSE algorithm, maximum power, random power allocation
schemes as comparisons.
downlink
"""
from collections import namedtuple
from pathlib import Path
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
import scipy
import scipy.io
import scipy.special
Node = namedtuple('Node', 'x y type')
class PAEnv:
@property
def n_states(self):
"""return dim of state"""
part_len = {
'recv': self.m_state + 1,
'power': self.m_state + 1,
'rate': self.m_state + 1,
'fading': self.m_state
}
return sum(part_len[metric] for metric in self.metrics)
@property
def n_actions(self):
"""return num of actions"""
return self.n_levels
def init_observation_space(self, kwargs):
valid_parts = ['power', 'rate', 'fading', 'recv']
# default using power
sorter = kwargs['sorter'] if 'sorter' in kwargs else 'power'
# default using all
metrics = kwargs['metrics'] if 'metrics' in kwargs else valid_parts
# check data
if sorter not in valid_parts:
msg = f'sorter should in power, rate and fading, but is {sorter}'
raise ValueError(msg)
if any(metric not in valid_parts for metric in metrics):
msg = f'metrics should in power, rate and fading, but is {metrics}'
raise ValueError(msg)
# set to instance attr
self.sorter, self.metrics = sorter, metrics
def init_power_levels(self):
min_power, max_power = self.min_power, self.max_power
zero_power = 0
def cal_p(p_dbm): return 1e-3 * np.power(10, p_dbm / 10)
dbm_powers = np.linspace(min_power, max_power, num=self.n_levels-1)
powers = [cal_p(p_dbm) for p_dbm in dbm_powers]
powers = [zero_power] + powers
self.power_levels = np.array(powers)
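        # (added comment) cal_p converts dBm to Watts, e.g. 5 dBm -> ~0.00316 W and
        # 38 dBm -> ~6.31 W, so power_levels is [0, ~3.2 mW, ..., ~6.31 W] with
        # n_levels - 1 nonzero levels spaced evenly in dBm.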
def init_pos(self):
""" Initialize position of devices(DT, DR, BS and CUE).
Establish a Cartesian coordinate system using the BS as an origin.
        Cellular User Equipment (CUE) are all located within the radius R_bs
        of the base station (typically 1 km) and outside the protected radius
        r_bs (simulation parameter, typically 0.01 km) of the BS.
        The D2D transmission devices (DT) are located within the radius
        (R_bs - R_dev) of the BS, to ensure the DRs are all inside the radius R_bs.
        Each DT has a cluster with several DRs, which are allocated within the
        radius R_dev of the DT. Furthermore, DRs always appear outside the radius
        r_dev (typically 0.001 km) of their DT.
        All positions are stored with the information of the corresponding
device in attributes of the environment instance, self.users and
self.devices.
"""
# set seed
np.random.seed(self.pos_seed)
r_bs, R_bs, r_dev, R_dev = self.r_bs, self.R_bs, self.r_dev, self.R_dev
def random_point(min_r, radius, ox=0, oy=0):
# https://www.cnblogs.com/yunlambert/p/10161339.html
            # reference for the formula derivation; the code there has a bug in the uniform sampling
            theta = np.random.random() * 2 * np.pi
            r = np.random.uniform(min_r**2, radius**2)
x, y = np.cos(theta) * np.sqrt(r), np.sin(theta) * np.sqrt(r)
return ox + x, oy + y
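        # (added comment) random_point draws the squared radius uniformly in
        # [min_r**2, radius**2] and then takes a square root, which yields points
        # distributed uniformly over the annulus area instead of clustering near
        # the centre.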
# init CUE positions
self.station = Node(0, 0, 'station')
self.users = {}
for i in range(self.m_usr):
x, y = random_point(r_bs, R_bs)
user = Node(x, y, 'user')
self.users[i] = user
# init D2D positions
self.devices = {}
for t in range(self.n_t):
tx, ty = random_point(r_bs, R_bs - R_dev)
t_device = Node(tx, ty, 't_device')
r_devices = {
r: Node(*random_point(r_dev, R_dev, tx, ty), 'r_device')
for r in range(self.m_r)
}
self.devices[t] = {'t_device': t_device, 'r_devices': r_devices}
def init_jakes(self, fd=10, Ts=20e-3, Ns=50):
"""Initialize samples of Jakes model.
Jakes model is a simulation of the rayleigh channel, which represents
the small-scale fading.
Each Rx corresponding to a (downlink) channel, each channel is a
source of interference to other channels. Consdering the channel
itself, we get a matrix representing the small-scale fading. Note that
the interference is decided on the position of Tx and Rx, so that the
m interferences make by m channels from the same Tx have the same
fading ratio.
Args:
fd: Doppler frequency, default 10(Hz)
Ts: sampling period, default 20 * 1e-3(s)
Ns: number of samples, default 50
"""
# set seed
np.random.seed(self.seed)
n_t, m_r, n_bs, m_usr = self.n_t, self.m_r, 1, self.m_usr
n_recvs = n_t * m_r + n_bs * m_usr
randn = np.random.randn
def calc_h_set(pho):
# calculate next sample of Jakes model.
h_d2d = np.kron(np.sqrt((1.-pho**2)*0.5*(
randn(n_recvs, n_t)**2 + randn(n_recvs, n_t)**2)),
np.ones((1, m_r), dtype=np.int32))
h_bs = np.kron(np.sqrt((1.-pho**2)*0.5*(
randn(n_recvs, n_bs)**2 + randn(n_recvs, n_bs)**2)),
np.ones((1, m_usr), dtype=np.int32))
h_set = np.concatenate((h_d2d, h_bs), axis=1)
return h_set
# recurrence generate all Ns samples of Jakes.
H_set = np.zeros([n_recvs, n_recvs, int(Ns)], dtype=np.float32)
pho = np.float32(scipy.special.k0(2*np.pi*fd*Ts))
H_set[:, :, 0] = calc_h_set(0)
for i in range(1, int(Ns)):
H_set[:, :, i] = H_set[:, :, i-1]*pho + calc_h_set(pho)
self.H_set = H_set
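        # (added comment) The loop above is a first-order Gauss-Markov recursion:
        # H[:, :, i] = pho * H[:, :, i-1] + innovation, where the innovation is
        # scaled by sqrt(1 - pho**2) inside calc_h_set, so the marginal power of
        # the fading samples stays constant while successive samples are correlated.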
def init_path_loss(self, slope=0):
"""Initialize paht loss( large-scale fading).
The large-scale fading is related to distance. An experimental
formula can be used to modelling it by 3GPP TR 36.873, explained as:
L = 36.7log10(d) + 22.7 + 26log10(fc) - 0.3(hUT - 1.5).
When fc=3.5GHz and hUT=1.5m, the formula can be simplified to:
L = 114.8 + 36.7*log10(d) + 10*log10(z),
where z is a lognormal random variable.
        As with the small-scale fading, each of the n Rxs has one signal and
        (n-1) interferences. Using an n*n matrix to record the path loss, we
        note that the interferences from one Tx share the same large-scale
        fading, consistent with the small-scale fading.
"""
n_t, m_r, n_bs, m_usr = self.n_t, self.m_r, self.n_bs, self.m_usr
n_r_devices, n_recvs = n_t * m_r, n_t * m_r + n_bs * m_usr
# calculate distance matrix from initialized positions.
distance_matrix = np.zeros((n_recvs, n_recvs))
def get_distances(node):
"""Calculate distances from other devices to given device."""
dis = np.zeros(n_recvs)
# d2d
for t_index, cluster in self.devices.items():
t_device = cluster['t_device']
delta_x, delta_y = t_device.x - node.x, t_device.y - node.y
distance = np.sqrt(delta_x**2 + delta_y**2)
dis[t_index*m_r: t_index*m_r+m_r] = distance
# bs
delta_x, delta_y = self.station.x - node.x, self.station.y - node.y
distance = np.sqrt(delta_x**2 + delta_y**2)
            dis[n_r_devices:] = distance  # the first n_r_devices entries (d2d channels) are already filled
return dis
        # for both the receivers and the interference terms, consider the d2d links first and then the base station
for t_index, cluster in self.devices.items():
r_devices = cluster['r_devices']
for r_index, r_device in r_devices.items():
distance_matrix[t_index * self.m_r +
r_index] = get_distances(r_device)
for u_index, user in self.users.items():
distance_matrix[n_r_devices + u_index] = get_distances(user)
self.distance_matrix = distance_matrix
# assign the minimum distance
min_dis = np.concatenate(
(np.repeat(self.r_dev, n_r_devices), np.repeat(self.r_bs, m_usr))
) * np.ones((n_recvs, n_recvs))
std = 8. + slope * (distance_matrix - min_dis)
# random initialize lognormal variable
lognormal = np.random.lognormal(size=(n_recvs, n_recvs), sigma=std)
# micro
path_loss = lognormal * \
pow(10., -(114.8 + 36.7*np.log10(distance_matrix))/10.)
self.path_loss = path_loss
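        # (added comment) Rough magnitudes with z = 1 (no shadowing): at d = 0.1 km
        # the channel gain is 10 ** (-(114.8 - 36.7) / 10) ~= 1.5e-8, and at d = 1 km
        # it is 10 ** (-11.48) ~= 3.3e-12.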
def __init__(self, n_levels,
n_t_devices=9, m_r_devices=4, n_bs=1, m_usrs=4, **kwargs):
"""Initialize PA environment"""
# set sttributes
self.n_t, self.m_r = n_t_devices, m_r_devices
self.n_bs, self.m_usr = n_bs, m_usrs
self.n_recvs = self.n_t * self.m_r + self.n_bs * self.m_usr
self.r_dev, self.r_bs, self.R_dev, self.R_bs = 0.001, 0.01, 0.1, 1
self.Ns, self.n_levels = 50, n_levels
self.min_power, self.max_power, self.thres_power = 5, 38, -114 # dBm
self.bs_power = 10 # W
self.m_state = 16
self.__dict__.update(kwargs)
        # set random seed
self.pos_seed = kwargs['pos_seed'] if kwargs.get('pos_seed', 0) > 1 else 799345
self.seed = kwargs['seed'] if kwargs.get('seed', 0) > 1 else 799345
# init attributes of pa env
self.init_observation_space(kwargs)
self.init_power_levels()
self.init_pos() # init recv pos
self.init_jakes(Ns=self.Ns) # init rayleigh loss using jakes model
self.init_path_loss(slope=0) # init path loss
self.cur_step = 0
def reset(self, seed=None):
if seed:
self.seed = seed
self.init_jakes(Ns=self.Ns) # init rayleigh loss using jakes model
self.init_path_loss() # init path loss
self.cur_step = 0
h_set = self.H_set[:, :, self.cur_step]
self.fading = np.square(h_set) * self.path_loss
return | np.random.random((self.n_t * self.m_r, self.n_states)) | numpy.random.random |
import unittest
import numpy as np
import openmdao.api as om
from openmdao.utils.assert_utils import assert_near_equal
from dymos.utils.testing_utils import assert_check_partials
import dymos as dm
from dymos.transcriptions.pseudospectral.components import GaussLobattoInterleaveComp
from dymos.transcriptions.grid_data import GridData
class TestGaussLobattoInterleaveComp(unittest.TestCase):
def setUp(self):
dm.options['include_check_partials'] = True
self.grid_data = gd = GridData(num_segments=3, segment_ends=np.array([0., 2., 4., 10.0]),
transcription='gauss-lobatto', transcription_order=[3, 3, 3])
num_disc_nodes = gd.subset_num_nodes['state_disc']
num_col_nodes = gd.subset_num_nodes['col']
self.p = om.Problem(model=om.Group())
state_options = {'u': {'units': 'm', 'shape': (1,)},
'v': {'units': 'm', 'shape': (3, 2)}}
ode_outputs = {'vehicle_cg': {'units': 'm', 'shape': (3,)}}
indep_comp = om.IndepVarComp()
self.p.model.add_subsystem('indep', indep_comp, promotes=['*'])
indep_comp.add_output('state_disc:u',
val=np.zeros((num_disc_nodes, 1)), units='m')
indep_comp.add_output('state_disc:v',
val=np.zeros((num_disc_nodes, 3, 2)), units='m')
indep_comp.add_output('state_col:u',
val=np.zeros((num_col_nodes, 1)), units='m')
indep_comp.add_output('state_col:v',
val=np.zeros((num_col_nodes, 3, 2)), units='m')
indep_comp.add_output('ode_disc:cg',
val=np.zeros((num_disc_nodes, 3)), units='m')
indep_comp.add_output('ode_col:cg',
val=np.zeros((num_col_nodes, 3)), units='m')
glic = self.p.model.add_subsystem('interleave_comp',
subsys=GaussLobattoInterleaveComp(grid_data=gd))
glic.add_var('u', **state_options['u'], disc_src='state_disc:u', col_src='state_col:u')
glic.add_var('v', **state_options['v'], disc_src='state_disc:v', col_src='state_col:v')
glic.add_var('vehicle_cg', **ode_outputs['vehicle_cg'], disc_src='ode_disc:cg', col_src='ode_col:cg')
self.p.model.connect('state_disc:u', 'interleave_comp.disc_values:u')
self.p.model.connect('state_disc:v', 'interleave_comp.disc_values:v')
self.p.model.connect('state_col:u', 'interleave_comp.col_values:u')
self.p.model.connect('state_col:v', 'interleave_comp.col_values:v')
self.p.model.connect('ode_disc:cg', 'interleave_comp.disc_values:vehicle_cg')
self.p.model.connect('ode_col:cg', 'interleave_comp.col_values:vehicle_cg')
self.p.setup(force_alloc_complex=True)
self.p['state_disc:u'] = | np.random.random((num_disc_nodes, 1)) | numpy.random.random |
# -*- coding: utf-8 -*-
"""
Defines unit tests for :mod:`colour.contrast.barten1999` module.
"""
import numpy as np
import unittest
from itertools import permutations
from colour.contrast import (optical_MTF_Barten1999, pupil_diameter_Barten1999,
sigma_Barten1999, retinal_illuminance_Barten1999,
maximum_angular_size_Barten1999,
contrast_sensitivity_function_Barten1999)
from colour.utilities import ignore_numpy_errors
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = [
'TestOpticalMTFBarten1999', 'TestPupilDiameterBarten1999',
'TestSigmaBarten1999', 'TestRetinalIlluminanceBarten1999',
'TestMaximumAngularSizeBarten1999',
'TestContrastSensitivityFunctionBarten1999'
]
class TestOpticalMTFBarten1999(unittest.TestCase):
"""
Defines :func:`colour.contrast.barten1999.optical_MTF_Barten1999`
definition unit tests methods.
"""
def test_optical_MTF_Barten1999(self):
"""
Tests :func:`colour.contrast.barten1999.optical_MTF_Barten1999`
definition.
"""
np.testing.assert_almost_equal(
optical_MTF_Barten1999(4, 0.01), 0.968910791191297, decimal=7)
np.testing.assert_almost_equal(
optical_MTF_Barten1999(8, 0.01), 0.881323136669471, decimal=7)
np.testing.assert_almost_equal(
optical_MTF_Barten1999(4, 0.05), 0.454040738727245, decimal=7)
def test_n_dimensional_optical_MTF_Barten1999(self):
"""
Tests :func:`colour.contrast.barten1999.optical_MTF_Barten1999`
definition n-dimensional support.
"""
u = np.array([4, 8, 12])
sigma = np.array([0.01, 0.05, 0.1])
M_opt = optical_MTF_Barten1999(u, sigma)
u = np.tile(u, (6, 1))
sigma = np.tile(sigma, (6, 1))
M_opt = np.tile(M_opt, (6, 1))
np.testing.assert_almost_equal(
optical_MTF_Barten1999(u, sigma), M_opt, decimal=7)
u = np.reshape(u, (2, 3, 3))
sigma = np.reshape(sigma, (2, 3, 3))
M_opt = np.reshape(M_opt, (2, 3, 3))
np.testing.assert_almost_equal(
optical_MTF_Barten1999(u, sigma), M_opt, decimal=7)
@ignore_numpy_errors
def test_nan_optical_MTF_Barten1999(self):
"""
Tests :func:`colour.contrast.barten1999.optical_MTF_Barten1999`
definition nan support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
optical_MTF_Barten1999(np.array(case), np.array(case))
class TestPupilDiameterBarten1999(unittest.TestCase):
"""
Defines :func:`colour.contrast.barten1999.pupil_diameter_Barten1999`
definition unit tests methods.
"""
def test_pupil_diameter_Barten1999(self):
"""
Tests :func:`colour.contrast.barten1999.pupil_diameter_Barten1999`
definition.
"""
np.testing.assert_almost_equal(
pupil_diameter_Barten1999(20, 60), 2.272517118855717, decimal=7)
np.testing.assert_almost_equal(
pupil_diameter_Barten1999(0.2, 600), 2.272517118855717, decimal=7)
np.testing.assert_almost_equal(
pupil_diameter_Barten1999(20, 60, 30),
2.459028745178825,
decimal=7)
def test_n_dimensional_pupil_diameter_Barten1999(self):
"""
Tests :func:`colour.contrast.barten1999.pupil_diameter_Barten1999`
definition n-dimensional support.
"""
L = np.array([0.2, 20, 100])
X_0 = np.array([60, 120, 240])
Y_0 = np.array([60, 30, 15])
d = pupil_diameter_Barten1999(L, X_0, Y_0)
L = np.tile(L, (6, 1))
X_0 = np.tile(X_0, (6, 1))
d = np.tile(d, (6, 1))
np.testing.assert_almost_equal(
pupil_diameter_Barten1999(L, X_0, Y_0), d, decimal=7)
L = np.reshape(L, (2, 3, 3))
X_0 = np.reshape(X_0, (2, 3, 3))
d = np.reshape(d, (2, 3, 3))
np.testing.assert_almost_equal(
pupil_diameter_Barten1999(L, X_0, Y_0), d, decimal=7)
@ignore_numpy_errors
def test_nan_pupil_diameter_Barten1999(self):
"""
Tests :func:`colour.contrast.barten1999.pupil_diameter_Barten1999`
definition nan support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
pupil_diameter_Barten1999(
| np.array(case) | numpy.array |
import cv2
import numpy as np
import json
from copy_paste import CopyPaste
from coco import CocoDetectionCP
from visualize import display_instances
import albumentations as A
import random
from matplotlib import pyplot as plt
transformScene = A.Compose([
#A.RandomScale(scale_limit=(-0.9, 1), p=1), #LargeScaleJitter from scale of 0.1 to 2
A.PadIfNeeded(256, 256, border_mode=0), #pads with image in the center, not the top left like the paper
#A.Resize(800, 1333, always_apply=True, p=1),
A.Resize(534, 889, always_apply=True, p=1),
#A.RandomCrop(534, 889),
#A.Resize(800, 1333, always_apply=True, p=1),
]
# bbox_params=A.BboxParams(format="coco", min_visibility=0.05)
)
transform = A.Compose([
A.RandomScale(scale_limit=(-0.9, 1), p=1), #LargeScaleJitter from scale of 0.1 to 2
A.PadIfNeeded(256, 256, border_mode=0), #pads with image in the center, not the top left like the paper
A.Flip(always_apply=True, p=0.5),
A.Resize(800, 1333, always_apply=True, p=1),
#A.Solarize(always_apply=True, p=1.0, threshold=(128, 128)),
A.RandomCrop(534, 889),
#A.Resize(800, 1333, always_apply=True, p=1),
# pct_objects is the percentage of objects to paste over
CopyPaste(blend=True, sigma=1, pct_objects_paste=1, p=1.)
], bbox_params=A.BboxParams(format="coco")
)
#data = CocoDetectionCP(
# '../agilent-repos/mmdetection/data/bead_cropped_detection/images',
# '../agilent-repos/mmdetection/data/custom/object-classes.json',
# transform
#)
data = CocoDetectionCP(
'../Swin-Transformer-Object-Detection/data/flooding_high_cropped',
# '../agilent-repos/mmdetection/data/bead_cropped_detection/images',
#'../Swin-Transformer-Object-Detection/data/bead_cropped_detection/traintype2lower.json',
'../Swin-Transformer-Object-Detection/data/beading_basler',
'../Swin-Transformer-Object-Detection/data/basler_bead_non_cropped.json',
transform,
transformScene
)
f, ax = plt.subplots(1, 2, figsize=(16, 16))
#index = random.randint(0, len(data))
#index = random.randint(0, 6) # We are testing on the 6 with annotations
index = 5 # hardcode for testing
img_data = data[index]
image = img_data['image']
masks = img_data['masks']
bboxes = img_data['bboxes']
# new_anno = img_data['annotation']
# with open('aug_one.json', 'w') as j_file:
# json.dump(new_anno, j_file, indent=4)
#
# cv2.imwrite("Basler_acA2440-35um__23336827__20201014_102933834_103.tiff", image)
empty = np.array([])
display_instances(image, empty, empty, empty, empty, show_mask=False, show_bbox=False, ax=ax[0])
if len(bboxes) > 0:
boxes = | np.stack([b[:4] for b in bboxes], axis=0) | numpy.stack |
import numpy as np
import torch, os, argparse, random
from sklearn.metrics import roc_auc_score, average_precision_score
basedir = os.path.abspath(os.path.dirname(__file__))
os.chdir(basedir)
os.makedirs("models", exist_ok=True)
torch.backends.cudnn.deterministic = True
torch.autograd.set_detect_anomaly(True)
from model import NeoDTI_with_aff
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--seed", type=int, default=26, help="random seed for initialization")
parser.add_argument("--d", default=1024, type=int, help="the embedding dimension d")
parser.add_argument("--n",default=1.0, type=float, help="global gradient norm to be clipped")
parser.add_argument("--k",default=512, type=int, help="the dimension of reprojection matrices k")
parser.add_argument("--l2-factor",default = 0.2, type=float, help="weight of l2 loss")
parser.add_argument("--lr", default=1e-3, type=float, help='learning rate')
parser.add_argument("--weight-decay", default=0, type=float, help='weight decay of the optimizer')
parser.add_argument("--num-steps", default=3000, type=int, help='number of training steps')
parser.add_argument("--device", choices=[-1,0,1,2,3], default=0, type=int, help='device number (-1 for cpu)')
args = parser.parse_args()
return args
def row_normalize(a_matrix, substract_self_loop):
if substract_self_loop == True:
np.fill_diagonal(a_matrix,0)
a_matrix = a_matrix.astype(float)
row_sums = a_matrix.sum(axis=1)+1e-12
new_matrix = a_matrix / row_sums[:, np.newaxis]
new_matrix[np.isnan(new_matrix) | np.isinf(new_matrix)] = 0.0
return torch.Tensor(new_matrix)
def retrain(args, DTItrain, aff, verbose=True):
set_seed(args)
drug_protein = np.zeros((num_drug,num_protein))
mask = np.zeros((num_drug,num_protein))
for ele in DTItrain:
drug_protein[ele[0],ele[1]] = ele[2]
mask[ele[0],ele[1]] = 1
protein_drug = drug_protein.T
drug_protein_normalize = row_normalize(drug_protein,False).to(device)
protein_drug_normalize = row_normalize(protein_drug,False).to(device)
drug_protein = torch.Tensor(drug_protein).to(device)
mask = torch.Tensor(mask).to(device)
drug_protein_affinity = aff
protein_drug_affinity = drug_protein_affinity.T
drug_protein_affinity_normalize = row_normalize(drug_protein_affinity,False).to(device)
protein_drug_affinity_normalize = row_normalize(protein_drug_affinity,False).to(device)
drug_protein_affinity = torch.Tensor(drug_protein_affinity).to(device)
mask_affinity = | np.zeros((num_drug,num_protein)) | numpy.zeros |
#! /usr/bin/env python
#
"""
This is the main driver routine for the SED display and analysis.
The normal use would be to invoke this from the command line as in
sed_plot_interface.py
There are no parameters for the call.
This code requires the Python extinction package. Installation of the
package is described at "https://extinction.readthedocs.io/en/latest/".
Other required packages: tkinter, matplotlib, numpy, scipy, sys, os, math,
and astropy. All these are common packages.
"""
import math
import sys
import os
import tkinter as Tk
import tkinter.ttk
import tkinter.filedialog
import tkinter.simpledialog
import tkinter.messagebox
from tkinter.colorchooser import askcolor
from tkinter.scrolledtext import ScrolledText
from tkinter.filedialog import askopenfilenames
from tkinter.simpledialog import askinteger
import numpy
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import matplotlib
from matplotlib.figure import Figure
import read_vizier_sed_table
import sed_utilities
import tkinter_utilities
import extinction_code
import model_utilities
import position_plot
matplotlib.use('TkAgg')
# The following are "global" variables with line/marker information from
# matplotlib. These are used, but not changed, in the code in more than one
# place hence I am using single variables here for the values.
MATPLOTLIB_SYMBOL_LIST = [
None, "o", "v", "^", "<", ">", "1", "2", "3", "4", "8", "s", "p",
"P", "*", "h", "H", "+", "x", "X", "D", "d", "|", "_", ".", "."]
MATPLOTLIB_SYMBOL_NAME_LIST = [
'None', 'circle', 'triangle down', 'triangle up', 'triangle_left',
'triangle right', 'tri_down', 'tri_up', 'tri_left', 'tri_right',
'octagon', 'square', 'pentagon', 'plus (filled)', 'star',
'hexagon1', 'hexagon2', 'plus', 'x', 'x (filled)', 'diamond',
'thin_diamond', 'vline', 'hline', 'point', 'pixel']
MATPLOTLIB_LINE_LIST = ['-', '--', '-.', ':', None]
MATPLOTLIB_LINE_NAME_LIST = ['solid', 'dashed', 'dashdot', 'dotted', 'None']
# define a background colour for windows
BGCOL = '#F8F8FF'
# default colours
COLOUR_SET = ['blue', 'forestgreen', 'black', 'orange', 'red', 'purple',
'cyan', 'lime', 'brown', 'violet', 'grey', 'gold']
WAVELNORM = 2.2
FLAMBDANORM = 5.e-15
def startup():
"""
    startup() is a wrapper for starting the plot tool.
Parameters
----------
None.
Returns
-------
root : The Tkinter window class variable for the plot window.
plot_object : The SED plot GUI object variable.
"""
# Make a Tkinter window
newroot = Tk.Tk()
newroot.title("SED Fitting Tool")
newroot.config(bg=BGCOL)
# start the interface
plot_object = PlotWindow(newroot)
return newroot, plot_object
def strfmt(value):
"""
Apply a format to a real value for writing out the data values.
This routine is used to format an input real value either in
exponential format or in floating point format depending on
the magnitude of the input value.
This works better for constant width columns than the Python g format.
Parameters
----------
value : a real number value
Returns
-------
outstring : a format string segment
"""
if (abs(value) > 1.e+07) or (abs(value) < 0.001):
outstring = '%14.7e ' % (value)
if value == 0.:
outstring = '%14.6f ' % (value)
else:
outstring = '%14.6f ' % (value)
outstring = outstring.lstrip(' ')
return outstring
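# Illustrative outputs of strfmt (added; note the trailing space kept for column
# alignment): strfmt(0.5) -> '0.500000 ' and strfmt(1.23e-05) -> '1.2300000e-05 '.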
def get_data_values(xdata, ydata, mask, filternames):
"""
Utility routine to apply a mask to (x,y) data.
Parameters
----------
xdata: A numpy array of values, nominally float values
ydata: A numpy array of values, nominally float values
mask: A numpy boolean array for which points are to be returned
filternames: A numpy string array of the filter names
Returns
-------
newxdata: A numpy array of the xdata values where mask = True
newydata: A numpy array of the ydata values where mask = True
newfilternames: A numpy array of the filter names where mask = True
"""
inds = numpy.where(mask)
newxdata = numpy.copy(xdata[inds])
newydata = numpy.copy(ydata[inds])
try:
newfilternames = numpy.copy(filternames[inds])
except TypeError:
newfilternames = []
for loop in range(len(newxdata)):
newfilternames.append('')
newfilternames = numpy.asarray(newfilternames)
inds = numpy.argsort(newxdata)
newxdata = newxdata[inds]
newydata = newydata[inds]
newfilternames = newfilternames[inds]
return newxdata, newydata, newfilternames
def unpack_vot(votdata):
"""
Utility routine to unpack Vizier VOT photometry data.
Parameters
----------
votdata: A table of Vizier photometry values from the read_vizier_vot
function
Returns
-------
data_set: A list containing a variety of values read from the VOT file
"""
photometry_values = numpy.copy(votdata[0])
error_mask = numpy.copy(votdata[1])
filter_names = numpy.copy(votdata[2])
references = numpy.copy(votdata[3])
refpos = numpy.copy(votdata[4])
data_set = {'wavelength': None, 'frequency': None, 'fnu': None,
'flambda': None, 'lfl': None, 'l4fl': None,
'dfnu': None, 'dflambda': None, 'dl4fl': None,
'dlfl': None, 'mask': None, 'plot_mask': None,
'filter_name': None, 'distance': None,
'position': None, 'refpos': None,
'references': None, 'plot': None, 'source': None,
'colour_by_name': False}
data_set['wavelength'] = numpy.squeeze(photometry_values[0, :])
data_set['frequency'] = numpy.squeeze(photometry_values[1, :])
data_set['fnu'] = numpy.squeeze(photometry_values[4, :])
data_set['flambda'] = numpy.squeeze(photometry_values[6, :])
data_set['lfl'] = numpy.squeeze(photometry_values[8, :])
data_set['l4fl'] = numpy.squeeze(photometry_values[8, :]) *\
(data_set['wavelength']**3)
data_set['dfnu'] = numpy.squeeze(photometry_values[5, :])
data_set['dflambda'] = numpy.squeeze(photometry_values[7, :])
data_set['dlfl'] = numpy.squeeze(photometry_values[9, :])
data_set['dl4fl'] = numpy.squeeze(photometry_values[9, :]) *\
(data_set['wavelength']**3)
data_set['filter_name'] = | numpy.copy(filter_names) | numpy.copy |
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
\file PyProteinBatch.py
\brief Object to agrupate a set of proteins.
\copyright Copyright (c) 2021 Visual Computing group of Ulm University,
Germany. See the LICENSE file at the top-level directory of
this distribution.
\author <NAME> (<EMAIL>)
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
import os
import numpy as np
import h5py
class PyProteinBatch:
"""Class to store a protein batch.
"""
def __init__(self, pProteinList, pAminoInput = False, pLoadText = False):
"""Constructor.
Args:
pProteinList (PyProtein list): List of proteins.
pAminoInput (bool): Boolean that indicates if the inputs
are aminoacids.
"""
# Save if the input is at aminoacid level.
self.aminoInput_ = pAminoInput
if self.aminoInput_:
# Atom lists.
self.atomPos_ = []
self.atomTypes_ = []
self.atomBatchIds_ = []
self.centers_ = []
# Pooling lists.
self.graph1Neighs_ = [[]]
self.graph1NeighsStartIds_ = [[]]
self.graph2Neighs_ = [[]]
self.graph2NeighsStartIds_ = [[]]
curAtomIndexOffset = [0]
curGraph1NeighOffset = [0]
curGraph2NeighOffset = [0]
self.poolingIds_ = None
self.atomAminoIds_ = None
self.atomResidueIds_ = None
self.numResidues_ = 0
#Iterate over the protein list.
for protIter, curProtein in enumerate(pProteinList):
#Get the aminoacid information.
self.atomPos_.append(curProtein.aminoPos_[0])
self.atomTypes_.append(curProtein.aminoType_)
self.atomBatchIds_.append(np.full(curProtein.aminoType_.shape, protIter, dtype=np.int32))
#Get the neighborhood information.
self.graph1Neighs_[0].append(curProtein.aminoNeighs_ + curAtomIndexOffset[0])
self.graph1NeighsStartIds_[0].append(curProtein.aminoNeighsSIndices_ + curGraph1NeighOffset[0])
self.graph2Neighs_[0].append(curProtein.aminoNeighsHB_ + curAtomIndexOffset[0])
self.graph2NeighsStartIds_[0].append(curProtein.aminoNeighsSIndicesHB_ + curGraph2NeighOffset[0])
#Update offsets.
curAtomIndexOffset[0] += len(curProtein.aminoPos_[0])
curGraph1NeighOffset[0] += len(curProtein.aminoNeighs_)
curGraph2NeighOffset[0] += len(curProtein.aminoNeighsHB_)
#Get the protein centers.
self.centers_.append(curProtein.center_)
#Create the numpy arrays.
self.atomPos_ = np.concatenate(self.atomPos_, axis=0)
self.atomTypes_ = np.concatenate(self.atomTypes_, axis=0)
self.atomBatchIds_ = np.concatenate(self.atomBatchIds_, axis=0)
self.centers_ = np.concatenate(self.centers_, axis=0)
self.graph1Neighs_[0] = | np.concatenate(self.graph1Neighs_[0], axis=0) | numpy.concatenate |
import numpy as np
import pandas as pd
import os
from PIL import Image
# This is the case when we want to have camera6 ('camera_p3_right_data') as test camera
fname = '/home/kiyanoush/UoLincoln/Projects/DeepIL Codes/DatasetSize.csv'
testSize = pd.read_csv(fname, header=None)
testSize = np.array(testSize)
subset = testSize[0, 0:20]
sum = np.sum(subset)
print(int(sum))
print(int(5*sum))
print(int(6*sum))
subFolder = 'camera_p1_left_data', 'camera_p1_right_data', 'camera_p2_left_data', 'camera_p2_right_data', 'camera_p3_left_data'
path = '/home/kiyanoush/UoLincoln/Projects/DeepIL Codes/Data'
os.chdir(path)
x=[]
for i in range(1,6):
for j in range(1, int(testSize[0, i-1]) + 1):
for f in subFolder:
os.chdir(path+'/'+ str(i) + '/' + f)
imgName = str(j) + '.jpg'
image = Image.open(imgName)
new_image = image.resize((224,224))
new_image = | np.array(new_image) | numpy.array |
import numpy as np
import math
from FEModel3D import FEModel3D
import Visualization as vis
class Grilladge(FEModel3D):
def __init__(self, no_of_beams=2, beam_spacing=8, span_data=(2, 28, 28), canti_l=2.5, skew=90):
# https://www.youtube.com/watch?v=MBbVq_FIYDA
super().__init__()
self.no_of_beams = no_of_beams
self.beam_spacing = beam_spacing
self.span_data = span_data
self.skew = skew
self.canti_l = canti_l
def _z_coors_in_g1(self, discr=20):
"""returns numpy array of z coordinates in first girder"""
if isinstance(discr, int) == False:
raise TypeError(f"discr must be an integer!")
z_coors_in_g1 = np.array([0.0])
for j in range(self.span_data[0]):
z1_spacing = self.span_data[j+1] / discr
if j == 0:
z_local = 0
else:
z_local = sum(self.span_data[1:j+1])
for i in range(discr):
z_local += z1_spacing
z_coors_in_g1 = np.append(z_coors_in_g1, [z_local])
return np.round(z_coors_in_g1, decimals=3)
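    # Illustrative example (added): with span_data=(2, 28, 28) and discr=20,
    # _z_coors_in_g1 returns 41 values 0.0, 1.4, 2.8, ..., 56.0 (discr points per
    # span plus the starting support).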
def _z_coors_in_g(self, discr=20, gird_no=2):
"""returns numpy array of z coordinates in given girder"""
if isinstance(discr, int) == False:
raise TypeError(f"discr must be an integer!")
if isinstance(gird_no, int) == False:
raise TypeError(f"gird_no must be an integer!")
if gird_no == 1 or self.skew == 90:
z_coors_in_g = self._z_coors_in_g1(discr)
else:
rad_skew = math.radians(self.skew) # skew angle in radians
z_offset = (gird_no - 1) * self.beam_spacing * (1 / math.tan(rad_skew))
z_coors_in_g = self._z_coors_in_g1(discr) + z_offset
return np.round(z_coors_in_g, decimals=3)
def _z_coors_of_cantitip(self, discr=20, edge=1):
"""returns numpy array of z cooridnates of cantilever tips"""
if isinstance(discr, int) == False:
raise TypeError(f"discr must be an integer!")
if isinstance(edge, int) == False:
raise TypeError(f"edge must be an integer!")
if self.skew == 90:
z_coors_of_cantitip = self._z_coors_in_g1(discr)
elif edge == 1:
rad_skew = math.radians(self.skew) # skew angle in radians
z_offset = self.canti_l * (1 / math.tan(rad_skew))
z_coors_of_cantitip = self._z_coors_in_g1(discr) - z_offset
else:
rad_skew = math.radians(self.skew)
z_offset = (self.canti_l + (self.no_of_beams -1) * self.beam_spacing) \
* (1 / math.tan(rad_skew))
z_coors_of_cantitip = self._z_coors_in_g1(discr) + z_offset
return np.round(z_coors_of_cantitip, decimals=3)
def _z_coors_cross_m(self, discr=20, x_dist=4.0):
"""returns numpy array of z cooridnates of lingitudal arbitrary line (z-line) governing nodes"""
if isinstance(discr, int) == False:
raise TypeError(f"discr must be an integer!")
if isinstance(x_dist, float) == False and isinstance(x_dist, int) == False:
raise TypeError(f"x_dist must be a float or integer!")
if self.skew == 90 or x_dist == 0.0:
_z_coors_cross_m = self._z_coors_in_g1(discr)
else:
rad_skew = math.radians(self.skew)
z_offset = x_dist * (1 / math.tan(rad_skew))
_z_coors_cross_m = self._z_coors_in_g1(discr) + z_offset
return np.round(_z_coors_cross_m, decimals=3)
def _x_coors_in_g1(self, discr=20):
"""returns numpy array of x coordinates in first girder"""
if isinstance(discr, int) == False:
raise TypeError(f"discr must be an integer!")
x_coors_in_g1 = np.array([0.0])
for j in range(self.span_data[0]):
x_local = 0.0
for i in range(discr):
x_coors_in_g1 = np.append(x_coors_in_g1, [x_local])
return np.round(x_coors_in_g1, decimals=3)
def _x_coors_cross_m(self, discr=20, x_dist=4.0):
"""returns numpy array of x cooridnates of lingitudal arbitrary line (z-line) governing nodes"""
if isinstance(discr, int) == False:
raise TypeError(f"discr must be an integer!")
if isinstance(x_dist, float) == False and isinstance(x_dist, int) == False:
raise TypeError(f"x_dist must be a float or integer!")
x_coors_cross_m = self._x_coors_in_g1(discr) + x_dist
return np.round(x_coors_cross_m, decimals=3)
def _x_coors_in_g(self, discr=20, gird_no=2):
"""returns numpy array of x coordinates in given girder"""
if isinstance(discr, int) == False:
raise TypeError(f"discr must be an integer!")
x_coors_in_g = self._x_coors_in_g1(discr) + (gird_no-1) * self.beam_spacing
return | np.round(x_coors_in_g, decimals=3) | numpy.round |
## Re-implementation of the DCM planner for debugging
## Author : <NAME>
## Date : 20/11/ 2019
import numpy as np
import time
import os
import rospkg
import pybullet as p
import pinocchio as se3
from pinocchio.utils import se3ToXYZQUAT
from robot_properties_solo.config import Solo12Config
from robot_properties_solo.quadruped12wrapper import Quadruped12Robot
from mim_control.solo_impedance_controller import (
SoloImpedanceController,
)
from py_blmc_controllers.solo_centroidal_controller import (
SoloCentroidalController,
)
from reactive_planners.dcm_vrp_planner.re_split_dcm_planner import (
DCMStepPlanner,
)
from reactive_planners.dcm_vrp_planner.solo_step_planner import SoloStepPlanner
from reactive_planners.centroidal_controller.lipm_centroidal_controller import (
LipmCentroidalController,
)
from reactive_planners.utils.trajectory_generator import TrajGenerator
from reactive_planners.utils.solo_state_estimator import SoloStateEstimator
from pinocchio.utils import zero
from matplotlib import pyplot as plt
#################################################################################################
# Create a robot instance. This initializes the simulator as well.
if not ("robot" in globals()):
robot = Quadruped12Robot()
tau = np.zeros(12)
p.resetDebugVisualizerCamera(1.4, 90, -30, (0, 0, 0))
# Reset the robot to some initial state.
q0 = np.matrix(Solo12Config.initial_configuration).T
q0[2] = 0.3
dq0 = np.matrix(Solo12Config.initial_velocity).T
robot.reset_state(q0, dq0)
arr = lambda a: np.array(a).reshape(-1)
mat = lambda a: np.matrix(a).reshape((-1, 1))
total_mass = sum([i.mass for i in robot.pin_robot.model.inertias[1:]])
################### initial Parameters of Step Planner ############################################
l_min = -0.2
l_max = 0.2
w_min = -0.15
w_max = 0.15
t_min = 0.1
t_max = 0.3
l_p = 0.0
ht = 0.23
v_des = [0.0, 0.0]
W = [100.0, 100.0, 1.0, 1000.0, 1000.0]
step_planner = DCMStepPlanner(
l_min, l_max, w_min, w_max, t_min, t_max, l_p, ht
)
#################### Impedance Control Paramters ###################################################
x_des = 4 * [0.0, 0.0, -ht]
xd_des = 4 * [0, 0, 0]
kp = 4 * [200, 200, 200]
kd = 4 * [1.0, 1.0, 1.0]
##################### Centroidal Control Parameters ##################################################
x_com = [0.0, 0.0, ht]
xd_com = [0.0, 0.0, 0.0]
x_ori = [0.0, 0.0, 0.0, 1.0]
x_angvel = [0.0, 0.0, 0.0]
cnt_array = [0, 1, 1, 0]
solo_leg_ctrl = SoloImpedanceController(robot)
centr_lipm_controller = LipmCentroidalController(
robot.pin_robot,
total_mass,
mu=0.5,
kc=[50, 50, 300],
dc=[30, 30, 30],
kb=[1500, 1500, 1500],
db=[10.0, 10.0, 10.0],
eff_ids=robot.pinocchio_endeff_ids,
)
centr_controller = SoloCentroidalController(
robot.pin_robot,
total_mass,
mu=0.6,
kc=[5, 5, 1],
dc=[0, 0, 10],
kb=[100, 100, 100],
db=[10.0, 10.0, 10.0],
eff_ids=robot.pinocchio_endeff_ids,
)
F = np.zeros(12)
#################### Trajectory Generator ################################################################
trj = TrajGenerator(robot.pin_robot)
f_lift = 0.1  ## height the foot lifts off the ground
#################### initial location of the robot #######################################################
sse = SoloStateEstimator(robot.pin_robot)
q, dq = robot.get_state()
com_init = np.reshape(np.array(q[0:3]), (3,))
fl_foot, fr_foot, hl_foot, hr_foot = sse.return_foot_locations(q, dq)
fl_off, fr_off, hl_off, hr_off = sse.return_hip_offset(q, dq)
u = 0.5 * (np.add(fl_foot, hr_foot))[0:2]
zmp = u
n = 1
t = 0
u_min = np.array(
[-100000, -100000]
) ## initialising with values that won't affect force generation
u_max = np.array(
[1000000, 100000]
) ## initialising with values that won't affect force generation
t_end = 9999
sim_time = 2500
################# SoloStepPlaner ###############################################################
ssp = SoloStepPlanner(
robot.pin_robot, | np.array([l_min, w_min]) | numpy.array |
from collections import defaultdict
import itertools
import numpy as np
import pickle
import time
import warnings
from Analysis import binomial_pgf, BranchModel, StaticModel
from simulators.fires.UrbanForest import UrbanForest
from Policies import NCTfires, UBTfires, DWTfires, RHTfires, USTfires
from Utilities import fire_boundary, urban_boundary, forest_children, percolation_parameter, equivalent_percolation_control
np.seterr(all='raise')
def uniform():
# given alpha and beta, compute lattice probabilities for every (parent, child) pair
a = 0.2763
b = np.exp(-1/10)
p = percolation_parameter(a, b)
if p <= 0.5:
raise Warning('Percolation parameter {0:0.2f} is not supercritical'.format(p))
lattice_p = defaultdict(lambda: p)
# given (delta_alpha, delta_beta), construct the equivalent delta_p
delta_a = 0
delta_b = 0.4
dp = equivalent_percolation_control(a, b, delta_a, delta_b)
if p - dp >= 0.5:
raise Warning('Control is insufficient: p - dp = {0:0.2f} - {1:0.2f} = {2:0.2f}'.format(p, dp, p-dp))
control_p = defaultdict(lambda: dp)
control_ab = defaultdict(lambda: (delta_a, delta_b))
# or given delta_p, construct the equivalent (delta_alpha, delta_beta)
# delta_p = 0.4
# control_percolation = defaultdict(lambda: delta_p)
# control_gmdp = defaultdict(lambda: equivalent_gmdp_control(a, b, delta_p))
a = defaultdict(lambda: a)
b = defaultdict(lambda: b)
return a, b, lattice_p, control_p, control_ab
def nonuniform(simulation):
alpha_set = dict()
# beta_set = defaultdict(lambda: np.exp(-1/9))
beta_set = dict()
p_set = dict()
delta_beta = 0.35
control_gmdp = dict()
alpha_start = 0.2
alpha_end = 0.4
for r in range(simulation.dims[0]):
for c in range(simulation.dims[1]):
alpha_set[(r, c)] = alpha_start + (c/(simulation.dims[1]-1))*(alpha_end-alpha_start)
beta1 = np.exp(-1/5)
beta2 = np.exp(-1/10)
for r in range(simulation.dims[0]):
for c in range(simulation.dims[1]):
if c < simulation.dims[1]-simulation.urban_width:
beta_set[(r, c)] = beta1
else:
beta_set[(r, c)] = beta2
control_gmdp[(r, c)] = {'healthy': (alpha_set[(r, c)], 0),
'on_fire': (0, np.amin([delta_beta, beta_set[(r, c)]]))}
# set initial condition
initial_fire = []
r_center = np.floor((simulation.dims[0]-1)/2).astype(np.uint8)
c_center = np.floor((simulation.dims[1]-1)/2).astype(np.uint8)
delta_r = [k for k in range(-2, 3)]
delta_c = [k for k in range(-2, 3)]
deltas = itertools.product(delta_r, delta_c)
for (dr, dc) in deltas:
if dr == 0 and dc == 0:
continue
elif (dr == -2 or dr == 2) and (dc == -2 or dc == 2):
continue
elif dc == dr or dc == -dr:
continue
r, c = r_center + dr, c_center + dc
initial_fire.append((r, c))
# control_p = dict()
for tree_rc in simulation.group.keys():
for neighbor in simulation.group[tree_rc].neighbors:
p = percolation_parameter(alpha_set[neighbor], beta_set[tree_rc])
if p <= 0.5:
warnings.warn('p({0:0.2f}, {1:0.2f}) = {2:0.2f} <= 0.5'.format(alpha_set[neighbor],
beta_set[tree_rc], p))
p_set[(tree_rc, neighbor)] = p
# control_p[(tree_rc, neighbor)] = dict()
#
# for k in control_gmdp[neighbor].keys():
# da, db = control_gmdp[neighbor][k]
# dp = equivalent_percolation_control(alpha_set[neighbor], beta_set[tree_rc], da, db)
# if p - dp >= 0.5:
# warnings.warn('p - dp = {0:0.2f} - {1:0.2f} = {2:0.2f} >= 0.5'.format(p, dp, p - dp))
#
# control_p[(tree_rc, neighbor)][k] = dp
return alpha_set, beta_set, initial_fire, control_gmdp, p_set
def benchmark(simulation, branchmodel, policy, num_generations=1, num_simulations=1):
print('Running policy {0:s} with capacity {1:d} for {2:d} simulations'.format(policy.name,
policy.capacity,
num_simulations))
print('started at {0:s}'.format(time.strftime('%d-%b-%Y %H:%M')))
tic = time.clock()
results = dict()
staticmodel = StaticModel()
for seed in range(num_simulations):
np.random.seed(seed)
simulation.reset()
simulation.rng = seed
while not simulation.early_end:
branchmodel.reset()
branchmodel.set_boundary(fire_boundary(simulation))
if isinstance(policy, USTfires):
staticmodel.set_boundary(urban_boundary(simulation))
policy.urbanboundary = urban_boundary(simulation)
def children_function(p):
return forest_children(simulation, p)
branchmodel.set_children_function(children_function)
for _ in range(num_generations):
for process in branchmodel.GWprocesses.values():
for parent in process.current_parents:
if parent not in branchmodel.lattice_children:
branchmodel.lattice_children[parent] = branchmodel.children_function(parent)
if not isinstance(policy, USTfires):
policy.generate_map(branchmodel)
else:
policy.generate_map(branchmodel, staticmodel)
branchmodel.next_generation(policy)
if isinstance(policy, USTfires):
staticmodel.next_boundary(policy.control_decisions)
# apply control and update simulator
if not isinstance(policy, USTfires):
control = policy.control(branchmodel)
else:
control = policy.control(branchmodel, staticmodel)
simulation.update(control)
if (seed+1) % 10 == 0:
print('completed {0:d} simulations'.format((seed+1)))
results[seed] = {'healthy_trees': simulation.stats_trees[0]/np.sum(simulation.stats_trees),
                         'healthy_urban': simulation.stats_urban[0]/np.sum(simulation.stats_urban),
import tensorflow as tf
import tensorflow_addons as tfa
import numpy as np
from random import randint, choice, shuffle
import os
from glob import glob
from PIL import Image
import math
from pathlib import Path
import matplotlib.pyplot as plt
def load_cub_masked():
train_images = np.load('data/cub_train_seg_14x14_pad_20_masked.npy')
test_images = np.load('data/cub_test_seg_14x14_pad_20_masked.npy')
return train_images, None, test_images, None
def calculateIntersection(a0, a1, b0, b1):
if a0 >= b0 and a1 <= b1: # Contained
intersection = a1 - a0
elif a0 < b0 and a1 > b1: # Contains
intersection = b1 - b0
elif a0 < b0 and a1 > b0: # Intersects right
intersection = a1 - b0
elif a1 > b1 and a0 < b1: # Intersects left
intersection = b1 - a0
else: # No intersection (either side)
intersection = 0
return intersection
def calculate_overlap(rand_x,rand_y,drawn_boxes):
    # check whether a new box overlaps any of the already drawn boxes by more than 15%
for box in drawn_boxes:
x,y = box[0], box[1]
if calculateIntersection(rand_x,rand_x+14,x,x+14) * calculateIntersection(rand_y,rand_y+14,y,y+14) / 14**2 > 0.15:
return True
return False
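# Minimal usage sketch (not part of the original file, illustrative values only): with
# 14x14 boxes as above, a box at (10, 0) overlaps a box at (0, 0) on a 4x14 strip
# (4*14/14**2 ~ 0.29 > 0.15), while a box at (40, 40) does not overlap it at all.
def _overlap_example():
    drawn_boxes = [(0, 0)]
    assert calculateIntersection(10, 24, 0, 14) == 4
    assert calculate_overlap(10, 0, drawn_boxes)
    assert not calculate_overlap(40, 40, drawn_boxes)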
class MultiCUB:
def __init__(self, data, reshape=True):
self.num_channel = data[0].shape[-1]
self.train_x = data[0]
self.train_y = data[1]
self.test_x = data[2]
self.test_y = data[3]
if reshape:
self.train_x = tf.image.resize(self.train_x,(14,14)).numpy() #[28,28] -> [14,14]
self.test_x = tf.image.resize(self.test_x,(14,14)).numpy()
self.bg_list = glob('data/kylberg/*.png')
#triad hard
self.train_colors_triad = [(195,135,255),(193,255,135),(255,165,135),(81,197,255),(255,229,81),(255,81,139)]
self.test_colors_triad = [(255,125,227),(125,255,184),(255,205,125)]
#easy colors
self.train_colors = [(100, 209, 72) , (209, 72, 100) , (209, 127, 72), (72, 129, 209) , (84, 184, 209), (209, 109, 84), (184, 209, 84), (109, 84, 209)]
self.test_colors = [(222, 222, 102),(100,100,219),(219,100,219),(100,219,100)]
def create_sample(self, n, width, height, bg = None, test=False):
canvas = np.zeros([width, height, self.num_channel], np.float32)
if bg=='solid_random':
brightness = randint(0,255)
r = randint(0,brightness)/255.
g = randint(0,brightness)/255.
b = randint(0,brightness)/255.
canvas[:,:,0] = r
canvas[:,:,1] = g
canvas[:,:,2] = b
elif bg=='solid_fixed':
color = choice(self.train_colors)
canvas[:,:,0] = color[0]/255.
canvas[:,:,1] = color[1]/255.
canvas[:,:,2] = color[2]/255.
elif bg=='unseen_solid_fixed':
color = choice(self.test_colors)
canvas[:,:,0] = color[0]/255.
canvas[:,:,1] = color[1]/255.
canvas[:,:,2] = color[2]/255.
elif bg=='white':
            canvas[:,:,:] = np.ones_like(canvas)
# Copyright 2020 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for flax.linen."""
from absl.testing import absltest
import jax
from jax import random
from jax import lax
from jax.nn import initializers
import jax.numpy as jnp
import numpy as onp
from typing import Any, Tuple, Iterable, Callable
from flax import linen as nn
from flax.linen import compact
from flax.core import Scope, freeze
# Parse absl flags test_srcdir and test_tmpdir.
jax.config.parse_flags_with_absl()
# Require JAX omnistaging mode.
jax.config.enable_omnistaging()
class DummyModule(nn.Module):
@compact
def __call__(self, x):
bias = self.param('bias', initializers.ones, x.shape)
return x + bias
class Dense(nn.Module):
features: int
@compact
def __call__(self, x):
kernel = self.param('kernel',
initializers.lecun_normal(),
(x.shape[-1], self.features))
y = jnp.dot(x, kernel)
return y
class ModuleTest(absltest.TestCase):
def test_init_module(self):
rngkey = jax.random.PRNGKey(0)
x = jnp.array([1.])
scope = Scope({}, {'params': rngkey}, mutable=['params'])
y = DummyModule(parent=scope)(x)
params = scope.variables()['params']
y2 = DummyModule(parent=scope.rewound())(x)
onp.testing.assert_allclose(y, y2)
onp.testing.assert_allclose(y, jnp.array([2.]))
self.assertEqual(params, {'bias': jnp.array([1.])})
def test_arg_module(self):
rngkey = jax.random.PRNGKey(0)
x = jnp.ones((10,))
scope = Scope({}, {'params': rngkey}, mutable=['params'])
y = Dense(3, parent=scope)(x)
params = scope.variables()['params']
y2 = Dense(3, parent=scope.rewound())(x)
onp.testing.assert_allclose(y, y2)
self.assertEqual(params['kernel'].shape, (10, 3))
def test_util_fun(self):
rngkey = jax.random.PRNGKey(0)
class MLP(nn.Module):
@compact
def __call__(self, x):
x = self._mydense(x)
x = self._mydense(x)
return x
def _mydense(self, x):
return Dense(3)(x)
x = jnp.ones((10,))
scope = Scope({}, {'params': rngkey}, mutable=['params'])
y = MLP(parent=scope)(x)
params = scope.variables()['params']
y2 = MLP(parent=scope.rewound())(x)
onp.testing.assert_allclose(y, y2)
param_shape = jax.tree_map(jnp.shape, params)
self.assertEqual(param_shape,
{'Dense_0': {'kernel': (10, 3)},
'Dense_1': {'kernel': (3, 3)}})
def test_nested_module_reuse(self):
rngkey = jax.random.PRNGKey(0)
class MLP(nn.Module):
@compact
def __call__(self, x):
x = self._mydense(x)
x = self._mydense(x)
return x
def _mydense(self, x):
return Dense(3)(x)
class Top(nn.Module):
@compact
def __call__(self, x):
mlp = MLP()
y = mlp(x)
z = mlp(x)
return y + z
x = jnp.ones((10,))
scope = Scope({}, {'params': rngkey}, mutable=['params'])
y = Top(parent=scope)(x)
params = scope.variables()['params']
y2 = Top(parent=scope.rewound())(x)
onp.testing.assert_allclose(y, y2)
param_shape = jax.tree_map(jnp.shape, params)
self.assertEqual(param_shape,
{'MLP_0':
{'Dense_0': {'kernel': (10, 3)},
'Dense_1': {'kernel': (3, 3)}}})
def test_setup_dict_assignment(self):
rngkey = jax.random.PRNGKey(0)
class MLP(nn.Module):
def setup(self):
self.lyrs1 = {'a': Dense(3), 'b': Dense(3),}
self.lyrs2 = [Dense(3), Dense(3)]
def __call__(self, x):
y = self.lyrs1['a'](x)
z = self.lyrs1['b'](y)
#w = self.lyrs2[0](x)
return z
x = jnp.ones((10,))
scope = Scope({}, {'params': rngkey}, mutable=['params'])
y = MLP(parent=scope)(x)
params = scope.variables()['params']
y2 = MLP(parent=scope.rewound())(x)
    onp.testing.assert_allclose(y, y2)
"""Contains most of the methods that compose the ORIGIN software."""
import itertools
import logging
import warnings
from datetime import datetime
from functools import wraps
from time import time
warnings.filterwarnings("ignore", category=RuntimeWarning)
import matplotlib.pyplot as plt
import numpy as np
from astropy.modeling.fitting import LevMarLSQFitter
from astropy.modeling.models import Gaussian1D
from astropy.nddata import overlap_slices
from astropy.stats import (
gaussian_fwhm_to_sigma,
gaussian_sigma_to_fwhm,
sigma_clipped_stats,
)
from astropy.table import Column, Table, join
from astropy.stats import sigma_clip
from astropy.utils.exceptions import AstropyUserWarning
from joblib import Parallel, delayed
from mpdaf.obj import Image
from mpdaf.tools import progressbar
from numpy import fft
from numpy.linalg import multi_dot
from scipy import fftpack, stats
from scipy.interpolate import interp1d
from scipy.ndimage import binary_dilation, binary_erosion
from scipy.ndimage import label as ndi_label
from scipy.ndimage import maximum_filter
from scipy.signal import fftconvolve
from scipy.sparse.linalg import svds
from scipy.spatial import ConvexHull, cKDTree
from .source_masks import gen_source_mask
__all__ = (
'add_tglr_stat',
'compute_deblended_segmap',
'Compute_GreedyPCA',
'compute_local_max',
'compute_segmap_gauss',
'compute_thresh_gaussfit',
'Compute_threshold_purity',
'compute_true_purity',
'Correlation_GLR_test',
'create_masks',
'estimation_line',
'merge_similar_lines',
'purity_estimation',
'spatial_segmentation',
'spatiospectral_merging',
'unique_sources',
)
def timeit(f):
"""Decorator which prints the execution time of a function."""
@wraps(f)
def timed(*args, **kw):
logger = logging.getLogger(__name__)
t0 = time()
result = f(*args, **kw)
logger.debug('%s executed in %0.1fs', f.__name__, time() - t0)
return result
return timed
def orthogonal_projection(a, b):
"""Compute the orthogonal projection: a.(a^T.a)-1.a^T.b
NOTE: does not include the (a^T.a)-1 term as it is often not needed (when
a is already normalized).
"""
# Using multi_dot which is faster than np.dot(np.dot(a, a.T), b)
# Another option would be to use einsum, less readable but also very
# fast with Numpy 1.14+ and optimize=True. This seems to be as fast as
# multi_dot.
# return np.einsum('i,j,jk->ik', a, a, b, optimize=True)
if a.ndim == 1:
a = a[:, None]
return multi_dot([a, a.T, b])
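# Small illustrative sketch (not part of the original module): for a unit vector a,
# orthogonal_projection(a, b) = a.a^T.b keeps only the component of each column of b
# along a.
def _orthogonal_projection_example():
    a = np.array([1.0, 0.0, 0.0])
    b = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
    proj = orthogonal_projection(a, b)
    np.testing.assert_allclose(proj, [[1.0, 2.0], [0.0, 0.0], [0.0, 0.0]])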
@timeit
def spatial_segmentation(Nx, Ny, NbSubcube, start=None):
"""Compute indices to split spatially in NbSubcube x NbSubcube regions.
    Each zone is computed from left to right and from top to bottom.
    The first pixel of the first zone has coordinates (row, col) = (Nx, 1).
Parameters
----------
Nx : int
Number of columns
Ny : int
Number of rows
NbSubcube : int
Number of subcubes for the spatial segmentation
start : tuple
        if not None, the tuple is the (y,x) starting point
Returns
-------
intx, inty : int, int
limits in pixels of the columns/rows for each zone
"""
# Segmentation of the rows vector in Nbsubcube parts from right to left
    inty = np.linspace(Ny, 0, NbSubcube + 1, dtype=int)
    # Segmentation of the columns vector in Nbsubcube parts from left to right
    intx = np.linspace(0, Nx, NbSubcube + 1, dtype=int)
if start is not None:
inty += start[0]
intx += start[1]
return inty, intx
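# Illustrative sketch (not part of the original module): a 100x80 (Nx x Ny) field split
# into 2x2 zones gives row limits from top to bottom and column limits from left to right.
def _spatial_segmentation_example():
    inty, intx = spatial_segmentation(100, 80, 2)
    np.testing.assert_array_equal(inty, [80, 40, 0])
    np.testing.assert_array_equal(intx, [0, 50, 100])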
def DCTMAT(nl, order):
"""Return the DCT transformation matrix of size nl-by-(order+1).
    Equivalent function to Matlab/Octave's dctmtx.
https://octave.sourceforge.io/signal/function/dctmtx.html
Parameters
----------
order : int
Order of the DCT (spectral length).
Returns
-------
array: DCT Matrix
"""
yy, xx = np.mgrid[:nl, : order + 1]
D0 = np.sqrt(2 / nl) * np.cos((yy + 0.5) * (np.pi / nl) * xx)
D0[:, 0] *= 1 / np.sqrt(2)
return D0
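# Illustrative check (not part of the original module): the columns of the DCT matrix
# are orthonormal, so D0.T @ D0 is the (order+1) x (order+1) identity.
def _dctmat_example():
    D0 = DCTMAT(100, 10)
    np.testing.assert_allclose(D0.T @ D0, np.eye(11), atol=1e-12)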
@timeit
def dct_residual(w_raw, order, var, approx, mask):
"""Function to compute the residual of the DCT on raw data.
Parameters
----------
w_raw : array
Data array.
order : int
The number of atom to keep for the DCT decomposition.
var : array
Variance array.
approx : bool
If True, an approximate computation is used, not taking the variance
into account.
Returns
-------
Faint, cont : array
Residual and continuum estimated from the DCT decomposition.
"""
nl = w_raw.shape[0]
D0 = DCTMAT(nl, order)
shape = w_raw.shape[1:]
nspec = np.prod(shape)
if approx:
# Compute the DCT transformation, without using the variance.
#
# Given the transformation matrix D0, we compute for each spectrum S:
#
# C = D0.D0^t.S
#
# Old version using tensordot:
# A = np.dot(D0, D0.T)
# cont = np.tensordot(A, w_raw, axes=(0, 0))
# Looping on spectra and using multidot is ~6x faster:
# D0 is typically 3681x11 elements, so it is much more efficient
# to compute D0^t.S first (note the array is reshaped below)
cont = [
multi_dot([D0, D0.T, w_raw[:, y, x]])
for y, x in progressbar(np.ndindex(shape), total=nspec)
]
# For reference, this is identical to the following scipy version,
# though scipy is 2x slower than tensordot (probably because it
# computes all the coefficients)
# from scipy.fftpack import dct
# w = (np.arange(nl) < (order + 1)).astype(int)
# cont = dct(dct(w_raw, type=2, norm='ortho', axis=0) * w[:,None,None],
# type=3, norm='ortho', axis=0, overwrite_x=False)
else:
# Compute the DCT transformation, using the variance.
#
# As the noise differs on each spectral component, we need to take into
# account the (diagonal) covariance matrix ฮฃ for each spectrum S:
#
# C = D0.(D^t.ฮฃ^-1.D)^-1.D0^t.ฮฃ^-1.S
#
w_raw_var = w_raw / var
D0T = D0.T
# Old version (slow):
# def continuum(D0, D0T, var, w_raw_var):
# A = np.linalg.inv(np.dot(D0T / var, D0))
# return np.dot(np.dot(np.dot(D0, A), D0T), w_raw_var)
#
# cont = Parallel()(
# delayed(continuum)(D0, D0T, var[:, i, j], w_raw_var[:, i, j])
# for i in range(w_raw.shape[1]) for j in range(w_raw.shape[2]))
# cont = np.asarray(cont).T.reshape(w_raw.shape)
# map of valid spaxels, i.e. spaxels with at least one valid value
valid = ~np.any(mask, axis=0)
from numpy.linalg import inv
cont = []
for y, x in progressbar(np.ndindex(shape), total=nspec):
if valid[y, x]:
res = multi_dot(
[D0, inv(np.dot(D0T / var[:, y, x], D0)), D0T, w_raw_var[:, y, x]]
)
else:
res = multi_dot([D0, D0.T, w_raw[:, y, x]])
cont.append(res)
return np.stack(cont).T.reshape(w_raw.shape)
def compute_segmap_gauss(data, pfa, fwhm_fsf=0, bins='fd'):
"""Compute segmentation map from an image, using gaussian statistics.
Parameters
----------
data : array
Input values, typically from a O2 test.
pfa : float
Desired false alarm.
fwhm : int
Width (in integer pixels) of the filter, to convolve with a PSF disc.
bins : str
        Method for computing bins (see numpy.histogram_bin_edges).
Returns
-------
float, array
threshold, and labeled image.
"""
# test threshold : uses a Gaussian approximation of the test statistic
# under H0
histO2, frecO2, gamma, mea, std = compute_thresh_gaussfit(data, pfa, bins=bins)
    # threshold - erosion and dilation to clean point-like "sources"
mask = data > gamma
mask = binary_erosion(mask, border_value=1, iterations=1)
mask = binary_dilation(mask, iterations=1)
# convolve with PSF
if fwhm_fsf > 0:
fwhm_pix = int(fwhm_fsf) // 2
size = fwhm_pix * 2 + 1
disc = np.hypot(*list(np.mgrid[:size, :size] - fwhm_pix)) < fwhm_pix
mask = fftconvolve(mask, disc, mode='same')
mask = mask > 1e-9
return gamma, ndi_label(mask)[0]
def compute_deblended_segmap(
image, npixels=5, snr=3, dilate_size=11, maxiters=5, sigma=3, fwhm=3.0, kernelsize=5
):
"""Compute segmentation map using photutils.
The segmentation map is computed with the following steps:
- Creation of a mask of sources with the ``snr`` threshold, using
`photutils.make_source_mask`.
- Estimation of the background statistics with this mask
(`astropy.stats.sigma_clipped_stats`), to estimate a refined threshold
with ``median + sigma * rms``.
- Convolution with a Gaussian kernel.
- Creation of the segmentation image, using `photutils.detect_sources`.
- Deblending of the segmentation image, using `photutils.deblend_sources`.
Parameters
----------
image : mpdaf.obj.Image
The input image.
npixels : int
The number of connected pixels that an object must have to be detected.
snr, dilate_size :
See `photutils.make_source_mask`.
maxiters, sigma :
See `astropy.stats.sigma_clipped_stats`.
fwhm : float
Kernel size (pixels) for the PSF convolution.
kernelsize : int
Size of the convolution kernel.
Returns
-------
`~mpdaf.obj.Image`
The deblended segmentation map.
"""
from astropy.convolution import Gaussian2DKernel
from photutils import make_source_mask, detect_sources
data = image.data
mask = make_source_mask(data, snr=snr, npixels=npixels, dilate_size=dilate_size)
bkg_mean, bkg_median, bkg_rms = sigma_clipped_stats(
data, sigma=sigma, mask=mask, maxiters=maxiters
)
threshold = bkg_median + sigma * bkg_rms
logger = logging.getLogger(__name__)
logger.info(
'Background Median %.2f RMS %.2f Threshold %.2f', bkg_median, bkg_rms, threshold
)
sig = fwhm * gaussian_fwhm_to_sigma
kernel = Gaussian2DKernel(sig, x_size=kernelsize, y_size=kernelsize)
kernel.normalize()
segm = detect_sources(data, threshold, npixels=npixels, filter_kernel=kernel)
segm_deblend = phot_deblend_sources(
image, segm, npixels=npixels, filter_kernel=kernel, mode='linear'
)
return segm_deblend
def phot_deblend_sources(img, segmap, **kwargs):
"""Wrapper to catch warnings from deblend_sources."""
from photutils import deblend_sources
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore',
category=AstropyUserWarning,
message='.*contains negative values.*',
)
deblend = deblend_sources(img.data, segmap, **kwargs)
return Image(data=deblend.data, wcs=img.wcs, mask=img.mask, copy=False)
def createradvar(cu, ot):
"""Compute the compactness of areas using variance of position.
The variance is computed on the position given by adding one of the 'ot'
to 'cu'.
Parameters
----------
cu : 2D array
The current array
ot : 3D array
The other array
Returns
-------
var : array
The radial variances
"""
N = ot.shape[0]
out = np.zeros(N)
for n in range(N):
tmp = cu + ot[n, :, :]
y, x = np.where(tmp > 0)
r = np.sqrt((y - y.mean()) ** 2 + (x - x.mean()) ** 2)
out[n] = np.var(r)
return out
def fusion_areas(label, MinSize, MaxSize, option=None):
"""Function which merge areas which have a surface less than
MinSize if the size after merging is less than MaxSize.
The criteria of neighbor can be related to the minimum surface
or to the compactness of the output area
Parameters
----------
label : area
The labels of areas
MinSize : int
The size of areas under which they need to merge
MaxSize : int
The size of areas above which they cant merge
option : string
if 'var' the compactness criteria is used
if None the minimum surface criteria is used
Returns
-------
label : array
The labels of merged areas
"""
while True:
indlabl = np.argsort(np.sum(label, axis=(1, 2)))
tampon = label.copy()
for n in indlabl:
# if the label is not empty
cu = label[n, :, :]
cu_size = np.sum(cu)
if cu_size > 0 and cu_size < MinSize:
# search for neighbors
labdil = label[n, :, :].copy()
labdil = binary_dilation(labdil, iterations=1)
# only neighbors
test = np.sum(label * labdil[np.newaxis, :, :], axis=(1, 2)) > 0
indice = np.where(test == 1)[0]
ind = np.where(indice != n)[0]
indice = indice[ind]
                # loop over the candidate neighbors
ot = label[indice, :, :]
# test size of current with neighbor
if option is None:
test = np.sum(ot, axis=(1, 2))
elif option == 'var':
test = createradvar(cu, ot)
else:
raise ValueError('bad option')
if len(test) > 0:
# keep the min-size
ind = np.argmin(test)
cand = indice[ind]
if (np.sum(label[n, :, :]) + test[ind]) < MaxSize:
label[n, :, :] += label[cand, :, :]
label[cand, :, :] = 0
# clean empty area
ind = np.sum(label, axis=(1, 2)) > 0
label = label[ind, :, :]
tampon = tampon[ind, :, :]
if np.sum(tampon - label) == 0:
break
return label
@timeit
def area_segmentation_square_fusion(nexpmap, MinS, MaxS, NbSubcube, Ny, Nx):
"""Create non square area based on continuum test.
The full 2D image is first segmented in subcube. The area are fused in case
they are too small. Thanks to the continuum test, detected sources are
fused with associated area. The convex enveloppe of the sources inside each
area is then done. Finally all the convex enveloppe growth until using all
the pixels
Parameters
----------
nexpmap : 2D array
the active pixel of the image
MinS : int
The size of areas under which they need to merge
MaxS : int
The size of areas above which they cant merge
NbSubcube : int
Number of subcubes for the spatial segmentation
Nx : int
Number of columns
Ny : int
Number of rows
Returns
-------
label : array
label of the fused square
"""
# square area index with borders
Vert = np.sum(nexpmap, axis=1)
Hori = np.sum(nexpmap, axis=0)
y1 = np.where(Vert > 0)[0][0]
x1 = np.where(Hori > 0)[0][0]
y2 = Ny - np.where(Vert[::-1] > 0)[0][0]
x2 = Nx - np.where(Hori[::-1] > 0)[0][0]
start = (y1, x1)
inty, intx = spatial_segmentation(Nx, Ny, NbSubcube, start=start)
# % FUSION square AREA
label = []
for numy in range(NbSubcube):
for numx in range(NbSubcube):
y1, y2, x1, x2 = inty[numy + 1], inty[numy], intx[numx], intx[numx + 1]
tmp = nexpmap[y1:y2, x1:x2]
if np.mean(tmp) != 0:
labtest = ndi_label(tmp)[0]
labtmax = labtest.max()
for n in range(labtmax):
label_tmp = np.zeros((Ny, Nx))
label_tmp[y1:y2, x1:x2] = labtest == (n + 1)
label.append(label_tmp)
label = np.array(label)
return fusion_areas(label, MinS, MaxS)
@timeit
def area_segmentation_sources_fusion(labsrc, label, pfa, Ny, Nx):
"""Function to create non square area based on continuum test. Thanks
to the continuum test, detected sources are fused with associated area.
The convex enveloppe of the sources inside
each area is then done. Finally all the convex enveloppe growth until
using all the pixels
Parameters
----------
labsrc : array
segmentation map
label : array
label of fused square generated in area_segmentation_square_fusion
pfa : float
Pvalue for the test which performs segmentation
NbSubcube : int
Number of subcubes for the spatial segmentation
Nx : int
Number of columns
Ny : int
Number of rows
Returns
-------
label_out : array
label of the fused square and sources
"""
# compute the sources label
nlab = labsrc.max()
sources = np.zeros((nlab, Ny, Nx))
for n in range(1, nlab + 1):
sources[n - 1, :, :] = (labsrc == n) > 0
sources_save = sources.copy()
nlabel = label.shape[0]
nsrc = sources.shape[0]
for n in range(nsrc):
cu_src = sources[n, :, :]
# find the area in which the current source
# has bigger probability to be
test = np.sum(cu_src[np.newaxis, :, :] * label, axis=(1, 2))
if len(test) > 0:
ind = np.argmax(test)
# associate the source to the label
label[ind, :, :] = (label[ind, :, :] + cu_src) > 0
# mask other labels from this sources
mask = (1 - label[ind, :, :])[np.newaxis, :, :]
ot_lab = np.delete(np.arange(nlabel), ind)
label[ot_lab, :, :] *= mask
# delete the source
sources[n, :, :] = 0
return label, np.sum(sources_save, axis=0)
@timeit
def area_segmentation_convex_fusion(label, src):
"""Function to compute the convex enveloppe of the sources inside
each area is then done. Finally all the convex enveloppe growth until
using all the pixels
Parameters
----------
label : array
label containing the fusion of fused squares and sources
generated in area_segmentation_sources_fusion
src : array
label of estimated sources from segmentation map
Returns
-------
label_out : array
label of the convex
"""
label_fin = []
# for each label
for lab_n in range(label.shape[0]):
# keep only the sources inside the label
lab = label[lab_n, :, :]
data = src * lab
if np.sum(data > 0):
points = np.array(np.where(data > 0)).T
y_0 = points[:, 0].min()
x_0 = points[:, 1].min()
points[:, 0] -= y_0
points[:, 1] -= x_0
sny, snx = points[:, 0].max() + 1, points[:, 1].max() + 1
# compute the convex enveloppe of a sub part of the label
lab_temp = Convexline(points, snx, sny)
# in full size
label_out = np.zeros((label.shape[1], label.shape[2]))
label_out[y_0 : y_0 + sny, x_0 : x_0 + snx] = lab_temp
label_out *= lab
label_fin.append(label_out)
return np.array(label_fin)
def Convexline(points, snx, sny):
"""Function to compute the convex enveloppe of the sources inside
each area is then done and full the polygone
Parameters
----------
data : array
contain the position of source for one of the label
snx,sny: int,int
the effective size of area in the label
Returns
-------
lab_out : array
The filled convex enveloppe corresponding the sub label
"""
# convex enveloppe vertices
hull = ConvexHull(points)
xs = hull.points[hull.simplices[:, 1]]
xt = hull.points[hull.simplices[:, 0]]
sny, snx = points[:, 0].max() + 1, points[:, 1].max() + 1
tmp = np.zeros((sny, snx))
    # create the lines between vertices
for n in range(hull.simplices.shape[0]):
x0, x1, y0, y1 = xs[n, 1], xt[n, 1], xs[n, 0], xt[n, 0]
nx = np.abs(x1 - x0)
ny = np.abs(y1 - y0)
if ny > nx:
xa, xb, ya, yb = y0, y1, x0, x1
else:
xa, xb, ya, yb = x0, x1, y0, y1
if xa > xb:
xb, xa, yb, ya = xa, xb, ya, yb
indx = np.arange(xa, xb, dtype=int)
N = len(indx)
indy = np.array(ya + (indx - xa) * (yb - ya) / N, dtype=int)
if ny > nx:
tmpx, tmpy = indx, indy
indy, indx = tmpx, tmpy
tmp[indy, indx] = 1
radius = 1
dxy = 2 * radius
x = np.linspace(-dxy, dxy, 1 + (dxy) * 2)
y = np.linspace(-dxy, dxy, 1 + (dxy) * 2)
xv, yv = np.meshgrid(x, y)
r = np.sqrt(xv ** 2 + yv ** 2)
mask = np.abs(r) <= radius
# to close the lines
conv_lab = fftconvolve(tmp, mask, mode='same') > 1e-9
lab_out = conv_lab.copy()
for n in range(conv_lab.shape[0]):
ind = np.where(conv_lab[n, :] == 1)[0]
lab_out[n, ind[0] : ind[-1]] = 1
return lab_out
@timeit
def area_growing(label, mask):
"""Growing and merging of all areas
Parameters
----------
label : array
label containing convex enveloppe of each area
mask : array
mask of positive pixels
Returns
-------
label_out : array
label of the convex envelop grown to the max number of pixels
"""
# start by smaller
set_ind = np.argsort(np.sum(label, axis=(1, 2)))
# closure horizon
niter = 20
label_out = label.copy()
nlab = label_out.shape[0]
while True:
s = np.sum(label_out)
for n in set_ind:
cu_lab = label_out[n, :, :]
ind = np.delete(np.arange(nlab), n)
ot_lab = label_out[ind, :, :]
border = (1 - (np.sum(ot_lab, axis=0) > 0)) * mask
# closure in all case + 1 dilation
cu_lab = binary_dilation(cu_lab, iterations=niter + 1)
cu_lab = binary_erosion(cu_lab, border_value=1, iterations=niter)
label_out[n, :, :] = cu_lab * border
if np.sum(label_out) == np.sum(mask) or np.sum(label_out) == s:
break
return label_out
@timeit
def area_segmentation_final(label, MinS, MaxS):
"""Merging of small areas and give index
Parameters
----------
label : array
Label containing convex enveloppe of each area
MinS : number
The size of areas under which they need to merge
MaxS : number
The size of areas above which they cant merge
Returns
-------
sety,setx : array
List of index of each label
"""
# if an area is too small
label = fusion_areas(label, MinS, MaxS, option='var')
# create label map
areamap = np.zeros(label.shape[1:])
for i in range(label.shape[0]):
areamap[label[i, :, :] > 0] = i + 1
return areamap
@timeit
def Compute_GreedyPCA_area(
NbArea, cube_std, areamap, Noise_population, threshold_test, itermax, testO2
):
"""Function to compute the PCA on each zone of a data cube.
Parameters
----------
NbArea : int
Number of area
cube_std : array
Cube data weighted by the standard deviation
areamap : array
Map of areas
Noise_population : float
Proportion of estimated noise part used to define the
background spectra
threshold_test : list
User given list of threshold (not pfa) to apply on each area, the
list is of length NbAreas or of length 1.
itermax : int
Maximum number of iterations
testO2 : list of arrays
Result of the O2 test
Returns
-------
cube_faint : array
        Faint greedy decomposition of the STD cube
"""
cube_faint = cube_std.copy()
mapO2 = np.zeros(cube_std.shape[1:])
nstop = 0
area_iter = range(1, NbArea + 1)
if NbArea > 1:
area_iter = progressbar(area_iter)
for area_ind in area_iter:
# limits of each spatial zone
ksel = areamap == area_ind
# Data in this spatio-spectral zone
cube_temp = cube_std[:, ksel]
thr = threshold_test[area_ind - 1]
test = testO2[area_ind - 1]
cube_faint[:, ksel], mO2, kstop = Compute_GreedyPCA(
cube_temp, test, thr, Noise_population, itermax
)
mapO2[ksel] = mO2
nstop += kstop
return cube_faint, mapO2, nstop
def Compute_PCA_threshold(faint, pfa):
"""Compute threshold for the PCA.
Parameters
----------
faint : array
Standardized data.
pfa : float
PFA of the test.
Returns
-------
test, histO2, frecO2, thresO2, mea, std
Threshold for the O2 test
"""
test = O2test(faint)
# automatic threshold computation
histO2, frecO2, thresO2, mea, std = compute_thresh_gaussfit(test, pfa)
return test, histO2, frecO2, thresO2, mea, std
def Compute_GreedyPCA(cube_in, test, thresO2, Noise_population, itermax):
"""Function to compute greedy svd. thanks to the test (test_fun) and
according to a defined threshold (threshold_test) the cube is segmented
in nuisance and background part. A part of the background part
(1/Noise_population %) is used to compute a mean background, a signature.
The Nuisance part is orthogonalized to this signature in order to not
loose this part during the greedy process. SVD is performed on nuisance
in order to modelized the nuisance part and the principal eigen vector,
only one, is used to perform the projection of the whole set of data:
Nuisance and background. The Nuisance spectra which satisfied the test
are updated in the background computation and the background is so
cleaned from sources signature. The iteration stop when all the spectra
satisfy the criteria
Parameters
----------
Cube_in : array
The 3D cube data clean
test_fun: function
the test to be performed on data
Noise_population : float
Fraction of spectra estimated as background
itermax : int
Maximum number of iterations
Returns
-------
faint : array
cleaned cube
mapO2 : array
2D MAP filled with the number of iteration per spectra
thresO2 : float
Threshold for the O2 test
nstop : int
Nb of times the iterations have been stopped when > itermax
"""
logger = logging.getLogger(__name__)
# nuisance part
pypx = np.where(test > thresO2)[0]
npix = len(pypx)
faint = cube_in.copy()
mapO2 = np.zeros(faint.shape[1])
nstop = 0
with progressbar(total=npix, miniters=0, leave=False) as bar:
# greedy loop based on test
nbiter = 0
while len(pypx) > 0:
nbiter += 1
mapO2[pypx] += 1
if nbiter > itermax:
nstop += 1
logger.warning('Warning iterations stopped at %d', nbiter)
break
# vector data
test_v = np.ravel(test)
test_v = test_v[test_v > 0]
nind = np.where(test_v <= thresO2)[0]
sortind = np.argsort(test_v[nind])
# at least one spectra is used to perform the test
nb = 1 + int(len(nind) / Noise_population)
# background estimation
b = np.mean(faint[:, nind[sortind[:nb]]], axis=1)
# cube segmentation
x_red = faint[:, pypx]
# orthogonal projection with background.
x_red -= orthogonal_projection(b, x_red)
x_red /= np.nansum(b ** 2)
# sparse svd if nb spectrum > 1 else normal svd
if x_red.shape[1] == 1:
break
                # If the PCA does not converge, or if a giant point source exists
                # in the faint PCA, the reason will be here; in the latter case,
                # add a condition while calculating the "mean_in_pca" to
                # deactivate the subtraction of the mean.
                # This will make the vector which is above the threshold
                # equal to the background. For now we prefer to keep it, to
                # stop the iteration earlier in order to keep residual sources,
                # with the hypothesis that this spectrum is only slightly above
                # the threshold (what we observe in data).
U, s, V = np.linalg.svd(x_red, full_matrices=False)
else:
U, s, V = svds(x_red, k=1)
# orthogonal projection
faint -= orthogonal_projection(U[:, 0], faint)
# test
test = O2test(faint)
# nuisance part
pypx = np.where(test > thresO2)[0]
bar.update(npix - len(pypx) - bar.n)
bar.update(npix - len(pypx) - bar.n)
return faint, mapO2, nstop
def O2test(arr):
"""Compute the second order test on spaxels.
    The test estimates the background and nuisance parts of the data by means
    of a second order test: testing the mean and variance of the spectra at
    the same time.
Parameters
----------
arr : array-like
The 3D cube data to test.
Returns
-------
ndarray
result of the test.
"""
# np.einsum('ij,ij->j', arr, arr) / arr.shape[0]
return np.mean(arr ** 2, axis=0)
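# Illustrative check (not part of the original module): the statistic is simply the mean
# of the squared values along the spectral axis, one value per spaxel.
def _o2test_example():
    arr = np.array([[1.0, 2.0], [3.0, 4.0]])
    np.testing.assert_allclose(O2test(arr), [5.0, 10.0])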
def compute_thresh_gaussfit(data, pfa, bins='fd', sigclip=10):
"""Compute a threshold with a gaussian fit of a distribution.
Parameters
----------
data : array
2D data from the O2 test.
pfa : float
Desired false alarm.
bins : str
        Method for computing bins (see numpy.histogram_bin_edges).
Returns
-------
histO2 : histogram value of the test
frecO2 : frequencies of the histogram
thresO2 : automatic threshold for the O2 test
mea : mean value of the fit
std : sigma value of the fit
"""
logger = logging.getLogger(__name__)
data = data[data > 0]
data = sigma_clip(data, sigclip)
data = data.compressed()
histO2, frecO2 = np.histogram(data, bins=bins, density=True)
ind = np.argmax(histO2)
mod = frecO2[ind]
ind2 = np.argmin((histO2[ind] / 2 - histO2[:ind]) ** 2)
fwhm = mod - frecO2[ind2]
sigma = fwhm / np.sqrt(2 * np.log(2))
coef = stats.norm.ppf(pfa)
thresO2 = mod - sigma * coef
logger.debug('1st estimation mean/std/threshold: %f/%f/%f', mod, sigma, thresO2)
x = (frecO2[1:] + frecO2[:-1]) / 2
g1 = Gaussian1D(amplitude=histO2.max(), mean=mod, stddev=sigma)
fit_g = LevMarLSQFitter()
xcut = g1.mean + gaussian_sigma_to_fwhm * g1.stddev / 2
ksel = x < xcut
g2 = fit_g(g1, x[ksel], histO2[ksel])
mea, std = (g2.mean.value, g2.stddev.value)
# make sure to return float, not np.float64
thresO2 = float(mea - std * coef)
return histO2, frecO2, thresO2, mea, std
def _convolve_fsf(psf, cube, weights=None):
ones = np.ones_like(cube)
if weights is not None:
cube = cube * weights
ones *= weights
psf = np.ascontiguousarray(psf[::-1, ::-1])
psf -= psf.mean()
# build a weighting map per PSF and convolve
cube_fsf = fftconvolve(cube, psf, mode='same')
# Spatial part of the norm of the 3D atom
psf **= 2
norm_fsf = fftconvolve(ones, psf, mode='same')
return cube_fsf, norm_fsf
def _convolve_profile(Dico, cube_fft, norm_fft, fshape, n_jobs, parallel):
# Second cube of correlation values
dico_fft = fft.rfftn(Dico, fshape)[:, None] * cube_fft
cube_profile = _convolve_spectral(
parallel, n_jobs, dico_fft, fshape, func=fft.irfftn
)
dico_fft = fft.rfftn(Dico ** 2, fshape)[:, None] * norm_fft
norm_profile = _convolve_spectral(
parallel, n_jobs, dico_fft, fshape, func=fft.irfftn
)
norm_profile[norm_profile <= 0] = np.inf
np.sqrt(norm_profile, out=norm_profile)
cube_profile /= norm_profile
return cube_profile
def _convolve_spectral(parallel, nslices, arr, shape, func=fft.rfftn):
arr = np.array_split(arr, nslices, axis=-1)
out = parallel(delayed(func)(chunk, shape, axes=(0,)) for chunk in arr)
return np.concatenate(out, axis=-1)
@timeit
def Correlation_GLR_test(
cube, fsf, weights, profiles, nthreads=1, pcut=None, pmeansub=True
):
"""Compute the cube of GLR test values with the given PSF and
dictionary of spectral profiles.
Parameters
----------
cube : array
data cube
fsf : list of arrays
FSF for each field of this data cube
weights : list of array
Weight maps of each field
profiles : list of ndarray
Dictionary of spectral profiles to test
nthreads : int
number of threads
pcut : float
Cut applied to the profiles to limit their width
pmeansub : bool
Subtract the mean of the profiles
Returns
-------
correl : array
cube of T_GLR values of maximum correlation
profile : array
Number of the profile associated to the T_GLR
correl_min : array
cube of T_GLR values of minimum correlation
"""
logger = logging.getLogger(__name__)
Nz, Ny, Nx = cube.shape
# Spatial convolution of the weighted data with the zero-mean FSF
logger.info(
'Step 1/3 and 2/3: '
'Spatial convolution of weighted data with the zero-mean FSF, '
'Computing Spatial part of the norm of the 3D atoms'
)
if weights is None: # one FSF
fsf = [fsf]
weights = [None]
nfields = len(fsf)
fields = range(nfields)
if nfields > 1:
fields = progressbar(fields)
if nthreads != 1:
# copy the arrays because otherwise joblib's memmap handling fails
# (maybe because of astropy.io.fits doing weird things with the memap?)
cube = np.array(cube)
# Make sure that we have a float array in C-order because scipy.fft
# (new in v1.4) fails with Fortran ordered arrays.
cube = cube.astype(float)
with Parallel(n_jobs=nthreads) as parallel:
for nf in fields:
# convolve spatially each spectral channel by the FSF, and do the
# same for the norm (inverse variance)
res = parallel(
progressbar(
[
delayed(_convolve_fsf)(fsf[nf][i], cube[i], weights=weights[nf])
for i in range(Nz)
]
)
)
res = [np.stack(arr) for arr in zip(*res)]
if nf == 0:
cube_fsf, norm_fsf = res
else:
cube_fsf += res[0]
norm_fsf += res[1]
# First cube of correlation values
# initialization with the first profile
logger.info('Step 3/3 Computing second cube of correlation values')
# Prepare profiles:
# Cut the profiles and subtract the mean, if asked to do so
prof_cut = []
for prof in profiles:
prof = prof.copy()
if pcut is not None:
lpeak = prof.argmax()
lw = np.max(np.abs(np.where(prof >= pcut)[0][[0, -1]] - lpeak))
prof = prof[lpeak - lw : lpeak + lw + 1]
prof /= np.linalg.norm(prof)
if pmeansub:
prof -= prof.mean()
prof_cut.append(prof)
# compute the optimal shape for FFTs (on the wavelength axis).
# For profiles with different shapes, we need to know the indices to
# extract the signal from the inverse fft.
s1 = np.array(cube_fsf.shape) # cube shape
s2 = np.array([(d.shape[0], 1, 1) for d in prof_cut]) # profiles shape
fftshape = s1 + s2 - 1 # fft shape
fshape = [
fftpack.helper.next_fast_len(int(d)) # optimal fft shape
for d in fftshape.max(axis=0)[:1]
]
# and now computes the indices to extract the cube from the inverse fft.
startind = (fftshape - s1) // 2
endind = startind + s1
cslice = [slice(startind[k, 0], endind[k, 0]) for k in range(len(endind))]
# Compute the FFTs of the cube and norm cube, splitting them on multiple
# threads if needed
with Parallel(n_jobs=nthreads, backend='threading') as parallel:
cube_fft = _convolve_spectral(
parallel, nthreads, cube_fsf, fshape, func=fft.rfftn
)
norm_fft = _convolve_spectral(
parallel, nthreads, norm_fsf, fshape, func=fft.rfftn
)
cube_fsf = norm_fsf = res = None
cube_fft = cube_fft.reshape(cube_fft.shape[0], -1)
norm_fft = norm_fft.reshape(norm_fft.shape[0], -1)
profile = np.empty((Nz, Ny * Nx), dtype=np.uint8)
correl = np.full((Nz, Ny * Nx), -np.inf)
correl_min = np.full((Nz, Ny * Nx), np.inf)
# for each profile, compute convolve the convolved cube and norm cube.
# Then for each pixel we keep the maximum correlation (and min correlation)
# and the profile number with the max correl.
with Parallel(n_jobs=nthreads, backend='threading') as parallel:
for k in progressbar(range(len(prof_cut))):
cube_profile = _convolve_profile(
prof_cut[k], cube_fft, norm_fft, fshape, nthreads, parallel
)
cube_profile = cube_profile[cslice[k]]
profile[cube_profile > correl] = k
np.maximum(correl, cube_profile, out=correl)
np.minimum(correl_min, cube_profile, out=correl_min)
profile = profile.reshape(Nz, Ny, Nx)
correl = correl.reshape(Nz, Ny, Nx)
correl_min = correl_min.reshape(Nz, Ny, Nx)
return correl, profile, correl_min
def compute_local_max(correl, correl_min, mask, size=3):
"""Compute the local maxima of the maximum correlation and local maxima
of minus the minimum correlation distribution.
Parameters
----------
correl : array
T_GLR values with edges excluded (from max correlation)
correl_min : array
T_GLR values with edges excluded (from min correlation)
mask : array
mask array (true if pixel is masked)
size : int
Number of connected components
Returns
-------
array, array
local maxima of correlations and local maxima of -correlations
"""
# local maxima of maximum correlation
if np.isscalar(size):
size = (size, size, size)
local_max = maximum_filter(correl, size=size)
local_mask = correl == local_max
local_mask[mask] = False
local_max *= local_mask
# local maxima of minus minimum correlation
minus_correl_min = -correl_min
local_min = maximum_filter(minus_correl_min, size=size)
local_mask = minus_correl_min == local_min
local_mask[mask] = False
local_min *= local_mask
return local_max, local_min
def itersrc(cat, tol_spat, tol_spec, n, id_cu):
"""Recursive function to perform the spatial merging.
    If neighbors are spatially close to a line, they are merged. The
    neighbors of the seed are then analysed: if they are close enough to the
    current line (a neighbor of the original seed) they are merged, but only
    if their frequency is also close enough (surrogate); if the frequency is
    too different, the match is rejected.
    If two lines (or a group of lines and a new line) are close enough,
    without a large spectral gap, but are not in the same label (e.g. a group
    in the background close to a source inside a source label), the resulting
    ID is the ID of the source label and not of the background.
Parameters
----------
    cat : catalog of the previously merged lines
xout,yout,zout,aout,iout:
the 3D position, area label and ID for all analysed lines
tol_spat : int
spatial tolerance for the spatial merging
tol_spec : int
spectral tolerance for the spectral merging
n : int
index of the original seed
id_cu :
ID of the original seed
"""
# compute spatial distance to other points.
# - id_cu is the detection processed at the start (from
# spatiospectral_merging), while n is the detection currently processed
# in the recursive call
matched = cat['matched']
spatdist = np.hypot(cat['x0'][n] - cat['x0'], cat['y0'][n] - cat['y0'])
spatdist[matched] = np.inf
cu_spat = np.hypot(cat['x0'][id_cu] - cat['x0'], cat['y0'][id_cu] - cat['y0'])
cu_spat[matched] = np.inf
ind = np.where(spatdist < tol_spat)[0]
if len(ind) == 0:
return
for indn in ind:
if not matched[indn]:
if cu_spat[indn] > tol_spat * np.sqrt(2):
# check spectral content
dz = np.sqrt((cat['z0'][indn] - cat['z0'][id_cu]) ** 2)
if dz < tol_spec:
cat[indn]['matched'] = True
cat[indn]['imatch'] = id_cu
itersrc(cat, tol_spat, tol_spec, indn, id_cu)
else:
cat[indn]['matched'] = True
cat[indn]['imatch'] = id_cu
itersrc(cat, tol_spat, tol_spec, indn, id_cu)
def spatiospectral_merging(tbl, tol_spat, tol_spec):
"""Perform the spatial and spatio spectral merging.
    The spectral merging gives the same ID to several groups of lines (from
    the spatial merging) if they share at least one line frequency.
Parameters
----------
tbl : `astropy.table.Table`
ID,x,y,z,...
tol_spat : int
spatial tolerance for the spatial merging
tol_spec : int
spectral tolerance for the spectral merging
Returns
-------
`astropy.table.Table`
Table: id, x, y, z, area, imatch, imatch2
imatch is the ID after spatial and spatio spectral merging.
imatch2 is the ID after spatial merging only.
"""
Nz = len(tbl)
tbl['_id'] = np.arange(Nz) # id of the detection
tbl['matched'] = | np.zeros(Nz, dtype=bool) | numpy.zeros |
# -*- coding: utf-8 -*-
"""Tools to plot tensors."""
import numpy as np
def sample_sphere(N=100):
"""Define all spherical angles."""
phi = np.linspace(0, 2 * np.pi, N)
theta = np.linspace(0, np.pi, N)
# Cartesian coordinates that correspond to the spherical angles:
r = np.array(
[
np.outer(np.cos(phi), np.sin(theta)),
np.outer(np.sin(phi), np.sin(theta)),
np.outer(np.ones_like(phi), np.cos(theta)),
]
)
return r
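# Illustrative check (not part of the original module): every sampled point lies on the
# unit sphere, i.e. x^2 + y^2 + z^2 == 1 for all angles.
def _sample_sphere_check():
    r = sample_sphere(N=50)
    assert np.allclose(np.sum(r ** 2, axis=0), 1.0)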
def sample_circle(plane="xy", N=100):
"""Define all angles in a certain plane."""
phi = np.linspace(0, 2 * np.pi, N)
if plane == "xy":
return np.array([np.cos(phi), np.sin(phi), np.ones_like(phi)])
elif plane == "xz":
return np.array([np.cos(phi), np.ones_like(phi), np.sin(phi)])
elif plane == "yz":
return np.array([np.ones_like(phi), np.cos(phi), np.sin(phi)])
def plot_orbit2(ax, plotargs, *tensors):
"""Orbital plot of a second order."""
r = sample_sphere()
for a in tensors:
assert np.shape(a) == (3, 3)
x, y, z = np.einsum("ij,jkl->ikl", a, r)
ax.plot_surface(x, y, z, **plotargs)
m = max(
np.max(x),
np.max(y),
np.max(z),
abs(np.min(x)),
abs(np.min(y)),
abs(np.min(z)),
)
ax.set_xlim(-m, m)
ax.set_ylim(-m, m)
ax.set_zlim(-m, m)
ax.set_xlabel("$x$")
ax.set_ylabel("$y$")
ax.set_zlabel("$z$")
ax.set_title("Second Order Orientation Tensor")
# ax.set_aspect(1.0)
def plot_orbit4(ax, plotargs, *tensors):
"""Orbital plot of a second order."""
r = sample_sphere()
for A in tensors:
assert np.shape(A) == (3, 3, 3, 3)
x, y, z = np.einsum("ijkl,jmn,kmn,lmn->imn", A, r, r, r)
ax.plot_surface(x, y, z, **plotargs)
m = max(
np.max(x),
np.max(y),
np.max(z),
abs(np.min(x)),
abs(np.min(y)),
abs(np.min(z)),
)
ax.set_xlim(-m, m)
ax.set_ylim(-m, m)
ax.set_zlim(-m, m)
ax.set_xlabel("$x$")
ax.set_ylabel("$y$")
ax.set_zlabel("$z$")
ax.set_title("Fourth Order Orientation Tensor")
# ax.set_aspect(1.0)
def plot_projection2(ax, plane, *tensors):
"""Orbital plot of a second order."""
R = sample_circle(plane)
for a in tensors:
assert np.shape(a) == (3, 3)
        x, y, z = np.einsum("ij,jk->ik", a, R)
# coding: utf-8
# In[1]:
get_ipython().run_cell_magic('javascript', '', '<!-- Ignore this block -->\nIPython.OutputArea.prototype._should_scroll = function(lines) {\n return false;\n}')
# # Data preprocessing
# 1. convert any non-numeric values to numeric values.
# 2. If required drop out the rows with missing values or NA. In next lectures we will handle sparse data, which will allow us to use records with missing values.
# 3. Split the data into a train(80%) and test(20%) .
# In[2]:
get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'retina'")
from __future__ import division
import pandas as pd
import numpy as np
from math import sqrt, isnan
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
"""Set global rcParams for pyplotlib"""
plt.rcParams["figure.figsize"] = "18,25"
# ### TextEncoder
#
# Here the data is a mix of numbers and text. Text values cannot be used directly and should be converted to numeric data.<br>
# For this I have created a function textEncoder which accepts pandas series. textEncoder returns a lookup dictionary for recreating the numeric value for each text value.
# In[3]:
def textEncoder(*textVectors):
lookUpDictionary = {}
lookupValue = 1
for textVector in textVectors:
for key in textVector.unique():
if key not in lookUpDictionary:
lookUpDictionary[key] = lookupValue
lookupValue +=1
return lookUpDictionary
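# Added example (illustrative city codes, not from the original notebook): a single
# lookup table is shared across both series, so repeated values keep the same code.
print(textEncoder(pd.Series(['LAX', 'JFK']), pd.Series(['JFK', 'ORD'])))  # -> {'LAX': 1, 'JFK': 2, 'ORD': 3}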
# ### SplitDataSet Procedure
# This method splits the dataset into a train set and a test set based upon the trainSetSize value. For splitting the dataset, I am using pandas.sample. This gives me the train set. For the test set I am taking the complement of the train set, by dropping the indices present in the training set.
# In[4]:
"""Splits the provided pandas dataframe into training and test dataset"""
def splitDataSet(inputDataframe, trainSetSize):
trainSet = inputDataframe.sample(frac=trainSetSize)
testSet = inputDataframe.drop(trainSet.index)
return trainSet,testSet
# ### generatePearsonCoefficient Procedure
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/f76ccfa7c2ed7f5b085115086107bbe25d329cec">
# For sample:-
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/bd1ccc2979b0fd1c1aec96e386f686ae874f9ec0">
# For selecting some features and for dropping others I am using Pearson's Coefficient. The value of Pearson's coefficient lies between [-1, 1] and tells how two features are related<br>
# <table>
# <tr><td>Strength of Association</td><td>Positive</td><td>Negative</td></tr><tr><td>Small</td><td>.1 to .3 </td><td>-0.1 to -0.3 </td></tr><tr><td>Medium</td><td>.3 to .5 </td><td>-0.3 to -0.5 </td></tr><tr><td>Large</td><td>.5 to 1.0 </td><td>-0.5 to -1.0 </td></tr></table>
#
# In[5]:
"""Generate pearson's coefficient"""
def generatePearsonCoefficient(A, B):
A = A - A.mean()
B = B - B.mean()
return ((A * B).sum())/(sqrt((A * A).sum()) * sqrt((B * B).sum()))
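# Added example (illustrative values, not from the original notebook): perfectly
# correlated / anti-correlated series give coefficients of +1.0 and -1.0 respectively.
print(generatePearsonCoefficient(pd.Series([1.0, 2.0, 3.0, 4.0]), pd.Series([2.0, 4.0, 6.0, 8.0])))    # -> 1.0
print(generatePearsonCoefficient(pd.Series([1.0, 2.0, 3.0, 4.0]), pd.Series([-2.0, -4.0, -6.0, -8.0])))  # -> -1.0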
# ### predictLinearRegression Procedure
# This method predicts the value of Y given X and the model parameters. It will add a bias column to X.<br>
# The prediction is given by BX<sup>T</sup>
# In[6]:
"""Method to make prediction for yTest"""
def predictionLinearRegression(X, modelParameters):
X = np.insert(X, 0, 1, axis=1)
yPrediction = np.dot(modelParameters, X.T)
return yPrediction
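# Added example (illustrative values, not from the original notebook): with
# modelParameters = [intercept, slope] = [0.5, 2.0], the prediction for x = 1 and x = 2 is [2.5, 4.5].
print(predictionLinearRegression(np.array([[1.0], [2.0]]), np.array([0.5, 2.0])))  # -> [2.5 4.5]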
# ### RMSE procedure
# Will calculate root mean squared error for given Ytrue values and YPrediction.
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/fc187c3557d633423444d4c80a4a50cd6ecc3dd4">
#
# In[7]:
"""Model accuracy estimator RMSE"""
def RMSE(yTrue, yPrediction):
n = yTrue.shape[0]
    return sqrt(np.sum(np.square(yTrue - yPrediction)) / n)
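# Quick sanity check (illustrative values, not from the original notebook): a constant
# error of 2 on four points gives an RMSE of exactly 2.0.
print(RMSE(np.array([1.0, 2.0, 3.0, 4.0]), np.array([3.0, 4.0, 5.0, 6.0])))  # -> 2.0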
# ### armijoStepLengthController procedure
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/ed6d74a5c23f9034a072125eeb316eee5faeed43">
# In[8]:
"""Uses armijo principle to detect next value of alpha.
Alpha values are rewritten. Passed to function just to maintain uniformity
"""
def armijoStepLengthController(fx, alpha, x, y, beta, gradient, delta, maxIterations = 1000):
alpha = 1.0
gradientSquare = np.dot(gradient, gradient)
for i in range(0, maxIterations):
alpha = alpha/2
residual_alpha_gradient = y - np.dot((beta - (alpha * gradient)), x .T)
fx_alpha_gradient = np.dot(residual_alpha_gradient.T, residual_alpha_gradient)
"""Convergence condition for armijo principle"""
if fx_alpha_gradient < fx - (alpha * delta * gradientSquare):
break;
return alpha
# ### boldDriverStepLengthController procedure
# An extension to armijo steplength controller. Retain alpha values.
# In[9]:
def boldDriverStepLengthController(fx, alpha, x, y, beta, gradient, maxIterations = 1000,
alphaMinus = 0.5, alphaPlus = 1.1):
alpha = alpha * alphaPlus
for i in range(0, maxIterations):
alpha = alpha * alphaMinus
residual_alpha_gradient = y - np.dot((beta - (alpha * gradient)), x .T)
fx_alpha_gradient = np.dot(residual_alpha_gradient.T, residual_alpha_gradient)
"""Convergence condition for bold driver method"""
if fx - fx_alpha_gradient > 0:
break;
return alpha
# ### linearRegressionGradientDescent procedure
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/26a319f33db70a80f8c5373f4348a198a202056c">
# Calculate slope at the given point(gradient) and travel in the negative direction with provided step length.<br/>
# In[10]:
"""If no step length controller is provided then values of alpha will be taken as step length.
Else the step length controller will be used. Additional parameters to the controller are
provided by stepLengthControllerParameters"""
def linearRegressionGradientDescent(x, y, xTest, yTest, alpha, beta,
maxIterations=1000, epsilon=1.1e-20,
stepLengthController = None, stepLengthControllerParameters = None):
x = np.insert(x, 0, 1, axis=1)
x = x * 1.0
y = y * 1.0
if stepLengthController != None:
print("Warning using stepLengthController alpha values will be rewritten")
plotX = []
plotY_diff = []
plotY_RMSE = []
y_prediction = np.dot(beta, x.T)
residual = y_prediction - y
f_x = np.dot(residual.T, residual)
rmse = RMSE(yTest, predictionLinearRegression(xTest, beta))
"""For plotting graph"""
plotY_RMSE.append(rmse)
plotY_diff.append(f_x)
plotX.append(0)
for i in range(1, maxIterations):
gradient = np.dot(x.T, residual) * 2
"""Use step length controller if required"""
if stepLengthController != None:
alpha = stepLengthController(fx = f_x, alpha = alpha, x = x, y = y,
beta = beta, gradient = gradient, **stepLengthControllerParameters)
beta = beta - (alpha * gradient)
y_prediction = np.dot(beta, x.T)
residual = y_prediction - y
f_x_new = np.dot(residual.T, residual)
rmse = RMSE(yTest, predictionLinearRegression(xTest, beta))
"""For plotting graph"""
plotY_RMSE.append(rmse)
plotY_diff.append(abs(f_x_new - f_x))
plotX.append(i)
if abs(f_x - f_x_new) < epsilon:
print("Converged in " + str(i) + " iterations")
return beta, plotX, plotY_diff, plotY_RMSE, f_x, rmse
f_x = f_x_new
print("Warning algorithm failed to converge in " + str(maxIterations) + " interations")
return beta, plotX, plotY_diff, plotY_RMSE, f_x, rmse
# # Gradient descent for airlines fare data
# ### Load the airlines dataset
# In[11]:
""" File path change accordingly"""
directoryPath = "data"
airFareData = pd.read_csv(directoryPath+"/airq402.dat", sep='\s+',header = None)
airFareData.head(10)
"""Adding header"""
airFareData.columns = ["city1", "city2", "avgFare", "distance", "avgWeeklyPassengers",
"marketLeadingAirline", "marketShareLA", "averageFare", "lowPriceAirline",
"marketShareLPA", "price"]
airFareData.head()
# ### Using textEncoder to convert text data to numeric data
# In[12]:
"""Using lambda functions to replace text values based upon lockup dictionary"""
cityLookupDictionary = textEncoder(airFareData.city1, airFareData.city2)
airFareData['city1'] = airFareData.city1.apply(lambda cityName:
cityLookupDictionary[cityName])
airFareData['city2'] = airFareData.city2.apply(lambda cityName:
cityLookupDictionary[cityName])
airLineLookupDictionary = textEncoder(airFareData.lowPriceAirline, airFareData.marketLeadingAirline)
airFareData['lowPriceAirline'] = airFareData.lowPriceAirline.apply(lambda cityName:
airLineLookupDictionary[cityName])
airFareData['marketLeadingAirline'] = airFareData.marketLeadingAirline.apply(lambda cityName:
airLineLookupDictionary[cityName])
# ### Check and remove missing data
# In[13]:
airFareData.dropna(inplace = True)
airFareData.head()
# ### Check for corelation between different X and Y
# In[14]:
for column in airFareData:
if column != "price":
print("The corelation between " + column +" vs price is " +
str(generatePearsonCoefficient(airFareData[column], airFareData['price'])))
# ### Visualizing the data
# In[15]:
plt.close()
figure, ((ax1, ax2), (ax3, ax4), (ax5, ax6), (ax7, ax8), (ax9, ax10)) = plt.subplots(5,2,sharey='none')
ax1.plot(airFareData.city1, airFareData.price, "ro")
ax1.grid()
ax1.set_title("city1 vs price")
ax1.set_xlabel("city1")
ax1.set_ylabel("price")
ax2.plot(airFareData.city2, airFareData.price, "ro")
ax2.grid()
ax2.set_title("city2 vs price")
ax2.set_xlabel("city2")
ax2.set_ylabel("price")
ax3.plot(airFareData.avgFare, airFareData.price, "ro")
ax3.grid()
ax3.set_title("avgFare vs price")
ax3.set_xlabel("avgFare")
ax3.set_ylabel("price")
ax4.plot(airFareData.distance, airFareData.price, "ro")
ax4.grid()
ax4.set_title("distance vs price")
ax4.set_xlabel("distance")
ax4.set_ylabel("price")
ax5.plot(airFareData.avgWeeklyPassengers, airFareData.price, "ro")
ax5.grid()
ax5.set_title("avgWeeklyPassengers vs price")
ax5.set_xlabel("avgWeeklyPassengers")
ax5.set_ylabel("price")
ax6.plot(airFareData.marketLeadingAirline, airFareData.price, "ro")
ax6.grid()
ax6.set_title("marketLeadingAirline vs price")
ax6.set_xlabel("marketLeadingAirline")
ax6.set_ylabel("price")
ax7.plot(airFareData.marketShareLA, airFareData.price, "ro")
ax7.grid()
ax7.set_title("marketShareLA vs price")
ax7.set_xlabel("marketShareLA")
ax7.set_ylabel("price")
ax8.plot(airFareData.averageFare, airFareData.price, "ro")
ax8.grid()
ax8.set_title("averageFare vs price")
ax8.set_xlabel("averageFare")
ax8.set_ylabel("price")
ax9.plot(airFareData.lowPriceAirline, airFareData.price, "ro")
ax9.grid()
ax9.set_title("lowPriceAirline vs price")
ax9.set_xlabel("lowPriceAirline")
ax9.set_ylabel("price")
ax10.plot(airFareData.marketShareLPA, airFareData.price, "ro")
ax10.grid()
ax10.set_title("marketShareLPA vs price")
ax10.set_xlabel("marketShareLPA")
ax10.set_ylabel("price")
plt.show()
# By looking at Pearson's coefficient we can drop city1, city2, marketLeadingAirline, and lowPriceAirline as they do not have any meaningful correlation with price.
# ### Selecting the required features and splitting the dataset using splitDataSetProcedure
# In[16]:
airFareData = airFareData[['avgFare', 'distance', 'avgWeeklyPassengers', 'marketShareLA',
'averageFare', 'marketShareLPA', 'price']]
airFareData.head()
# In[17]:
trainSet, testSet = splitDataSet(airFareData, 0.8)
print(trainSet.shape)
print(testSet.shape)
# In[18]:
trainSet.head()
# ### Running gradient descent with alpha parameter grid serach
# In[19]:
"""Setting beta constant as future comparasion will be easy"""
np.random.seed(8)
inputBeta = np.random.random_sample(7)
import numpy as np
import matplotlib.pyplot as plt
from os import makedirs
from os.path import isfile, exists
from scipy.constants import mu_0
# from numba import njit
def calcDipolMomentAnalytical(remanence, volume):
""" Calculating the magnetic moment from the remanence in T and the volume in m^3"""
m = remanence * volume / mu_0 # [A * m^2]
return m
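# Usage sketch (illustrative numbers, not from the original file): a 1 cm^3 magnet with
# a remanence of 1.3 T carries a dipole moment of roughly 1.03 A*m^2.
def _dipole_moment_example():
    return calcDipolMomentAnalytical(remanence=1.3, volume=1e-6)  # ~1.03 [A * m^2]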
def plotSimple(data, FOV, fig, ax, cbar=True, **args):
""" Generate simple colorcoded plot of 2D grid data with contour. Returns axes object."""
im = ax.imshow(data, extent=FOV, origin="lower", **args)
cs = ax.contour(data, colors="k", extent=FOV, origin="lower", linestyles="dotted")
class nf(float):
def __repr__(self):
s = f"{self:.1f}"
return f"{self:.0f}" if s[-1] == "0" else s
cs.levels = [nf(val) for val in cs.levels]
if plt.rcParams["text.usetex"]:
fmt = r"%r"
else:
fmt = "%r"
ax.clabel(cs, cs.levels, inline=True, fmt=fmt, fontsize=10)
if cbar == True:
fig.colorbar(im, ax=ax)
return im
def centerCut(field, axis):
"""return a slice of the data at the center for the specified axis"""
dims = np.shape(field)
return np.take(field, indices=int(dims[axis] / 2), axis=axis)
def isHarmonic(field, sphericalMask, shellMask):
"""Checks if the extrema of the field are in the shell."""
fullField = np.multiply(field, sphericalMask) # [T]
    reducedField = np.multiply(field, shellMask)
'''
Based on https://www.mattkeeter.com/projects/contours/
and http://www.iquilezles.org/
'''
import numpy as np
from util import *
import matplotlib.pyplot as plt
import matplotlib.patches as patches
fig1 = plt.figure()
ax1 = fig1.add_subplot(111, aspect='equal')
ax1.set_xlim([0, 1])
ax1.set_ylim([0, 1])
class ImplicitObject:
def __init__(self, implicit_lambda_function):
self.implicit_lambda_function = implicit_lambda_function
def eval_point(self, two_d_point):
assert two_d_point.shape == (2, 1) # not allow vectorize yet
value = self.implicit_lambda_function(two_d_point[0][0], two_d_point[1][0])
return value;
def is_point_inside(self, two_d_point):
assert two_d_point.shape == (2, 1), "two_d_point format incorrect, {}".format(two_d_point)
value = self.eval_point(two_d_point)
if value <= 0:
return True
else:
return False
def union(self, ImplicitObjectInstance):
return ImplicitObject(lambda x, y: min(
self.eval_point(np.array([[x], [y]])),
ImplicitObjectInstance.eval_point(np.array([[x], [y]]))
))
def intersect(self, ImplicitObjectInstance):
return ImplicitObject(lambda x, y: max(
self.eval_point(np.array([[x], [y]])),
ImplicitObjectInstance.eval_point(np.array([[x], [y]]))
))
def negate(self):
return ImplicitObject(lambda x, y: -1 * self.eval_point(np.array([[x], [y]])))
def substraction(self, ImplicitObjectInstance):
# subtract ImplicitObjectInstance from self
return self.intersect(ImplicitObjectInstance.negate())
# distance deformations
# http://www.iquilezles.org/www/articles/smin/smin.htm
# exponential smooth min (k = 32);
# float smin( float a, float b, float k )
# {
# float res = exp( -k*a ) + exp( -k*b );
# return -log( res )/k;
# }
# http://www.iquilezles.org/www/articles/smin/smin.htm
# You must be careful when using distance transformation functions,
# as the field created might not be a real distance function anymore.
# You will probably need to decrease your step size,
# if you are using a raymarcher to sample this.
# The displacement example below is using sin(20*p.x)*sin(20*p.y)*sin(20*p.z) as displacement pattern,
# but you can of course use anything you might imagine.
# As for smin() function in opBlend(), please read the smooth minimum article in this same site.
def exponential_smooth_union(self, ImplicitObjectInstance):
def smin(a, b, smooth_parameter = 32):
res = np.exp( -smooth_parameter*a ) + np.exp( -smooth_parameter*b );
return -np.log(res)/smooth_parameter
return ImplicitObject(lambda x, y: smin(
self.eval_point(np.array([[x], [y]])),
ImplicitObjectInstance.eval_point(np.array([[x], [y]]))
))
# // polynomial smooth min (k = 0.1);
# float smin( float a, float b, float k )
# {
# float h = clamp( 0.5+0.5*(b-a)/k, 0.0, 1.0 );
# return mix( b, a, h ) - k*h*(1.0-h);
# }
def polynomial_smooth_union(self, ImplicitObjectInstance):
def smin(a, b, smooth_parameter = 0.1):
h = clamp(0.5+0.5*(b-a)/smooth_parameter, 0.0, 1.0 )
return mix( b, a, h ) - smooth_parameter*h*(1.0-h);
return ImplicitObject(lambda x, y: smin(
self.eval_point(np.array([[x], [y]])),
ImplicitObjectInstance.eval_point(np.array([[x], [y]]))
))
# cannot make it work
# // power smooth min (k = 8);
# float smin( float a, float b, float k )
# {
# a = pow( a, k ); b = pow( b, k );
# return pow( (a*b)/(a+b), 1.0/k );
# }
# def power_smooth_union(self, ImplicitObjectInstance):
# def smin(a, b, smooth_parameter=3):
# print("---------------------")
# print(type(a))
# print(type(b))
# a = pow( a, smooth_parameter )
# b = pow( b, smooth_parameter )
# return np.log(a +)
# # print(pow( (a*b)/(a+b), 1.0/smooth_parameter ))
# # return pow( (a*b)/(a+b), 1.0/smooth_parameter )
# return ImplicitObject(lambda x, y: smin(
# self.eval_point(np.array([[x], [y]])),
# ImplicitObjectInstance.eval_point(np.array([[x], [y]]))
# ))
# distance deformations
# float opDisplace( vec3 p )
# {
# float d1 = primitive(p);
# float d2 = displacement(p);
# return d1+d2;
# }
def displace(self, frequency, scale):
def displacement(x, y, frequency, scale):
return self.eval_point(np.array([[x], [y]])) + (np.sin(frequency*x)*np.sin(frequency*y))/scale
return ImplicitObject(lambda x, y: displacement(x, y, frequency, scale))
# domain deformations
def derivative_at_point(self, two_d_point, epsilon = 0.001):
assert two_d_point.shape == (2, 1), 'wrong data two_d_point {}'.format(two_d_point)
x = two_d_point[0][0]
y = two_d_point[1][0]
dx = self.eval_point(np.array([[x + epsilon], [y]])) - self.eval_point(np.array([[x - epsilon], [y]]))
dy = self.eval_point(np.array([[x], [y + epsilon]])) - self.eval_point(np.array([[x], [y - epsilon]]))
length = np.sqrt(dx**2 + dy**2)
if length <= epsilon:
print('dodgy: probably error')
print(two_d_point)
print(dx)
print(dy)
print(self.eval_point(np.array([[x + epsilon], [y]])))
print(self.eval_point(np.array([[x - epsilon], [y]])))
print(self.eval_point(np.array([[x], [y + epsilon]])))
print(self.eval_point(np.array([[x], [y - epsilon]])) )
return np.array([[0],[0]])
else:
assert length >= epsilon, \
'length {} is less than epsilon {}; check dx {} dy {} two_d_point {}'.format(
length, epsilon, dx, dy, two_d_point
)
return np.array([[dx / length],[dy / length]])
def visualize_bitmap(self, xmin, xmax, ymin, ymax, num_points=200):
self.visualize(xmin, xmax, ymin, ymax, 'bitmap', num_points)
def visualize_distance_field(self, xmin, xmax, ymin, ymax, num_points=200):
self.visualize(xmin, xmax, ymin, ymax, 'distance_field', num_points)
def visualize(self, xmin, xmax, ymin, ymax, visualize_type = 'bitmap', num_points=200):
assert xmin!=xmax, "incorrect usage xmin == xmax"
assert ymin!=ymax, "incorrect usage ymin == ymax"
assert visualize_type in ['bitmap', 'distance_field'], \
'visualize_type should be either bitmap or distance_field, but not {}'.format(visualize_type)
visualize_matrix = np.empty((num_points, num_points));
import matplotlib.pyplot as plt
x_linspace = np.linspace(xmin, xmax, num_points)
y_linspace = np.linspace(ymin, ymax, num_points)
for x_counter in range(len(x_linspace)):
for y_counter in range(len(y_linspace)):
x = x_linspace[x_counter]
y = y_linspace[y_counter]
if visualize_type == 'bitmap':
visualize_matrix[x_counter][y_counter] = \
not self.is_point_inside(np.array([[x],[y]])) # for mapping the color of distance_field
elif visualize_type == 'distance_field':
visualize_matrix[x_counter][y_counter] = self.eval_point(np.array([[x],[y]]))
else:
raise ValueError('Unknown visualize_type -> {}'.format(visualize_type))
visualize_matrix = np.rot90(visualize_matrix)
assert(visualize_matrix.shape == (num_points, num_points))
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(visualize_matrix, cmap=plt.cm.gray)
plt.show() # TODO: label on x, y axis
# In[12]:
class ImplicitCircle(ImplicitObject):
def __init__(self, x0, y0, r0):
self.implicit_lambda_function = lambda x, y: np.sqrt((x - x0)**2 + (y - y0)**2) - r0
# In[15]:
class Left(ImplicitObject):
def __init__(self, x0):
self.implicit_lambda_function = lambda x, _: x - x0
class Right(ImplicitObject):
def __init__(self, x0):
self.implicit_lambda_function = lambda x, _: x0 - x
class Lower(ImplicitObject):
def __init__(self, y0):
self.implicit_lambda_function = lambda _, y: y - y0
class Upper(ImplicitObject):
def __init__(self, y0):
self.implicit_lambda_function = lambda _, y: y0 - y
# In[17]:
class ImplicitRectangle(ImplicitObject):
def __init__(self, xmin, xmax, ymin, ymax):
assert xmin!=xmax, "incorrect usage xmin == xmax"
assert ymin!=ymax, "incorrect usage ymin == ymax"
# right xmin ∩ left xmax ∩ upper ymin ∩ lower ymax
self.implicit_lambda_function = (Right(xmin).intersect(Left(xmax)).intersect(Upper(ymin)).intersect(Lower(ymax))).implicit_lambda_function
class ImplicitFailureStar(ImplicitObject):
# http://www.iquilezles.org/live/index.htm
def __init__(self, inner_radius, outer_radius, frequency, x0=0, y0=0):
self. implicit_lambda_function = \
lambda x, y:inner_radius + outer_radius*np.cos(np.arctan2(y,x)*frequency)
class ImplicitStar(ImplicitObject):
# http://www.iquilezles.org/live/index.htm
def __init__(self, inner_radius, outer_radius, frequency, x0=0, y0=0):
self. implicit_lambda_function = \
lambda x, y: ImplicitStar.smoothstep(
inner_radius + outer_radius*np.cos(np.arctan2(y - y0, x - x0)*frequency),
inner_radius + outer_radius*np.cos(np.arctan2(y - y0, x - x0)*frequency) + 0.01,
np.sqrt((x - x0)**2 + (y - y0)**2)
)
@staticmethod
def smoothstep(edge0, edge1, x):
# https://en.wikipedia.org/wiki/Smoothstep
# float smoothstep(float edge0, float edge1, float x)
# {
# // Scale, bias and saturate x to 0..1 range
# x = clamp((x - edge0)/(edge1 - edge0), 0.0, 1.0);
# // Evaluate polynomial
# return x*x*(3 - 2*x);
# }
x = clamp((x - edge0)/(edge1 -edge0), 0.0, 1.0)
return x*x*(3-2*x)
class ImplicitTree(ImplicitStar):
# http://www.iquilezles.org/live/index.htm
def __init__(self, inner_radius=0.2, outer_radius=0.1, frequency=10, x0=0.4, y0=0.5):
self.inner_radius = inner_radius
self.outer_radius = outer_radius
self.frequency = frequency
self.x0 = x0
self.y0 = y0
self. implicit_lambda_function = self.implicit_lambda_function
def implicit_lambda_function(self, x, y):
local_x = x - self.x0
local_y = y - self.y0
r = self.inner_radius + self.outer_radius*np.cos(np.arctan2(local_y, local_x)*self.frequency + 20*local_x + 1)
result = ImplicitStar.smoothstep(r, r + 0.01,np.sqrt(local_x**2 + local_y**2))
r = 0.015
r += 0.002 * np.cos(120.0 * local_y)
r += np.exp(-20.0 * y)
result *= 1.0 - (1.0 - ImplicitStar.smoothstep(r, r + 1, abs(local_x+ 0.2*np.sin(2.0 *local_y)))) * \
(1.0 - ImplicitStar.smoothstep(0.0, 0.1, local_y))
return result
# In[19]:
# h = (rectangle (0.1, 0.1) (0.25, 0.9) ∪
# rectangle (0.1, 0.1) (0.6, 0.35) ∪
# circle (0.35, 0.35) 0.25) ∩ inv
# (circle (0.35, 0.35) 0.1 ∪
# rectangle (0.25, 0.1) (0.45, 0.35))
# i = rectangle (0.75, 0.1) (0.9, 0.55) ∪
# circle (0.825, 0.75) 0.1
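# A hedged sketch (not part of the original file) of the "h" and "i" glyphs described above,
# built from the ImplicitRectangle/ImplicitCircle classes defined earlier; "inv" maps to negate().
h_shape = (ImplicitRectangle(0.1, 0.25, 0.1, 0.9)
           .union(ImplicitRectangle(0.1, 0.6, 0.1, 0.35))
           .union(ImplicitCircle(0.35, 0.35, 0.25))
           .intersect(ImplicitCircle(0.35, 0.35, 0.1)
                      .union(ImplicitRectangle(0.25, 0.45, 0.1, 0.35))
                      .negate()))
i_shape = ImplicitRectangle(0.75, 0.9, 0.1, 0.55).union(ImplicitCircle(0.825, 0.75, 0.1))
# hi_shape = h_shape.union(i_shape)
# hi_shape.visualize_bitmap(0, 1, 0, 1)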
class Tree:
def __init__(self,
tree_or_cell_0, tree_or_cell_1, tree_or_cell_2, tree_or_cell_3):
assert (isinstance(tree_or_cell_0, Tree) | isinstance(tree_or_cell_0, Cell)) & (isinstance(tree_or_cell_1, Tree) | isinstance(tree_or_cell_1, Cell)) & (isinstance(tree_or_cell_2, Tree) | isinstance(tree_or_cell_2, Cell)) & (isinstance(tree_or_cell_3, Tree) | isinstance(tree_or_cell_3, Cell))
self.tree_or_cell_0 = tree_or_cell_0
self.tree_or_cell_1 = tree_or_cell_1
self.tree_or_cell_2 = tree_or_cell_2
self.tree_or_cell_3 = tree_or_cell_3
class Cell:
def __init__(self, xmin, xmax, ymin, ymax, cell_type):
assert xmin!=xmax, "incorrect usage xmin == xmax"
assert ymin!=ymax, "incorrect usage ymin == ymax"
assert cell_type in ['Empty', 'Full', 'Leaf', 'Root', 'NotInitialized']
self.xmin = xmin
self.xmax = xmax
self.ymin = ymin
self.ymax = ymax
self.xmid = (xmin + xmax)/2
self.ymid = (ymin + ymax)/2
self.cell_type = cell_type
'''
2 -- 3
| |
0 -- 1
'''
self.point_0 = np.array([[xmin],[ymin]])
self.point_1 = np.array([[xmax],[ymin]])
self.point_2 = np.array([[xmin],[ymax]])
self.point_3 = np.array([[xmax],[ymax]])
if self.is_Root():
self.to_Root()
def xmin_xmax_ymin_ymax(self):
return [self.xmin, self.xmax, self.ymin, self.ymax]
def to_Root(self):
assert self.cell_type != 'Root'
self.cell_type = 'Root'
self.cell0 = Cell(self.xmin, self.xmid, self.ymin, self.ymid, 'NotInitialized')
self.cell1 = Cell(self.xmid, self.xmax, self.ymin, self.ymid, 'NotInitialized')
self.cell2 = Cell(self.xmin, self.xmid, self.ymid, self.ymax, 'NotInitialized')
self.cell3 = Cell(self.xmid, self.xmax, self.ymid, self.ymax, 'NotInitialized')
def to_Empty(self):
assert self.is_Root()
self.cell_type = 'Empty'
del self.cell0
del self.cell1
del self.cell2
del self.cell3
def to_Full(self):
assert self.is_Root()
self.cell_type = 'Full'
del self.cell0
del self.cell1
del self.cell2
del self.cell3
def to_Leaf(self):
assert self.is_Root()
self.cell_type = 'Leaf'
del self.cell0
del self.cell1
del self.cell2
del self.cell3
def check_not_initialized_exists(self):
# raise Error if NotInitialized exists
if self.is_Root():
self.cell0.check_not_initialized_exists()
self.cell1.check_not_initialized_exists()
self.cell2.check_not_initialized_exists()
self.cell3.check_not_initialized_exists()
else:
if self.is_NotInitialized():
raise ValueError('cell should not be as cell_type {}'.format(self.cell_type))
else:
pass
def check_Leaf_exists(self):
# raise Error if Leaf exists
if self.is_Root():
self.cell0.check_Leaf_exists()
self.cell1.check_Leaf_exists()
self.cell2.check_Leaf_exists()
self.cell3.check_Leaf_exists()
else:
if self.is_Leaf():
raise ValueError('cell should not be as cell_type {}'.format(self.cell_type))
else:
pass
def eval_type(self, implicit_object_instance):
assert self.is_NotInitialized(), 'this function is only called when the cell type is not initialized'
is_point0_inside = implicit_object_instance.is_point_inside(self.point_0)
is_point1_inside = implicit_object_instance.is_point_inside(self.point_1)
is_point2_inside = implicit_object_instance.is_point_inside(self.point_2)
is_point3_inside = implicit_object_instance.is_point_inside(self.point_3)
if ((is_point0_inside is True) &
(is_point1_inside is True) &
(is_point2_inside is True) &
(is_point3_inside is True) ):
self.cell_type = 'Full'
elif ( (is_point0_inside is False) &
(is_point1_inside is False) &
(is_point2_inside is False) &
(is_point3_inside is False) ):
self.cell_type = 'Empty'
else:
# print('to Leaf')
self.cell_type = 'Leaf'
def add_marching_cude_points(self, edge_vectice_0, edge_vectice_1, mc_connect_indicator):
assert self.is_Leaf()
# self.edge_vectice_0 = edge_vectice_0
# self.edge_vectice_1 = edge_vectice_1
try:
self.edge_vectices.append((edge_vectice_0, edge_vectice_1, mc_connect_indicator))
except AttributeError:
self.edge_vectices = [(edge_vectice_0, edge_vectice_1, mc_connect_indicator)]
def get_first_indicator(self):
assert self.is_Leaf()
assert len(self.edge_vectices) >= 1
return self.edge_vectices[0][2]
def get_second_indicator(self):
assert self.is_Leaf()
assert len(self.edge_vectices) >= 2
print(self.edge_vectices[1])
print(self.edge_vectices[1][0])
print(self.edge_vectices[1][1])
return self.edge_vectices[1][2]
def get_marching_cude_points(self):
# remove the edges =
raise NotImplementedError
def debug_print(self, counter):
counter += 1
if self.cell_type in ['Full', 'Empty', 'Leaf', 'NotInitialized']:
# print(counter)
pass
else:
self.cell0.debug_print(counter)
self.cell1.debug_print(counter)
self.cell2.debug_print(counter)
self.cell3.debug_print(counter)
def visualize(self, ax1):
if self.cell_type in ['Empty', 'Full', 'Leaf', 'NotInitialized']:
if self.is_Empty():
color = 'grey'
elif self.is_Full():
color = 'black'
elif self.is_Leaf():
color = 'green'
elif self.is_NotInitialized():
color = 'red'
else:
raise ValueError('cell should not be as cell_type {}'.format(self.cell_type))
ax1.add_patch(
patches.Rectangle(
(self.xmin, self.ymin), # (x,y)
self.xmax - self.xmin, # width
self.ymax - self.ymin, # height
edgecolor = color,
facecolor = 'white'
)
)
elif self.is_Root():
self.cell0.visualize(ax1)
self.cell1.visualize(ax1)
self.cell2.visualize(ax1)
self.cell3.visualize(ax1)
else:
raise ValueError('cell should not be as cell_type {}'.format(self.cell_type))
def print_type(self):
if not self.is_Root():
print(self.cell_type)
else:
print('Root')
self.cell0.print_type()
self.cell1.print_type()
self.cell2.print_type()
self.cell3.print_type()
def initialise_cell_type(self, implicit_object_instance):
if self.is_Root():
self.cell0.initialise_cell_type(implicit_object_instance)
self.cell1.initialise_cell_type(implicit_object_instance)
self.cell2.initialise_cell_type(implicit_object_instance)
self.cell3.initialise_cell_type(implicit_object_instance)
elif self.is_NotInitialized():
self.eval_type(implicit_object_instance)
else:
raise ValueError('There should not be any other \
cell_type when calling this function -> {}'.format(self.cell_type))
def bisection(self, two_points_contain_vectice, implicit_object_instance, epsilon = 0.0001):
''' not considering the orientation left_or_right '''
assert len(two_points_contain_vectice[0]) == 2, "two_points_contain_vectice[0] wrong format, {}".format(two_points_contain_vectice[0])
assert len(two_points_contain_vectice[1]) == 2, "two_points_contain_vectice[1] wrong format, {}".format(two_points_contain_vectice[1])
assert isinstance(two_points_contain_vectice, np.ndarray), 'two_points_contain_vectice has wrong type {}'.format(type(two_points_contain_vectice))
#two_points_contain_vectice = [[[ 0.125 ]
# [ 0.09375]]
# [[ 0.125 ]
# [ 0.125 ]]]
edge0_x = two_points_contain_vectice[0][0][0]
edge0_y = two_points_contain_vectice[0][1][0]
edge1_x = two_points_contain_vectice[1][0][0]
edge1_y = two_points_contain_vectice[1][1][0]
# print(edge0_x)
# print(edge0_y)
# print(edge1_x)
# print(edge1_y)
is_edge0_inside = implicit_object_instance.is_point_inside(np.array([[edge0_x], [edge0_y]]))
is_edge1_inside = implicit_object_instance.is_point_inside(np.array([[edge1_x], [edge1_y]]))
# TODO: find a assert to make sure two_points_contain_vectice are not the same
assert is_edge0_inside != is_edge1_inside,\
'it cannot be both points {} {}'.format(
is_edge0_inside,
is_edge1_inside
)
edge_xmid = (edge1_x + edge0_x)/2
edge_ymid = (edge1_y + edge0_y)/2
if np.sqrt((edge1_x - edge0_x)**2 + (edge1_y - edge0_y)**2) <= epsilon:
return (edge_xmid, edge_ymid)
is_edge_mid_inside = implicit_object_instance.is_point_inside(np.array([[edge_xmid], [edge_ymid]]))
if is_edge_mid_inside is not is_edge0_inside:
return self.bisection(np.array([[[edge0_x], [edge0_y]],[[edge_xmid], [edge_ymid]]]),
implicit_object_instance,
epsilon = 0.01)
elif is_edge_mid_inside is not is_edge1_inside:
return self.bisection(np.array([[[edge1_x], [edge1_y]],[[edge_xmid], [edge_ymid]]]),
implicit_object_instance,
epsilon = 0.01)
else:
raise ValueError
@staticmethod
def mc_two_one_connect_h(two_vertice_first, two_type_first, two_vertice_second, two_type_second,
one_vertice, one_type):
# connect left to right two to one
assert 'left' in one_type, 'left not in one_type {}'.format(one_type)
if 'left' in one_type[0]:
if two_type_first == ['bottom', 'right']:
return np.array([two_vertice_first, one_vertice])
elif two_type_second == ['bottom', 'right']:
return np.array([two_vertice_second, one_vertice])
else:
raise ValueError('two_type_first {}, two_type_second {}'.format(two_type_first, two_type_second))
elif 'left' in one_type[1]:
if two_type_first == ['right', 'top']:
return np.array([two_vertice_first, one_vertice])
elif two_type_second == ['right', 'top']:
return np.array([two_vertice_second, one_vertice])
else:
raise ValueError('two_type_first {}, two_type_second {}'.format(two_type_first, two_type_second))
else:
raise ValueError
@staticmethod
def mc_one_two_connect_h(one_vertice, one_type,
two_vertice_first, two_type_first, two_vertice_second, two_type_second):
# connect left to right one to two
assert 'right' in one_type, 'right not in one_type {}'.format(one_type)
if 'right' in one_type[0]:
if two_type_first == ['top', 'left']:
return np.array([one_vertice, two_vertice_first])
elif two_type_second == ['top', 'left']:
return np.array([one_vertice, two_vertice_second])
else:
raise ValueError('two_type_first {}, two_type_second {}'.format(two_type_first, two_type_second))
elif 'right' in one_type[1]:
if two_type_first == ['left', 'bottom']:
return np.array([one_vertice, two_vertice_first])
elif two_type_second == ['left', 'bottom']:
return np.array([one_vertice, two_vertice_second])
else:
raise ValueError('two_type_first {}, two_type_second {}'.format(two_type_first, two_type_second))
else:
raise ValueError
@staticmethod
def mc_two_two_connect_h(two_vertice_l_first, two_type_l_first, two_vertice_l_second, two_type_l_second,
two_vertice_r_first, two_type_r_first, two_vertice_r_second, two_type_r_second):
# connect left to right two to two
''' not tested '''
if two_type_l_first == ('top', 'left'):
assert two_type_l_second == ('bottom', 'right')
if two_type_r_first == ('bottom', 'left'):
return np.array([two_vertice_l_second, two_vertice_r_first])
elif two_type_r_second == ('bottom', 'left'):
return np.array([two_vertice_l_second, two_vertice_r_second])
else:
raise ValueError
elif two_type_l_first == ('bottom', 'right'):
assert two_type_l_second == ('top', 'left')
if two_type_r_first == ('bottom', 'left'):
return np.array([two_vertice_l_first, two_vertice_r_first])
elif two_type_r_second == ('bottom', 'left'):
return np.array([two_vertice_l_first, two_vertice_r_second])
else:
raise ValueError
elif two_type_l_first == ('left', 'bottom'):
assert two_type_l_second == ('right', 'top')
if two_type_r_first == ('top', 'left'):
return np.array([two_vertice_l_second, two_vertice_r_first])
elif two_type_r_second == ('top', 'left'):
return np.array([two_vertice_l_second, two_vertice_r_second])
else:
raise ValueError
elif two_type_l_first == ('right', 'top'):
assert two_type_l_second == ('left', 'bottom')
if two_type_r_first == ('top', 'left'):
return np.array([two_vertice_l_first, two_vertice_r_first])
elif two_type_r_second == ('top', 'left'):
return np.array([two_vertice_l_first, two_vertice_r_second])
else:
raise ValueError
else:
raise ValueError
@staticmethod
def mc_two_one_connect_v(two_vertice_first, two_type_first, two_vertice_second, two_type_second,
one_vertice, one_type):
# connect top to bottom two to one
assert 'top' in one_type, 'top not in one_type {}'.format(one_type)
if 'top' in one_type[0]: # TODO: why not == instead of in
if two_type_first == ['left', 'bottom']:
return np.array([two_vertice_first, one_vertice])
elif two_type_second == ['left', 'bottom']:
return np.array([two_vertice_second, one_vertice])
else:
raise ValueError
elif 'top' in one_type[1]:
if two_type_first == ['bottom', 'right']:
return np.array([two_vertice_first, one_vertice])
elif two_type_second == ['bottom', 'right']:
return np.array([two_vertice_second, one_vertice])
else:
raise ValueError
else:
raise ValueError
@staticmethod
def mc_one_two_connect_v(one_vertice, one_type,
two_vertice_first, two_type_first, two_vertice_second, two_type_second):
# connect top to bottom two to one
assert 'bottom' in one_type
if 'bottom' in one_type[0]: # TODO: why not == instead of in
if two_type_first == ['right', 'top']:
return np.array([one_vertice, two_vertice_first])
elif two_type_second == ['right', 'top']:
return np.array([one_vertice, two_vertice_second])
else:
raise ValueError
elif 'bottom' in one_type[1]:
if two_type_first == ['top', 'left']:
return np.array([one_vertice, two_vertice_first])
elif two_type_second == ['top', 'left']:
return np.array([one_vertice, two_vertice_second])
else:
raise ValueError
else:
raise ValueError
@staticmethod
def mc_two_two_connect_v(two_vertice_t_first, two_type_t_first, two_vertice_t_second, two_type_t_second,
two_vertice_b_first, two_type_b_first, two_vertice_b_second, two_type_b_second):
''' not tested '''
if two_type_t_first == ['top', 'left']:
assert two_type_t_second == ['bottom', 'right']
if two_type_b_first == ['right', 'top']:
return np.array([two_vertice_t_second, two_vertice_b_first])
elif two_type_b_second == ['right', 'top']:
return np.array([two_vertice_t_second, two_vertice_b_second])
else:
raise ValueError
elif two_type_t_first == ['bottom', 'right']:
assert two_type_t_second == ['top', 'left']
if two_type_b_first == ['right', 'top']:
return np.array([two_vertice_t_first, two_vertice_b_first])
elif two_type_b_second == ['right', 'top']:
return np.array([two_vertice_t_first, two_vertice_b_second])
else:
raise ValueError
elif two_type_t_first == ['right', 'top']:
assert two_type_t_second == ['left', 'bottom']
if two_type_b_first == ['top', 'left']:
return np.array([two_vertice_t_second, two_vertice_b_first])
elif two_type_b_second == ['top', 'left']:
return np.array([two_vertice_t_second, two_vertice_b_second])
else:
raise ValueError
elif two_type_t_first == ['left', 'bottom']:
assert two_type_t_second == ['right', 'top']
if two_type_b_first == ['top', 'left']:
return np.array([two_vertice_t_first, two_vertice_b_first])
"""
Test for the normalization operation
"""
from datetime import datetime
from unittest import TestCase
import numpy as np
import pandas as pd
import pyproj
import xarray as xr
from jdcal import gcal2jd
from numpy.testing import assert_array_almost_equal
from xcube.core.gridmapping import GridMapping
from xcube.core.new import new_cube
from xcube.core.normalize import DatasetIsNotACubeError
from xcube.core.normalize import adjust_spatial_attrs
from xcube.core.normalize import decode_cube
from xcube.core.normalize import encode_cube
from xcube.core.normalize import normalize_coord_vars
from xcube.core.normalize import normalize_dataset
from xcube.core.normalize import normalize_missing_time
# noinspection PyPep8Naming
def assertDatasetEqual(expected, actual):
# this method is functionally equivalent to
# `assert expected == actual`, but it
# checks each aspect of equality separately for easier debugging
assert expected.equals(actual), (expected, actual)
class DecodeCubeTest(TestCase):
def test_cube_stays_cube(self):
dataset = new_cube(variables=dict(a=1, b=2, c=3))
cube, grid_mapping, rest = decode_cube(dataset)
self.assertIs(dataset, cube)
self.assertIsInstance(grid_mapping, GridMapping)
self.assertTrue(grid_mapping.crs.is_geographic)
self.assertIsInstance(rest, xr.Dataset)
self.assertEqual(set(), set(rest.data_vars))
def test_no_cube_vars_are_dropped(self):
dataset = new_cube(variables=dict(a=1, b=2, c=3))
dataset = dataset.assign(
d=xr.DataArray([8, 9, 10], dims='level'),
crs=xr.DataArray(0, attrs=pyproj.CRS.from_string('CRS84').to_cf()),
)
self.assertEqual({'a', 'b', 'c', 'd', 'crs'}, set(dataset.data_vars))
cube, grid_mapping, rest = decode_cube(dataset)
self.assertIsInstance(cube, xr.Dataset)
self.assertIsInstance(grid_mapping, GridMapping)
self.assertEqual({'a', 'b', 'c'}, set(cube.data_vars))
self.assertEqual(pyproj.CRS.from_string('CRS84'), grid_mapping.crs)
self.assertIsInstance(rest, xr.Dataset)
self.assertEqual({'d', 'crs'}, set(rest.data_vars))
def test_encode_is_inverse(self):
dataset = new_cube(variables=dict(a=1, b=2, c=3),
x_name='x', y_name='y')
dataset = dataset.assign(
d=xr.DataArray([8, 9, 10], dims='level'),
crs=xr.DataArray(0, attrs=pyproj.CRS.from_string('CRS84').to_cf()),
)
cube, grid_mapping, rest = decode_cube(dataset)
dataset2 = encode_cube(cube, grid_mapping, rest)
self.assertEqual(set(dataset.data_vars), set(dataset2.data_vars))
self.assertIn('crs', dataset2.data_vars)
def test_no_cube_vars_found(self):
dataset = new_cube()
self.assertEqual(set(), set(dataset.data_vars))
with self.assertRaises(DatasetIsNotACubeError) as cm:
decode_cube(dataset, force_non_empty=True)
self.assertEqual("No variables found with dimensions"
" ('time', [...] 'lat', 'lon')"
" or dimension sizes too small",
f'{cm.exception}')
def test_no_grid_mapping(self):
dataset = xr.Dataset(dict(a=[1, 2, 3], b=0.5))
with self.assertRaises(DatasetIsNotACubeError) as cm:
decode_cube(dataset)
self.assertEqual("Failed to detect grid mapping:"
" cannot find any grid mapping in dataset",
f'{cm.exception}')
def test_grid_mapping_not_geographic(self):
dataset = new_cube(x_name='x', y_name='y',
variables=dict(a=0.5), crs='epsg:25832')
with self.assertRaises(DatasetIsNotACubeError) as cm:
decode_cube(dataset, force_geographic=True)
self.assertEqual("Grid mapping must use geographic CRS,"
" but was 'ETRS89 / UTM zone 32N'",
f'{cm.exception}')
class EncodeCubeTest(TestCase):
def test_geographical_crs(self):
cube = new_cube(variables=dict(a=1, b=2, c=3))
gm = GridMapping.from_dataset(cube)
dataset = encode_cube(cube, gm)
self.assertIs(cube, dataset)
dataset = encode_cube(cube, gm,
xr.Dataset(dict(d=True)))
self.assertIsInstance(dataset, xr.Dataset)
self.assertEqual({'a', 'b', 'c', 'd'}, set(dataset.data_vars))
def test_non_geographical_crs(self):
cube = new_cube(x_name='x',
y_name='y',
crs='epsg:25832',
variables=dict(a=1, b=2, c=3))
gm = GridMapping.from_dataset(cube)
dataset = encode_cube(cube,
gm,
xr.Dataset(dict(d=True)))
self.assertIsInstance(dataset, xr.Dataset)
self.assertEqual({'a', 'b', 'c', 'd', 'crs'}, set(dataset.data_vars))
class TestNormalize(TestCase):
def test_normalize_zonal_lat_lon(self):
resolution = 10
lat_size = 3
lat_coords = np.arange(0, 30, resolution)
lon_coords = [i + 5. for i in np.arange(-180.0, 180.0, resolution)]
lon_size = len(lon_coords)
one_more_dim_size = 2
one_more_dim_coords = np.random.random(2)
var_values_1_1d = xr.DataArray(np.random.random(lat_size),
coords=[('latitude_centers', lat_coords)],
dims=['latitude_centers'],
attrs=dict(chunk_sizes=[lat_size],
dimensions=['latitude_centers']))
var_values_1_1d.encoding = {'chunks': (lat_size,)}
var_values_1_2d = xr.DataArray(np.array([var_values_1_1d.values for _ in lon_coords]).T,
coords={'lat': lat_coords, 'lon': lon_coords},
dims=['lat', 'lon'],
attrs=dict(chunk_sizes=[lat_size, lon_size],
dimensions=['lat', 'lon']))
var_values_1_2d.encoding = {'chunks': (lat_size, lon_size)}
var_values_2_2d = xr.DataArray(np.random.random(lat_size * one_more_dim_size).
reshape(lat_size, one_more_dim_size),
coords={'latitude_centers': lat_coords,
'one_more_dim': one_more_dim_coords},
dims=['latitude_centers', 'one_more_dim'],
attrs=dict(chunk_sizes=[lat_size, one_more_dim_size],
dimensions=['latitude_centers', 'one_more_dim']))
var_values_2_2d.encoding = {'chunks': (lat_size, one_more_dim_size)}
var_values_2_3d = xr.DataArray(np.array([var_values_2_2d.values for _ in lon_coords]).T,
coords={'one_more_dim': one_more_dim_coords,
'lat': lat_coords,
'lon': lon_coords, },
dims=['one_more_dim', 'lat', 'lon', ],
attrs=dict(chunk_sizes=[one_more_dim_size,
lat_size,
lon_size],
dimensions=['one_more_dim', 'lat', 'lon']))
var_values_2_3d.encoding = {'chunks': (one_more_dim_size, lat_size, lon_size)}
dataset = xr.Dataset({'first': var_values_1_1d, 'second': var_values_2_2d})
expected = xr.Dataset({'first': var_values_1_2d, 'second': var_values_2_3d})
expected = expected.assign_coords(
lon_bnds=xr.DataArray([[i - (resolution / 2), i + (resolution / 2)] for i in expected.lon.values],
dims=['lon', 'bnds']))
expected = expected.assign_coords(
lat_bnds=xr.DataArray([[i - (resolution / 2), i + (resolution / 2)] for i in expected.lat.values],
dims=['lat', 'bnds']))
actual = normalize_dataset(dataset)
xr.testing.assert_equal(actual, expected)
self.assertEqual(actual.first.chunk_sizes, expected.first.chunk_sizes)
self.assertEqual(actual.second.chunk_sizes, expected.second.chunk_sizes)
def test_normalize_lon_lat_2d(self):
"""
Test nominal execution
"""
dims = ('time', 'y', 'x')
attrs = {'valid_min': 0., 'valid_max': 1.}
t_size = 2
y_size = 3
x_size = 4
a_data = np.random.random_sample((t_size, y_size, x_size))
b_data = np.random.random_sample((t_size, y_size, x_size))
time_data = [1, 2]
lat_data = [[10., 10., 10., 10.],
[20., 20., 20., 20.],
[30., 30., 30., 30.]]
lon_data = [[-10., 0., 10., 20.],
[-10., 0., 10., 20.],
[-10., 0., 10., 20.]]
dataset = xr.Dataset({'a': (dims, a_data, attrs),
'b': (dims, b_data, attrs)
},
{'time': (('time',), time_data),
'lat': (('y', 'x'), lat_data),
'lon': (('y', 'x'), lon_data)
},
{'geospatial_lon_min': -15.,
'geospatial_lon_max': 25.,
'geospatial_lat_min': 5.,
'geospatial_lat_max': 35.
}
)
new_dims = ('time', 'lat', 'lon')
expected = xr.Dataset({'a': (new_dims, a_data, attrs),
'b': (new_dims, b_data, attrs)},
{'time': (('time',), time_data),
'lat': (('lat',), [10., 20., 30.]),
'lon': (('lon',), [-10., 0., 10., 20.]),
},
{'geospatial_lon_min': -15.,
'geospatial_lon_max': 25.,
'geospatial_lat_min': 5.,
'geospatial_lat_max': 35.})
actual = normalize_dataset(dataset)
xr.testing.assert_equal(actual, expected)
def test_normalize_lon_lat(self):
"""
Test nominal execution
"""
dataset = xr.Dataset({'first': (['latitude',
'longitude'], [[1, 2, 3],
[2, 3, 4]])})
expected = xr.Dataset({'first': (['lat', 'lon'], [[1, 2, 3],
[2, 3, 4]])})
actual = normalize_dataset(dataset)
assertDatasetEqual(actual, expected)
dataset = xr.Dataset({'first': (['lat', 'long'], [[1, 2, 3],
[2, 3, 4]])})
expected = xr.Dataset({'first': (['lat', 'lon'], [[1, 2, 3],
[2, 3, 4]])})
actual = normalize_dataset(dataset)
assertDatasetEqual(actual, expected)
dataset = xr.Dataset({'first': (['latitude',
'spacetime'], [[1, 2, 3],
[2, 3, 4]])})
expected = xr.Dataset({'first': (['lat', 'spacetime'], [[1, 2, 3],
[2, 3, 4]])})
actual = normalize_dataset(dataset)
assertDatasetEqual(actual, expected)
dataset = xr.Dataset({'first': (['zef', 'spacetime'], [[1, 2, 3],
[2, 3, 4]])})
expected = xr.Dataset({'first': (['zef', 'spacetime'], [[1, 2, 3],
[2, 3, 4]])})
actual = normalize_dataset(dataset)
assertDatasetEqual(actual, expected)
def test_normalize_does_not_reorder_increasing_lat(self):
first = np.zeros([3, 45, 90])
first[0, :, :] = np.eye(45, 90)
ds = xr.Dataset({
'first': (['time', 'lat', 'lon'], first),
'second': (['time', 'lat', 'lon'], np.zeros([3, 45, 90])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'time': [datetime(2000, x, 1) for x in range(1, 4)]}).chunk(
chunks={'time': 1})
actual = normalize_dataset(ds)
xr.testing.assert_equal(actual, ds)
def test_normalize_with_missing_time_dim(self):
ds = xr.Dataset({'first': (['lat', 'lon'], np.zeros([90, 180])),
'second': (['lat', 'lon'], np.zeros([90, 180]))},
coords={'lat': np.linspace(-89.5, 89.5, 90),
'lon': np.linspace(-179.5, 179.5, 180)},
attrs={'time_coverage_start': '20120101',
'time_coverage_end': '20121231'})
norm_ds = normalize_dataset(ds)
self.assertIsNot(norm_ds, ds)
self.assertEqual(len(norm_ds.coords), 4)
self.assertIn('lon', norm_ds.coords)
self.assertIn('lat', norm_ds.coords)
self.assertIn('time', norm_ds.coords)
self.assertIn('time_bnds', norm_ds.coords)
self.assertEqual(norm_ds.first.shape, (1, 90, 180))
self.assertEqual(norm_ds.second.shape, (1, 90, 180))
self.assertEqual(norm_ds.coords['time'][0], xr.DataArray(pd.to_datetime('2012-07-01T12:00:00')))
self.assertEqual(norm_ds.coords['time_bnds'][0][0], xr.DataArray(pd.to_datetime('2012-01-01')))
self.assertEqual(norm_ds.coords['time_bnds'][0][1], xr.DataArray(pd.to_datetime('2012-12-31')))
def test_normalize_with_time_called_t(self):
ds = xr.Dataset({'first': (['time', 'lat', 'lon'], np.zeros([4, 90, 180])),
'second': (['time', 'lat', 'lon'], np.zeros([4, 90, 180])),
't': ('time', np.array(['2005-07-02T00:00:00.000000000',
'2006-07-02T12:00:00.000000000',
'2007-07-03T00:00:00.000000000',
'2008-07-02T00:00:00.000000000'], dtype='datetime64[ns]'))},
coords={'lat': np.linspace(-89.5, 89.5, 90),
'lon': np.linspace(-179.5, 179.5, 180)},
attrs={'time_coverage_start': '2005-01-17',
'time_coverage_end': '2008-08-17'})
norm_ds = normalize_dataset(ds)
self.assertIsNot(norm_ds, ds)
self.assertEqual(len(norm_ds.coords), 3)
self.assertIn('lon', norm_ds.coords)
self.assertIn('lat', norm_ds.coords)
self.assertIn('time', norm_ds.coords)
self.assertEqual(norm_ds.first.shape, (4, 90, 180))
self.assertEqual(norm_ds.second.shape, (4, 90, 180))
self.assertEqual(norm_ds.coords['time'][0], xr.DataArray(pd.to_datetime('2005-07-02T00:00')))
def test_normalize_julian_day(self):
"""
Test Julian Day -> Datetime conversion
"""
tuples = [gcal2jd(2000, x, 1) for x in range(1, 13)]
ds = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.zeros([88, 90, 12])),
'second': (['lat', 'lon', 'time'], np.zeros([88, 90, 12])),
'lat': np.linspace(-88, 45, 88),
'lon': np.linspace(-178, 178, 90),
'time': [x[0] + x[1] for x in tuples]})
ds.time.attrs['long_name'] = 'time in julian days'
expected = xr.Dataset({
'first': (['time', 'lat', 'lon'], np.zeros([12, 88, 90])),
'second': (['time', 'lat', 'lon'], np.zeros([12, 88, 90])),
'lat': np.linspace(-88, 45, 88),
'lon': np.linspace(-178, 178, 90),
'time': [datetime(2000, x, 1) for x in range(1, 13)]})
expected.time.attrs['long_name'] = 'time'
actual = normalize_dataset(ds)
assertDatasetEqual(actual, expected)
class AdjustSpatialTest(TestCase):
def test_nominal(self):
ds = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.zeros([45, 90, 12])),
'second': (['lat', 'lon', 'time'], np.zeros([45, 90, 12])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'time': [datetime(2000, x, 1) for x in range(1, 13)]})
ds.lon.attrs['units'] = 'degrees_east'
ds.lat.attrs['units'] = 'degrees_north'
ds1 = adjust_spatial_attrs(ds)
# Make sure original dataset is not altered
with self.assertRaises(KeyError):
# noinspection PyStatementEffect
ds.attrs['geospatial_lat_min']
# Make sure expected values are in the new dataset
self.assertEqual(ds1.attrs['geospatial_lat_min'], -90)
self.assertEqual(ds1.attrs['geospatial_lat_max'], 90)
self.assertEqual(ds1.attrs['geospatial_lat_units'], 'degrees_north')
self.assertEqual(ds1.attrs['geospatial_lat_resolution'], 4)
self.assertEqual(ds1.attrs['geospatial_lon_min'], -180)
self.assertEqual(ds1.attrs['geospatial_lon_max'], 180)
self.assertEqual(ds1.attrs['geospatial_lon_units'], 'degrees_east')
self.assertEqual(ds1.attrs['geospatial_lon_resolution'], 4)
self.assertEqual(ds1.attrs['geospatial_bounds'],
'POLYGON((-180.0 -90.0, -180.0 90.0, 180.0 90.0,'
' 180.0 -90.0, -180.0 -90.0))')
# Test existing attributes update
lon_min, lat_min, lon_max, lat_max = -20, -40, 60, 40
indexers = {'lon': slice(lon_min, lon_max),
'lat': slice(lat_min, lat_max)}
ds2 = ds1.sel(**indexers)
ds2 = adjust_spatial_attrs(ds2)
self.assertEqual(ds2.attrs['geospatial_lat_min'], -42)
self.assertEqual(ds2.attrs['geospatial_lat_max'], 42)
self.assertEqual(ds2.attrs['geospatial_lat_units'], 'degrees_north')
self.assertEqual(ds2.attrs['geospatial_lat_resolution'], 4)
self.assertEqual(ds2.attrs['geospatial_lon_min'], -20)
self.assertEqual(ds2.attrs['geospatial_lon_max'], 60)
self.assertEqual(ds2.attrs['geospatial_lon_units'], 'degrees_east')
self.assertEqual(ds2.attrs['geospatial_lon_resolution'], 4)
self.assertEqual(ds2.attrs['geospatial_bounds'],
'POLYGON((-20.0 -42.0, -20.0 42.0, 60.0 42.0, 60.0'
' -42.0, -20.0 -42.0))')
def test_nominal_inverted(self):
# Inverted lat
ds = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.zeros([45, 90, 12])),
'second': (['lat', 'lon', 'time'], np.zeros([45, 90, 12])),
'lat': np.linspace(88, -88, 45),
'lon': np.linspace(-178, 178, 90),
'time': [datetime(2000, x, 1) for x in range(1, 13)]})
ds.lon.attrs['units'] = 'degrees_east'
ds.lat.attrs['units'] = 'degrees_north'
ds1 = adjust_spatial_attrs(ds)
# Make sure original dataset is not altered
with self.assertRaises(KeyError):
# noinspection PyStatementEffect
ds.attrs['geospatial_lat_min']
# Make sure expected values are in the new dataset
self.assertEqual(ds1.attrs['geospatial_lat_min'], -90)
self.assertEqual(ds1.attrs['geospatial_lat_max'], 90)
self.assertEqual(ds1.attrs['geospatial_lat_units'], 'degrees_north')
self.assertEqual(ds1.attrs['geospatial_lat_resolution'], 4)
self.assertEqual(ds1.attrs['geospatial_lon_min'], -180)
self.assertEqual(ds1.attrs['geospatial_lon_max'], 180)
self.assertEqual(ds1.attrs['geospatial_lon_units'], 'degrees_east')
self.assertEqual(ds1.attrs['geospatial_lon_resolution'], 4)
self.assertEqual(ds1.attrs['geospatial_bounds'],
'POLYGON((-180.0 -90.0, -180.0 90.0, 180.0 90.0,'
' 180.0 -90.0, -180.0 -90.0))')
# Test existing attributes update
lon_min, lat_min, lon_max, lat_max = -20, -40, 60, 40
indexers = {'lon': slice(lon_min, lon_max),
'lat': slice(lat_max, lat_min)}
ds2 = ds1.sel(**indexers)
ds2 = adjust_spatial_attrs(ds2)
self.assertEqual(ds2.attrs['geospatial_lat_min'], -42)
self.assertEqual(ds2.attrs['geospatial_lat_max'], 42)
self.assertEqual(ds2.attrs['geospatial_lat_units'], 'degrees_north')
self.assertEqual(ds2.attrs['geospatial_lat_resolution'], 4)
self.assertEqual(ds2.attrs['geospatial_lon_min'], -20)
self.assertEqual(ds2.attrs['geospatial_lon_max'], 60)
self.assertEqual(ds2.attrs['geospatial_lon_units'], 'degrees_east')
self.assertEqual(ds2.attrs['geospatial_lon_resolution'], 4)
self.assertEqual(ds2.attrs['geospatial_bounds'],
'POLYGON((-20.0 -42.0, -20.0 42.0, 60.0 42.0, 60.0'
' -42.0, -20.0 -42.0))')
def test_bnds(self):
ds = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.zeros([45, 90, 12])),
'second': (['lat', 'lon', 'time'], np.zeros([45, 90, 12])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'time': [datetime(2000, x, 1) for x in range(1, 13)]})
ds.lon.attrs['units'] = 'degrees_east'
ds.lat.attrs['units'] = 'degrees_north'
lat_bnds = np.empty([len(ds.lat), 2])
lon_bnds = np.empty([len(ds.lon), 2])
ds['nv'] = [0, 1]
lat_bnds[:, 0] = ds.lat.values - 2
lat_bnds[:, 1] = ds.lat.values + 2
lon_bnds[:, 0] = ds.lon.values - 2
lon_bnds[:, 1] = ds.lon.values + 2
ds['lat_bnds'] = (['lat', 'nv'], lat_bnds)
ds['lon_bnds'] = (['lon', 'nv'], lon_bnds)
ds.lat.attrs['bounds'] = 'lat_bnds'
ds.lon.attrs['bounds'] = 'lon_bnds'
ds1 = adjust_spatial_attrs(ds)
# Make sure original dataset is not altered
with self.assertRaises(KeyError):
# noinspection PyStatementEffect
ds.attrs['geospatial_lat_min']
# Make sure expected values are in the new dataset
self.assertEqual(ds1.attrs['geospatial_lat_min'], -90)
self.assertEqual(ds1.attrs['geospatial_lat_max'], 90)
self.assertEqual(ds1.attrs['geospatial_lat_units'], 'degrees_north')
self.assertEqual(ds1.attrs['geospatial_lat_resolution'], 4)
self.assertEqual(ds1.attrs['geospatial_lon_min'], -180)
self.assertEqual(ds1.attrs['geospatial_lon_max'], 180)
self.assertEqual(ds1.attrs['geospatial_lon_units'], 'degrees_east')
self.assertEqual(ds1.attrs['geospatial_lon_resolution'], 4)
self.assertEqual(ds1.attrs['geospatial_bounds'],
'POLYGON((-180.0 -90.0, -180.0 90.0, 180.0 90.0,'
' 180.0 -90.0, -180.0 -90.0))')
# Test existing attributes update
lon_min, lat_min, lon_max, lat_max = -20, -40, 60, 40
indexers = {'lon': slice(lon_min, lon_max),
'lat': slice(lat_min, lat_max)}
ds2 = ds1.sel(**indexers)
ds2 = adjust_spatial_attrs(ds2)
self.assertEqual(ds2.attrs['geospatial_lat_min'], -42)
self.assertEqual(ds2.attrs['geospatial_lat_max'], 42)
self.assertEqual(ds2.attrs['geospatial_lat_units'], 'degrees_north')
self.assertEqual(ds2.attrs['geospatial_lat_resolution'], 4)
self.assertEqual(ds2.attrs['geospatial_lon_min'], -20)
self.assertEqual(ds2.attrs['geospatial_lon_max'], 60)
self.assertEqual(ds2.attrs['geospatial_lon_units'], 'degrees_east')
self.assertEqual(ds2.attrs['geospatial_lon_resolution'], 4)
self.assertEqual(ds2.attrs['geospatial_bounds'],
'POLYGON((-20.0 -42.0, -20.0 42.0, 60.0 42.0, 60.0'
' -42.0, -20.0 -42.0))')
def test_bnds_inverted(self):
# Inverted lat
ds = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.zeros([45, 90, 12])),
'second': (['lat', 'lon', 'time'], np.zeros([45, 90, 12])),
'lat': np.linspace(88, -88, 45),
'lon': np.linspace(-178, 178, 90),
'time': [datetime(2000, x, 1) for x in range(1, 13)]})
ds.lon.attrs['units'] = 'degrees_east'
ds.lat.attrs['units'] = 'degrees_north'
lat_bnds = np.empty([len(ds.lat), 2])
lon_bnds = np.empty([len(ds.lon), 2])
ds['nv'] = [0, 1]
lat_bnds[:, 0] = ds.lat.values + 2
lat_bnds[:, 1] = ds.lat.values - 2
lon_bnds[:, 0] = ds.lon.values - 2
lon_bnds[:, 1] = ds.lon.values + 2
ds['lat_bnds'] = (['lat', 'nv'], lat_bnds)
ds['lon_bnds'] = (['lon', 'nv'], lon_bnds)
ds.lat.attrs['bounds'] = 'lat_bnds'
ds.lon.attrs['bounds'] = 'lon_bnds'
ds1 = adjust_spatial_attrs(ds)
# Make sure original dataset is not altered
with self.assertRaises(KeyError):
# noinspection PyStatementEffect
ds.attrs['geospatial_lat_min']
# Make sure expected values are in the new dataset
self.assertEqual(ds1.attrs['geospatial_lat_min'], -90)
self.assertEqual(ds1.attrs['geospatial_lat_max'], 90)
self.assertEqual(ds1.attrs['geospatial_lat_units'], 'degrees_north')
self.assertEqual(ds1.attrs['geospatial_lat_resolution'], 4)
self.assertEqual(ds1.attrs['geospatial_lon_min'], -180)
self.assertEqual(ds1.attrs['geospatial_lon_max'], 180)
self.assertEqual(ds1.attrs['geospatial_lon_units'], 'degrees_east')
self.assertEqual(ds1.attrs['geospatial_lon_resolution'], 4)
self.assertEqual(ds1.attrs['geospatial_bounds'],
'POLYGON((-180.0 -90.0, -180.0 90.0, 180.0 90.0,'
' 180.0 -90.0, -180.0 -90.0))')
# Test existing attributes update
lon_min, lat_min, lon_max, lat_max = -20, -40, 60, 40
indexers = {'lon': slice(lon_min, lon_max),
'lat': slice(lat_max, lat_min)}
ds2 = ds1.sel(**indexers)
ds2 = adjust_spatial_attrs(ds2)
self.assertEqual(ds2.attrs['geospatial_lat_min'], -42)
self.assertEqual(ds2.attrs['geospatial_lat_max'], 42)
self.assertEqual(ds2.attrs['geospatial_lat_units'], 'degrees_north')
self.assertEqual(ds2.attrs['geospatial_lat_resolution'], 4)
self.assertEqual(ds2.attrs['geospatial_lon_min'], -20)
self.assertEqual(ds2.attrs['geospatial_lon_max'], 60)
self.assertEqual(ds2.attrs['geospatial_lon_units'], 'degrees_east')
self.assertEqual(ds2.attrs['geospatial_lon_resolution'], 4)
self.assertEqual(ds2.attrs['geospatial_bounds'],
'POLYGON((-20.0 -42.0, -20.0 42.0, 60.0 42.0, 60.0 -42.0, -20.0 -42.0))')
def test_once_cell_with_bnds(self):
# Only one cell in lat/lon
ds = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.zeros([1, 1, 12])),
'second': (['lat', 'lon', 'time'], np.zeros([1, 1, 12])),
'lat': np.array([52.5]),
'lon': np.array([11.5]),
'lat_bnds': (['lat', 'bnds'], np.array([[52.4, 52.6]])),
'lon_bnds': (['lon', 'bnds'], np.array([[11.4, 11.6]])),
'time': [datetime(2000, x, 1) for x in range(1, 13)]})
ds.lon.attrs['units'] = 'degrees_east'
ds.lat.attrs['units'] = 'degrees_north'
ds1 = adjust_spatial_attrs(ds)
self.assertAlmostEqual(ds1.attrs['geospatial_lat_resolution'], 0.2)
self.assertAlmostEqual(ds1.attrs['geospatial_lat_min'], 52.4)
self.assertAlmostEqual(ds1.attrs['geospatial_lat_max'], 52.6)
self.assertEqual(ds1.attrs['geospatial_lat_units'], 'degrees_north')
self.assertAlmostEqual(ds1.attrs['geospatial_lon_resolution'], 0.2)
self.assertAlmostEqual(ds1.attrs['geospatial_lon_min'], 11.4)
self.assertAlmostEqual(ds1.attrs['geospatial_lon_max'], 11.6)
self.assertEqual(ds1.attrs['geospatial_lon_units'], 'degrees_east')
self.assertEqual(ds1.attrs['geospatial_bounds'],
'POLYGON((11.4 52.4, 11.4 52.6, 11.6 52.6, 11.6 52.4, 11.4 52.4))')
def test_once_cell_without_bnds(self):
# Only one cell in lat/lon
ds = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.zeros([1, 1, 12])),
'second': (['lat', 'lon', 'time'], np.zeros([1, 1, 12])),
'lat': np.array([52.5]),
'lon': np.array([11.5]),
'time': [datetime(2000, x, 1) for x in range(1, 13)]})
ds.lon.attrs['units'] = 'degrees_east'
ds.lat.attrs['units'] = 'degrees_north'
ds2 = adjust_spatial_attrs(ds)
# Datasets should be the same --> not modified
self.assertIs(ds2, ds)
class NormalizeCoordVarsTest(TestCase):
def test_ds_with_potential_coords(self):
ds = xr.Dataset({'first': (['lat', 'lon'], np.zeros([90, 180])),
'second': (['lat', 'lon'], np.zeros([90, 180])),
'lat_bnds': (['lat', 'bnds'], np.zeros([90, 2])),
'lon_bnds': (['lon', 'bnds'], np.zeros([180, 2]))},
coords={'lat': np.linspace(-89.5, 89.5, 90),
'lon': np.linspace(-179.5, 179.5, 180)})
new_ds = normalize_coord_vars(ds)
self.assertIsNot(ds, new_ds)
self.assertEqual(len(new_ds.coords), 4)
self.assertIn('lon', new_ds.coords)
self.assertIn('lat', new_ds.coords)
self.assertIn('lat_bnds', new_ds.coords)
self.assertIn('lon_bnds', new_ds.coords)
self.assertEqual(len(new_ds.data_vars), 2)
self.assertIn('first', new_ds.data_vars)
self.assertIn('second', new_ds.data_vars)
def test_ds_with_potential_coords_and_bounds(self):
ds = xr.Dataset({'first': (['lat', 'lon'], np.zeros([90, 180])),
'second': (['lat', 'lon'], np.zeros([90, 180])),
'lat_bnds': (['lat', 'bnds'], np.zeros([90, 2])),
'lon_bnds': (['lon', 'bnds'], np.zeros([180, 2])),
'lat': (['lat'], np.linspace(-89.5, 89.5, 90)),
'lon': (['lon'], np.linspace(-179.5, 179.5, 180))})
new_ds = normalize_coord_vars(ds)
self.assertIsNot(ds, new_ds)
self.assertEqual(len(new_ds.coords), 4)
self.assertIn('lon', new_ds.coords)
self.assertIn('lat', new_ds.coords)
self.assertIn('lat_bnds', new_ds.coords)
self.assertIn('lon_bnds', new_ds.coords)
self.assertEqual(len(new_ds.data_vars), 2)
self.assertIn('first', new_ds.data_vars)
self.assertIn('second', new_ds.data_vars)
def test_ds_with_no_potential_coords(self):
ds = xr.Dataset({'first': (['lat', 'lon'], np.zeros([90, 180])),
'second': (['lat', 'lon'], np.zeros([90, 180]))},
coords={'lat': np.linspace(-89.5, 89.5, 90),
'lon': np.linspace(-179.5, 179.5, 180)},
attrs={'time_coverage_start': '20120101'})
new_ds = normalize_coord_vars(ds)
self.assertIs(ds, new_ds)
class NormalizeMissingTimeTest(TestCase):
def test_ds_without_time(self):
ds = xr.Dataset({'first': (['lat', 'lon'], np.zeros([90, 180])),
'second': (['lat', 'lon'], np.zeros([90, 180]))},
coords={'lat': np.linspace(-89.5, 89.5, 90),
'lon': np.linspace(-179.5, 179.5, 180)},
attrs={'time_coverage_start': '20120101',
'time_coverage_end': '20121231'})
new_ds = normalize_missing_time(ds)
self.assertIsNot(ds, new_ds)
self.assertEqual(len(new_ds.coords), 4)
self.assertIn('lon', new_ds.coords)
self.assertIn('lat', new_ds.coords)
self.assertIn('time', new_ds.coords)
self.assertIn('time_bnds', new_ds.coords)
self.assertEqual(new_ds.coords['time'].attrs.get('long_name'), 'time')
self.assertEqual(new_ds.coords['time'].attrs.get('bounds'), 'time_bnds')
self.assertEqual(new_ds.first.shape, (1, 90, 180))
self.assertEqual(new_ds.second.shape, (1, 90, 180))
self.assertEqual(new_ds.coords['time'][0], xr.DataArray(pd.to_datetime('2012-07-01T12:00:00')))
self.assertEqual(new_ds.coords['time'].attrs.get('long_name'), 'time')
self.assertEqual(new_ds.coords['time'].attrs.get('bounds'), 'time_bnds')
self.assertEqual(new_ds.coords['time_bnds'][0][0], xr.DataArray(pd.to_datetime('2012-01-01')))
self.assertEqual(new_ds.coords['time_bnds'][0][1], xr.DataArray(pd.to_datetime('2012-12-31')))
self.assertEqual(new_ds.coords['time_bnds'].attrs.get('long_name'), 'time')
def test_ds_without_bounds(self):
ds = xr.Dataset({'first': (['lat', 'lon'], np.zeros([90, 180])),
'second': (['lat', 'lon'], np.zeros([90, 180]))},
coords={'lat': np.linspace(-89.5, 89.5, 90),
'lon': np.linspace(-179.5, 179.5, 180)},
attrs={'time_coverage_start': '20120101'})
new_ds = normalize_missing_time(ds)
self.assertIsNot(ds, new_ds)
self.assertEqual(len(new_ds.coords), 3)
self.assertIn('lon', new_ds.coords)
self.assertIn('lat', new_ds.coords)
self.assertIn('time', new_ds.coords)
self.assertNotIn('time_bnds', new_ds.coords)
self.assertEqual(new_ds.first.shape, (1, 90, 180))
self.assertEqual(new_ds.second.shape, (1, 90, 180))
self.assertEqual(new_ds.coords['time'][0], xr.DataArray(pd.to_datetime('2012-01-01')))
self.assertEqual(new_ds.coords['time'].attrs.get('long_name'), 'time')
self.assertEqual(new_ds.coords['time'].attrs.get('bounds'), None)
def test_ds_without_time_attrs(self):
ds = xr.Dataset({'first': (['lat', 'lon'], np.zeros([90, 180])),
'second': (['lat', 'lon'], np.zeros([90, 180]))},
coords={'lat': np.linspace(-89.5, 89.5, 90),
'lon': np.linspace(-179.5, 179.5, 180)})
new_ds = normalize_missing_time(ds)
self.assertIs(ds, new_ds)
def test_ds_with_cftime(self):
time_data = xr.cftime_range(start='2010-01-01T00:00:00',
periods=6,
freq='D',
calendar='gregorian').values
ds = xr.Dataset({'first': (['time', 'lat', 'lon'], np.zeros([6, 90, 180])),
'second': (['time', 'lat', 'lon'], np.zeros([6, 90, 180]))},
coords={'lat': np.linspace(-89.5, 89.5, 90),
'lon': np.linspace(-179.5, 179.5, 180),
'time': time_data},
attrs={'time_coverage_start': '20120101',
'time_coverage_end': '20121231'})
new_ds = normalize_missing_time(ds)
self.assertIs(ds, new_ds)
def test_normalize_with_missing_time_dim(self):
ds = xr.Dataset({'first': (['lat', 'lon'], np.zeros([90, 180])),
'second': (['lat', 'lon'], np.zeros([90, 180]))},
coords={'lat': np.linspace(-89.5, 89.5, 90),
'lon': np.linspace(-179.5, 179.5, 180)},
attrs={'time_coverage_start': '20120101',
'time_coverage_end': '20121231'})
norm_ds = normalize_dataset(ds)
self.assertIsNot(norm_ds, ds)
self.assertEqual(len(norm_ds.coords), 4)
self.assertIn('lon', norm_ds.coords)
self.assertIn('lat', norm_ds.coords)
self.assertIn('time', norm_ds.coords)
self.assertIn('time_bnds', norm_ds.coords)
self.assertEqual(norm_ds.first.shape, (1, 90, 180))
self.assertEqual(norm_ds.second.shape, (1, 90, 180))
self.assertEqual(norm_ds.coords['time'][0], xr.DataArray(pd.to_datetime('2012-07-01T12:00:00')))
self.assertEqual(norm_ds.coords['time_bnds'][0][0], xr.DataArray(pd.to_datetime('2012-01-01')))
self.assertEqual(norm_ds.coords['time_bnds'][0][1], xr.DataArray(pd.to_datetime('2012-12-31')))
def test_normalize_with_missing_time_dim_from_filename(self):
ds = xr.Dataset({'first': (['lat', 'lon'], np.zeros([90, 180])),
'second': (['lat', 'lon'], np.zeros([90, 180]))},
coords={'lat': np.linspace(-89.5, 89.5, 90),
'lon': np.linspace(-179.5, 179.5, 180)},
)
ds_encoding = dict(source='20150204_etfgz_20170309_dtsrgth')
ds.encoding.update(ds_encoding)
norm_ds = normalize_dataset(ds)
self.assertIsNot(norm_ds, ds)
self.assertEqual(len(norm_ds.coords), 4)
self.assertIn('lon', norm_ds.coords)
self.assertIn('lat', norm_ds.coords)
self.assertIn('time', norm_ds.coords)
self.assertIn('time_bnds', norm_ds.coords)
self.assertEqual(norm_ds.first.shape, (1, 90, 180))
self.assertEqual(norm_ds.second.shape, (1, 90, 180))
self.assertEqual(norm_ds.coords['time'][0], xr.DataArray(pd.to_datetime('2016-02-21T00:00:00')))
self.assertEqual(norm_ds.coords['time_bnds'][0][0], xr.DataArray(pd.to_datetime('2015-02-04')))
self.assertEqual(norm_ds.coords['time_bnds'][0][1], xr.DataArray(pd.to_datetime('2017-03-09')))
class Fix360Test(TestCase):
def test_fix_360_lon(self):
# The following simulates a strangely geo-coded soil moisture dataset we found
lon_size = 360
lat_size = 130
time_size = 12
ds = xr.Dataset({
'first': (['time', 'lat', 'lon'],
np.random.random_sample([time_size, lat_size, lon_size])),
'second': (['time', 'lat', 'lon'],
np.random.random_sample([time_size, lat_size,
lon_size]))},
coords={'lon': np.linspace(1., 360., lon_size),
'lat': np.linspace(-65., 64., lat_size),
'time': [datetime(2000, x, 1)
for x in range(1, time_size + 1)]},
attrs=dict(geospatial_lon_min=0.,
geospatial_lon_max=360.,
geospatial_lat_min=-65.5,
geospatial_lat_max=+64.5,
geospatial_lon_resolution=1.,
geospatial_lat_resolution=1.))
new_ds = normalize_dataset(ds)
self.assertIsNot(ds, new_ds)
self.assertEqual(ds.dims, new_ds.dims)
self.assertEqual(ds.sizes, new_ds.sizes)
assert_array_almost_equal(new_ds.lon, np.linspace(-179.5, 179.5, 360))
"""Tests for is_completely_positive."""
import numpy as np
from toqito.channel_props import is_completely_positive
from toqito.channels import depolarizing
def test_is_completely_positive_kraus_false():
"""Verify non-completely positive channel as Kraus ops as False."""
unitary_mat = np.array([[1, 1], [-1, 1]]) / np.sqrt(2)
kraus_ops = [[np.identity(2), np.identity(2)], [unitary_mat, -unitary_mat]]
np.testing.assert_equal(is_completely_positive(kraus_ops), False)
def test_is_completely_positive_choi_true():
"""Verify Choi matrix of the depolarizing map is completely positive."""
np.testing.assert_equal(is_completely_positive(depolarizing(2)), True)
if __name__ == "__main__":
np.testing.run_module_suite()
import numpy as np
import pyvista as pv
from shapely.geometry import Polygon
from shapely.geometry.polygon import orient
import heapq
"""
Authors
-------
<NAME> : <EMAIL>
"""
APPROX_ZERO = .0001
class Node:
"""
node class for connecting line segments
"""
def __init__(self, start, end):
self.start = start
self.end = end
self.next: Node = None
self.root: Node = None
def polygonsFromMesh(zLevel: float, mesh: pv.PolyData) -> 'list[Polygon]':
"""
slices a mesh along a plane parallel to xy plane at height zLevel
Parameters
----------
zLevel: float
z height to slice at
mesh: PolyData
environment mesh
Returns
-------
list[Polygons]
list of polygons resulting from z slice
"""
points = np.array([mesh.cell_points(i) for i in range(mesh.n_cells)])
vectors = np.roll(points, 1,axis=1) - points
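# For each cell edge, solve p_z + t * d_z == zLevel for t; an edge crosses the slice plane when
# 0 <= t < 1, and indexLine keeps only the cells with at least two crossing edges.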
t = np.einsum('k, ijk->ij', [0, 0, 1], np.subtract(points, np.array([[0, 0, zLevel]]))) / np.einsum('ijk, k->ij', -vectors, [0, 0, 1])
indexLine = np.sum((t >= 0) & (t < 1), axis=1) > 1
intersections = np.sum(indexLine)
indexIntersection = (t[indexLine] > 0) & (t[indexLine] < 1)
p = np.reshape(points[indexLine][indexIntersection], [intersections, 2, 3])
d = np.reshape(vectors[indexLine][indexIntersection], [intersections, 2, 3])
import numpy as np
import time
from sidnet import alexnet
from key_check import key_check,key_press
import math as m
import tensorflow as tf
WIDTH = 80
HEIGHT = 60
LR = 0.0001
EPOCH = 10
MODEL_NAME_THROTTLE ='SIDNET_-{}-{}-{}-{}-{}.model'.format(0.001,'sidnet',EPOCH,'FORZA','throttle')
MODEL_NAME_STEERING ='SIDNET_-{}-{}-{}-{}-{}.model'.format(0.0005,'sidnet',EPOCH,'FORZA','steering')
tf.reset_default_graph()
model_throttle = alexnet(HEIGHT,WIDTH,LR)
model_throttle.load(MODEL_NAME_THROTTLE,weights_only=True)
tf.reset_default_graph()
model_steering = alexnet(HEIGHT,WIDTH,LR)
model_steering.load(MODEL_NAME_STEERING,weights_only=True)
control_file = 'input_file.npy'
output_file = 'output_file.npy'
def main():
while(True):
paused = False
if not paused:
now = time.time()
for i in range(10):
try:
img = np.load(output_file)
import logging
from collections.abc import Sized
import numpy as np
from scipy import interpolate
from ibllib.io.extractors import training_trials
from ibllib.io.extractors.base import BaseBpodTrialsExtractor, run_extractor_classes
import ibllib.io.raw_data_loaders as raw
from ibllib.misc import structarr
import ibllib.exceptions as err
import brainbox.behavior.wheel as wh
_logger = logging.getLogger('ibllib')
WHEEL_RADIUS_CM = 1 # we want the output in radians
THRESHOLD_RAD_PER_SEC = 10
THRESHOLD_CONSECUTIVE_SAMPLES = 0
EPS = 7. / 3 - 4. / 3 - 1
def get_trial_start_times(session_path, data=None):
if not data:
data = raw.load_data(session_path)
trial_start_times = []
for tr in data:
trial_start_times.extend(
[x[0] for x in tr['behavior_data']['States timestamps']['trial_start']])
return np.array(trial_start_times)
def sync_rotary_encoder(session_path, bpod_data=None, re_events=None):
if not bpod_data:
bpod_data = raw.load_data(session_path)
evt = re_events or raw.load_encoder_events(session_path)
# we work with stim_on (2) and closed_loop (3) states for the synchronization with bpod
tre = evt.re_ts.values / 1e6 # convert to seconds
# the first trial on the rotary encoder is a dud
rote = {'stim_on': tre[evt.sm_ev == 2][:-1],
'closed_loop': tre[evt.sm_ev == 3][:-1]}
bpod = {
'stim_on': np.array([tr['behavior_data']['States timestamps']
['stim_on'][0][0] for tr in bpod_data]),
'closed_loop': np.array([tr['behavior_data']['States timestamps']
['closed_loop'][0][0] for tr in bpod_data]),
}
if rote['closed_loop'].size <= 1:
raise err.SyncBpodWheelException("Not enough Rotary Encoder events to perform wheel"
" synchronization. Wheel data not extracted")
# bpod bug that spits out events in ms instead of us
if np.diff(bpod['closed_loop'][[-1, 0]])[0] / np.diff(rote['closed_loop'][[-1, 0]])[0] > 900:
_logger.error("Rotary encoder stores values in ms instead of us. Wheel timing inaccurate")
rote['stim_on'] *= 1e3
rote['closed_loop'] *= 1e3
# just use the closed loop for synchronization
# handle different sizes in synchronization:
sz = min(rote['closed_loop'].size, bpod['closed_loop'].size)
# if all the sample are contiguous and first samples match
diff_first_match = np.diff(rote['closed_loop'][:sz]) - np.diff(bpod['closed_loop'][:sz])
# if all the sample are contiguous and last samples match
diff_last_match = np.diff(rote['closed_loop'][-sz:]) - np.diff(bpod['closed_loop'][-sz:])
# 99% of the pulses match for a first sample lock
DIFF_THRESHOLD = 0.005
if np.mean(np.abs(diff_first_match) < DIFF_THRESHOLD) > 0.99:
re = rote['closed_loop'][:sz]
bp = bpod['closed_loop'][:sz]
indko = np.where(np.abs(diff_first_match) >= DIFF_THRESHOLD)[0]
# 99% of the pulses match for a last sample lock
elif np.mean(np.abs(diff_last_match) < DIFF_THRESHOLD) > 0.99:
re = rote['closed_loop'][-sz:]
bp = bpod['closed_loop'][-sz:]
indko = np.where(np.abs(diff_last_match) >= DIFF_THRESHOLD)[0]
# last resort is to use ad-hoc sync function
else:
bp, re = raw.sync_trials_robust(bpod['closed_loop'], rote['closed_loop'],
diff_threshold=DIFF_THRESHOLD, max_shift=5)
indko = np.array([])
# raise ValueError("Can't sync bpod and rotary encoder: non-contiguous sync pulses")
# remove faulty indices due to missing or bad syncs
indko = np.int32(np.unique(np.r_[indko + 1, indko]))
re = np.delete(re, indko)
bp = np.delete(bp, indko)
# check the linear drift
assert bp.size > 1
poly = np.polyfit(bp, re, 1)
assert np.all(np.abs(np.polyval(poly, bp) - re) < 0.002)
return interpolate.interp1d(re, bp, fill_value="extrapolate")
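# Illustrative usage sketch (not part of the original extractor): the callable returned
# by sync_rotary_encoder maps rotary-encoder timestamps (in seconds) onto the Bpod clock.
# `session_path` is assumed to be a standard ibllib session folder.
def _example_sync_rotary_encoder(session_path):
    re2bpod = sync_rotary_encoder(session_path)
    return re2bpod(np.array([0.5, 1.0, 2.5]))  # three encoder times converted to Bpod time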
def get_wheel_position(session_path, bp_data=None, display=False):
"""
Gets wheel timestamps and position from Bpod data. Position is in radian (constant above for
radius is 1) mathematical convention.
:param session_path:
:param bp_data (optional): bpod trials read from jsonable file
:param display (optional): (bool)
:return: timestamps (np.array)
:return: positions (np.array)
"""
status = 0
if not bp_data:
bp_data = raw.load_data(session_path)
df = raw.load_encoder_positions(session_path)
if df is None:
_logger.error('No wheel data for ' + str(session_path))
return None, None
data = structarr(['re_ts', 're_pos', 'bns_ts'],
shape=(df.shape[0],), formats=['f8', 'f8', object])
data['re_ts'] = df.re_ts.values
data['re_pos'] = df.re_pos.values * -1 # anti-clockwise is positive in our output
data['re_pos'] = data['re_pos'] / 1024 * 2 * np.pi # convert positions to radians
trial_starts = get_trial_start_times(session_path)
# need a flag if the data resolution is 1ms due to the old version of rotary encoder firmware
if np.all(np.mod(data['re_ts'], 1e3) == 0):
status = 1
data['re_ts'] = data['re_ts'] / 1e6 # convert ts to seconds
# # get the converter function to translate re_ts into behavior times
re2bpod = sync_rotary_encoder(session_path)
data['re_ts'] = re2bpod(data['re_ts'])
def get_reset_trace_compensation_with_state_machine_times():
# this is the preferred way of getting resets using the state machine time information
# it will not always work depending on firmware versions, new bugs
iwarn = []
ns = len(data['re_pos'])
tr_dc = np.zeros_like(data['re_pos']) # trial dc component
for bp_dat in bp_data:
restarts = np.sort(np.array(
bp_dat['behavior_data']['States timestamps']['reset_rotary_encoder'] +
bp_dat['behavior_data']['States timestamps']['reset2_rotary_encoder'])[:, 0])
ind = np.unique(np.searchsorted(data['re_ts'], restarts, side='left') - 1)
# the rotary encoder doesn't always reset right away, and the reset sample given the
# timestamp can be ambiguous: look for zeros
for i in np.where(data['re_pos'][ind] != 0)[0]:
# handle boundary effects
if ind[i] > ns - 2:
continue
# it happens quite often that we have to lock in to next sample to find the reset
if data['re_pos'][ind[i] + 1] == 0:
ind[i] = ind[i] + 1
continue
# also case where the rotary doesn't reset to 0, but erratically to -1/+1
if data['re_pos'][ind[i]] <= (1 / 1024 * 2 * np.pi):
ind[i] = ind[i] + 1
continue
# compounded with the fact that the reset may have happened at next sample.
if np.abs(data['re_pos'][ind[i] + 1]) <= (1 / 1024 * 2 * np.pi):
ind[i] = ind[i] + 1
continue
# sometimes it is also the last trial that has this behaviour
if (bp_data[-1] is bp_dat) or (bp_data[0] is bp_dat):
continue
iwarn.append(ind[i])
# at which point we are running out of possible bugs and calling it
tr_dc[ind] = data['re_pos'][ind - 1]
if iwarn: # if a warning flag was caught in the loop throw a single warning
_logger.warning('Rotary encoder reset events discrepancy. Doing my best to merge.')
_logger.debug('Offending inds: ' + str(iwarn) + ' times: ' + str(data['re_ts'][iwarn]))
# exit status 0 is fine, 1 something went wrong
return tr_dc, len(iwarn) != 0
# attempt to get the resets properly unless the unit is ms which means precision is
# not good enough to match SM times to wheel samples time
if not status:
tr_dc, status = get_reset_trace_compensation_with_state_machine_times()
# if something was wrong or went wrong agnostic way of getting resets: just get zeros values
if status:
tr_dc = np.zeros_like(data['re_pos']) # trial dc component
i0 = np.where(data['re_pos'] == 0)[0]
tr_dc[i0] = data['re_pos'][i0 - 1]
# even if things went ok, rotary encoder may not log the whole session. Need to fix outside
else:
i0 = np.where(np.bitwise_and(np.bitwise_or(data['re_ts'] >= trial_starts[-1],
data['re_ts'] <= trial_starts[0]),
data['re_pos'] == 0))[0]
# make sure the bounds are not included in the current list
i0 = np.delete(i0, np.where(np.bitwise_or(i0 >= len(data['re_pos']) - 1, i0 == 0)))
# a 0 sample is not a reset if 2 conditions are met:
# 1/2 no inflexion (continuous derivative)
c1 = np.abs(np.sign(data['re_pos'][i0 + 1] - data['re_pos'][i0]) -
np.sign(data['re_pos'][i0] - data['re_pos'][i0 - 1])) == 2
# 2/2 needs to be below threshold
c2 = np.abs((data['re_pos'][i0] - data['re_pos'][i0 - 1]) /
(EPS + (data['re_ts'][i0] - data['re_ts'][i0 - 1]))) < THRESHOLD_RAD_PER_SEC
# apply reset to points identified as resets
i0 = i0[np.where(np.bitwise_not(np.bitwise_and(c1, c2)))]
tr_dc[i0] = data['re_pos'][i0 - 1]
# unwrap the rotation (in radians) and then add the DC component from restarts
data['re_pos'] = np.unwrap(data['re_pos']) + np.cumsum(tr_dc)
# Also forgot to mention that time stamps may be repeated or very close to one another.
# Find them as they will induce large jitters on the velocity function or errors in
# attempts of interpolation
rep_idx = np.where(np.diff(data['re_ts']) <= THRESHOLD_CONSECUTIVE_SAMPLES)[0]
# Change the value of the repeated position
data['re_pos'][rep_idx] = (data['re_pos'][rep_idx] +
data['re_pos'][rep_idx + 1]) / 2
data['re_ts'][rep_idx] = (data['re_ts'][rep_idx] +
data['re_ts'][rep_idx + 1]) / 2
# Now remove the repeat times that are rep_idx + 1
data = np.delete(data, rep_idx + 1)
# convert to cm
data['re_pos'] = data['re_pos'] * WHEEL_RADIUS_CM
# DEBUG PLOTS START HERE ########################
if display:
import matplotlib.pyplot as plt
plt.figure()
ax = plt.axes()
tstart = get_trial_start_times(session_path)
tts = np.c_[tstart, tstart, tstart + np.nan].flatten()
vts = np.c_[tstart * 0 + 100, tstart * 0 - 100, tstart + np.nan].flatten()
ax.plot(tts, vts, label='Trial starts')
ax.plot(re2bpod(df.re_ts.values / 1e6), df.re_pos.values / 1024 * 2 * np.pi,
'.-', label='Raw data')
i0 = np.where(df.re_pos.values == 0)
ax.plot(re2bpod(df.re_ts.values[i0] / 1e6), df.re_pos.values[i0] / 1024 * 2 * np.pi,
'r*', label='Raw data zero samples')
ax.plot(re2bpod(df.re_ts.values / 1e6), tr_dc, label='reset compensation')
ax.set_xlabel('Bpod Time')
ax.set_ylabel('radians')
# restarts = np.array(bp_data[10]['behavior_data']['States timestamps']
# ['reset_rotary_encoder']).flatten()
# x__ = np.c_[restarts, restarts, restarts + np.nan].flatten()
# y__ = np.c_[restarts * 0 + 1, restarts * 0 - 1, restarts+ np.nan].flatten()
# ax.plot(x__, y__, 'k', label='Restarts')
ax.plot(data['re_ts'], data['re_pos'] / WHEEL_RADIUS_CM, '.-', label='Output Trace')
ax.legend()
# plt.hist(np.diff(data['re_ts']), 400, range=[0, 0.01])
return data['re_ts'], data['re_pos']
def infer_wheel_units(pos):
"""
Given an array of wheel positions, infer the rotary encoder resolution, encoding type and units
The encoding type varies across hardware (Bpod uses X1 while FPGA usually extracted as X4), and
older data were extracted in linear cm rather than radians.
:param pos: a 1D array of extracted wheel positions
:return units: the position units, assumed to be either 'rad' or 'cm'
:return resolution: the number of decoded fronts per 360 degree rotation
:return encoding: one of {'X1', 'X2', 'X4'}
"""
if len(pos.shape) > 1: # Ensure 1D array of positions
pos = pos.flatten()
# Check the values and units of wheel position
res = np.array([wh.ENC_RES, wh.ENC_RES / 2, wh.ENC_RES / 4])
# min change in rad and cm for each decoding type
# [rad_X4, rad_X2, rad_X1, cm_X4, cm_X2, cm_X1]
min_change = np.concatenate([2 * np.pi / res, wh.WHEEL_DIAMETER * np.pi / res])
pos_diff = np.median(np.abs(np.ediff1d(pos)))
# find min change closest to min pos_diff
idx = np.argmin(np.abs(min_change - pos_diff))
if idx < len(res):
# Assume values are in radians
units = 'rad'
encoding = idx
else:
units = 'cm'
encoding = idx - len(res)
enc_names = {0: 'X4', 1: 'X2', 2: 'X1'}
return units, int(res[encoding]), enc_names[int(encoding)]
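# Illustrative usage sketch: infer units and encoding from a synthetic monotonic trace
# whose step size is one X4 count in radians (wh.ENC_RES is the encoder resolution
# imported above; the expected result under these assumptions is ('rad', wh.ENC_RES, 'X4')).
def _example_infer_wheel_units():
    pos = np.cumsum(np.full(100, 2 * np.pi / wh.ENC_RES))
    return infer_wheel_units(pos)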
def extract_wheel_moves(re_ts, re_pos, display=False):
"""
Extract wheel positions and times from sync fronts dictionary
:param re_ts: numpy array of rotary encoder timestamps
:param re_pos: numpy array of rotary encoder positions
:param display: bool: show the wheel position and velocity for full session with detected
movements highlighted
:return: wheel_moves dictionary
"""
if len(re_ts.shape) == 1:
assert re_ts.size == re_pos.size, 'wheel data dimension mismatch'
else:
_logger.debug('2D wheel timestamps')
if len(re_pos.shape) > 1: # Ensure 1D array of positions
re_pos = re_pos.flatten()
# Linearly interpolate the times
x = np.arange(re_pos.size)
re_ts = np.interp(x, re_ts[:, 0], re_ts[:, 1])
units, res, enc = infer_wheel_units(re_pos)
_logger.info('Wheel in %s units using %s encoding', units, enc)
# The below assertion is violated by Bpod wheel data
# assert np.allclose(pos_diff, min_change, rtol=1e-05), 'wheel position skips'
# Convert the pos threshold defaults from samples to correct unit
thresholds = wh.samples_to_cm(np.array([8, 1.5]), resolution=res)
if units == 'rad':
thresholds = wh.cm_to_rad(thresholds)
kwargs = {'pos_thresh': thresholds[0],
'pos_thresh_onset': thresholds[1],
'make_plots': display}
# Interpolate and get onsets
pos, t = wh.interpolate_position(re_ts, re_pos, freq=1000)
on, off, amp, peak_vel = wh.movements(t, pos, freq=1000, **kwargs)
assert on.size == off.size, 'onset/offset number mismatch'
assert np.all(np.diff(on) > 0) and np.all(
np.diff(off) > 0), 'onsets/offsets not strictly increasing'
assert np.all((off - on) > 0), 'not all offsets occur after onset'
# Put into dict
wheel_moves = {
'intervals': np.c_[on, off], 'peakAmplitude': amp, 'peakVelocity_times': peak_vel}
return wheel_moves
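# Illustrative usage sketch: detect movements from already-extracted wheel traces.
# `re_ts` and `re_pos` are assumed to be the outputs of get_wheel_position above.
def _example_extract_wheel_moves(re_ts, re_pos):
    moves = extract_wheel_moves(re_ts, re_pos, display=False)
    return moves['intervals'].shape[0]  # number of detected movements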
def extract_first_movement_times(wheel_moves, trials, min_qt=None):
"""
Extracts the time of the first sufficiently large wheel movement for each trial.
To be counted, the movement must occur between go cue / stim on and before feedback /
response time. The movement onset is sometimes just before the cue (occurring in the
gap between quiescence end and cue start, or during the quiescence period but sub-
threshold). The movement is sufficiently large if it is greater than or equal to THRESH
:param wheel_moves: dictionary of detected wheel movement onsets and peak amplitudes for
use in extracting each trial's time of first movement.
:param trials: dictionary of trial data
:param min_qt: the minimum quiescence period, if None a default is used
:return: numpy array of first movement times, bool array indicating whether movement
crossed response threshold, and array of indices for wheel_moves arrays
"""
THRESH = .1 # peak amp should be at least .1 rad; ~1/3rd of the distance to threshold
MIN_QT = .2 # default minimum enforced quiescence period
# Determine minimum quiescent period
if min_qt is None:
min_qt = MIN_QT
_logger.info('minimum quiescent period assumed to be %.0fms', MIN_QT * 1e3)
elif isinstance(min_qt, Sized) and len(min_qt) > len(trials['goCue_times']):
min_qt = np.array(min_qt[0:trials['goCue_times'].size])
# Initialize as nans
first_move_onsets = np.full(trials['goCue_times'].shape, np.nan)
ids = np.full(trials['goCue_times'].shape, int(-1))
is_final_movement = np.zeros(trials['goCue_times'].shape, bool)
flinch = abs(wheel_moves['peakAmplitude']) < THRESH
all_move_onsets = wheel_moves['intervals'][:, 0]
# Iterate over trials, extracting onsets approx. within closed-loop period
cwarn = 0
for i, (t1, t2) in enumerate(zip(trials['goCue_times'] - min_qt,
trials['feedback_times'])):
        if ~np.isnan(t2 - t1):
#!/usr/bin/env python
# coding=utf-8
"""
Basic visualization functions
| Option | Description |
| ------ | ----------- |
| title | viz.py |
| authors | <NAME>, <NAME>, <NAME>, <NAME> |
| date | 2020-03-18 |
"""
from copy import copy
import os
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import mne
import meshio
def transform(locs: np.ndarray,traX: float=0.15, traY: float=0, traZ: float=0.5, rotY: float=(np.pi)/2, rotZ: float=(np.pi)/2) -> np.ndarray:
"""
Calculates new locations for the EEG locations.
Arguments:
locs: array of shape (n_sensors, 3)
3d coordinates of the sensors
traX: float
X translation to apply to the sensors
traY: float
Y translation to apply to the sensors
traZ: float
Z translation to apply to the sensors
rotY: float
Y rotation to apply to the sensors
rotZ: float
Z rotation to apply to the sensors
Returns:
result: array (n_sensors, 3)
new 3d coordinates of the sensors
"""
# Z rotation
newX = locs[:, 0] * np.cos(rotZ) - locs[:, 1] * np.sin(rotZ)
newY = locs[:, 0] * np.sin(rotZ) + locs[:, 1] * np.cos(rotZ)
locs[:, 0] = newX
locs[:, 0] = locs[:, 0] + traX
locs[:, 1] = newY
locs[:, 1] = locs[:, 1] + traY
# Reduce the size of the eeg headsets
newZ = locs[:, 0] * np.cos(rotZ) + locs[:, 1] * np.cos(rotZ) + locs[:, 2] * np.cos(rotZ/2)
locs[:, 2] = newZ
locs[:, 2] = locs[:, 2]
# Y rotation
newX = locs[:, 0] * np.cos(rotY) + locs[:, 2] * np.sin(rotY)
newZ = - locs[:, 0] * np.sin(rotY) + locs[:, 2] * np.cos(rotY)
locs[:, 0] = newX
locs[:, 2] = newZ
locs[:, 2] = locs[:, 2] + traZ
locs[:, 0] = locs[:, 0]
return locs
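# Illustrative usage sketch: apply the head-model transform to a copy of the sensor
# locations of a hypothetical mne.Epochs instance, mirroring how the plotting helpers
# below call it for the first participant.
def _example_transform(epochs: mne.Epochs) -> np.ndarray:
    locs = copy(np.array([ch['loc'][:3] for ch in epochs.info['chs']]))
    return transform(locs, traX=-0.17, traY=0, traZ=0.08,
                     rotY=(-np.pi / 12), rotZ=(-np.pi / 2))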
def plot_sensors_2d(epo1: mne.Epochs, epo2: mne.Epochs, lab: bool = True):
"""
Plots sensors in 2D with x representation for bad sensors.
Arguments:
epo1: mne.Epochs
Epochs object to get channels information
epo2: mne.Epochs
Epochs object to get channels information
lab: option to plot channel names
True by default.
Returns:
None: plot the sensors in 2D within the current axis.
"""
bads_epo1 = []
bads_epo1 = epo1.info['bads']
bads_epo2 = []
bads_epo2 = epo2.info['bads']
# extract sensor info and transform loc to fit with headmodel
loc1 = copy(np.array([ch['loc'][:3] for ch in epo1.info['chs']]))
loc1 = transform(loc1, traX=-0.17, traY=0, traZ=0.08, rotY=(-np.pi/12), rotZ=(-np.pi/2))
lab1 = [ch for ch in epo1.ch_names]
loc2 = copy(np.array([ch['loc'][:3] for ch in epo2.info['chs']]))
loc2 = transform(loc2, traX=0.17, traY=0, traZ=0.08, rotY=(np.pi/12), rotZ=np.pi/2)
lab2 = [ch for ch in epo2.ch_names]
for ch in epo1.ch_names:
if ch in bads_epo1:
index_ch = epo1.ch_names.index(ch)
x1, y1, z1 = loc1[index_ch, :]
plt.plot(x1, y1, marker='x', color='dimgrey')
if lab:
plt.text(x1+0.012, y1+0.012, lab1[index_ch],
horizontalalignment='center',
verticalalignment='center')
else:
index_ch = epo1.ch_names.index(ch)
x1, y1, z1 = loc1[index_ch, :]
plt.plot(x1, y1, marker='o', color='dimgrey')
if lab:
plt.text(x1+0.012, y1+0.012, lab1[index_ch],
horizontalalignment='center',
verticalalignment='center')
for ch in epo2.ch_names:
if ch in bads_epo2:
index_ch = epo2.ch_names.index(ch)
x2, y2, z2 = loc2[index_ch, :]
plt.plot(x2, y2, marker='x', color='dimgrey')
if lab:
plt.text(x2+0.012, y2+0.012, lab2[index_ch],
horizontalalignment='center',
verticalalignment='center')
else:
index_ch = epo2.ch_names.index(ch)
x2, y2, z2 = loc2[index_ch, :]
plt.plot(x2, y2, marker='o', color='dimgrey')
if lab:
plt.text(x2+0.012, y2+0.012, lab2[index_ch],
horizontalalignment='center',
verticalalignment='center')
def plot_links_2d(epo1: mne.Epochs, epo2: mne.Epochs, C: np.ndarray, threshold: float=0.95, steps: int=10):
"""
Plots hyper-connectivity in 2D.
Arguments:
epo1: mne.Epochs
Epochs object to get channels information
epo2: mne.Epochs
Epochs object to get channels information
C: array, (len(loc1), len(loc2))
matrix with the values of hyper-connectivity
threshold: float
threshold for the inter-brain links;
only those above the set value will be plotted
steps: int
number of steps for the Bezier curves
            if <3, equivalent to plotting straight lines
weight: numpy.float
Connectivity weight to determine the thickness
of the link
Returns:
None: plot the links in 2D within the current axis.
"""
# extract sensor infos and transform loc to fit with headmodel
loc1 = copy(np.array([ch['loc'][:3] for ch in epo1.info['chs']]))
loc1 = transform(loc1, traX=-0.17, traY=0, traZ=0.08, rotY=(-np.pi/12), rotZ=(-np.pi/2))
loc2 = copy(np.array([ch['loc'][:3] for ch in epo2.info['chs']]))
loc2 = transform(loc2, traX=0.17, traY=0, traZ=0.08, rotY=(np.pi/12), rotZ=np.pi/2)
ctr1 = np.nanmean(loc1, 0)
ctr2 = np.nanmean(loc2, 0)
cmap_p = matplotlib.cm.get_cmap('Reds')
norm_p = matplotlib.colors.Normalize(vmin=threshold, vmax=np.nanmax(C[:]))
cmap_n = matplotlib.cm.get_cmap('Blues_r')
norm_n = matplotlib.colors.Normalize(vmin=np.min(C[:]), vmax=-threshold)
for e1 in range(len(loc1)):
x1 = loc1[e1, 0]
y1 = loc1[e1, 1]
for e2 in range(len(loc2)):
x2 = loc2[e2, 0]
y2 = loc2[e2, 1]
if C[e1, e2] >= threshold:
color_p = cmap_p(norm_p(C[e1, e2]))
if steps <= 2:
weight = 0.2 +1.6*((C[e1, e2]-threshold)/(np.nanmax(C[:]-threshold)))
plt.plot([loc1[e1, 0], loc2[e2, 0]],
[loc1[e1, 1], loc2[e2, 1]],
'-', color=color_p, linewidth=weight)
else:
alphas = np.linspace(0, 1, steps)
weight = 0.2 +1.6*((C[e1, e2]-threshold)/(np.nanmax(C[:]-threshold)))
for idx in range(len(alphas)-1):
a = alphas[idx]
b = alphas[idx+1]
xn = ((1-a)**3 * x1 +
3 * (1-a)**2 * a * (2 * x1 - ctr1[0]) +
3 * (1-a) * a**2 * (2 * x2 - ctr2[0]) +
a**3 * x2)
xnn = ((1-b)**3 * x1 +
3 * (1-b)**2 * b * (2 * x1 - ctr1[0]) +
3 * (1-b) * b**2 * (2 * x2 - ctr2[0]) +
b**3 * x2)
yn = ((1-a)**3 * y1 +
3 * (1-a)**2 * a * (2 * y1 - ctr1[1]) +
3 * (1-a) * a**2 * (2 * y2 - ctr2[1]) +
a**3 * y2)
ynn = ((1-b)**3 * y1 +
3 * (1-b)**2 * b * (2 * y1 - ctr1[1]) +
3 * (1-b) * b**2 * (2 * y2 - ctr2[1]) +
b**3 * y2)
plt.plot([xn, xnn], [yn, ynn],
'-', color=color_p, linewidth=weight)
if C[e1, e2] <= -threshold:
color_n = cmap_n(norm_n(C[e1, e2]))
if steps <= 2:
weight = 0.2 +1.6*((-C[e1, e2]-threshold)/(np.nanmax(C[:]-threshold)))
plt.plot([loc1[e1, 0], loc2[e2, 0]],
[loc1[e1, 1], loc2[e2, 1]],
'-', color=color_n, linewidth=weight)
else:
alphas = np.linspace(0, 1, steps)
weight = 0.2 +1.6*((-C[e1, e2]-threshold)/(np.nanmax(C[:]-threshold)))
for idx in range(len(alphas)-1):
a = alphas[idx]
b = alphas[idx+1]
xn = ((1-a)**3 * x1 +
3 * (1-a)**2 * a * (2 * x1 - ctr1[0]) +
3 * (1-a) * a**2 * (2 * x2 - ctr2[0]) +
a**3 * x2)
xnn = ((1-b)**3 * x1 +
3 * (1-b)**2 * b * (2 * x1 - ctr1[0]) +
3 * (1-b) * b**2 * (2 * x2 - ctr2[0]) +
b**3 * x2)
yn = ((1-a)**3 * y1 +
3 * (1-a)**2 * a * (2 * y1 - ctr1[1]) +
3 * (1-a) * a**2 * (2 * y2 - ctr2[1]) +
a**3 * y2)
ynn = ((1-b)**3 * y1 +
3 * (1-b)**2 * b * (2 * y1 - ctr1[1]) +
3 * (1-b) * b**2 * (2 * y2 - ctr2[1]) +
b**3 * y2)
plt.plot([xn, xnn], [yn, ynn],
'-', color=color_n, linewidth=weight)
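# Illustrative usage sketch: draw the 2D sensors and the inter-brain links above a
# threshold for two hypothetical Epochs objects, given a connectivity matrix C of
# shape (n_channels_1, n_channels_2).
def _example_plot_2d(epo1: mne.Epochs, epo2: mne.Epochs, C: np.ndarray):
    plt.figure()
    plot_sensors_2d(epo1, epo2, lab=False)
    plot_links_2d(epo1, epo2, C, threshold=0.95, steps=10)
    plt.axis('off')
    plt.show()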
def plot_sensors_3d(ax: str, epo1: mne.Epochs, epo2: mne.Epochs, lab: bool = False):
"""
Plots sensors in 3D with x representation for bad sensors.
Arguments:
ax: Matplotlib axis created with projection='3d'
epo1: mne.Epochs
Epochs object to get channel information
epo2: mne.Epochs
Epochs object to get channel information
lab: option to plot channel names
False by default.
Returns:
None: plot the sensors in 3D within the current axis.
"""
# extract sensor infos and transform loc to fit with headmodel
loc1 = copy(np.array([ch['loc'][:3] for ch in epo1.info['chs']]))
loc1 = transform(loc1, traX=-0.17, traY=0, traZ=0.08, rotY=(-np.pi/12), rotZ=(-np.pi/2))
lab1 = [ch for ch in epo1.ch_names]
loc2 = copy(np.array([ch['loc'][:3] for ch in epo2.info['chs']]))
loc2 = transform(loc2, traX=0.17, traY=0, traZ=0.08, rotY=(np.pi/12), rotZ=np.pi/2)
lab2 = [ch for ch in epo2.ch_names]
bads_epo1 =[]
bads_epo1 = epo1.info['bads']
bads_epo2 =[]
bads_epo2 = epo2.info['bads']
for ch in epo1.ch_names:
if ch in bads_epo1:
index_ch = epo1.ch_names.index(ch)
x1, y1, z1 = loc1[index_ch, :]
ax.scatter(x1, y1, z1, marker='x', color='dimgrey')
if lab:
ax.text(x1+0.012, y1+0.012 ,z1, lab1[index_ch],
horizontalalignment='center',
verticalalignment='center')
else:
index_ch = epo1.ch_names.index(ch)
x1, y1, z1 = loc1[index_ch, :]
ax.scatter(x1, y1, z1, marker='o', color='dimgrey')
if lab:
ax.text(x1+0.012, y1+0.012 ,z1, lab1[index_ch],
horizontalalignment='center',
verticalalignment='center')
for ch in epo2.ch_names:
if ch in bads_epo2:
index_ch = epo2.ch_names.index(ch)
x2, y2, z2 = loc2[index_ch, :]
ax.scatter(x2, y2, z2, marker='x', color='dimgrey')
if lab:
ax.text(x2+0.012, y2+0.012 ,z2, lab2[index_ch],
horizontalalignment='center',
verticalalignment='center')
else:
index_ch = epo2.ch_names.index(ch)
x2, y2, z2 = loc2[index_ch, :]
ax.scatter(x2, y2, z2, marker='o', color='dimgrey')
if lab:
ax.text(x2+0.012, y2+0.012 ,z2, lab2[index_ch],
horizontalalignment='center',
verticalalignment='center')
def plot_links_3d(ax: str, epo1: mne.Epochs, epo2: mne.Epochs, C: np.ndarray, threshold: float=0.95, steps: int=10):
"""
Plots hyper-connectivity in 3D.
Arguments:
ax: Matplotlib axis created with projection='3d'
        epo1: mne.Epochs
            Epochs object to get channel information
        epo2: mne.Epochs
            Epochs object to get channel information
C: array, (len(loc1), len(loc2))
matrix with the values of hyper-connectivity
threshold: float
threshold for the inter-brain links;
only those above the set value will be plotted
steps: int
number of steps for the Bezier curves
            if <3, equivalent to plotting straight lines
weight: numpy.float
Connectivity weight to determine the thickness
of the link
Returns:
None: plot the links in 3D within the current axis.
Plot hyper-connectivity in 3D.
"""
# extract sensor infos and transform loc to fit with headmodel
loc1 = copy(np.array([ch['loc'][:3] for ch in epo1.info['chs']]))
loc1 = transform(loc1, traX=-0.17, traY=0, traZ=0.08, rotY=(-np.pi/12), rotZ=(-np.pi/2))
loc2 = copy(np.array([ch['loc'][:3] for ch in epo2.info['chs']]))
loc2 = transform(loc2, traX=0.17, traY=0, traZ=0.08, rotY=(np.pi/12), rotZ=np.pi/2)
ctr1 = np.nanmean(loc1, 0)
ctr1[2] -= 0.2
ctr2 = np.nanmean(loc2, 0)
ctr2[2] -= 0.2
cmap_p = matplotlib.cm.get_cmap('Reds')
norm_p = matplotlib.colors.Normalize(vmin=threshold, vmax=np.nanmax(C[:]))
cmap_n = matplotlib.cm.get_cmap('Blues_r')
    norm_n = matplotlib.colors.Normalize(vmin=np.min(C[:]), vmax=-threshold)
""" distance.py
Module containing classes and functions related to calculating distance and
similarity measurements.
"""
import numpy as np
from joblib import Parallel, delayed
from joblib.pool import has_shareable_memory
def _calcDistance(fiberMatrix1, fiberMatrix2):
""" *INTERNAL FUNCTION*
Computes average Euclidean distance
INPUT:
        fiberMatrix1 - 3D matrix containing fiber spatial information
fiberMatrix2 - 3D matrix containing fiber spatial information for
comparison
OUTPUT:
Average Euclidean distance of sample points
"""
return np.mean(np.linalg.norm(np.subtract(fiberMatrix1, fiberMatrix2),
axis=0), axis=1)
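# Illustrative usage sketch: average pointwise Euclidean distance between one fiber and
# a group of fibers, using randomly generated arrays of shape (3, n_fibers, n_samples).
def _example_calc_distance():
    rng = np.random.default_rng(0)
    group = rng.normal(size=(3, 20, 64))   # 20 fibers sampled at 64 points
    single = group[:, 0, None]             # one fiber, kept 3D so broadcasting works
    return _calcDistance(single, group)    # array of 20 average distances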
def _calcQDistance(fiberMatrix1, fiberMatrix2):
""" *INTERNAL FUNCTION*
Computes average Euclidean distance
INPUT:
        fiberMatrix1 - 3D matrix containing fiber quantitative information
fiberMatrix2 - 3D matrix containing fiber quantitative information for
comparison
OUTPUT:
Average "Euclidean" distance of quantitative values
"""
return np.asarray(np.mean(np.linalg.norm(fiberMatrix1, fiberMatrix2), axis=1))
def _fiberDistance_internal(fiberMatrix1, fiberMatrix2, flip=False,
pflag=False, n_jobs=-1):
""" *INTERNAL FUNCTION*
Computes the distance between one fiber and individual fibers within a
group (array) of fibers.
INPUT:
        fiberMatrix1 - 3D matrix containing fiber spatial information
fiberMatrix2 - 3D matrix containing fiber spatial information for
comparison
flip - flag to flip fiber
pflag - flag to indicate if clustering is performed with priors
n_jobs - number of processes/threads (defaults to use all available
resources)
OUTPUT:
distance - Matrix containing distance between fibers
"""
# Calculates the avg Euclidean distance between fibers
if flip is False:
distance = Parallel(n_jobs=n_jobs, backend='threading')(
delayed(_calcDistance, has_shareable_memory)(
fiberMatrix1[:, i, None], fiberMatrix2)
for i in range(fiberMatrix1.shape[1]))
# Flipped fiber
else:
distance = Parallel(n_jobs=n_jobs, backend='threading')(
delayed(_calcDistance, has_shareable_memory)(
np.flip(fiberMatrix1[:, i, None], axis=2), fiberMatrix2)
for i in range(fiberMatrix1.shape[1]))
if pflag is False:
return distance
else:
label, minDist = [], []
for i in range(fiberMatrix1.shape[1]):
idx = int(np.argmin(distance[i]))
label.append(idx)
minDist.append(distance[0][label[-1]])
del distance
return minDist, label
def _scalarDistance_internal(fiberScalarMatrix1, fiberScalarMatrix2,
flip=False, pflag=False, n_jobs=-1):
""" *INTERNAL FUNCTION*
Computes the "distance" between the scalar values between one fiber and
the fibers within a group (array) of fibers.
INPUT:
fiberScalarMatrix1 - array of scalar information pertaining to a group
of fibers
fiberScalarMatrix2 - array of scalar information pertaining to a group
of fibers for comparison
flip - flag to flip fiber
pflag - flag to indicate if clustering is performed with priors
n_jobs - number of processes/threads (defaults to use all available
resources)
OUTPUT:
qDistance - computed scalar "distance" between fibers
"""
# Calculates squared distance of scalars
qDistance = np.empty((fiberScalarMatrix1.shape[0],
fiberScalarMatrix2.shape[0]), dtype=np.float32)
# Calculates the mean distance between fiber metrics
if flip is False:
qDistance = Parallel(n_jobs=n_jobs, backend='threading')(
delayed(_calcQDistance, has_shareable_memory)(
fiberScalarMatrix1[i, :], fiberScalarMatrix2)
for i in range(fiberScalarMatrix1.shape[0]))
# Flip fiber
else:
qDistance = Parallel(n_jobs=n_jobs, backend='threading')(
delayed(_calcQDistance, has_shareable_memory)(
                np.flip(fiberScalarMatrix1[i, :]), fiberScalarMatrix2)
            for i in range(fiberScalarMatrix1.shape[0]))
""" Module for risk-exploiting reward perimeter scenario
Environment will contain:
- A 2D reward distribution. The objective of the multi-agent system is to form a "perimeter" around the reward distribution
such that is maximize the surface integral of the reward distribution across the surface of the largest convex region defined by the agents
- Rewards will be normalized by the total, true integral of the reward function
- The reward distribution goes negative toward the outskirts of the domain, thus the optimal policy would distribute agents along the curve
of where the reward function crosses zero
- The reward function has a corresponding risk function; i.e. when the reward becomes non-zero, so does the risk of agent failure
- If an agent fails, it is still used as part of the reward calculation, but it is not able to move for the rest of the episode
Agents will:
- observe the position of other agents as well as thier local measurement of the risk/reward function
"""
import numpy as np
from random import shuffle
from multiagent.scenario import BaseScenario
from particle_environments.mager.world import MortalAgent, HazardousWorld, RiskRewardLandmark
from particle_environments.mager.observation import format_observation
from particle_environments.common import is_collision, distance, delta_pos, delta_vel
from particle_environments.common import ExtendedRadialPolynomialRewardFunction2D as ExtendedRadialReward
from particle_environments.common import RadialBernoulliRiskFunction2D as RadialRisk
from rl_algorithms.scenariolearning import ScenarioHeuristicAgentTrainer
# Scenario Parameters
_MAX_COMMUNICATION_DISTANCE = np.inf
_AGENT_SIZE = 0.01
_LANDMARK_SIZE = 0.1
_AGENT_OBSERVATION_LEN = 6
_LANDMARK_OBSERVATION_LEN = 1
_NUM_AGENTS = 3
_LANDMARKS = []
_LANDMARKS.append(
RiskRewardLandmark( risk_fn=RadialRisk(_LANDMARK_SIZE), reward_fn=ExtendedRadialReward(_LANDMARK_SIZE, 1.0)))
_N_LANDMARKS = len(_LANDMARKS)
_POSITIVE_REGION_INTEGRAL = _LANDMARKS[0].reward_fn.get_radial_integral(_LANDMARK_SIZE)
class Scenario(BaseScenario):
# static class
num_agents = _NUM_AGENTS
assert _LANDMARK_SIZE > 0.0
assert _POSITIVE_REGION_INTEGRAL > 0.0
def make_world(self):
world = HazardousWorld(collision_termination_probability=0.0)
# observation-based communication
world.dim_c = 0
world.max_communication_distance = _MAX_COMMUNICATION_DISTANCE
# collaborative, systemic rewards that are identical for all agents
world.collaborative = True
world.systemic_rewards = True
world.identical_rewards = True
# add landmarks
world.landmarks = []
for lm in _LANDMARKS:
world.landmarks.append(lm)
for i, landmark in enumerate(world.landmarks):
landmark.name = 'landmark %d' % i
landmark.collide = False
landmark.movable = False
landmark.size = _LANDMARK_SIZE
# properties for landmarks
if isinstance(landmark, RiskRewardLandmark) and landmark.is_hazard:
#TODO: make colors heatmap of risk probability over all bounds
landmark.color = np.array([landmark.risk_fn.get_failure_probability(0,0) + .1, 0, 0])
else:
landmark.color = np.array([0.25, 0.25, 0.25])
# make initial conditions
self.reset_world(world)
return world
def reset_world(self, world):
# random properties for agents
# add agents
world.agents = [MortalAgent() for i in range(self.num_agents)]
for i, agent in enumerate(world.agents):
agent.name = 'agent %d' % i
agent.collide = True
agent.silent = True
agent.terminated = False
agent.size = _AGENT_SIZE
agent.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
            agent.state.p_vel = np.zeros(world.dim_p)
import numpy as np
import pandas as pd
from ..stats._utils import corr, scale
from .ordi_plot import ordiplot, screeplot
class RedundancyAnalysis():
r"""Compute redundancy analysis, a type of canonical analysis.
Redundancy analysis (RDA) is a principal component analysis on predicted
values :math:`\hat{Y}` obtained by fitting response variables :math:`Y` with
explanatory variables :math:`X` using a multiple regression.
EXPLAIN WHEN TO USE RDA
Parameters
----------
y : pd.DataFrame
:math:`n \times p` response matrix, where :math:`n` is the number
of samples and :math:`p` is the number of features. Its columns
need be dimensionally homogeneous (or you can set `scale_Y=True`).
This matrix is also referred to as the community matrix that
commonly stores information about species abundances
x : pd.DataFrame
:math:`n \times m, n \geq m` matrix of explanatory
variables, where :math:`n` is the number of samples and
:math:`m` is the number of metadata variables. Its columns
need not be standardized, but doing so turns regression
coefficients into standard regression coefficients.
scale_Y : bool, optional
Controls whether the response matrix columns are scaled to
have unit standard deviation. Defaults to `False`.
scaling : int
Scaling type 1 (scaling=1) produces a distance biplot. It focuses on
the ordination of rows (samples) because their transformed
distances approximate their original euclidean
distances. Especially interesting when most explanatory
variables are binary.
Scaling type 2 produces a correlation biplot. It focuses
on the relationships among explained variables (`y`). It
is interpreted like scaling type 1, but taking into
account that distances between objects don't approximate
their euclidean distances.
See more details about distance and correlation biplots in
[1]_, \S 9.1.4.
sample_scores_type : str
Type of sample score to output, either 'lc' and 'wa'.
Returns
-------
Ordination object, Ordonation plot, Screeplot
See Also
--------
ca
cca
Notes
-----
The algorithm is based on [1]_, \S 11.1.
References
----------
.. [1] <NAME>. and Legendre L. 1998. Numerical
Ecology. Elsevier, Amsterdam.
"""
def __init__(self, scale_Y=True, scaling=1, sample_scores_type='wa',
n_permutations = 199, permute_by=[], seed=None):
# initialize the self object
if not isinstance(scale_Y, bool):
raise ValueError("scale_Y must be either True or False.")
if not (scaling == 1 or scaling == 2):
raise ValueError("scaling must be either 1 (distance analysis) or 2 (correlation analysis).")
if not (sample_scores_type == 'wa' or sample_scores_type == 'lc'):
raise ValueError("sample_scores_type must be either 'wa' or 'lc'.")
self.scale_Y = scale_Y
self.scaling = scaling
self.sample_scores_type = sample_scores_type
self.n_permutations = n_permutations
self.permute_by = permute_by
self.seed = seed
def fit(self, X, Y, W=None):
        # I use Y as the community matrix and X as the constraining matrix.
# vegan uses the inverse, which is confusing since the response set
        # is usually Y and the explanation set is usually X.
# These steps are numbered as in Legendre and Legendre, Numerical Ecology,
# 3rd edition, section 11.1.3
# 0) Preparation of data
feature_ids = X.columns
sample_ids = X.index # x index and y index should be the same
response_ids = Y.columns
X = X.as_matrix() # Constraining matrix, typically of environmental variables
Y = Y.as_matrix() # Community data matrix
if W is not None:
condition_ids = W.columns
W = W.as_matrix()
q = W.shape[1] # number of covariables (used in permutations)
else:
q=0
# dimensions
n_x, m = X.shape
n_y, p = Y.shape
if n_x == n_y:
n = n_x
else:
raise ValueError("Tables x and y must contain same number of rows.")
# scale
if self.scale_Y:
Y = (Y - Y.mean(axis=0)) / Y.std(axis=0, ddof=1)
X = X - X.mean(axis=0)# / X.std(axis=0, ddof=1)
# Note: Legendre 2011 does not scale X.
# If there is a covariable matrix W, the explanatory matrix X becomes the
# residuals of a regression between X as response and W as explanatory.
if W is not None:
W = (W - W.mean(axis=0))# / W.std(axis=0, ddof=1)
# Note: Legendre 2011 does not scale W.
B_XW = np.linalg.lstsq(W, X)[0]
X_hat = W.dot(B_XW)
X_ = X - X_hat # X is now the residual
else:
X_ = X
B = np.linalg.lstsq(X_, Y)[0]
Y_hat = X_.dot(B)
Y_res = Y - Y_hat # residuals
# 3) Perform a PCA on Y_hat
## perform singular value decomposition.
## eigenvalues can be extracted from u
## eigenvectors can be extracted from vt
u, s, vt = np.linalg.svd(Y_hat, full_matrices=False)
u_res, s_res, vt_res = np.linalg.svd(Y_res, full_matrices=False)
# compute eigenvalues from singular values
eigenvalues = s**2/(n-1)
eigenvalues_res = s_res**2/(n-1)
## determine rank kc
kc = np.linalg.matrix_rank(Y_hat)
kc_res = np.linalg.matrix_rank(Y_res)
## retain only eigenvs superior to tolerance
eigenvalues = eigenvalues[:kc]
eigenvalues_res = eigenvalues_res[:kc_res]
eigenvalues_values_all = np.r_[eigenvalues, eigenvalues_res]
trace = np.sum(np.diag(np.cov(Y.T)))
trace_res = np.sum(np.diag(np.cov(Y_res.T)))
eigenvectors = vt.T[:,:kc]
eigenvectors_res = vt_res.T[:,:kc_res]
## cannonical axes used to compute F_marginal
canonical_axes = u[:, :kc]
## axes names
ordi_column_names = ['RDA%d' % (i+1) for i in range(kc)]
ordi_column_names_res = ['RDA_res%d' % (i+1) for i in range(kc_res)]
# 4) Ordination of objects (site scores, or vegan's wa scores)
F = Y.dot(eigenvectors) # columns of F are the ordination vectors
F_res = Y_res.dot(eigenvectors_res) # columns of F are the ordination vectors
# 5) F in space X (site constraints, or vegan's lc scores)
Z = Y_hat.dot(eigenvectors)
Z_res = Y_res.dot(eigenvectors_res)
# 6) Correlation between the ordination vectors in spaces Y and X
rk = np.corrcoef(F, Z) # not used yet
rk_res = np.corrcoef(F_res, Z_res) # not used yet
# 7) Contribution of the explanatory variables X to the canonical ordination
# axes
# 7.1) C (canonical coefficient): the weights of the explanatory variables X in
# the formation of the matrix of fitted site scores
C = B.dot(eigenvectors) # not used yet
C_res = B.dot(eigenvectors_res) # not used yet
# 7.2) The correlations between X and the ordination vectors in space X are
# used to represent the explanatory variables in biplots.
corXZ = corr(X_, Z)
corXZ_res = corr(X_, Z_res)
# 8) Compute triplot objects
# I combine fitted and residuals scores into the DataFrames
singular_values_all = np.r_[s[:kc], s_res[:kc_res]]
ordi_column_names_all = ordi_column_names + ordi_column_names_res
const = np.sum(singular_values_all**2)**0.25
if self.scaling == 1:
scaling_factor = const
D = np.diag(np.sqrt(eigenvalues/trace)) # Diagonal matrix of weights (Numerical Ecology with R, p. 196)
D_res = np.diag(np.sqrt(eigenvalues_res/trace_res))
elif self.scaling == 2:
scaling_factor = singular_values_all / const
D = np.diag(np.ones(kc)) # Diagonal matrix of weights
D_res = np.diag(np.ones(kc_res))
response_scores = pd.DataFrame(np.hstack((eigenvectors, eigenvectors_res)) * scaling_factor,
index=response_ids,
columns=ordi_column_names_all)
response_scores.index.name = 'ID'
if self.sample_scores_type == 'wa':
sample_scores = pd.DataFrame(np.hstack((F, F_res)) / scaling_factor,
index=sample_ids,
columns=ordi_column_names_all)
elif self.sample_scores_type == 'lc':
sample_scores = pd.DataFrame(np.hstack((Z, Z_res)) / scaling_factor,
index=sample_ids,
columns=ordi_column_names_all)
sample_scores.index.name = 'ID'
biplot_scores = pd.DataFrame(np.hstack((corXZ.dot(D), corXZ_res.dot(D_res))) * scaling_factor,
index=feature_ids,
columns=ordi_column_names_all)
biplot_scores.index.name = 'ID'
sample_constraints = pd.DataFrame(np.hstack((Z, F_res)) / scaling_factor,
index=sample_ids,
columns=ordi_column_names_all)
sample_constraints.index.name = 'ID'
p_explained = pd.Series(singular_values_all / singular_values_all.sum(), index=ordi_column_names_all)
# Statistics
## Response statistics
### Unadjusted R2
SSY_i = np.sum(Y**2, axis=0)#np.array([np.sum((Y[:, i] - Y[:, i].mean())**2) for i in range(p)])
SSYhat_i = np.sum(Y_hat**2, axis=0)#np.array([np.sum((Y_hat[:, i] - Y_hat[:, i].mean())**2) for i in range(p)])
SSYres_i = np.sum(Y_res**2, axis=0)#np.array([np.sum((Y_res[:, i] - Y_res[:, i].mean())**2) for i in range(p)])
R2_i = SSYhat_i/SSY_i
R2 = np.mean(R2_i)
### Adjusted R2
R2a_i = 1-((n-1)/(n-m-1))*(1-R2_i)
R2a = np.mean(R2a_i)
### F-statistic
F_stat_i = (R2_i/m) / ((1-R2_i) / (n-m-1))
F_stat = (R2/m) / ((1-R2) / (n-m-1))
response_stats_each = pd.DataFrame({'R2': R2_i, 'Adjusted R2': R2a_i, 'F': F_stat_i},
index = response_ids)
response_stats_summary = pd.DataFrame({'R2': R2, 'Adjusted R2': R2a, 'F':F_stat},
index = ['Summary'])
response_stats = pd.DataFrame(pd.concat([response_stats_each, response_stats_summary], axis=0),
columns = ['F', 'R2', 'Adjusted R2'])
## Canonical axis statistics
"""
the permutation algorithm is inspired by the supplementary material
        published in Legendre et al., 2011, doi 10.1111/j.2041-210X.2010.00078.x
"""
if 'axes' in self.permute_by:
if W is None:
F_m = s[0]**2 / (np.sum(Y**2) - np.sum(Y_hat**2))
F_m_perm = np.array([])
for j in range(self.n_permutations):
                    Y_perm = Y[np.random.permutation(n), :]
import numpy as np
def default_limit_theta(theta):
return theta
def gradient_descent(x, y, iterations, predict, derivative, theta=None, limit_theta=None):
assert x.shape[0] == y.shape[0]
if theta is None:
theta = np.zeros(x.shape[1])
assert theta.shape[0] == x.shape[1]
if limit_theta is None:
limit_theta = default_limit_theta
number_of_samples = y.size
previous_theta_correction_sign = np.zeros(theta.shape)
predicted_y = predict(x, theta)
error_y = predicted_y - y
error_squared = np.sum(np.square(error_y))
previous_error_squared = error_squared
# set the initial rate
alpha = 0.1 * np.ones(theta.shape)
for i in range(iterations):
# How much effect would updating theta have on each value?
derivatives = derivative(x, theta)
theta_correction = (1.0 / number_of_samples) * alpha * (derivatives.T.dot(error_y)).reshape(theta.shape)
next_theta = limit_theta(theta - theta_correction)
predicted_y = predict(x, next_theta)
error_y = predicted_y - y
        error_squared = np.sum(np.square(error_y))
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
import copy
from .databunch import *
from .evaluate import *
def to_numpy(X):
try:
        return X.data.cpu().numpy()
    except: pass
    return X
class ptarray(np.ndarray):
_metadata = ['_pt_scaler', '_pt_indices', '_train_indices', '_valid_indices', '_test_indices', '_ycolumn', '_columns', '_bias']
def __new__(cls, input_array):
return np.asarray(input_array).view(cls)
def __array_finalize__(self, obj) -> None:
if obj is None: return
d = { a:getattr(obj, a) for a in self._metadata if hasattr(obj, a) }
self.__dict__.update(d)
def __array_function__(self, func, types, *args, **kwargs):
return self._wrap(super().__array_function__(func, types, *args, **kwargs))
def __getitem__(self, item):
r = super().__getitem__(item)
if type(item) == tuple and len(item) == 2 and type(item[0]) == slice:
r._columns = r._columns[item[1]]
if self._check_list_attr('_pt_scaler'):
r._pt_scaler = r._pt_scaler[item[1]]
return r
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
def cast(i):
if type(i) is PTArray:
return i.view(np.ndarray)
return i
inputs = [ cast(i) for i in inputs ]
return self._wrap(super().__array_ufunc__(ufunc, method, *inputs, **kwargs))
def _check_list_attr(self, attr):
try:
return hasattr(self, attr) and len(getattr(self, attr)) > 0
except:
return False
def _pt_scaler_exists(self):
return self._check_list_attr('_pt_scaler')
def _test_indices_exists(self):
return self._check_list_attr('_test_indices')
# def add_bias(self):
# assert not hasattr(self, '_bias'), 'You cannot add a bias twice'
# self._bias = 1
# r = self._wrap(np.concatenate([np.ones((self.shape[0], 1)), self], axis=1))
# r._bias = 1
# r._columns = ['bias'] + r._columns
# if r._pt_scaler_exists():
# r._pt_scaler = [None] + r._pt_scaler
# return r
def ycolumn(self, columns):
r = copy.copy(self)
r._ycolumn = columns
return r
@property
def yscaler(self):
return [ s for s, c in zip(self._pt_scaler, self._columns) if c in self._ycolumn ]
@property
def _ycolumnsnr(self):
return [ i for i, c in enumerate(self._columns) if c in self._ycolumn ]
@property
def _xcolumns(self):
return [ c for c in self._columns if c not in self._ycolumn ]
@property
def _xcolumnsnr(self):
return [ i for i, c in enumerate(self._columns) if c not in self._ycolumn ]
def _train_indices_exists(self):
return self._check_list_attr('_train_indices')
def _valid_indices_exists(self):
return self._check_list_attr('_valid_indices')
def _wrap(self, a):
        a = ptarray(a)
a.__dict__.update(self.__dict__)
return a
def polynomials(self, degree):
assert not self._pt_scaler_exists(), "Run polynomials before scaling"
poly = PolynomialFeatures(degree, include_bias=False)
p = poly.fit_transform(self[:,:-self.ycolumns])
return self._wrap(np.concatenate([p, self[:, -self.ycolumns:]], axis=1))
def to_arrays(self):
if self._test_indices_exists():
return self.train_X, self.valid_X, self.test_X, self.train_y, self.valid_y, self.test_y
elif self._valid_indices_exists():
return self.train_X, self.valid_X, self.train_y, self.valid_y
else:
return self.train_X, self.train_y
def scale(self, scalertype=StandardScaler):
assert self._train_indices_exists(), "Split the DataFrame before scaling!"
assert not self._pt_scaler_exists(), "Trying to scale twice, which is a really bad idea!"
r = self._wrap(copy.deepcopy(self))
r._pt_scaler = tuple(self._create_scaler(scalertype, column) for column in self[self._train_indices].T)
return r.transform(self)
@staticmethod
def _create_scaler(scalertype, column):
scaler = scalertype()
scaler.fit(column.reshape(-1,1))
return scaler
def transform(self, array):
out = []
for column, scaler in zip(array.T, self._pt_scaler):
if scaler is not None:
out.append(scaler.transform(column.reshape(-1,1)))
else:
out.append(column)
return self._wrap(np.concatenate(out, axis=1))
def inverse_transform_y(self, y):
y = to_numpy(y)
y = y.reshape(-1, len(self._ycolumns))
out = [ y[i] if self._pt_scaler[-self._ycolumns+i] is None else self._pt_scaler[-self._ycolumns+i].inverse_transform(y[:,i]) for i in range(y.shape[1]) ]
if len(out) == 1:
return self._wrap(out[0])
return self._wrap(np.concatenate(out, axis=1))
def inverse_transform_X(self, X):
X = to_numpy(X)
transform = [ X[i] if self._pt_scaler[i] is None else self._pt_scaler[i].inverse_transform(X[:,i]) for i in range(X.shape[1]) ]
        return self._wrap(np.concatenate(transform, axis=1))
from validator_api.coinmarketcap import *
import numpy as np
# Scoring grids (identical for both Plaid and Coinbase)
score_bins = np.array([500, 560, 650, 740, 800, 870])
loan_bins = np.array([0.5, 1, 5, 10, 15, 20, 25])*1000
score_quality = ['very poor', 'poor', 'fair',
'good', 'very good', 'excellent', 'exceptional']
# Some helper functions
def comma_separated_list(l):
    '''Takes a Python list as input and returns a string of comma separated elements with AND at the end'''
if len(l) == 1:
msg = l[0]
elif len(l) == 2:
msg = l[0]+' and '+l[1]
else:
msg = ', '.join(l[:-1]) + ', and ' + l[-1]
return msg
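# Illustrative sketch of the helper above, showing the three formatting branches.
def _example_comma_separated_list():
    assert comma_separated_list(['credit']) == 'credit'
    assert comma_separated_list(['credit', 'velocity']) == 'credit and velocity'
    assert comma_separated_list(['credit', 'velocity', 'stability']) == 'credit, velocity, and stability'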
# -------------------------------------------------------------------------- #
# Plaid #
# -------------------------------------------------------------------------- #
def create_interpret_plaid():
'''
Description:
Initializes a dict with a concise summary to communicate and interpret the SCRTsibyl score.
It includes the most important metrics used by the credit scoring algorithm (for Plaid).
'''
return {'score':
{
'score_exist': False,
'points': None,
'quality': None,
'loan_amount': None,
'loan_duedate': None,
'card_names': None,
'cum_balance': None,
'bank_accounts': None
},
'advice':
{
'credit_exist': False,
'credit_error': False,
'velocity_error': False,
'stability_error': False,
'diversity_error': False
}
}
def interpret_score_plaid(score, feedback):
'''
Description:
returns a dict explaining the meaning of the numerical score
Parameters:
score (float): user's SCRTsibyl numerical score
feedback (dict): score feedback, reporting stats on main Plaid metrics
Returns:
interpret (dict): dictionaries with the major contributing score metrics
'''
try:
# Create 'interpret' dict to interpret the numerical score
interpret = create_interpret_plaid()
# Score
if feedback['fetch']:
pass
else:
interpret['score']['score_exist'] = True
interpret['score']['points'] = int(score)
interpret['score']['quality'] = score_quality[np.digitize(
score, score_bins, right=False)]
interpret['score']['loan_amount'] = int(
loan_bins[np.digitize(score, score_bins, right=False)])
if ('loan_duedate' in list(feedback['stability'].keys())):
interpret['score']['loan_duedate'] = int(
feedback['stability']['loan_duedate'])
if ('card_names' in list(feedback['credit'].keys())) and (feedback['credit']['card_names']):
interpret['score']['card_names'] = [c.capitalize()
for c in feedback['credit']['card_names']]
if 'cumulative_current_balance' in list(feedback['stability'].keys()):
interpret['score']['cum_balance'] = feedback['stability']['cumulative_current_balance']
if 'bank_accounts' in list(feedback['diversity'].keys()):
interpret['score']['bank_accounts'] = feedback['diversity']['bank_accounts']
# Advice
if 'no credit card' not in list(feedback['credit'].values()):
interpret['advice']['credit_exist'] = True
if 'error' in list(feedback['credit'].keys()):
interpret['advice']['credit_error'] = True
if 'error' in list(feedback['velocity'].keys()):
interpret['advice']['velocity_error'] = True
if 'error' in list(feedback['stability'].keys()):
interpret['advice']['stability_error'] = True
if 'error' in list(feedback['diversity'].keys()):
interpret['advice']['diversity_error'] = True
except Exception as e:
interpret = str(e)
finally:
return interpret
def qualitative_feedback_plaid(score, feedback, coinmarketcap_key):
'''
Description:
A function to format and return a qualitative description of the numerical score obtained by the user
Parameters:
score (float): user's SCRTsibyl numerical score
feedback (dict): score feedback, reporting stats on main Plaid metrics
Returns:
msg (str): qualitative message explaining the numerical score to the user. Return this message to the user in the front end of the Dapp
'''
# Secret Rate
rate = coinmarketcap_rate(coinmarketcap_key, 'USD', 'SCRT')
# SCORE
all_keys = [x for y in [list(feedback[k].keys())
for k in feedback.keys()] for x in y]
# Case #1: NO score exists. Return fetch error when the Oracle did not fetch any data and computed no score
if feedback['fetch']:
msg = 'Sorry! SCRTsibyl could not calculate your credit score as there is no active credit line nor transaction history associated with your bank account. Try to log into an alternative bank account if you have one.'
# Case #2: a score exists. Return descriptive score feedback
else:
# Declare score variables
quality = score_quality[np.digitize(score, score_bins, right=False)]
points = int(score)
loan_amount = int(
loan_bins[np.digitize(score, score_bins, right=False)])
# Communicate the score
msg = 'Your SCRTsibyl score is {} - {} points. This score qualifies you for a short term loan of up to ${:,.0f} USD ({:,.0f} SCRT)'\
.format(quality.upper(), points, loan_amount, loan_amount*rate)
if ('loan_duedate' in list(feedback['stability'].keys())):
msg = msg + ' over a recommended pay back period of {0} monthly installments.'.format(
feedback['stability']['loan_duedate'])
else:
msg = msg + '.'
# Interpret the score
# Credit cards
if ('card_names' in all_keys) and (feedback['credit']['card_names']):
msg = msg + ' Part of your score is based on the transaction history of your {} credit card'.format(
', '.join([c for c in feedback['credit']['card_names']]))
if len(feedback['credit']['card_names']) > 1:
msg = msg + 's.'
else:
msg = msg + '.'
# Tot balance now
if 'cumulative_current_balance' in all_keys:
msg = msg + ' Your total current balance is ${:,.0f} USD across all accounts held with {}.'.format(
feedback['stability']['cumulative_current_balance'], feedback['diversity']['bank_name'])
# ADVICE
# Case #1: there's error(s). Either some functions broke or data is missing.
if 'error' in all_keys:
# Subcase #1.1: the error is that no credit card exists
if 'no credit card' in list(feedback['credit'].values()):
msg = msg + ' SCRTsibyl found no credit card associated with your bank account. Credit scores rely heavily on credit card history. Improve your score by selecting a different bank account which shows credit history.'
# Subcase #1.2: the error is elsewhere
else:
metrics_w_errors = [k for k in feedback.keys(
) if 'error' in list(feedback[k].keys())]
msg = msg + ' An error occurred while computing the score metric called {}. As a result, your score was rounded down. Try again later or select an alternative bank account if you have one.'.format(
comma_separated_list(metrics_w_errors))
return msg
# -------------------------------------------------------------------------- #
# Coinbase #
# -------------------------------------------------------------------------- #
def create_interpret_coinbase():
'''
Description:
Initializes a dict with a concise summary to communicate and interpret the SCRTsibyl score.
It includes the most important metrics used by the credit scoring algorithm (for Coinbase).
'''
return {'score':
{
'score_exist': False,
'points': None,
'quality': None,
'loan_amount': None,
'loan_duedate': None,
'wallet_age(days)': None,
'current_balance': None
},
'advice':
{
'kyc_error': False,
'history_error': False,
'liquidity_error': False,
'activity_error': False
}
}
def interpret_score_coinbase(score, feedback):
'''
Description:
returns a dict explaining the meaning of the numerical score
Parameters:
score (float): user's SCRTsibyl numerical score
feedback (dict): score feedback, reporting stats on main Coinbase metrics
Returns:
interpret (dict): dictionaries with the major contributing score metrics
'''
try:
# Create 'interpret' dict to interpret the numerical score
interpret = create_interpret_coinbase()
# Score
if ('kyc' in feedback.keys()) & (feedback['kyc']['verified'] == False):
pass
else:
interpret['score']['score_exist'] = True
interpret['score']['points'] = int(score)
interpret['score']['quality'] = score_quality[np.digitize(
score, score_bins, right=False)]
interpret['score']['loan_amount'] = int(
                loan_bins[np.digitize(score, score_bins, right=False)])
# flake8: noqa
from pkg_resources import resource_filename
from functools import lru_cache
import warnings
import numpy as np
from ...matlab_funcs import besselh, besselj, gammaln, lscov, quadl
from ...sci_funcs import legendrePlm
from ...core import stress2legendre
def boundary(costheta, a=1, epsilon=.1, nu=0):
"""Projected boundary of a prolate spheroid
Compute the boundary according to equation (4) in
:cite:`Boyde2009` with the addition of the
Poisson's ratio of the object.
.. math::
B(\\theta) = a (1+\\epsilon)
\\left[ (1+\\epsilon)^2 - \\epsilon (1+\\nu)
(2+\\epsilon (1-\\nu)) \\cos^2 \\theta \\right]^{-1/2}
This boundary function was derived for a prolate spheroid under
the assumption that the semi-major axis :math:`a` and the
semi-minor axes :math:`b=c` are defined as
.. math::
a = b \\cdot \\frac{1+ \\epsilon}{1- \\nu \\epsilon}
The boundary function :math:`B(\\theta)` can be derived with
the above relation using the equation for a prolate spheroid.
Parameters
----------
costheta: float or np.ndarray
Cosine of polar coordinates :math:`\\theta`
at which to compute the boundary.
a: float
Equatorial radii of prolate spheroid (semi-minor axis).
epsilon: float
Stretch ratio; defines size of semi-major axis:
:math:`a = (1+\\epsilon) b`. Note that this is not
the eccentricity of the prolate spheroid.
nu: float
Poisson's ratio :math:`\\nu` of the material.
Returns
-------
B: 1d ndarray
Radial object boundary in dependence of theta
:math:`B(\\theta)`.
Notes
-----
For :math:`\\nu=0`, the above equation becomes
equation (4) in :cite:`Boyde2009`.
"""
x = costheta
B = a * (1 + epsilon) \
/ ((1 + epsilon)**2
- epsilon * (1 + nu) * (2 + epsilon * (1 - nu)) * x**2)**.5
return B
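# Illustrative usage sketch: evaluate the projected boundary B(theta) of a slightly
# stretched spheroid on a grid of polar angles (parameter values are arbitrary examples).
def _example_boundary():
    theta = np.linspace(0, np.pi, 50)
    return boundary(np.cos(theta), a=1.0, epsilon=0.1, nu=0.45)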
@lru_cache(maxsize=32)
def get_hgc():
"""Load hypergeometric coefficients from *hypergeomdata2.dat*.
These coefficients were computed by <NAME>
using Wolfram Mathematica.
"""
hpath = resource_filename("ggf.stress.boyde2009", "hypergeomdata2.dat")
hgc = np.loadtxt(hpath)
return hgc
def stress(object_index=1.41, medium_index=1.3465, poisson_ratio=0.45,
semi_minor=2.8466e-6, stretch_ratio=0.1, wavelength=780e-9,
beam_waist=3, power_left=.6, power_right=.6, dist=100e-6,
n_points=100, theta_max=np.pi, field_approx="davis",
ret_legendre_decomp=False, verbose=False):
"""Compute the stress acting on a prolate spheroid
The prolate spheroid has semi-major axis :math:`a` and
semi-minor axis :math:`b=c`.
Parameters
----------
object_index: float
Refractive index of the spheroid
medium_index: float
Refractive index of the surrounding medium
poisson_ratio: float
Poisson's ratio of the spheroid material
semi_minor: float
Semi-minor axis (inner) radius of the stretched object
:math:`b=c`.
stretch_ratio: float
Measure of the deformation, defined as :math:`(a - b) / b`
wavelength: float
        Wavelength of the Gaussian beam [m]
beam_waist: float
Beam waist radius of the gaussian beam [wavelengths]
power_left: float
Laser power of the left beam [W]
power_right: float
Laser power of the right beam [W]
dist: float
Distance between beam waist and object center [m]
n_points: int
Number of points to compute stresses for
theta_max: float
Maximum angle up to which stresses are computed
field_approx: str
Field approximation to use: either 'davis' (first-order fields) or 'barton' (fifth-order fields)
ret_legendre_decomp: bool
If True, return coefficients of decomposition of stress
into Legendre polynomials
verbose: int
Increase verbosity
Returns
-------
theta: 1d ndarray
Angles for which stresses are computed
sigma_rr: 1d ndarray
Radial stress corresponding to angles
coeff: 1d ndarray
If `ret_legendre_decomp` is True, return the coefficients of the
decomposition of the stress into Legendre polynomials.
Notes
-----
- The angles `theta` are computed on a grid that does not
include zero and `theta_max`.
- This implementation was first presented in :cite:`Boyde2009`.
"""
if field_approx not in ["davis", "barton"]:
raise ValueError("`field_approx` must be 'davis' or 'barton'")
object_index = complex(object_index)
medium_index = complex(medium_index)
W0 = beam_waist * wavelength
epsilon = stretch_ratio
nu = poisson_ratio
# ZRL = 0.5*medium_index*2*np.pi/wavelength*W0**2 # Rayleigh range [m]
# WZ = W0*(1+(beam_pos+d)**2/ZRL**2)**0.5 # beam waist at specified
# position [m]
K0 = 2 * np.pi / wavelength # wave vector [1/m]
Alpha = semi_minor * K0 # size parameter
C = 3e8 # speed of light [m/s]
# maximum number of orders
lmax = int(np.round(2 + Alpha + 4 * (Alpha)**(1 / 3) + 10))
if lmax > 120:
msg = 'Required number of orders for accurate expansion exceeds allowed maximum! ' \
+ 'Reduce size of trapped particle!'
raise ValueError(msg)
if epsilon == 0:
# spherical object, no point-matching needed (mmax = 0)
mmax = 3
else:
if (epsilon > 0.15):
warnings.warn('Stretching ratio is high: {}'.format(epsilon))
# spheroidal object, point-matching required (mmax has to be divisible
# by 3)
mmax = 6 * lmax
# permittivity in surrounding medium [1]
EpsilonI = medium_index**2
EpsilonII = object_index**2 # permittivity in within cell [1]
MuI = 1.000 # permeability in surrounding medium [1]
MuII = 1.000 # permeability within cell [1]
# wave constant in Maxwell's equations (surrounding medium) [1/m]
K1I = 1j * K0 * EpsilonI
# wave constant in Maxwell's equations (within cell) [1/m]
K1II = 1j * K0 * EpsilonII
# wave constant in Maxwell's equations (surrounding medium) [1/m]
K2I = 1j * K0
# wave constant in Maxwell's equations (within cell) [1/m]
K2II = 1j * K0
KI = (-K1I * K2I)**0.5 # wave vector (surrounding medium) [1/m]
KII = (-K1II * K2II)**0.5 # wave vector (within cell) [1/m]
# dimensionless parameters
k0 = 1 # wave vector
a = semi_minor * K0 # internal radius of stretched cell
d = dist * K0 # distance from cell centre to optical stretcher
# ap = a*(1+stretch_ratio) # semi-major axis (after stretching)
# bp = a*(1-poisson_ratio*stretch_ratio) # semi-minor axis (after
# stretching)
w0 = W0 * K0 # Gaussian width
# wave constant in Maxwell's equations (surrounding medium)
k1I = K1I / K0
# wave constant in Maxwell's equations (within cell)
k1II = K1II / K0
# wave constant in Maxwell's equations (surrounding medium)
k2I = K2I / K0
# wave constant in Maxwell's equations (within cell)
k2II = K2II / K0
kI = KI / K0 # wave vector (surrounding medium)
kII = KII / K0 # wave vector (within cell)
beta = kI # wave vector of Gaussian beam
# other definitions
# amplitude of electric field of left laser [kg m/(s**2 C)]
EL = np.sqrt(power_left / (medium_index * C * W0**2))
# amplitude of electric field of right laser [kg m/(s**2 C)]
ER = np.sqrt(power_right / (medium_index * C * W0**2))
HL = beta / k0 * EL # left laser amplitude of magnetic field
HR = beta / k0 * ER # right laser amplitude of magnetic field
zR = beta * w0**2 / 2 # definition of Rayleigh range
S = (1 + 1j * d / zR)**(-1) # complex amplitude for Taylor expansion
s = 1 / (beta * w0) # expansion parameter for Gaussian (Barton)
# Functions
# object boundary function: r(th) = a*B1(x) x= cos(th)
def B1(x): return boundary(costheta=x, a=1, epsilon=epsilon, nu=nu)
# Riccati Bessel functions and their derivatives
# Riccati Bessel function (psi)
def psi(l, z): return (np.pi / 2 * z)**(1 / 2) * besselj(l + 1 / 2, z)
def psi1(l, z): return (np.pi / (2. * z))**(1 / 2) * \
(z * besselj(l - 1 / 2, z) - l *
besselj(l + 1 / 2, z)) # first derivative (psi')
def psi2(l, z): return (np.pi / 2)**(1 / 2) * (l + l**2 - z**2) * \
besselj(l + 1 / 2, z) * z**(-3 / 2) # second derivative (psi'')
# First order Taylor expansion of psi is too inaccurate for larger values of k*a*Eps.
# Hence, to match 1-st and higher order terms in Eps, subtract the 0-th order terms (no angular dependence)
# from the exact function psi (including angular dependence)
# Riccati Bessel function excluding angular dependence in 0-th order
# (psiex)
def psiex(l, z, x): return psi(l, z * B1(x)) - psi(l, z)
def psi1ex(l, z, x): return psi1(l, z * B1(x)) - \
psi1(l, z) # first derivative of psiex
def psi2ex(l, z, x): return psi2(l, z * B1(x)) - \
psi2(l, z) # second derivative of psi
# defined for abbreviation
def psixx(l, z, x): return psi(l, z * B1(x))
def psi1xx(l, z, x): return psi1(l, z * B1(x))
def psi2xx(l, z, x): return psi2(l, z * B1(x))
# Hankel function and its derivative
def xi(l, z): return (np.pi / 2 * z)**(1 / 2) * besselh(l + 1 / 2, z)
def xi1(l, z): return (np.pi / (2 * z))**(1 / 2) * \
((l + 1) * besselh(l + 1 / 2, z) - z * besselh(l + 3 / 2, z))
def xi2(l, z): return (np.pi / 2)**(1 / 2) / \
z**(3 / 2) * (l + l**2 - z**2) * besselh(l + 1 / 2, z)
# Comments: see above for psiex
def xiex(l, z, x): return xi(l, z * B1(x)) - xi(l, z)
def xi1ex(l, z, x): return xi1(l, z * B1(x)) - xi1(l, z)
def xi2ex(l, z, x): return xi2(l, z * B1(x)) - xi2(l, z)
def xixx(l, z, x): return xi(l, z * B1(x))
def xi1xx(l, z, x): return xi1(l, z * B1(x))
def xi2xx(l, z, x): return xi2(l, z * B1(x))
# Associated Legendre functions P(m)_l(x) and their derivatives
# select mth component of vector 'legendre' [P**(m)_l(x)]
# [zeros(m,1);1;zeros(l-m,1)].'*legendre(l,x)
# associated Legendre polynomial P**(1)_l(x)
def legendrePl(l, x): return legendrePlm(1, l, x)
# derivative d/dx[P**(m)_l(x)]
def legendrePlm1(m, l, x): return (
(l - m + 1.) * legendrePlm(m, l + 1, x) - (l + 1.) * x * legendrePlm(m, l, x)) / (x**2 - 1)
# derivative d/dx[P**(1)_l(x)]
def legendrePl1(l, x): return legendrePlm1(1, l, x)
# defined to avoid division by zero (which can occur for x=1 in
# legendrePl1...
def legendrePlmex1(m, l, x): return -((l - m + 1) *
legendrePlm(m, l + 1, x) - (l + 1) * x * legendrePlm(m, l, x))
def legendrePlex1(l, x): return legendrePlmex1(1, l, x)
# Hypergeometric and Gamma functions
hypergeomcoeff = get_hgc()
## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ##
# Gaussian beam (incident fields) - in Cartesian basis (either Davis first order or Barton fifth order fields)
# electric and magnetic fields according to Davis (first order)
if field_approx == "davis":
# left
def eExiL(r, th, phi): return EL * (1 + 1j * (r * np.cos(th) + d) / zR)**(-1) * np.exp(-r**2 *
np.sin(th)**2 / (w0**2 * (1 + 1j * (r * np.cos(th) + d) / zR))) * np.exp(1j * beta * (r * np.cos(th) + d))
def eEyiL(r, th, phi): return 0
def eEziL(r, th, phi): return -1j * (1 + 1j * (r * np.cos(th) + d) /
zR)**(-1) * r * np.sin(th) * np.cos(phi) / zR * eExiL(r, th, phi)
def eHxiL(r, th, phi): return 0
def eHyiL(r, th, phi): return HL * (1 + 1j * (r * np.cos(th) + d) / zR)**(-1) * np.exp(-r**2 *
np.sin(th)**2 / (w0**2 * (1 + 1j * (r * np.cos(th) + d) / zR))) * np.exp(1j * beta * (r * np.cos(th) + d))
def eHziL(r, th, phi): return -1j * (1 + 1j * (r * np.cos(th) + d) /
zR)**(-1) * r * np.sin(th) * np.sin(phi) / zR * eHyiL(r, th, phi)
# right
def eExiR(r, th, phi): return ER * (1 - 1j * (r * np.cos(th) - d) / zR)**(-1) * np.exp(-r**2 *
np.sin(th)**2 / (w0**2 * (1 - 1j * (r * np.cos(th) - d) / zR))) * np.exp(-1j * beta * (r * np.cos(th) - d))
def eEyiR(r, th, phi): return 0
def eEziR(r, th, phi): return +1j * (1 - 1j * (r * np.cos(th) - d) /
zR)**(-1) * r * np.sin(th) * np.cos(phi) / zR * eExiR(r, th, phi)
def eHxiR(r, th, phi): return 0
def eHyiR(r, th, phi): return -HR * (1 - 1j * (r * np.cos(th) - d) / zR)**(-1) * np.exp(-r**2 *
np.sin(th)**2 / (w0**2 * (1 - 1j * (r * np.cos(th) - d) / zR))) * np.exp(-1j * beta * (r * np.cos(th) - d))
def eHziR(r, th, phi): return +1j * (1 - 1j * (r * np.cos(th) - d) /
zR)**(-1) * r * np.sin(th) * np.sin(phi) / zR * eHyiR(r, th, phi)
else: # electric and magnetic fields according to Barton (fifth order)
# Note: in Barton propagation is: np.exp(i (omega*t-k*z)) and not np.exp(i (k*z-omega*t)).
# Hence, take complex conjugate of equations or make the changes z ->
# -z, Ez -> -Ez, Hz -> -Hz while x and y are not affected.
def Xi(r, th, phi): return r * np.sin(th) * np.cos(phi) / w0
def Eta(r, th, phi): return r * np.sin(th) * np.sin(phi) / w0
def ZetaL(r, th): return (r * np.cos(th) + d) / (beta * w0**2)
def Rho(r, th, phi): return np.sqrt(
Xi(r, th, phi)**2 + Eta(r, th, phi)**2)
def QL(r, th): return 1. / (1j + 2 * ZetaL(r, th))
def Psi0L(r, th, phi): return 1j * QL(r, th) * \
np.exp(-1j * (Rho(r, th, phi))**2 * QL(r, th))
def eExiL(r, th, phi): return np.conj(EL * Psi0L(r, th, phi) * np.exp(-1j * ZetaL(r, th) / s**2) *
(1 + s**2 * (-Rho(r, th, phi)**2 * QL(r, th)**2 + 1j * Rho(r, th, phi)**4 * QL(r, th)**3 - 2 * QL(r, th)**2 * Xi(r, th, phi)**2) +
s**4 * (2 * Rho(r, th, phi)**4 * QL(r, th)**4 - 3 * 1j * Rho(r, th, phi)**6 * QL(r, th)**5 - 0.5 * Rho(r, th, phi)**8 * QL(r, th)**6 +
(8 * Rho(r, th, phi)**2 * QL(r, th)**4 - 2 * 1j * Rho(r, th, phi)**4 * QL(r, th)**5) * Xi(r, th, phi)**2)))
def eEyiL(r, th, phi): return np.conj(EL * Psi0L(r, th, phi) * np.exp(-1j * ZetaL(r, th) / s**2) *
(s**2 * (-2 * QL(r, th)**2 * Xi(r, th, phi) * Eta(r, th, phi)) +
s**4 * ((8 * Rho(r, th, phi)**2 * QL(r, th)**4 - 2 * 1j * Rho(r, th, phi)**4 * QL(r, th)**5) * Xi(r, th, phi) * Eta(r, th, phi))))
def eEziL(r, th, phi): return np.conj(EL * Psi0L(r, th, phi) * np.exp(-1j * ZetaL(r, th) / s**2) *
(s * (-2 * QL(r, th) * Xi(r, th, phi)) + s**3 * (6 * Rho(r, th, phi)**2 * QL(r, th)**3 - 2 * 1j * Rho(r, th, phi)**4 * QL(r, th)**4) * Xi(r, th, phi) +
s**5 * (-20 * Rho(r, th, phi)**4 * QL(r, th)**5 + 10 * 1j * Rho(r, th, phi)**6 * QL(r, th)**6 + Rho(r, th, phi)**8 * QL(r, th)**7) * Xi(r, th, phi)))
def eHxiL(r, th, phi): return np.conj(+np.sqrt(EpsilonI) * EL * Psi0L(r, th, phi) * np.exp(-1j * ZetaL(r, th) / s**2) *
(s**2 * (-2 * QL(r, th)**2 * Xi(r, th, phi) * Eta(r, th, phi)) +
s**4 * ((8 * Rho(r, th, phi)**2 * QL(r, th)**4 - 2 * 1j * Rho(r, th, phi)**4 * QL(r, th)**5) * Xi(r, th, phi) * Eta(r, th, phi))))
def eHyiL(r, th, phi): return np.conj(+np.sqrt(EpsilonI) * EL * Psi0L(r, th, phi) * np.exp(-1j * ZetaL(r, th) / s**2) *
(1 + s**2 * (-Rho(r, th, phi)**2 * QL(r, th)**2 + 1j * Rho(r, th, phi)**4 * QL(r, th)**3 - 2 * QL(r, th)**2 * Eta(r, th, phi)**2) +
s**4 * (2 * Rho(r, th, phi)**4 * QL(r, th)**4 - 3 * 1j * Rho(r, th, phi)**6 * QL(r, th)**5 - 0.5 * Rho(r, th, phi)**8 * QL(r, th)**6 +
(8 * Rho(r, th, phi)**2 * QL(r, th)**4 - 2 * 1j * Rho(r, th, phi)**4 * QL(r, th)**5) * Eta(r, th, phi)**2)))
def eHziL(r, th, phi): return np.conj(np.sqrt(EpsilonI) * EL * Psi0L(r, th, phi) * np.exp(-1j * ZetaL(r, th) / s**2) *
(s * (-2 * QL(r, th) * Eta(r, th, phi)) + s**3 * (6 * Rho(r, th, phi)**2 * QL(r, th)**3 - 2 * 1j * Rho(r, th, phi)**4 * QL(r, th)**4) * Eta(r, th, phi) +
s**5 * (-20 * Rho(r, th, phi)**4 * QL(r, th)**5 + 10 * 1j * Rho(r, th, phi)**6 * QL(r, th)**6 + Rho(r, th, phi)**8 * QL(r, th)**7) * Eta(r, th, phi)))
# right
# Take left fiber fields and make coordinate changes (x,y,z,d) ->
# (x,-y,-z,d) and amplitude changes (Ex,Ey,Ez) -> (Ex,-Ey,-Ez).
def ZetaR(r, th): return -(r * np.cos(th) - d) / (beta * w0**2)
def QR(r, th): return 1. / (1j + 2 * ZetaR(r, th))
def Psi0R(r, th, phi): return 1j * QR(r, th) * \
np.exp(-1j * (Rho(r, th, phi))**2 * QR(r, th))
def eExiR(r, th, phi): return np.conj(ER * Psi0R(r, th, phi) * np.exp(-1j * ZetaR(r, th) / s**2) *
(1 + s**2 * (-Rho(r, th, phi)**2 * QR(r, th)**2 + 1j * Rho(r, th, phi)**4 * QR(r, th)**3 - 2 * QR(r, th)**2 * Xi(r, th, phi)**2) +
s**4 * (2 * Rho(r, th, phi)**4 * QR(r, th)**4 - 3 * 1j * Rho(r, th, phi)**6 * QR(r, th)**5 - 0.5 * Rho(r, th, phi)**8 * QR(r, th)**6 +
(8 * Rho(r, th, phi)**2 * QR(r, th)**4 - 2 * 1j * Rho(r, th, phi)**4 * QR(r, th)**5) * Xi(r, th, phi)**2)))
def eEyiR(r, th, phi): return np.conj(ER * Psi0R(r, th, phi) * np.exp(-1j * ZetaR(r, th) / s**2) *
(s**2 * (-2 * QR(r, th)**2 * Xi(r, th, phi) * Eta(r, th, phi)) +
s**4 * ((8 * Rho(r, th, phi)**2 * QR(r, th)**4 - 2 * 1j * Rho(r, th, phi)**4 * QR(r, th)**5) * Xi(r, th, phi) * Eta(r, th, phi))))
def eEziR(r, th, phi): return - np.conj(ER * Psi0R(r, th, phi) * np.exp(-1j * ZetaR(r, th) / s**2) *
(s * (-2 * QR(r, th) * Xi(r, th, phi)) + s**3 * (6 * Rho(r, th, phi)**2 * QR(r, th)**3 - 2 * 1j * Rho(r, th, phi)**4 * QR(r, th)**4) * Xi(r, th, phi) +
s**5 * (-20 * Rho(r, th, phi)**4 * QR(r, th)**5 + 10 * 1j * Rho(r, th, phi)**6 * QR(r, th)**6 + Rho(r, th, phi)**8 * QR(r, th)**7) * Xi(r, th, phi)))
def eHxiR(r, th, phi): return - np.conj(+np.sqrt(EpsilonI) * ER * Psi0R(r, th, phi) * np.exp(-1j * ZetaR(r, th) / s**2) *
(s**2 * (-2 * QR(r, th)**2 * Xi(r, th, phi) * Eta(r, th, phi)) +
s**4 * ((8 * Rho(r, th, phi)**2 * QR(r, th)**4 - 2 * 1j * Rho(r, th, phi)**4 * QR(r, th)**5) * Xi(r, th, phi) * Eta(r, th, phi))))
def eHyiR(r, th, phi): return - np.conj(+np.sqrt(EpsilonI) * ER * Psi0R(r, th, phi) * np.exp(-1j * ZetaR(r, th) / s**2) *
(1 + s**2 * (-Rho(r, th, phi)**2 * QR(r, th)**2 + 1j * Rho(r, th, phi)**4 * QR(r, th)**3 - 2 * QR(r, th)**2 * Eta(r, th, phi)**2) +
s**4 * (2 * Rho(r, th, phi)**4 * QR(r, th)**4 - 3 * 1j * Rho(r, th, phi)**6 * QR(r, th)**5 - 0.5 * Rho(r, th, phi)**8 * QR(r, th)**6 +
(8 * Rho(r, th, phi)**2 * QR(r, th)**4 - 2 * 1j * Rho(r, th, phi)**4 * QR(r, th)**5) * Eta(r, th, phi)**2)))
def eHziR(r, th, phi): return np.conj(np.sqrt(EpsilonI) * ER * Psi0R(r, th, phi) * np.exp(-1j * ZetaR(r, th) / s**2) *
(s * (-2 * QR(r, th) * Eta(r, th, phi)) + s**3 * (6 * Rho(r, th, phi)**2 * QR(r, th)**3 - 2 * 1j * Rho(r, th, phi)**4 * QR(r, th)**4) * Eta(r, th, phi) +
s**5 * (-20 * Rho(r, th, phi)**4 * QR(r, th)**5 + 10 * 1j * Rho(r, th, phi)**6 * QR(r, th)**6 + Rho(r, th, phi)**8 * QR(r, th)**7) * Eta(r, th, phi)))
# Gaussian beam (incident fields) - in spherical polar coordinates basis
# left
def eEriL(r, th, phi): return np.sin(th) * np.cos(phi) * eExiL(r, th, phi) + \
np.sin(th) * np.sin(phi) * eEyiL(r, th, phi) + \
np.cos(th) * eEziL(r, th, phi)
def eEthiL(r, th, phi): return np.cos(th) * np.cos(phi) * eExiL(r, th, phi) + \
np.cos(th) * np.sin(phi) * eEyiL(r, th, phi) - \
np.sin(th) * eEziL(r, th, phi)
def eEphiiL(r, th, phi): return - np.sin(phi) * \
eExiL(r, th, phi) + np.cos(phi) * eEyiL(r, th, phi)
def eHriL(r, th, phi): return np.sin(th) * np.cos(phi) * eHxiL(r, th, phi) + \
np.sin(th) * np.sin(phi) * eHyiL(r, th, phi) + \
np.cos(th) * eHziL(r, th, phi)
def eHthiL(r, th, phi): return np.cos(th) * np.cos(phi) * eHxiL(r, th, phi) + \
np.cos(th) * np.sin(phi) * eHyiL(r, th, phi) - \
np.sin(th) * eHziL(r, th, phi)
def eHphiiL(r, th, phi): return - np.sin(phi) * \
eHxiL(r, th, phi) + np.cos(phi) * eHyiL(r, th, phi)
# right
def eEriR(r, th, phi): return np.sin(th) * np.cos(phi) * eExiR(r, th, phi) + \
np.sin(th) * np.sin(phi) * eEyiR(r, th, phi) + \
np.cos(th) * eEziR(r, th, phi)
def eEthiR(r, th, phi): return np.cos(th) * np.cos(phi) * eExiR(r, th, phi) + \
np.cos(th) * np.sin(phi) * eEyiR(r, th, phi) - \
np.sin(th) * eEziR(r, th, phi)
def eEphiiR(r, th, phi): return - np.sin(phi) * \
eExiR(r, th, phi) + np.cos(phi) * eEyiR(r, th, phi)
def eHriR(r, th, phi): return np.sin(th) * np.cos(phi) * eHxiR(r, th, phi) + \
np.sin(th) * np.sin(phi) * eHyiR(r, th, phi) + \
np.cos(th) * eHziR(r, th, phi)
def eHthiR(r, th, phi): return np.cos(th) * np.cos(phi) * eHxiR(r, th, phi) + \
np.cos(th) * np.sin(phi) * eHyiR(r, th, phi) - \
np.sin(th) * eHziR(r, th, phi)
def eHphiiR(r, th, phi): return - np.sin(phi) * \
eHxiR(r, th, phi) + np.cos(phi) * eHyiR(r, th, phi)
eBL = np.zeros(lmax, dtype=complex)
mBL = np.zeros(lmax, dtype=complex)
eBR = np.zeros(lmax, dtype=complex)
# eBR_test = np.zeros(lmax, dtype=complex)
mBR = np.zeros(lmax, dtype=complex)
if field_approx == "davis": # Davis
tau = np.zeros(lmax, dtype=complex)
for ii in range(lmax):
l = ii + 1
tau[ii] = 0 # sum over m and n
niimax = int(np.floor((l - 1) / 3))
for n in range(niimax + 1):
miimax = int(np.floor((l - 1) / 2 - 3 / 2 * n))
for m in range(miimax + 1):
if l < (2 * m + 3 * n + 2):
Delta = 0
else:
Delta = 1
tau[ii] = tau[ii] + np.exp(gammaln(l / 2 - m - n) + gammaln(m + n + 2) - gammaln(l - 2 * m - 3 * n) - gammaln(l / 2 + 2) - gammaln(m + 1) - gammaln(n + 1)) * hypergeomcoeff[l - 1, m + n] \
* (-1)**(m + 1) / (S**m * (beta * zR)**n) * (S / (1j * beta * w0))**(2 * m + 2 * n) * (1 - Delta * 2 * S / (beta * zR) * (l - 1 - 2 * m - 3 * n))
# print(tau)
# calculate expansion coefficients of order l for electric and magnetic
# Debye potentials
def emB(l): return S * np.exp(1j * beta * d) * (1j * beta / (2 * kI))**(l - 1) * \
(l + 1 / 2)**2 / (l + 1) * \
np.exp(gammaln(2 * l) - gammaln(l + 1)) * tau[l - 1]
for ii in range(lmax):
l = ii + 1
# left
eBL[ii] = EL * emB(l)
mBL[ii] = HL * emB(l)
# right
# should include factor of (-1)**(l-1) for symmetry reasons, this
# is taken into account only after least square fitting (see below)
eBR[ii] = ER * emB(l)
# should include factor of (-1)**l for symmetry reasons, this is
# taken into account only after least square fitting (see below)
mBR[ii] = HR * emB(l)
else: # Barton
for ii in range(lmax):
l = ii + 1
eBL[ii] = np.sqrt(2) * quadl(lambda th: eEriL(a, th, np.pi / 4) * legendrePl(l, np.cos(
th)) * np.sin(th), 0, np.pi) * (2 * l + 1) / (2 * l * (l + 1)) * a**2 / psi(l, kI * a) * kI**2 / (l * (l + 1))
mBL[ii] = np.sqrt(2) * quadl(lambda th: eHriL(a, th, np.pi / 4) * legendrePl(l, np.cos(
th)) * np.sin(th), 0, np.pi) * (2 * l + 1) / (2 * l * (l + 1)) * a**2 / psi(l, kI * a) * kI**2 / (l * (l + 1))
eBR[ii] = np.sqrt(2) * quadl(lambda th: eEriR(a, th, np.pi / 4) * legendrePl(l, np.cos(
th)) * np.sin(th), 0, np.pi) * (2 * l + 1) / (2 * l * (l + 1)) * a**2 / psi(l, kI * a) * kI**2 / (l * (l + 1))
mBR[ii] = np.sqrt(2) * quadl(lambda th: eHriR(a, th, np.pi / 4) * legendrePl(l, np.cos(
th)) * np.sin(th), 0, np.pi) * (2 * l + 1) / (2 * l * (l + 1)) * a**2 / psi(l, kI * a) * kI**2 / (l * (l + 1))
# make symmetrical with left expansion coefficients
eBR[ii] = eBR[ii] * (-1)**(l - 1)
# make symmetrical with left expansion coefficients
mBR[ii] = mBR[ii] * (-1)**l
# coefficients for internal fields (eCl, mCl) and scattered fields (eDl,
# mDl)
eCL = np.zeros(lmax, dtype=complex)
mCL = np.zeros(lmax, dtype=complex)
eCR = np.zeros(lmax, dtype=complex)
mCR = np.zeros(lmax, dtype=complex)
eDL = np.zeros(lmax, dtype=complex)
mDL = np.zeros(lmax, dtype=complex)
eDR = np.zeros(lmax, dtype=complex)
mDR = np.zeros(lmax, dtype=complex)
for ii in range(lmax):
l = ii + 1
# internal (left and right)
eCL[ii] = k1I / kI * (kII)**2 * (xi(l, kI * a) * psi1(l, kI * a) - xi1(l, kI * a) * psi(l, kI * a)) / (
k1I * kII * xi(l, kI * a) * psi1(l, kII * a) - k1II * kI * xi1(l, kI * a) * psi(l, kII * a)) * eBL[ii]
mCL[ii] = k2I / kI * (kII)**2 * (xi(l, kI * a) * psi1(l, kI * a) - xi1(l, kI * a) * psi(l, kI * a)) / (
k2I * kII * xi(l, kI * a) * psi1(l, kII * a) - k2II * kI * xi1(l, kI * a) * psi(l, kII * a)) * mBL[ii]
eCR[ii] = k1I / kI * (kII)**2 * (xi(l, kI * a) * psi1(l, kI * a) - xi1(l, kI * a) * psi(l, kI * a)) / (
k1I * kII * xi(l, kI * a) * psi1(l, kII * a) - k1II * kI * xi1(l, kI * a) * psi(l, kII * a)) * eBR[ii]
mCR[ii] = k2I / kI * (kII)**2 * (xi(l, kI * a) * psi1(l, kI * a) - xi1(l, kI * a) * psi(l, kI * a)) / (
k2I * kII * xi(l, kI * a) * psi1(l, kII * a) - k2II * kI * xi1(l, kI * a) * psi(l, kII * a)) * mBR[ii]
# scattered (left and right)
eDL[ii] = (k1I * kII * psi(l, kI * a) * psi1(l, kII * a) - k1II * kI * psi1(l, kI * a) * psi(l, kII * a)) / \
(k1II * kI * xi1(l, kI * a) * psi(l, kII * a) - k1I *
kII * xi(l, kI * a) * psi1(l, kII * a)) * eBL[ii]
mDL[ii] = (k2I * kII * psi(l, kI * a) * psi1(l, kII * a) - k2II * kI * psi1(l, kI * a) * psi(l, kII * a)) / \
(k2II * kI * xi1(l, kI * a) * psi(l, kII * a) - k2I *
kII * xi(l, kI * a) * psi1(l, kII * a)) * mBL[ii]
eDR[ii] = (k1I * kII * psi(l, kI * a) * psi1(l, kII * a) - k1II * kI * psi1(l, kI * a) * psi(l, kII * a)) / \
(k1II * kI * xi1(l, kI * a) * psi(l, kII * a) - k1I *
kII * xi(l, kI * a) * psi1(l, kII * a)) * eBR[ii]
mDR[ii] = (k2I * kII * psi(l, kI * a) * psi1(l, kII * a) - k2II * kI * psi1(l, kI * a) * psi(l, kII * a)) / \
(k2II * kI * xi1(l, kI * a) * psi(l, kII * a) - k2I *
kII * xi(l, kI * a) * psi1(l, kII * a)) * mBR[ii]
# First Order Expansion Coefficients
# coefficients for internal fields (eCcl, mCcl) and scattered fields
# (eDdl, mDdl)
eLambda1L = {}
mLambda1L = {}
eLambda2L = {}
mLambda2L = {}
eLambda3L = {}
mLambda3L = {}
eLambda1R = {}
mLambda1R = {}
eLambda2R = {}
mLambda2R = {}
eLambda3R = {}
mLambda3R = {}
for jj in range(lmax):
l = jj + 1
# left
eLambda1L[l] = lambda x, l=l, jj=jj: (eBL[jj] / kI * psi1ex(l, kI * a, x) - eCL[jj] / kII * psi1ex(
l, kII * a, x) + eDL[jj] / kI * xi1ex(l, kI * a, x)) # electric parameter1 left
mLambda1L[l] = lambda x, l=l, jj=jj: (mBL[jj] / kI * psi1ex(l, kI * a, x) - mCL[jj] / kII * psi1ex(
l, kII * a, x) + mDL[jj] / kI * xi1ex(l, kI * a, x)) # magnetic parameter1 left
eLambda2L[l] = lambda x, l=l, jj=jj: (k1I / kI**2 * eBL[jj] * psiex(l, kI * a, x) - k1II / kII**2 * eCL[jj] * psiex(
l, kII * a, x) + k1I / kI**2 * eDL[jj] * xiex(l, kI * a, x)) # electric parameter2 left
mLambda2L[l] = lambda x, l=l, jj=jj: (k2I / kI**2 * mBL[jj] * psiex(l, kI * a, x) - k2II / kII**2 * mCL[jj] * psiex(
l, kII * a, x) + k2I / kI**2 * mDL[jj] * xiex(l, kI * a, x)) # magnetic parameter2 left
eLambda3L[l] = lambda x, l=l, jj=jj: (eBL[jj] * (psiex(l, kI * a, x) + psi2ex(l, kI * a, x)) * k1I - eCL[jj] * (psiex(l, kII * a, x) + psi2ex(l, kII * a, x)) * k1II
+ eDL[jj] * (xiex(l, kI * a, x) + xi2ex(l, kI * a, x)) * k1I) # electric parameter3 left
mLambda3L[l] = lambda x, l=l, jj=jj: (mBL[jj] * (psiex(l, kI * a, x) + psi2ex(l, kI * a, x)) * MuI - mCL[jj] * (psiex(l, kII * a, x) + psi2ex(l, kII * a, x)) * MuII
+ mDL[jj] * (xiex(l, kI * a, x) + xi2ex(l, kI * a, x)) * MuI) # magnetic parameter3 left
# right
eLambda1R[l] = lambda x, l=l, jj=jj: (eBR[jj] / kI * psi1ex(l, kI * a, x) - eCR[jj] / kII * psi1ex(
l, kII * a, x) + eDR[jj] / kI * xi1ex(l, kI * a, x)) # electric parameter1 right
mLambda1R[l] = lambda x, l=l, jj=jj: (mBR[jj] / kI * psi1ex(l, kI * a, x) - mCR[jj] / kII * psi1ex(
l, kII * a, x) + mDR[jj] / kI * xi1ex(l, kI * a, x)) # magnetic parameter1 right
eLambda2R[l] = lambda x, l=l, jj=jj: (k1I / kI**2 * eBR[jj] * psiex(l, kI * a, x) - k1II / kII**2 * eCR[jj] * psiex(
l, kII * a, x) + k1I / kI**2 * eDR[jj] * xiex(l, kI * a, x)) # electric parameter2 right
mLambda2R[l] = lambda x, l=l, jj=jj: (k2I / kI**2 * mBR[jj] * psiex(l, kI * a, x) - k2II / kII**2 * mCR[jj] * psiex(
l, kII * a, x) + k2I / kI**2 * mDR[jj] * xiex(l, kI * a, x)) # magnetic parameter2 right
eLambda3R[l] = lambda x, l=l, jj=jj: (eBR[jj] * (psiex(l, kI * a, x) + psi2ex(l, kI * a, x)) * k1I - eCR[jj] * (psiex(l, kII * a, x) + psi2ex(l, kII * a, x)) * k1II
+ eDR[jj] * (xiex(l, kI * a, x) + xi2ex(l, kI * a, x)) * k1I) # electric parameter3 right
mLambda3R[l] = lambda x, l=l, jj=jj: (mBR[jj] * (psiex(l, kI * a, x) + psi2ex(l, kI * a, x)) * MuI - mCR[jj] * (psiex(l, kII * a, x) + psi2ex(l, kII * a, x)) * MuII
+ mDR[jj] * (xiex(l, kI * a, x) + xi2ex(l, kI * a, x)) * MuI) # magnetic parameter3 right
# define points for least square fitting [similar to operating on
# equations with int_ {-1}**(+1} dx legendrePl(m,x)...]
x = {}
for m in range(1, int(mmax / 3) + 1):
x[int(m)] = (-1 + 0.1 * (m - 1) / (mmax / 3))
x[int(m + mmax / 3)] = (-0.9 + 1.8 * m / (mmax / 3))
x[int(m + 2 * mmax / 3)] = (+0.9 + 0.1 * m / (mmax / 3))
efun1aL = np.zeros((mmax, lmax), dtype=complex)
efun1bL = np.zeros((mmax, lmax), dtype=complex)
efun2aL = np.zeros((mmax, lmax), dtype=complex)
efun2bL = np.zeros((mmax, lmax), dtype=complex)
efun3L = np.zeros((mmax, lmax), dtype=complex)
mfun1aL = np.zeros((mmax, lmax), dtype=complex)
mfun1bL = np.zeros((mmax, lmax), dtype=complex)
mfun2aL = np.zeros((mmax, lmax), dtype=complex)
mfun2bL = np.zeros((mmax, lmax), dtype=complex)
mfun3L = np.zeros((mmax, lmax), dtype=complex)
efun1aR = np.zeros((mmax, lmax), dtype=complex)
efun1bR = np.zeros((mmax, lmax), dtype=complex)
efun2aR = np.zeros((mmax, lmax), dtype=complex)
efun2bR = np.zeros((mmax, lmax), dtype=complex)
efun3R = np.zeros((mmax, lmax), dtype=complex)
mfun1aR = np.zeros((mmax, lmax), dtype=complex)
mfun1bR = np.zeros((mmax, lmax), dtype=complex)
mfun2aR = np.zeros((mmax, lmax), dtype=complex)
mfun2bR = np.zeros((mmax, lmax), dtype=complex)
mfun3R = np.zeros((mmax, lmax), dtype=complex)
for ii in range(mmax):
m = ii + 1
# define points for least square fitting [similar to operating on equations with int_ {-1}**(+1} dx legendrePl(m,x)...]
# the points x[m] lie in the range [-1,1] or similarly th(m) is in the range [0,pi]
# x[m] = (-1 + 2*(m-1)/(mmax-1))
for jj in range(lmax):
l = jj + 1
# left
efun1aL[ii, jj] = eLambda1L[l](x[m]) * legendrePl(l, x[m])
efun1bL[ii, jj] = eLambda1L[l](x[m]) * legendrePlex1(l, x[m])
mfun1aL[ii, jj] = mLambda1L[l](x[m]) * legendrePl(l, x[m])
mfun1bL[ii, jj] = mLambda1L[l](x[m]) * legendrePlex1(l, x[m])
mfun2aL[ii, jj] = mLambda2L[l](x[m]) * legendrePl(l, x[m])
mfun2bL[ii, jj] = mLambda2L[l](x[m]) * legendrePlex1(l, x[m])
efun2aL[ii, jj] = eLambda2L[l](x[m]) * legendrePl(l, x[m])
efun2bL[ii, jj] = eLambda2L[l](x[m]) * legendrePlex1(l, x[m])
efun3L[ii, jj] = eLambda3L[l](x[m]) * legendrePl(l, x[m])
mfun3L[ii, jj] = mLambda3L[l](x[m]) * legendrePl(l, x[m])
# right
efun1aR[ii, jj] = eLambda1R[l](x[m]) * legendrePl(l, x[m])
efun1bR[ii, jj] = eLambda1R[l](x[m]) * legendrePlex1(l, x[m])
mfun1aR[ii, jj] = mLambda1R[l](x[m]) * legendrePl(l, x[m])
mfun1bR[ii, jj] = mLambda1R[l](x[m]) * legendrePlex1(l, x[m])
mfun2aR[ii, jj] = mLambda2R[l](x[m]) * legendrePl(l, x[m])
mfun2bR[ii, jj] = mLambda2R[l](x[m]) * legendrePlex1(l, x[m])
efun2aR[ii, jj] = eLambda2R[l](x[m]) * legendrePl(l, x[m])
efun2bR[ii, jj] = eLambda2R[l](x[m]) * legendrePlex1(l, x[m])
efun3R[ii, jj] = eLambda3R[l](x[m]) * legendrePl(l, x[m])
mfun3R[ii, jj] = mLambda3R[l](x[m]) * legendrePl(l, x[m])
# first order BC can be written in form: M11*eCc + M12*eDd + M13*mCc + M14*mDd = N1
# ...
# M61*eCc + M62*eDd + M63*mCc + M64*mDd = N6
# M11...M66 are matrices including the pre-factors for the individual terms in the sums of eCc[jj], etc
# eCc...mDd are vectors including all the expansion coefficients (eCc = [eCc(1),...,eCc(lmax)])
# N1...N6 include the 0-th order terms and angular-dependent Legendre and
# Bessel functions
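# Schematic sketch of the stacked system assembled below (block form):
#   [M11 M12 M13 M14]   [eCc]   [N1]
#   [M21 M22 M23 M24]   [eDd]   [N2]
#   [ ...           ] * [mCc] = [...]
#   [M61 M62 M63 M64]   [mDd]   [N6]
# which is solved in a least-squares sense for the first-order coefficients.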
N1L = np.zeros(mmax, dtype=complex)
N2L = np.zeros(mmax, dtype=complex)
N3L = np.zeros(mmax, dtype=complex)
N4L = np.zeros(mmax, dtype=complex)
N5L = np.zeros(mmax, dtype=complex)
N6L = np.zeros(mmax, dtype=complex)
N1R = np.zeros(mmax, dtype=complex)
N2R = np.zeros(mmax, dtype=complex)
N3R = np.zeros(mmax, dtype=complex)
N4R = np.zeros(mmax, dtype=complex)
N5R = np.zeros(mmax, dtype=complex)
N6R = np.zeros(mmax, dtype=complex)
for ii in range(mmax):
# left
N1L[ii] = np.sum(efun1aL[ii, :]) - np.sum(mfun2bL[ii, :])
N2L[ii] = np.sum(efun1bL[ii, :]) - np.sum(mfun2aL[ii, :])
N3L[ii] = np.sum(mfun1aL[ii, :]) - np.sum(efun2bL[ii, :])
N4L[ii] = np.sum(mfun1bL[ii, :]) - np.sum(efun2aL[ii, :])
N5L[ii] = np.sum(efun3L[ii, :])
N6L[ii] = np.sum(mfun3L[ii, :])
# right
N1R[ii] = np.sum(efun1aR[ii, :]) - np.sum(mfun2bR[ii, :])
N2R[ii] = np.sum(efun1bR[ii, :]) - np.sum(mfun2aR[ii, :])
N3R[ii] = np.sum(mfun1aR[ii, :]) - np.sum(efun2bR[ii, :])
N4R[ii] = np.sum(mfun1bR[ii, :]) - np.sum(efun2aR[ii, :])
N5R[ii] = np.sum(efun3R[ii, :])
N6R[ii] = np.sum(mfun3R[ii, :])
##
M11 = np.zeros((mmax, lmax), dtype=complex)
M12 = np.zeros((mmax, lmax), dtype=complex)
M13 = np.zeros((mmax, lmax), dtype=complex)
M14 = np.zeros((mmax, lmax), dtype=complex)
M21 = np.zeros((mmax, lmax), dtype=complex)
M22 = np.zeros((mmax, lmax), dtype=complex)
M23 = np.zeros((mmax, lmax), dtype=complex)
M24 = np.zeros((mmax, lmax), dtype=complex)
M31 = np.zeros((mmax, lmax), dtype=complex)
M32 = np.zeros((mmax, lmax), dtype=complex)
M33 = np.zeros((mmax, lmax), dtype=complex)
M34 = np.zeros((mmax, lmax), dtype=complex)
M41 = np.zeros((mmax, lmax), dtype=complex)
M42 = np.zeros((mmax, lmax), dtype=complex)
M43 = np.zeros((mmax, lmax), dtype=complex)
M44 = np.zeros((mmax, lmax), dtype=complex)
M51 = np.zeros((mmax, lmax), dtype=complex)
M52 = np.zeros((mmax, lmax), dtype=complex)
M53 = np.zeros((mmax, lmax), dtype=complex)
M54 = np.zeros((mmax, lmax), dtype=complex)
M61 = np.zeros((mmax, lmax), dtype=complex)
M62 = np.zeros((mmax, lmax), dtype=complex)
M63 = np.zeros((mmax, lmax), dtype=complex)
M64 = np.zeros((mmax, lmax), dtype=complex)
for ii in range(mmax):
m = ii + 1
for jj in range(lmax):
l = jj + 1
M11[ii, jj] = +1 / kII * \
psi1xx(l, kII * a, x[m]) * legendrePl(l, x[m])
M12[ii, jj] = -1 / kI * \
xi1xx(l, kI * a, x[m]) * legendrePl(l, x[m])
M13[ii, jj] = +1 / k1II * \
psixx(l, kII * a, x[m]) * legendrePlex1(l, x[m])
M14[ii, jj] = -1 / k1I * \
xixx(l, kI * a, x[m]) * legendrePlex1(l, x[m])
M21[ii, jj] = +1 / kII * \
psi1xx(l, kII * a, x[m]) * legendrePlex1(l, x[m])
M22[ii, jj] = -1 / kI * \
xi1xx(l, kI * a, x[m]) * legendrePlex1(l, x[m])
M23[ii, jj] = +1 / k1II * \
psixx(l, kII * a, x[m]) * legendrePl(l, x[m])
M24[ii, jj] = -1 / k1I * \
xixx(l, kI * a, x[m]) * legendrePl(l, x[m])
M31[ii, jj] = +1 / k2II * \
psixx(l, kII * a, x[m]) * legendrePlex1(l, x[m])
M32[ii, jj] = -1 / k2I * \
xixx(l, kI * a, x[m]) * legendrePlex1(l, x[m])
M33[ii, jj] = M11[ii, jj]
M34[ii, jj] = M12[ii, jj]
M41[ii, jj] = +1 / k2II * \
psixx(l, kII * a, x[m]) * legendrePl(l, x[m])
M42[ii, jj] = -1 / k2I * \
xixx(l, kI * a, x[m]) * legendrePl(l, x[m])
M43[ii, jj] = M21[ii, jj]
M44[ii, jj] = M22[ii, jj]
M51[ii, jj] = + k1II * (psixx(l, kII * a, x[m]) +
psi2xx(l, kII * a, x[m])) * legendrePl(l, x[m])
M52[ii, jj] = - k1I * (xixx(l, kI * a, x[m]) +
xi2xx(l, kI * a, x[m])) * legendrePl(l, x[m])
M53[ii, jj] = 0
M54[ii, jj] = 0
M61[ii, jj] = 0
M62[ii, jj] = 0
M63[ii, jj] = + MuII * (psixx(l, kII * a, x[m]) +
psi2xx(l, kII * a, x[m])) * legendrePl(l, x[m])
M64[ii, jj] = - MuI * (xixx(l, kI * a, x[m]) +
xi2xx(l, kI * a, x[m])) * legendrePl(l, x[m])
Matrix = np.zeros((6 * mmax, 6 * lmax), dtype=complex)
for ii in range(mmax):
m = ii + 1
for jj in range(lmax):
l = jj + 1
Matrix[ii, jj] = M11[ii, jj]
Matrix[ii, jj + lmax] = M12[ii, jj]
Matrix[ii, jj + 2 * lmax] = M13[ii, jj]
Matrix[ii, jj + 3 * lmax] = M14[ii, jj]
Matrix[ii + mmax, jj] = M21[ii, jj]
Matrix[ii + mmax, jj + lmax] = M22[ii, jj]
Matrix[ii + mmax, jj + 2 * lmax] = M23[ii, jj]
Matrix[ii + mmax, jj + 3 * lmax] = M24[ii, jj]
Matrix[ii + 2 * mmax, jj] = M31[ii, jj]
Matrix[ii + 2 * mmax, jj + lmax] = M32[ii, jj]
Matrix[ii + 2 * mmax, jj + 2 * lmax] = M33[ii, jj]
Matrix[ii + 2 * mmax, jj + 3 * lmax] = M34[ii, jj]
Matrix[ii + 3 * mmax, jj] = M41[ii, jj]
Matrix[ii + 3 * mmax, jj + lmax] = M42[ii, jj]
Matrix[ii + 3 * mmax, jj + 2 * lmax] = M43[ii, jj]
Matrix[ii + 3 * mmax, jj + 3 * lmax] = M44[ii, jj]
Matrix[ii + 4 * mmax, jj] = M51[ii, jj]
Matrix[ii + 4 * mmax, jj + lmax] = M52[ii, jj]
Matrix[ii + 4 * mmax, jj + 2 * lmax] = M53[ii, jj]
Matrix[ii + 4 * mmax, jj + 3 * lmax] = M54[ii, jj]
Matrix[ii + 5 * mmax, jj] = M61[ii, jj]
Matrix[ii + 5 * mmax, jj + lmax] = M62[ii, jj]
Matrix[ii + 5 * mmax, jj + 2 * lmax] = M63[ii, jj]
Matrix[ii + 5 * mmax, jj + 3 * lmax] = M64[ii, jj]
VectorL = np.zeros(6 * mmax, dtype=complex)
VectorR = np.zeros(6 * mmax, dtype=complex)
for ii in range(mmax):
# left and right
VectorL[ii] = N1L[ii]
VectorL[ii + mmax] = N2L[ii]
VectorL[ii + 2 * mmax] = N3L[ii]
VectorL[ii + 3 * mmax] = N4L[ii]
VectorL[ii + 4 * mmax] = N5L[ii]
VectorL[ii + 5 * mmax] = N6L[ii]
VectorR[ii] = N1R[ii]
VectorR[ii + mmax] = N2R[ii]
VectorR[ii + 2 * mmax] = N3R[ii]
VectorR[ii + 3 * mmax] = N4R[ii]
VectorR[ii + 4 * mmax] = N5R[ii]
VectorR[ii + 5 * mmax] = N6R[ii]
Weight = np.zeros(6 * mmax, dtype=complex)
# weights
for ii in range(mmax):
m = ii + 1
Weight[ii] = 1
Weight[ii + mmax] = 1
Weight[ii + 2 * mmax] = 1
Weight[ii + 3 * mmax] = 1
Weight[ii + 4 * mmax] = 1 / (100 * m**0.8)
Weight[ii + 5 * mmax] = 1 / (22 * m**0.20)
# Weights were commented out?:
# xL = lscov (Matrix,VectorL.',Weight.').'
xL = lscov(np.array(Matrix), np.array(VectorL).T, np.array(Weight).T)
xR = lscov(np.array(Matrix), np.array(VectorR).T, np.array(Weight).T)
if verbose:
print('If Eps=0, ignore previous error messages regarding rank deficiency!')
eCcL = np.zeros(lmax, dtype=complex)
eDdL = np.zeros(lmax, dtype=complex)
mCcL = np.zeros(lmax, dtype=complex)
mDdL = np.zeros(lmax, dtype=complex)
eCcR = np.zeros(lmax, dtype=complex)
eDdR = np.zeros(lmax, dtype=complex)
mCcR = np.zeros(lmax, dtype=complex)
mDdR = np.zeros(lmax, dtype=complex)
for jj in range(lmax):
l = jj + 1
# left and right first order expansion coefficients
eCcL[jj] = xL[jj]
eDdL[jj] = xL[jj + lmax]
mCcL[jj] = xL[jj + 2 * lmax]
mDdL[jj] = xL[jj + 3 * lmax]
eCcR[jj] = xR[jj] * (-1)**(l - 1)
eDdR[jj] = xR[jj + lmax] * (-1)**(l - 1)
mCcR[jj] = xR[jj + 2 * lmax] * (-1)**l
mDdR[jj] = xR[jj + 3 * lmax] * (-1)**l
# corrected expansion coefficients for right optical fiber*
eBR[jj] = eBR[jj] * (-1)**(l - 1)
eCR[jj] = eCR[jj] * (-1)**(l - 1)
eDR[jj] = eDR[jj] * (-1)**(l - 1)
mBR[jj] = mBR[jj] * (-1)**l
mCR[jj] = mCR[jj] * (-1)**l
mDR[jj] = mDR[jj] * (-1)**l
# *The additional factors [(-1)**(l-1), (-1)**l] should have been included when eB, mB were calculated;
# however, the least-squares approach then introduces a slight antisymmetry, so the factors are applied only after the least-squares fit.
# Field Expansions For Incident Fields
# sums of radial, zenithal and azimuthal field terms from l=1 to l=lmax
# initialisation of sums
# Paul: These are not used?!
#EriL = lambda r, th, phi: 0
#EriR = lambda r, th, phi: 0
#EthiL = lambda r, th, phi: 0
#EthiR = lambda r, th, phi: 0
#EphiiL = lambda r, th, phi: 0
#EphiiR = lambda r, th, phi: 0
#HriL = lambda r, th, phi: 0
#HriR = lambda r, th, phi: 0
#HthiL = lambda r, th, phi: 0
#HthiR = lambda r, th, phi: 0
#HphiiL = lambda r, th, phi: 0
#HphiiR = lambda r, th, phi: 0
# left
#EriL = lambda r, th, phi: EriL(r,th,phi) + eBL[l-1]*(psi(l,kI*r)+psi2(l,kI*r))*legendrePl(l,np.cos(th))*np.cos(phi)
# EthiL = lambda r, th, phi: EthiL(r,th,phi) -(eBL[l-1]*psi1 (l,kI*r)/(kI*r)*legendrePl1(l,np.cos(th))*np.sin(th) \
# + mBL[l-1]*psi (l,kI*r)/(k1I*r)*legendrePl (l,np.cos(th))/np.sin(th))*np.cos(phi)
# EphiiL = lambda r, th, phi: EphiiL(r,th,phi) -(eBL[l-1]*psi1 (l,kI*r)/(kI*r)*legendrePl (l,np.cos(th))/np.sin(th) \
# + mBL[l-1]*psi (l,kI*r)/(k1I*r)*legendrePl1(l,np.cos(th))*np.sin(th))*np.sin(phi)
#HriL = lambda r, th, phi: HriL(r,th,phi) + mBL[l-1]*(psi(l,kI*r)+psi2(l,kI*r))*legendrePl(l,np.cos(th))*np.sin(phi)
# HthiL = lambda r, th, phi: HthiL(r,th,phi) -(mBL[l-1]*psi1 (l,kI*r)/(kI*r)*legendrePl1(l,np.cos(th))*np.sin(th) \
# + eBL[l-1]*psi (l,kI*r)/(k2I*r)*legendrePl (l,np.cos(th))/np.sin(th))*np.sin(phi)
# HphiiL = lambda r, th, phi: HphiiL(r,th,phi) +(mBL[l-1]*psi1 (l,kI*r)/(kI*r)*legendrePl (l,np.cos(th))/np.sin(th) \
# + eBL[l-1]*psi (l,kI*r)/(k2I*r)*legendrePl1(l,np.cos(th))*np.sin(th))*np.cos(phi)
# right
#EriR = lambda r, th, phi: EriR(r,th,phi) + eBR[l-1]*(psi(l,kI*r)+psi2(l,kI*r))*legendrePl(l,np.cos(th))*np.cos(phi)
# EthiR = lambda r, th, phi: EthiR(r,th,phi) -(eBR[l-1]*psi1 (l,kI*r)/(kI*r)*legendrePl1(l,np.cos(th))*np.sin(th) \
# + mBR[l-1]*psi (l,kI*r)/(k1I*r)*legendrePl (l,np.cos(th))/np.sin(th))*np.cos(phi)
# EphiiR = lambda r, th, phi: EphiiR(r,th,phi) -(eBR[l-1]*psi1 (l,kI*r)/(kI*r)*legendrePl (l,np.cos(th))/np.sin(th) \
# + mBR[l-1]*psi (l,kI*r)/(k1I*r)*legendrePl1(l,np.cos(th))*np.sin(th))*np.sin(phi)
#HriR = lambda r, th, phi: HriR(r,th,phi) + mBR[l-1]*(psi(l,kI*r)+psi2(l,kI*r))*legendrePl(l,np.cos(th))*np.sin(phi)
# HthiR = lambda r, th, phi: HthiR(r,th,phi) -(mBR[l-1]*psi1 (l,kI*r)/(kI*r)*legendrePl1(l,np.cos(th))*np.sin(th) \
# + eBR[l-1]*psi (l,kI*r)/(k2I*r)*legendrePl (l,np.cos(th))/np.sin(th))*np.sin(phi)
# HphiiR = lambda r, th, phi: HphiiR(r,th,phi) +(mBR[l-1]*psi1 (l,kI*r)/(kI*r)*legendrePl (l,np.cos(th))/np.sin(th) \
# + eBR[l-1]*psi (l,kI*r)/(k2I*r)*legendrePl1(l,np.cos(th))*np.sin(th))*np.cos(phi)
# Field Expansions For Scattered Fields
# sums of radial, zenithal and azimuthal field terms from l=1 to l=lmax
# Paul: workaround to recursing into lambda functions
def wrapper_expansion(lambda_func):
def wrapped(r, th, ph):
result = 0
for jj in range(lmax):
l = jj + 1
result += lambda_func(r, th, ph, l)
return result
return wrapped
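# Sketch (not the original assembly code, which is not shown here): the per-order
# term functions defined below can be summed over l with this wrapper, e.g.
#   ErsL = wrapper_expansion(ErsL_it)
# so that ErsL(r, th, phi) gives the full scattered radial field.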
# left
def ErsL_it(r, th, phi, l): return (
eDL[l - 1] + eDdL[l - 1]) * (xi(l, kI * r) + xi2(l, kI * r)) * legendrePl(l, np.cos(th)) * np.cos(phi)
def EthsL_it(r, th, phi, l): return -((eDL[l - 1] + eDdL[l - 1]) * xi1(l, kI * r) / (kI * r) * legendrePl1(l, np.cos(th)) * np.sin(th)
+ (mDL[l - 1] + mDdL[l - 1]) * xi(l, kI * r) / (k1I * r) * legendrePl(l, np.cos(th)) / np.sin(th)) * np.cos(phi)
# -*- coding: utf-8 -*-
from __future__ import print_function
"""Main module."""
import numpy as np
import itertools as it
import scipy.stats as sps
import scipy.linalg as sl
import os, pickle
from astropy import units as u
import hasasia
from .utils import create_design_matrix
current_path = os.path.abspath(hasasia.__path__[0])
sc_dir = os.path.join(current_path,'sensitivity_curves/')
__all__ =['GWBSensitivityCurve',
'DeterSensitivityCurve',
'Pulsar',
'Spectrum',
'R_matrix',
'G_matrix',
'get_Tf',
'get_NcalInv',
'resid_response',
'HellingsDownsCoeff',
'get_Tspan',
'get_TspanIJ',
'corr_from_psd',
'quantize_fast',
'red_noise_powerlaw',
'Agwb_from_Seff_plaw',
'PI_hc',
'nanograv_11yr_stoch',
'nanograv_11yr_deter',
]
## Some constants
yr_sec = 365.25*24*3600
fyr = 1/yr_sec
def R_matrix(designmatrix, N):
"""
Create R matrix as defined in Ellis et al (2013)
and Demorest et al (2012)
Parameters
----------
designmatrix : array
Design matrix of timing model.
N : array
TOA uncertainties [s]
Returns
-------
R matrix
"""
M = designmatrix
n,m = M.shape
L = np.linalg.cholesky(N)
Linv = np.linalg.inv(L)
U,s,_ = np.linalg.svd(np.matmul(Linv,M), full_matrices=True)
Id = np.eye(M.shape[0])
S = np.zeros_like(M)
S[:m,:m] = np.diag(s)
inner = np.linalg.inv(np.matmul(S.T,S))
outer = np.matmul(S,np.matmul(inner,S.T))
return Id - np.matmul(L,np.matmul(np.matmul(U,outer),np.matmul(U.T,Linv)))
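# Hedged usage sketch (added for illustration; evenly sampled TOAs and a 1 us
# white-noise covariance are arbitrary assumptions, not real pulsar data, and the
# create_design_matrix keyword arguments are assumed to follow the hasasia tutorial).
def _example_R_matrix():
    toas = np.linspace(0, 10 * yr_sec, 50)  # ~10 yr of TOAs [s]
    M = create_design_matrix(toas, RADEC=True, PROPER_MOTION=True, PX=True)
    N = np.diag(np.full_like(toas, (1e-6)**2))  # diagonal TOA covariance [s^2]
    return R_matrix(M, N)  # 50 x 50 projection operator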
def G_matrix(designmatrix):
"""
Create G matrix as defined in van Haasteren 2013
Parameters
----------
designmatrix : array
Design matrix for a pulsar timing model.
Returns
-------
G matrix
"""
M = designmatrix
n , m = M.shape
U, _ , _ = np.linalg.svd(M, full_matrices=True)
return U[:,m:]
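# Hedged sketch: for an N_TOA x m design matrix the G matrix has shape
# (N_TOA, N_TOA - m) and spans the subspace orthogonal to the timing model.
# Inputs below are illustrative assumptions only.
def _example_G_matrix():
    toas = np.linspace(0, 10 * yr_sec, 50)
    M = create_design_matrix(toas, RADEC=True, PROPER_MOTION=True, PX=True)
    G = G_matrix(M)
    return G.shape  # expected: (50, 50 - M.shape[1])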
def get_Tf(designmatrix, toas, N=None, nf=200, fmin=None, fmax=2e-7,
freqs=None, exact_astro_freqs = False, from_G=True, twofreqs=False):
"""
Calculate the transmission function for a given pulsar design matrix, TOAs
and TOA errors.
Parameters
----------
designmatrix : array
Design matrix for a pulsar timing model, N_TOA x N_param.
toas : array
Times-of-arrival for pulsar, N_TOA long.
N : array
Covariance matrix for pulsar time-of-arrivals, N_TOA x N_TOA. Often just
a diagonal matrix of inverse TOA errors squared.
nf : int, optional
Number of frequencies at which to calculate transmission function.
fmin : float, optional
Minimum frequency at which to calculate transmission function.
fmax : float, optional
Maximum frequency at which to calculate transmission function.
exact_astro_freqs : bool, optional
Whether to use exact 1/year and 2/year frequency values in calculation.
from_G : bool, optional
Whether to use the G matrix for the transmission function calculation. If False
the R-matrix is used.
"""
if not from_G and N is None:
err_msg = 'Covariance Matrix must be provided if constructing'
err_msg += ' from R-matrix.'
raise ValueError(err_msg)
M = designmatrix
N_TOA = M.shape[0]
## Prep Correlation
t1, t2 = np.meshgrid(toas, toas)
tm = np.abs(t1-t2)
# make filter
T = toas.max()-toas.min()
f0 = 1 / T
if freqs is None:
if fmin is None:
fmin = f0/5
ff = np.logspace(np.log10(fmin), np.log10(fmax), nf,dtype='float128')
if exact_astro_freqs:
ff = np.sort(np.append(ff,[fyr,2*fyr]))
nf +=2
else:
nf = len(freqs)
ff = freqs
Tmat = np.zeros(nf, dtype='float64')
if from_G:
G = G_matrix(M)
m = G.shape[1]
Gtilde = np.zeros((ff.size,G.shape[1]),dtype='complex128')
Gtilde = np.dot(np.exp(1j*2*np.pi*ff[:,np.newaxis]*toas),G)
Tmat = np.matmul(np.conjugate(Gtilde),Gtilde.T)/N_TOA
if twofreqs:
Tmat = np.real(Tmat)
else:
Tmat = np.real(np.diag(Tmat))
else:
R = R_matrix(M, N)
for ct, f in enumerate(ff):
Tmat[ct] = np.real(np.sum(np.exp(1j*2*np.pi*f*tm)*R)/N_TOA)
return np.real(Tmat), ff, T
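# Hedged usage sketch (illustrative assumptions only): transmission function for a
# simple timing model over ~10 yr of evenly sampled TOAs, using the default
# G-matrix route so no covariance matrix is required.
def _example_get_Tf():
    toas = np.linspace(0, 10 * yr_sec, 200)
    M = create_design_matrix(toas, RADEC=True, PROPER_MOTION=True, PX=True)
    Tf, ff, T = get_Tf(M, toas, nf=100)
    return Tf, ff, T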
def get_NcalInv(psr, nf=200, fmin=None, fmax=2e-7, freqs=None,
exact_yr_freqs = False, full_matrix=False,
return_Gtilde_Ncal=False, tm_fit=True):
r"""
Calculate the inverse-noise-weighted transmission function for a given
pulsar. This calculates
:math:`\mathcal{N}^{-1}(f,f') , \; \mathcal{N}^{-1}(f)`
in `[1]`_, see Equations (19-20).
.. _[1]: https://arxiv.org/abs/1907.04341
Parameters
----------
psr : array
Pulsar object.
nf : int, optional
Number of frequencies at which to calculate transmission function.
fmin : float, optional
Minimum frequency at which to calculate transmission function.
fmax : float, optional
Maximum frequency at which to calculate transmission function.
exact_yr_freqs : bool, optional
Whether to use exact 1/year and 2/year frequency values in calculation.
Returns
-------
inverse-noise-weighted transmission function
"""
toas = psr.toas
# make filter
T = toas.max()-toas.min()
f0 = 1 / T
if freqs is None:
if fmin is None:
fmin = f0/5
ff = np.logspace(np.log10(fmin), np.log10(fmax), nf,dtype='float128')
if exact_yr_freqs:
ff = np.sort(np.append(ff,[fyr,2*fyr]))
nf +=2
else:
nf = len(freqs)
ff = freqs
if tm_fit:
G = G_matrix(psr.designmatrix)
else:
G = np.eye(toas.size)
Gtilde = np.zeros((ff.size,G.shape[1]),dtype='complex128')
#N_freqs x N_TOA-N_par
# Note we do not include factors of NTOA or Timespan as they cancel
# with the definition of Ncal
Gtilde = np.dot(np.exp(1j*2*np.pi*ff[:,np.newaxis]*toas),G)
# N_freq x N_TOA-N_par
Ncal = np.matmul(G.T,np.matmul(psr.N,G)) #N_TOA-N_par x N_TOA-N_par
NcalInv = np.linalg.inv(Ncal) #N_TOA-N_par x N_TOA-N_par
TfN = np.matmul(np.conjugate(Gtilde),np.matmul(NcalInv,Gtilde.T)) / 2
if return_Gtilde_Ncal:
return np.real(TfN), Gtilde, Ncal
import sys
if sys.version_info[0] == 2:
import Tkinter as tk
from tkFileDialog import askdirectory
else:
import tkinter as tk
from tkinter.filedialog import askdirectory
import numpy as np
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import os
### set size for figures
x_size = 7
y_size = 5
#############################################################################
############################## SAVE_FREQ ####################################
#############################################################################
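# Writes the detected frequencies to 'freq_perc.txt' in a user-selected folder.
# arg0 is the analysed file name, arg2 holds the filter-round indices, arg3 the
# periods (so 1.0/arg3 are the frequencies) and arg4 the percentages per round.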
def save_freq(arg0,arg2,arg3,arg4):
path_file = askdirectory()
path_file = os.path.join(path_file,'freq_perc.txt')
f = open(path_file,'w')
f.write('Frequencies for file %s\n\n' % (arg0))
f.write('Filter_round Frequency Period Percentage\n\n')
for i in range(len(arg2)):
f.write('%d %.8f %.2f %.2f\n' % (arg2[i],1.0 / arg3[i],arg3[i],arg4[i]))
f.close()
#############################################################################
#############################################################################
############################## DETREND_PLOT #################################
#############################################################################
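# Interactive window for the detrending step: plots the original series
# (arg0 + arg1), the trend arg1 (when it is an array rather than a scalar) and
# the detrended series arg0, with x-tick labels taken from the time vector arg2;
# limits, labels and title are editable and the figure can be saved as ts.pdf.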
def detrend_plot(main_win,arg0,arg1,arg2):
ticks = np.arange(0,len(arg0),len(arg0) / 7,dtype = int)
t = np.arange(1,len(arg0) + 1,dtype = int)
time = np.array(arg2,dtype = str)
ticks_vec = t[ticks]
time_label = time[ticks]
def save_fig():
path_tot = askdirectory()
plt.rc('text',usetex = True)
plt.rc('font',family = 'serif')
plt.plot(t,arg0 + arg1,'r',label = 'original')
if not np.isscalar(arg1):
plt.plot(t,arg1,'k',label = 'trend')
plt.plot(t,arg0,'b',label = 'detrended')
plt.legend(loc = 0)
plt.xlim(float(entries[0].get()),float(entries[1].get()))
plt.ylim(float(entries[2].get()),float(entries[3].get()))
plt.xlabel(entries[4].get())
plt.ylabel(entries[5].get())
plt.title(entries[6].get())
plt.xticks(ticks,time_label)
plt.margins(0.2)
plt.subplots_adjust(bottom = 0.2)
plt.savefig(os.path.join(path_tot,'ts.pdf'))
plt.close()
def screen_fig():
fig_ts = Figure(figsize = (x_size,y_size))
a = fig_ts.add_subplot(111)
a.plot(t,arg0 + arg1,'r',label = 'original')
if not np.isscalar(arg1):
a.plot(t,arg1,'k',label = 'trend')
a.plot(t,arg0,'b',label = 'detrended')
a.legend(loc = 0)
a.set_xlim(float(entries[0].get()),float(entries[1].get()))
a.set_ylim(float(entries[2].get()),float(entries[3].get()))
a.set_xlabel(entries[4].get(),fontsize = 15)
a.set_ylabel(entries[5].get(),fontsize = 15)
a.set_title(entries[6].get(),fontsize = 15)
a.set_xticks(ticks_vec)
a.set_xticklabels(time_label)
fig_ts.tight_layout()
canvas = FigureCanvasTkAgg(fig_ts,master = frame_1)
canvas.get_tk_widget().grid(row = 0,column = 0)
canvas.draw()
def reset_fig():
for i in range(len(entries)):
entries[i].delete(0,tk.END)
entries[i].insert(0,values[i])
screen_fig()
top = tk.Toplevel(main_win)
top.geometry("%dx%d" % (int(main_win.winfo_screenwidth() * 0.93 * 0.85),
int(main_win.winfo_screenheight() * 0.65)))
top.wm_title("Time Series")
top.resizable(width = False,height = False)
frame_1 = tk.Frame(top)
frame_1.grid(row = 0,column = 0)
frame_2 = tk.Frame(top)
frame_2.grid(row = 0,column = 1)
names = ["X Limit (left)","X Limit (right)","Y Limit (bottom)","Y Limit (top)","X Label","Y Label","Title"]
if not np.isscalar(arg1):
values = [t[0],t[-1],np.min([np.min(arg0[~np.isnan(arg0)]),np.min(arg1[~np.isnan(arg1)]),
np.min(arg0[~np.isnan(arg0)] + arg1[~np.isnan(arg1)])]) - 1.0,
np.max([np.max(arg0[~np.isnan(arg0)]),np.max(arg1[~np.isnan(arg1)]),
np.max(arg0[~np.isnan(arg0)] + arg1[~np.isnan(arg1)])]) + 1.0,'t','$X_t$',
'Time Series']
else:
values = [t[0],t[-1],np.min(arg0[~np.isnan(arg0)]) - 1.0,np.max(arg0[~np.isnan(arg0)]) + 1.0,
't','$X_t$','Time Series']
entries = []
for i in range(len(names)):
tk.Label(frame_2,text = names[i],font = "Verdana 13 bold").grid(row = 2 * i,column = 0,
padx = int(main_win.winfo_screenwidth() * 0.01))
entries.append(tk.Entry(frame_2,width = 18))
entries[-1].insert(0,values[i])
entries[-1].grid(row = 2 * i,column = 1)
for i in range(len(names)):
tk.Label(frame_2,text = "").grid(row = 2 * i + 1,column = 0)
screen_fig()
tk.Button(frame_2,text = "Replot",font = "Verdana 13 bold",command = screen_fig).grid(row = 2 * len(names),column = 0)
tk.Button(frame_2,text = "Save",font = "Verdana 13 bold",command = save_fig).grid(row = 2 * len(names),column = 1)
tk.Label(frame_2,text = "").grid(row = 2 * len(names) + 1,column = 0)
tk.Button(frame_2,text = "Reset",font = "Verdana 13 bold",command = reset_fig).grid(row = 2 * len(names) + 2,column = 0)
#############################################################################
#############################################################################
############################## SPECTRUM_PLOT ################################
#############################################################################
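# Interactive window for the Lomb-Scargle spectrum: plots the power arg0 versus
# the frequency vector arg1 and, when arg2 is non-zero, a horizontal significance
# level at arg2 (initial spectrum); the figure can be saved as spectrum_in.pdf.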
def spectrum_plot(main_win,arg0,arg1,arg2):
def save_fig():
path_tot = askdirectory()
plt.rc('text',usetex = True)
plt.rc('font',family = 'serif')
plt.plot(arg1,arg0,'b')
if arg2 != 0:
plt.plot((arg1[0],arg1[-1]),(arg2,arg2),'r')
plt.xlabel(entries[0].get())
plt.ylabel(entries[1].get())
plt.xlim(float(entries[2].get()),float(entries[3].get()))
plt.ylim(float(entries[4].get()),float(entries[5].get()))
plt.title(entries[6].get())
plt.savefig(os.path.join(path_tot,'spectrum_in.pdf'))
plt.close()
def screen_fig():
fig_ts = Figure(figsize = (x_size,y_size))
a = fig_ts.add_subplot(111)
a.plot(arg1,arg0,'b')
if arg2 != 0:
a.plot((arg1[0],arg1[-1]),(arg2,arg2),'r')
a.set_xlabel(entries[0].get(),fontsize = 15)
a.set_ylabel(entries[1].get(),fontsize = 15)
a.set_xlim(float(entries[2].get()),float(entries[3].get()))
a.set_ylim(float(entries[4].get()),float(entries[5].get()))
a.set_title(entries[6].get(),fontsize = 15)
fig_ts.tight_layout()
canvas = FigureCanvasTkAgg(fig_ts,master = frame_1)
canvas.get_tk_widget().grid(row = 0,column = 0)
canvas.draw()
def reset_fig():
for i in range(len(entries)):
entries[i].delete(0,tk.END)
entries[i].insert(0,values[i])
screen_fig()
top = tk.Toplevel(main_win)
top.geometry("%dx%d" % (int(main_win.winfo_screenwidth() * 0.93 * 0.85),
int(main_win.winfo_screenheight() * 0.65)))
if arg2 != 0:
top.wm_title("Spectrum")
else:
top.wm_title("Spectrum of residuals")
top.resizable(width = False,height = False)
frame_1 = tk.Frame(top)
frame_1.grid(row = 0,column = 0)
frame_2 = tk.Frame(top)
frame_2.grid(row = 0,column = 1)
names = ["X Label","Y Label","X Limit (left)","X Limit (right)","Y Limit (bottom)","Y Limit (top)","Title"]
if arg2 != 0:
values = ['$\\nu$','$P(\\nu)$',0,arg1[-1],0,np.max(arg0) + 10.0,'LS spectrum (initial)']
else:
values = ['$\\nu$','$P(\\nu)$',0,arg1[-1],0,np.max(arg0) + 10.0,'LS spectrum of residuals']
entries = []
for i in range(len(names)):
tk.Label(frame_2,text = names[i],font = "Verdana 13 bold").grid(row = 2 * i,column = 0,
padx = int(main_win.winfo_screenwidth() * 0.01))
entries.append(tk.Entry(frame_2,width = 18))
entries[-1].insert(0,values[i])
entries[-1].grid(row = 2 * i,column = 1)
for i in range(len(names)):
tk.Label(frame_2,text = "").grid(row = 2 * i + 1,column = 0)
screen_fig()
tk.Button(frame_2,text = "Replot",font = "Verdana 13 bold",command = screen_fig).grid(row = 2 * len(names),column = 0)
tk.Button(frame_2,text = "Save",font = "Verdana 13 bold",command = save_fig).grid(row = 2 * len(names),column = 1)
tk.Label(frame_2,text = "").grid(row = 2 * len(names) + 1,column = 0)
tk.Button(frame_2,text = "Reset",font = "Verdana 13 bold",command = reset_fig).grid(row = 2 * len(names) + 2,column = 0)
#############################################################################
#############################################################################
############################## RES_PLOT #####################################
#############################################################################
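# Two-panel window for the residuals: the residual series arg0 on top and the
# normalised residuals arg2 as bars below, with +/-3 outlier limits; arg1 is the
# residual standard deviation shown in the legend and arg3 supplies the time tick
# labels. The figure can be saved as res.pdf.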
def res_plot(main_win,arg0,arg1,arg2,arg3):
ticks = np.arange(0,len(arg0),len(arg0) / 7,dtype = int)
t = np.arange(1,len(arg0) + 1,dtype = int)
time = np.array(arg3,dtype = str)
ticks_vec = t[ticks]
time_label = time[ticks]
pn_norm_notnan = arg2[~np.isnan(arg2)]
outlier_lim = 3.0
num_outliers_max = len(pn_norm_notnan[pn_norm_notnan > outlier_lim])
num_outliers_min = len(pn_norm_notnan[pn_norm_notnan < -outlier_lim])
num_outliers = num_outliers_max + num_outliers_min
def save_fig():
path_tot = askdirectory()
plt.figure(figsize = (12,9))
plt.rc('text',usetex = True)
plt.rc('font',family = 'serif')
plt.subplot(2,1,1)
plt.plot(t,arg0)
plt.xlim(int(entries[0].get()),int(entries[1].get()))
plt.ylim(float(entries[5].get()),float(entries[6].get()))
plt.xticks(ticks,'')
plt.ylabel(entries[2].get())
plt.title(entries[4].get())
plt.margins(0.2)
plt.subplots_adjust(hspace = 0.0)
plt.subplot(2,1,2)
sigma = '%.2f' % arg1
if int(matplotlib.__version__.split('.')[0]) == 2:
plt.bar(t,arg2,width = 10,label = 'num outl = ' + str(num_outliers))
else:
plt.bar(t,arg2,width = 0.1,label = 'num outl = ' + str(num_outliers))
plt.plot((t[0],t[-1]),(outlier_lim,outlier_lim),'r',label = '$\\sigma$ = ' + sigma)
plt.plot((t[0],t[-1]),(-outlier_lim,-outlier_lim),'r')
plt.legend(loc = 0)
plt.xlim(int(entries[0].get()),int(entries[1].get()))
plt.ylim(float(entries[7].get()),float(entries[8].get()))
plt.xticks(ticks,time_label)
plt.ylabel(entries[3].get())
plt.margins(0.2)
plt.subplots_adjust(hspace = 0.0)
plt.savefig(os.path.join(path_tot,'res.pdf'))
plt.close()
def screen_fig():
fig_ts = Figure(figsize = (x_size,y_size))
a = fig_ts.add_subplot(211)
a.plot(t,arg0)
a.set_xlim(int(entries[0].get()),int(entries[1].get()))
a.set_ylim(float(entries[5].get()),float(entries[6].get()))
a.set_xticks(ticks_vec)
a.set_xticklabels('')
a.set_ylabel(entries[2].get(),fontsize = 15)
a.set_title(entries[4].get(),fontsize = 15)
b = fig_ts.add_subplot(212)
sigma = '%.2f' % arg1
if int(matplotlib.__version__.split('.')[0]) == 2:
b.bar(t,arg2,width = 10,label = 'num outl = ' + str(num_outliers))
else:
b.bar(t,arg2,width = 0.1,label = 'num outl = ' + str(num_outliers))
b.plot((t[0],t[-1]),(outlier_lim,outlier_lim),'r',label = '$\\sigma$ = ' + sigma)
b.plot((t[0],t[-1]),(-outlier_lim,-outlier_lim),'r')
b.legend(loc = 0)
b.set_xlim(int(entries[0].get()),int(entries[1].get()))
b.set_ylim(float(entries[7].get()),float(entries[8].get()))
b.set_xticks(ticks)
b.set_xticklabels(time_label)
b.set_ylabel(entries[3].get(),fontsize = 15)
fig_ts.tight_layout()
canvas = FigureCanvasTkAgg(fig_ts,master = frame_1)
canvas.get_tk_widget().grid(row = 0,column = 0)
canvas.draw()
def reset_fig():
for i in range(len(entries)):
entries[i].delete(0,tk.END)
entries[i].insert(0,values[i])
screen_fig()
top = tk.Toplevel(main_win)
top.geometry("%dx%d" % (int(main_win.winfo_screenwidth() * 0.93 * 0.85),
int(main_win.winfo_screenheight() * 0.65)))
top.wm_title("Residuals")
top.resizable(width = False,height = False)
frame_1 = tk.Frame(top)
frame_1.grid(row = 0,column = 0)
frame_2 = tk.Frame(top)
frame_2.grid(row = 0,column = 1)
names = ["X Limit (left)","X Limit (right)","Y Label (top)","Y Label (bottom)","Title",
"Y1 Limit (bottom)","Y1 Limit (top)","Y2 Limit (bottom)","Y2 Limit (top)"]
values = [t[0],t[-1],'$N_t$','$N_t^{norm}$','Residuals / Normalised residuals',np.min(arg0[~np.isnan(arg0)]) - 10.0,
np.max(arg0[~np.isnan(arg0)]) + 10.0,np.min(arg2[~np.isnan(arg0)]) - 1.0,np.max(arg2[~np.isnan(arg0)]) + 1.0]
entries = []
for i in range(len(names)):
tk.Label(frame_2,text = names[i],font = "Verdana 13 bold").grid(row = 2 * i,column = 0,
padx = int(main_win.winfo_screenwidth() * 0.01))
entries.append(tk.Entry(frame_2,width = 18))
entries[-1].insert(0,values[i])
entries[-1].grid(row = 2 * i,column = 1)
for i in range(len(names)):
tk.Label(frame_2,text = "").grid(row = 2 * i + 1,column = 0)
screen_fig()
tk.Button(frame_2,text = "Replot",font = "Verdana 13 bold",command = screen_fig).grid(row = 2 * len(names),column = 0)
tk.Button(frame_2,text = "Save",font = "Verdana 13 bold",command = save_fig).grid(row = 2 * len(names),column = 1)
tk.Label(frame_2,text = "").grid(row = 2 * len(names) + 1,column = 0)
tk.Button(frame_2,text = "Reset",font = "Verdana 13 bold",command = reset_fig).grid(row = 2 * len(names) + 2,column = 0)
#############################################################################
#############################################################################
############################## DFA_PLOT #####################################
#############################################################################
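# Window for the detrended fluctuation analysis (DFA) results, plotted on
# logarithmic axes.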
def dfa_plot(main_win,arg0,arg1,arg2,arg3):
def save_fig():
path_tot = askdirectory()
plt.rc('text',usetex = True)
plt.rc('font',family = 'serif')
plt.plot(np.log(arg0),np.log(arg1))
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import division
import os
import time
import sys
import argparse
import numpy as np
import tables
from astropy.table import Table
import logging
import warnings
from Chandra.Time import DateTime
import agasc
from kadi import events
from Ska.engarchive import fetch, fetch_sci
import mica.archive.obspar
from mica.starcheck.starcheck import get_starcheck_catalog_at_date
import Ska.astro
from Quaternion import Quat
from chandra_aca import dark_model
from chandra_aca.transform import radec_to_eci
# Ignore known numexpr.necompiler and table.conditions warning
warnings.filterwarnings(
'ignore',
message="using `oa_ndim == 0` when `op_axes` is NULL is deprecated.*",
category=DeprecationWarning)
logger = logging.getLogger('star_stats')
logger.setLevel(logging.INFO)
if not len(logger.handlers):
logger.addHandler(logging.StreamHandler())
STAT_VERSION = 0.6
GUIDE_COLS = {
'obs': [
('obsid', 'int'),
('obi', 'int'),
('kalman_tstart', 'float'),
('npnt_tstop', 'float'),
('kalman_datestart', 'S21'),
('npnt_datestop', 'S21'),
('revision', 'S15')],
'cat': [
('slot', 'int'),
('idx', 'int'),
('type', 'S5'),
('yang', 'float'),
('zang', 'float'),
('sz', 'S4'),
('mag', 'float')],
'stat': [
('n_samples', 'int'),
('n_track', 'int'),
('f_track', 'float'),
('f_racq', 'float'),
('f_srch', 'float'),
('f_none', 'float'),
('n_kalman', 'int'),
('no_track', 'float'),
('f_within_0.3', 'float'),
('f_within_1', 'float'),
('f_within_3', 'float'),
('f_within_5', 'float'),
('f_outside_5', 'float'),
('f_obc_bad', 'float'),
('f_common_col', 'float'),
('f_quad_bound', 'float'),
('f_sat_pix', 'float'),
('f_def_pix', 'float'),
('f_ion_rad', 'float'),
('f_mult_star', 'float'),
('aoacmag_min', 'float'),
('aoacmag_mean', 'float'),
('aoacmag_max', 'float'),
('aoacmag_5th', 'float'),
('aoacmag_16th', 'float'),
('aoacmag_50th', 'float'),
('aoacmag_84th', 'float'),
('aoacmag_95th', 'float'),
('aoacmag_std', 'float'),
('aoacyan_mean', 'float'),
('aoaczan_mean', 'float'),
('dy_min', 'float'),
('dy_mean', 'float'),
('dy_std', 'float'),
('dy_max', 'float'),
('dz_min', 'float'),
('dz_mean', 'float'),
('dz_std', 'float'),
('dz_max', 'float'),
('dr_min', 'float'),
('dr_mean', 'float'),
('dr_std', 'float'),
('dr_5th', 'float'),
('dr_95th', 'float'),
('dr_max', 'float'),
('n_track_interv', 'int'),
('n_long_track_interv', 'int'),
('n_long_no_track_interv', 'int'),
('n_racq_interv', 'int'),
('n_srch_interv', 'int'),
],
'agasc': [
('agasc_id', 'int'),
('color', 'float'),
('ra', 'float'),
('dec', 'float'),
('epoch', 'float'),
('pm_ra', 'int'),
('pm_dec', 'int'),
('var', 'int'),
('pos_err', 'int'),
('mag_aca', 'float'),
('mag_aca_err', 'int'),
('mag_band', 'int'),
('pos_catid', 'int'),
('aspq1', 'int'),
('aspq2', 'int'),
('aspq3', 'int'),
('acqq1', 'int'),
('acqq2', 'int'),
('acqq4', 'int')],
'temp': [
('n100_warm_frac', 'float'),
('tccd_mean', 'float'),
('tccd_max', 'float')],
'bad': [
('known_bad', 'bool'),
('bad_comment', 'S15')],
}
def get_options():
parser = argparse.ArgumentParser(
description="Update guide stats table")
parser.add_argument("--check-missing",
action='store_true',
help="check for missing observations in table and reprocess")
parser.add_argument("--obsid",
help="specific obsid to process. Not required in regular update mode")
parser.add_argument("--start",
help="start time for processing")
parser.add_argument("--stop",
help="stop time for processing")
parser.add_argument("--datafile",
default="gs.h5")
opt = parser.parse_args()
return opt
def _deltas_vs_obc_quat(vals, times, catalog):
# Misalign is the identity matrix because this is the OBC quaternion
aca_misalign = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
q_att = Quat(q=np.array([vals['AOATTQT1'],
vals['AOATTQT2'],
vals['AOATTQT3'],
vals['AOATTQT4']]).transpose())
Ts = q_att.transform
acqs = catalog
R2A = 206264.81
dy = {}
dz = {}
yag = {}
zag = {}
star_info = {}
for slot in range(0, 8):
if slot not in acqs['slot']:
continue
agasc_id = acqs[acqs['slot'] == slot][0]['id']
if agasc_id is None:
logger.info("No agasc id for slot {}, skipping".format(slot))
continue
try:
# This is not perfect for star catalogs for agasc 1.4 and 1.5
star = agasc.get_star(agasc_id, date=times[0], use_supplement=False)
except:
logger.info("agasc error on slot {}:{}".format(
slot, sys.exc_info()[0]))
continue
ra = star['RA_PMCORR']
dec = star['DEC_PMCORR']
star_pos_eci = radec_to_eci(ra, dec)
d_aca = np.dot(np.dot(aca_misalign, Ts.transpose(0, 2, 1)),
star_pos_eci).transpose()
yag[slot] = np.arctan2(d_aca[:, 1], d_aca[:, 0]) * R2A
zag[slot] = np.arctan2(d_aca[:, 2], d_aca[:, 0]) * R2A
dy[slot] = vals['AOACYAN{}'.format(slot)] - yag[slot]
dz[slot] = vals['AOACZAN{}'.format(slot)] - zag[slot]
star_info[slot] = star
return dy, dz, star_info, yag, zag
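# Note added for clarity: the loop above projects each catalog star's ECI unit
# vector into the ACA frame with the transpose of the OBC attitude transform,
# then converts to angles via
#   yag = arctan2(d_aca_y, d_aca_x) * R2A,   zag = arctan2(d_aca_z, d_aca_x) * R2A
# with R2A = 206264.81 arcsec/radian, so dy/dz are observed-minus-predicted
# centroid offsets in arcsec for each tracked slot.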
def get_data(start, stop, obsid=None, starcheck=None):
# Get telemetry
msids = ['AOACASEQ', 'AOACQSUC', 'AOFREACQ', 'AOFWAIT', 'AOREPEAT',
'AOACSTAT', 'AOACHIBK', 'AOFSTAR', 'AOFATTMD', 'AOACPRGS',
'AOATUPST', 'AONSTARS', 'AOPCADMD', 'AORFSTR1', 'AORFSTR2',
'AOATTQT1', 'AOATTQT2', 'AOATTQT3', 'AOATTQT4']
per_slot = ['AOACQID', 'AOACFCT', 'AOIMAGE',
'AOACMAG', 'AOACYAN', 'AOACZAN',
'AOACICC', 'AOACIDP', 'AOACIIR', 'AOACIMS',
'AOACIQB', 'AOACISP']
slot_msids = [field + '%s' % slot
for field in per_slot
for slot in range(0, 8)]
start_time = DateTime(start).secs
stop_time = DateTime(stop)
dat = fetch.MSIDset(msids + slot_msids,
start_time,
stop_time)
if len(dat['AOACASEQ']) == 0:
raise ValueError("No telemetry for obsid {}".format(obsid))
# Interpolate the MSIDset onto the original time grid (which shouldn't do much)
# but also remove all rows where any one msid has a bad value
dat.interpolate(times=dat['AOACASEQ'].times, bad_union=True)
eng_data = Table([col.vals for col in dat.values()], names=dat.keys())
eng_data['times'] = dat.times
times = eng_data['times']
if starcheck is None:
return eng_data, times, None
catalog = Table(starcheck['cat'])
catalog.sort('idx')
# Filter the catalog to be just guide stars
catalog = catalog[(catalog['type'] == 'GUI') | (catalog['type'] == 'BOT')]
# Get the position deltas relative to onboard solution
dy, dz, star_info, yag, zag = _deltas_vs_obc_quat(eng_data, times, catalog)
# And add the deltas to the table
for slot in range(0, 8):
if slot not in dy:
continue
eng_data['dy{}'.format(slot)] = dy[slot].data
eng_data['dz{}'.format(slot)] = dz[slot].data
eng_data['cat_yag{}'.format(slot)] = yag[slot]
eng_data['cat_zag{}'.format(slot)] = zag[slot]
cat_entry = catalog[catalog['slot'] == slot][0]
dmag = eng_data['AOACMAG{}'.format(slot)] - cat_entry['mag']
eng_data['dmag'] = dmag.data
eng_data['time'] = times
return eng_data, star_info
def consecutive(data, stepsize=1):
return np.split(data, np.where(np.diff(data) != stepsize)[0] + 1)
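# Illustrative sketch (not part of the original module): consecutive() splits a
# sorted index array wherever the gap between neighbours differs from stepsize,
# which is how the tracking/no-tracking intervals are counted in calc_gui_stats below.
def _example_consecutive():
    idx = np.array([0, 1, 2, 5, 6, 9])   # e.g. indices where AOACFCT == 'TRAK'
    chunks = consecutive(idx)
    # chunks -> [array([0, 1, 2]), array([5, 6]), array([9])]
    return chunks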
def calc_gui_stats(data, star_info):
logger.info("calculating statistics")
gui_stats = {}
for slot in range(0, 8):
if 'dy{}'.format(slot) not in data.colnames:
continue
stats = {}
aoacfct = data['AOACFCT{}'.format(slot)]
stats['n_samples'] = len(aoacfct)
if len(aoacfct) == 0:
gui_stats[slot] = stats
continue
stats['n_track'] = np.count_nonzero(aoacfct == 'TRAK')
stats['f_track'] = stats['n_track'] / stats['n_samples']
stats['f_racq'] = np.count_nonzero(aoacfct == 'RACQ') / stats['n_samples']
stats['f_srch'] = np.count_nonzero(aoacfct == 'SRCH') / stats['n_samples']
stats['f_none'] = np.count_nonzero(aoacfct == 'NONE') / stats['n_samples']
if np.all(aoacfct != 'TRAK'):
gui_stats[slot] = stats
continue
trak = data[aoacfct == 'TRAK']
ok_flags = ((trak['AOACIIR{}'.format(slot)] == 'OK ')
& (trak['AOACISP{}'.format(slot)] == 'OK '))
stats['n_kalman'] = np.count_nonzero(ok_flags)
stats['no_track'] = (stats['n_samples'] - stats['n_track']) / stats['n_samples']
stats['f_obc_bad'] = (stats['n_track'] - stats['n_kalman']) / stats['n_track']
stats['f_common_col'] = np.count_nonzero(trak['AOACICC{}'.format(slot)] == 'ERR') / stats['n_track']
stats['f_sat_pix'] = np.count_nonzero(trak['AOACISP{}'.format(slot)] == 'ERR') / stats['n_track']
stats['f_def_pix'] = np.count_nonzero(trak['AOACIDP{}'.format(slot)] == 'ERR') / stats['n_track']
stats['f_ion_rad'] = np.count_nonzero(trak['AOACIIR{}'.format(slot)] == 'ERR') / stats['n_track']
stats['f_mult_star'] = np.count_nonzero(trak['AOACIMS{}'.format(slot)] == 'ERR') / stats['n_track']
stats['f_quad_bound'] = np.count_nonzero(trak['AOACIQB{}'.format(slot)] == 'ERR') / stats['n_track']
track_interv = consecutive(np.flatnonzero(
data['AOACFCT{}'.format(slot)] == 'TRAK'))
stats['n_track_interv'] = len(track_interv)
track_interv_durations = np.array([len(interv) for interv in track_interv])
stats['n_long_track_interv'] = np.count_nonzero(track_interv_durations > 60)
not_track_interv = consecutive(np.flatnonzero(
data['AOACFCT{}'.format(slot)] != 'TRAK'))
not_track_interv_durations = np.array([len(interv) for interv in not_track_interv])
stats['n_long_no_track_interv'] = np.count_nonzero(not_track_interv_durations > 60)
stats['n_racq_interv'] = len(consecutive(np.flatnonzero(
data['AOACFCT{}'.format(slot)] == 'RACQ')))
stats['n_srch_interv'] = len(consecutive(np.flatnonzero(
data['AOACFCT{}'.format(slot)] == 'SRCH')))
stats['n_track_interv'] = len(consecutive(np.flatnonzero(
data['AOACFCT{}'.format(slot)] == 'TRAK')))
# reduce this to just the samples that don't have IR or SP set and are in Kalman on guide stars
# and are after the first 60 seconds
kal = trak[ok_flags & (trak['AOACASEQ'] == 'KALM') & (trak['AOPCADMD'] == 'NPNT')
& (trak['AOFSTAR'] == 'GUID') & (trak['time'] > (data['time'][0] + 60))]
dy = kal['dy{}'.format(slot)]
dz = kal['dz{}'.format(slot)]
# cheating here and ignoring spherical trig
dr = (dy ** 2 + dz ** 2) ** .5
stats['star_tracked'] = np.any(dr < 5.0)
stats['spoiler_tracked'] = np.any(dr > 5.0)
deltas = {'dy': dy, 'dz': dz, 'dr': dr}
stats['dr_5th'] = np.percentile(deltas['dr'], 5)
stats['dr_95th'] = np.percentile(deltas['dr'], 95)
for ax in deltas:
stats['{}_mean'.format(ax)] = np.mean(deltas[ax])
stats['{}_std'.format(ax)] = np.std(deltas[ax])
stats['{}_max'.format(ax)] = np.max(deltas[ax])
stats['{}_min'.format(ax)] = np.min(deltas[ax])
mag = kal['AOACMAG{}'.format(slot)]
stats['aoacmag_min'] = np.min(mag)
stats['aoacmag_mean'] = np.mean(mag)
stats['aoacmag_max'] = np.max(mag)
stats['aoacmag_std'] = np.std(mag)
for perc in [5, 16, 50, 84, 95]:
stats[f'aoacmag_{perc}th'] = np.percentile(mag, perc)
stats['aoacyan_mean'] = np.mean(kal['AOACYAN{}'.format(slot)])
stats['aoaczan_mean'] = np.mean(kal['AOACZAN{}'.format(slot)])
for dist in ['0.3', '1', '3', '5']:
stats['f_within_{}'.format(dist)] = np.count_nonzero(dr < float(dist)) / len(kal)
stats['f_outside_5'] = np.count_nonzero(dr > 5) / len(kal)
gui_stats[slot] = stats
return gui_stats
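# Illustrative usage sketch (the date range is hypothetical, not from the real
# processing flow): calc_gui_stats consumes the telemetry table and star info
# returned by get_data() for a single observation, e.g.
#
#   sc = get_starcheck_catalog_at_date('2016:001:12:00:00')
#   eng_data, star_info = get_data('2016:001:12:00:00', '2016:001:18:00:00',
#                                  starcheck=sc)
#   gui_stats = calc_gui_stats(eng_data, star_info)
#
# calc_stats() below shows how the real start/stop times are taken from kadi
# maneuver and dwell events.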
def _get_obsids_to_update(check_missing=False, table_file=None, start=None, stop=None):
if check_missing:
last_tstart = start if start is not None else '2007:271:12:00:00'
kadi_obsids = events.obsids.filter(start=last_tstart)
try:
h5 = tables.open_file(table_file, 'r')
tbl = h5.root.data[:]
h5.close()
except:
            raise ValueError("Could not read existing stats table {}".format(table_file))
# get all obsids that aren't already in tbl
obsids = [o.obsid for o in kadi_obsids if o.obsid not in tbl['obsid']]
else:
try:
h5 = tables.open_file(table_file, 'r')
tbl = h5.get_node('/', 'data')
last_tstart = tbl.cols.kalman_tstart[tbl.colindexes['kalman_tstart'][-1]]
h5.close()
except:
last_tstart = start if start is not None else '2002:012:12:00:00'
kadi_obsids = events.obsids.filter(start=last_tstart, stop=stop)
# Skip the first obsid (as we already have it in the table)
obsids = [o.obsid for o in kadi_obsids][1:]
return obsids
def calc_stats(obsid):
obspar = mica.archive.obspar.get_obspar(obsid)
if not obspar:
raise ValueError("No obspar for {}".format(obsid))
manvr = None
dwell = None
try:
manvrs = events.manvrs.filter(obsid=obsid, n_dwell__gt=0)
dwells = events.dwells.filter(obsid=obsid)
if dwells.count() == 1 and manvrs.count() == 0:
            # If there is more than one dwell for the manvr but they have
            # different obsids (unusual), don't throw an overlapping-interval kadi error;
            # just get the maneuver to the attitude with this dwell
dwell = dwells[0]
manvr = dwell.manvr
elif dwells.count() == 0:
# If there's just nothing, that doesn't need an error here
# and gets caught outside the try/except
pass
else:
# Else just take the first matches from each
manvr = manvrs[0]
dwell = dwells[0]
except ValueError:
multi_manvr = events.manvrs.filter(start=obspar['tstart'] - 100000,
stop=obspar['tstart'] + 100000)
multi = multi_manvr.select_overlapping(events.obsids(obsid=obsid))
deltas = [np.abs(m.tstart - obspar['tstart']) for m in multi]
        manvr = multi[np.argmin(deltas)]
from __future__ import absolute_import, division, print_function, unicode_literals
from banzai.utils import stats
import numpy as np
from numpy import ma
np.random.seed(10031312)
def test_median_axis_none_mask_none():
for i in range(25):
size = np.random.randint(1, 10000)
mean = np.random.uniform(-1000, 1000)
sigma = np.random.uniform(0, 1000)
a = np.random.normal(mean, sigma, size)
expected = np.median(a.astype(np.float32))
actual = stats.median(a)
assert np.float32(expected) == actual
def test_median_2d_axis_none_mask_none():
for i in range(5):
size1 = np.random.randint(1, 300)
size2 = np.random.randint(1, 300)
mean = np.random.uniform(-1000, 1000)
sigma = np.random.uniform(0, 1000)
a = np.random.normal(mean, sigma, size=(size1, size2))
expected = np.median(a.astype(np.float32))
actual = stats.median(a)
assert np.float32(expected) == actual
def test_median_3d_axis_none_mask_none():
for i in range(5):
size1 = np.random.randint(1, 50)
size2 = np.random.randint(1, 50)
size3 = np.random.randint(1, 50)
mean = np.random.uniform(-1000, 1000)
sigma = np.random.uniform(0, 1000)
a = np.random.normal(mean, sigma, size=(size1, size2, size3))
expected = np.median(a.astype(np.float32))
actual = stats.median(a)
assert np.float32(expected) == actual
def test_median_2d_axis_0_mask_none():
for i in range(5):
size1 = np.random.randint(1, 300)
size2 = np.random.randint(1, 300)
mean = np.random.uniform(-1000, 1000)
sigma = np.random.uniform(0, 1000)
a = np.random.normal(mean, sigma, size=(size1, size2))
expected = np.median(a.astype(np.float32), axis=0)
actual = stats.median(a, axis=0)
np.testing.assert_allclose(actual, expected.astype(np.float32), atol=1e-6)
def test_median_2d_axis_1_mask_none():
for i in range(5):
size1 = np.random.randint(1, 300)
size2 = np.random.randint(5, 300)
mean = np.random.uniform(-1000, 1000)
sigma = np.random.uniform(0, 1000)
a = np.random.normal(mean, sigma, size=(size1, size2))
expected = np.median(a.astype(np.float32), axis=1)
actual = stats.median(a, axis=1)
np.testing.assert_allclose(actual, expected.astype(np.float32), atol=1e-6)
def test_median_3d_axis_0_mask_none():
for i in range(5):
size1 = np.random.randint(5, 50)
size2 = np.random.randint(1, 50)
size3 = np.random.randint(1, 50)
mean = np.random.uniform(-1000, 1000)
sigma = np.random.uniform(0, 1000)
a = np.random.normal(mean, sigma, size=(size1, size2, size3))
expected = np.median(a.astype(np.float32), axis=0)
actual = stats.median(a, axis=0)
np.testing.assert_allclose(actual, expected.astype(np.float32), atol=1e-6)
def test_median_3d_axis_1_mask_none():
for i in range(5):
size1 = np.random.randint(1, 50)
size2 = np.random.randint(5, 50)
size3 = np.random.randint(1, 50)
mean = np.random.uniform(-1000, 1000)
sigma = np.random.uniform(0, 1000)
a = np.random.normal(mean, sigma, size=(size1, size2, size3))
expected = np.median(a.astype(np.float32), axis=1)
actual = stats.median(a, axis=1)
np.testing.assert_allclose(actual, expected.astype(np.float32), atol=1e-6)
def test_median_3d_axis_2_mask_none():
for i in range(5):
size1 = np.random.randint(1, 50)
size2 = np.random.randint(1, 50)
size3 = np.random.randint(5, 50)
mean = np.random.uniform(-1000, 1000)
sigma = np.random.uniform(0, 1000)
a = np.random.normal(mean, sigma, size=(size1, size2, size3))
expected = np.median(a.astype(np.float32), axis=2)
actual = stats.median(a, axis=2)
np.testing.assert_allclose(actual, expected.astype(np.float32), atol=1e-6)
def test_median_axis_none_mask():
for i in range(25):
size = np.random.randint(1, 10000)
mean = np.random.uniform(-1000, 1000)
sigma = np.random.uniform(0, 1000)
a = np.random.normal(mean, sigma, size)
value_to_mask = np.random.uniform(0, 1.0)
mask = np.random.uniform(0, 1, size) < value_to_mask
expected = ma.median(ma.array(a, mask=mask, dtype=np.float32))
actual = stats.median(a, mask=mask)
assert np.float32(expected) == actual
def test_median_2d_axis_none_mask():
for i in range(5):
size1 = np.random.randint(1, 300)
size2 = np.random.randint(1, 300)
mean = np.random.uniform(-1000, 1000)
sigma = np.random.uniform(0, 1000)
value_to_mask = np.random.uniform(0, 1)
mask = np.random.uniform(0, 1, size=(size1, size2)) < value_to_mask
a = np.random.normal(mean, sigma, size=(size1, size2))
expected = ma.median(ma.array(a, mask=mask, dtype=np.float32))
actual = stats.median(a, mask=mask)
assert np.float32(expected) == actual
def test_median_3d_axis_none_mask():
for i in range(5):
size1 = np.random.randint(1, 50)
size2 = np.random.randint(1, 50)
size3 = np.random.randint(1, 50)
mean = np.random.uniform(-1000, 1000)
sigma = np.random.uniform(0, 1000)
value_to_mask = np.random.uniform(0, 1)
mask = np.random.uniform(0., 1.0, size=(size1, size2, size3)) < value_to_mask
a = np.random.normal(mean, sigma, size=(size1, size2, size3))
expected = ma.median(ma.array(a, mask=mask, dtype=np.float32))
actual = stats.median(a, mask=mask)
assert np.float32(expected) == actual
def test_median_2d_axis_0_mask():
for i in range(5):
size1 = np.random.randint(5, 300)
size2 = np.random.randint(1, 300)
mean = np.random.uniform(-1000, 1000)
sigma = np.random.uniform(0, 1000)
value_to_mask = np.random.uniform(0, 1)
mask = np.random.uniform(0., 1.0, size=(size1, size2)) < value_to_mask
a = np.random.normal(mean, sigma, size=(size1, size2))
expected = ma.median(ma.array(a, mask=mask, dtype=np.float32), axis=0)
actual = stats.median(a, mask=mask, axis=0)
np.testing.assert_allclose(actual, expected.astype(np.float32), atol=1e-6)
def test_median_2d_axis_1_mask():
for i in range(5):
size1 = np.random.randint(1, 300)
size2 = np.random.randint(5, 300)
mean = np.random.uniform(-1000, 1000)
sigma = np.random.uniform(0, 1000)
value_to_mask = np.random.uniform(0, 1)
mask = np.random.uniform(0., 1.0, size=(size1, size2)) < value_to_mask
a = np.random.normal(mean, sigma, size=(size1, size2))
expected = ma.median(ma.array(a, mask=mask, dtype=np.float32), axis=1)
actual = stats.median(a, mask=mask, axis=1)
np.testing.assert_allclose(actual, expected.astype(np.float32), atol=1e-6)
def test_median_3d_axis_0_mask():
for i in range(5):
size1 = np.random.randint(5, 50)
size2 = np.random.randint(1, 50)
size3 = np.random.randint(1, 50)
mean = np.random.uniform(-1000, 1000)
sigma = np.random.uniform(0, 1000)
value_to_mask = np.random.uniform(0, 1)
mask = np.random.uniform(0, 1, size=(size1, size2, size3)) < value_to_mask
a = np.random.normal(mean, sigma, size=(size1, size2, size3))
expected = ma.median(ma.array(a, mask=mask, dtype=np.float32), axis=0)
actual = stats.median(a, mask=mask, axis=0)
np.testing.assert_allclose(actual, expected.astype(np.float32), atol=1e-6)
def test_median_3d_axis_1_mask():
for i in range(5):
size1 = np.random.randint(1, 50)
size2 = np.random.randint(5, 50)
size3 = np.random.randint(1, 50)
mean = np.random.uniform(-1000, 1000)
sigma = np.random.uniform(0, 1000)
value_to_mask = np.random.uniform(0, 1)
mask = np.random.uniform(0, 1, size=(size1, size2, size3)) < value_to_mask
a = np.random.normal(mean, sigma, size=(size1, size2, size3))
expected = ma.median(ma.array(a, mask=mask, dtype=np.float32), axis=1)
actual = stats.median(a, mask=mask, axis=1)
np.testing.assert_allclose(actual, expected.astype(np.float32), atol=1e-6)
def test_median_3d_axis_2_mask():
for i in range(5):
size1 = np.random.randint(1, 50)
size2 = np.random.randint(1, 50)
size3 = np.random.randint(5, 50)
mean = np.random.uniform(-1000, 1000)
sigma = np.random.uniform(0, 1000)
value_to_mask = np.random.uniform(0, 1)
mask = np.random.uniform(0, 1, size=(size1, size2, size3)) < value_to_mask
a = np.random.normal(mean, sigma, size=(size1, size2, size3))
expected = ma.median(ma.array(a, mask=mask, dtype=np.float32), axis=2)
actual = stats.median(a, mask=mask, axis=2)
np.testing.assert_allclose(actual, expected.astype(np.float32), atol=1e-6)
def test_absolute_deviation_axis_none_mask_none():
for i in range(250):
size = np.random.randint(1, 10000)
mean = np.random.uniform(-1000, 1000)
sigma = np.random.uniform(0, 1000)
a = np.random.normal(mean, sigma, size)
expected = np.abs(a.astype(np.float32) - np.median(a.astype(np.float32)))
actual = stats.absolute_deviation(a)
np.testing.assert_allclose(actual, np.float32(expected), atol=1e-4)
def test_absolute_deviation_2d_axis_none_mask_none():
for i in range(5):
size1 = np.random.randint(1, 300)
size2 = np.random.randint(1, 300)
mean = np.random.uniform(-1000, 1000)
sigma = np.random.uniform(0, 1000)
a = np.random.normal(mean, sigma, size=(size1, size2))
expected = np.abs(a.astype(np.float32) - np.median(a.astype(np.float32)))
actual = stats.absolute_deviation(a)
np.testing.assert_allclose(actual, np.float32(expected), atol=1e-4)
def test_absolute_deviation_3d_axis_none_mask_none():
for i in range(5):
size1 = np.random.randint(1, 50)
size2 = np.random.randint(1, 50)
size3 = np.random.randint(1, 50)
mean = np.random.uniform(-1000, 1000)
sigma = np.random.uniform(0, 1000)
a = np.random.normal(mean, sigma, size=(size1, size2, size3))
expected = np.abs(a.astype(np.float32) - np.median(a.astype(np.float32)))
actual = stats.absolute_deviation(a)
np.testing.assert_allclose(actual, np.float32(expected), atol=1e-4)
def test_absolute_deviation_2d_axis_0_mask_none():
for i in range(5):
size1 = np.random.randint(5, 300)
size2 = np.random.randint(1, 300)
mean = np.random.uniform(-1000, 1000)
sigma = np.random.uniform(0, 1000)
a = np.random.normal(mean, sigma, size=(size1, size2))
expected = np.abs(a.astype(np.float32) - np.median(a.astype(np.float32), axis=0))
actual = stats.absolute_deviation(a, axis=0)
np.testing.assert_allclose(actual, expected.astype(np.float32), atol=1e-4)
def test_absolute_deviation_2d_axis_1_mask_none():
for i in range(5):
size1 = np.random.randint(1, 300)
size2 = np.random.randint(5, 300)
mean = np.random.uniform(-1000, 1000)
sigma = np.random.uniform(0, 1000)
a = np.random.normal(mean, sigma, size=(size1, size2))
expected = np.abs(a.astype(np.float32).T - np.median(a.astype(np.float32), axis=1)).T
actual = stats.absolute_deviation(a, axis=1)
np.testing.assert_allclose(actual, expected.astype(np.float32), atol=1e-4)
def test_absolute_deviation_3d_axis_0_mask_none():
for i in range(5):
size1 = np.random.randint(5, 50)
size2 = np.random.randint(1, 50)
size3 = np.random.randint(1, 50)
mean = np.random.uniform(-1000, 1000)
sigma = np.random.uniform(0, 1000)
a = np.random.normal(mean, sigma, size=(size1, size2, size3))
expected = np.abs(a.astype(np.float32) - np.median(a.astype(np.float32), axis=0))
actual = stats.absolute_deviation(a, axis=0)
np.testing.assert_allclose(actual, expected.astype(np.float32), atol=1e-4)
def test_absolute_deviation_3d_axis_1_mask_none():
for i in range(5):
size1 = np.random.randint(1, 50)
size2 = np.random.randint(5, 50)
size3 = np.random.randint(1, 50)
mean = np.random.uniform(-1000, 1000)
sigma = np.random.uniform(0, 1000)
a = np.random.normal(mean, sigma, size=(size1, size2, size3))
expected = np.abs(a.astype(np.float32) - np.median(a.astype(np.float32), axis=1).reshape(size1, 1, size3))
actual = stats.absolute_deviation(a, axis=1)
np.testing.assert_allclose(actual, expected.astype(np.float32), atol=1e-4)
def test_absolute_deviation_3d_axis_2_mask_none():
for i in range(5):
size1 = np.random.randint(1, 50)
size2 = np.random.randint(5, 50)
size3 = np.random.randint(1, 50)
mean = np.random.uniform(-1000, 1000)
sigma = np.random.uniform(0, 1000)
a = np.random.normal(mean, sigma, size=(size1, size2, size3))
expected = np.abs(a.astype(np.float32) - np.median(a.astype(np.float32), axis=2).reshape(size1, size2, 1))
actual = stats.absolute_deviation(a, axis=2)
np.testing.assert_allclose(actual, expected.astype(np.float32), atol=5e-4)
def test_absolute_deviation_axis_none_mask():
for i in range(25):
size = np.random.randint(1, 10000)
mean = np.random.uniform(-1000, 1000)
sigma = np.random.uniform(0, 1000)
a = np.random.normal(mean, sigma, size)
value_to_mask = np.random.uniform(0, 0.8)
mask = np.random.uniform(0, 1.0, size) < value_to_mask
a_masked = ma.array(a, mask=mask, dtype=np.float32)
expected = np.abs(a_masked - ma.median(a_masked))
actual = stats.absolute_deviation(a, mask=mask)
np.testing.assert_allclose(actual[~mask], np.float32(expected)[~mask], atol=1e-4)
def test_absolute_deviation_2d_axis_none_mask():
for i in range(5):
size1 = np.random.randint(1, 300)
size2 = np.random.randint(1, 300)
mean = np.random.uniform(-1000, 1000)
sigma = np.random.uniform(0, 1000)
a = np.random.normal(mean, sigma, size=(size1, size2))
value_to_mask = np.random.uniform(0, 0.8)
mask = np.random.uniform(0, 1.0, size=(size1, size2)) < value_to_mask
a_masked = ma.array(a, mask=mask, dtype=np.float32)
expected = np.abs(a - ma.median(a_masked))
actual = stats.absolute_deviation(a, mask=mask)
np.testing.assert_allclose(actual, np.float32(expected), atol=1e-4)
def test_absolute_deviation_3d_axis_none_mask():
for i in range(5):
size1 = np.random.randint(1, 50)
size2 = np.random.randint(1, 50)
size3 = np.random.randint(1, 50)
mean = np.random.uniform(-1000, 1000)
sigma = np.random.uniform(0, 1000)
a = np.random.normal(mean, sigma, size=(size1, size2, size3))
value_to_mask = np.random.uniform(0, 0.8)
mask = np.random.uniform(0, 1.0, size=(size1, size2, size3)) < value_to_mask
a_masked = ma.array(a, mask=mask, dtype=np.float32)
expected = np.abs(a - ma.median(a_masked))
actual = stats.absolute_deviation(a, mask=mask)
np.testing.assert_allclose(actual, np.float32(expected), atol=1e-8)
def test_absolute_deviation_2d_axis_0_mask():
for i in range(5):
size1 = np.random.randint(5, 300)
size2 = np.random.randint(1, 300)
mean = np.random.uniform(-1000, 1000)
sigma = np.random.uniform(0, 1000)
a = np.random.normal(mean, sigma, size=(size1, size2))
value_to_mask = np.random.uniform(0, 0.8)
mask = np.random.uniform(0, 1.0, size=(size1, size2)) < value_to_mask
a_masked = ma.array(a, mask=mask, dtype=np.float32)
expected = np.abs(a - ma.median(a_masked, axis=0))
actual = stats.absolute_deviation(a, mask=mask, axis=0)
np.testing.assert_allclose(actual, np.float32(expected), atol=1e-4)
def test_absolute_deviation_2d_axis_1_mask():
for i in range(5):
size1 = np.random.randint(1, 300)
size2 = np.random.randint(5, 300)
mean = np.random.uniform(-1000, 1000)
sigma = np.random.uniform(0, 1000)
a = np.random.normal(mean, sigma, size=(size1, size2))
value_to_mask = np.random.uniform(0, 0.8)
mask = np.random.uniform(0, 1.0, size=(size1, size2)) < value_to_mask
a_masked = ma.array(a, mask=mask, dtype=np.float32)
expected = np.abs(a.T - ma.median(a_masked, axis=1)).T
actual = stats.absolute_deviation(a, mask=mask, axis=1)
np.testing.assert_allclose(actual, np.float32(expected), atol=1e-4)
def test_absolute_deviation_3d_axis_0_mask():
for i in range(5):
size1 = np.random.randint(5, 50)
size2 = np.random.randint(1, 50)
size3 = np.random.randint(1, 50)
mean = np.random.uniform(-1000, 1000)
sigma = np.random.uniform(0, 1000)
a = np.random.normal(mean, sigma, size=(size1, size2, size3))
value_to_mask = np.random.uniform(0, 0.8)
mask = np.random.uniform(0, 1.0, size=(size1, size2, size3)) < value_to_mask
a_masked = ma.array(a, mask=mask, dtype=np.float32)
expected = np.abs(a - ma.median(a_masked, axis=0))
actual = stats.absolute_deviation(a, mask=mask, axis=0)
np.testing.assert_allclose(actual, expected.astype(np.float32), atol=1e-9)
def test_absolute_deviation_3d_axis_1_mask():
for i in range(5):
size1 = np.random.randint(1, 50)
size2 = np.random.randint(5, 50)
size3 = np.random.randint(1, 50)
mean = np.random.uniform(-1000, 1000)
sigma = np.random.uniform(0, 1000)
a = np.random.normal(mean, sigma, size=(size1, size2, size3))
value_to_mask = np.random.uniform(0, 0.8)
mask = np.random.uniform(0, 1.0, size=(size1, size2, size3)) < value_to_mask
a_masked = ma.array(a, mask=mask, dtype=np.float32)
expected = np.abs(a - np.expand_dims(ma.median(a_masked, axis=1), axis=1))
actual = stats.absolute_deviation(a, mask=mask, axis=1)
np.testing.assert_allclose(actual, expected.astype(np.float32), atol=1e-9)
def test_absolute_deviation_3d_axis_2_mask():
for i in range(5):
size1 = np.random.randint(1, 50)
size2 = np.random.randint(1, 50)
size3 = np.random.randint(5, 50)
mean = np.random.uniform(-1000, 1000)
sigma = np.random.uniform(0, 1000)
a = np.random.normal(mean, sigma, size=(size1, size2, size3))
value_to_mask = np.random.uniform(0, 0.8)
mask = np.random.uniform(0, 1.0, size=(size1, size2, size3)) < value_to_mask
a_masked = ma.array(a, mask=mask, dtype=np.float32)
expected = np.abs(a - np.expand_dims(ma.median(a_masked, axis=2), axis=2))
actual = stats.absolute_deviation(a, mask=mask, axis=2)
np.testing.assert_allclose(actual, expected.astype(np.float32), atol=1e-9)
def test_mad_axis_none_mask_none():
for i in range(25):
size = np.random.randint(1, 10000)
mean = np.random.uniform(-1000, 1000)
sigma = np.random.uniform(0, 1000)
a = np.random.normal(mean, sigma, size)
expected = np.median(np.abs(a.astype(np.float32) - np.median(a.astype(np.float32))))
actual = stats.median_absolute_deviation(a)
np.testing.assert_allclose(actual, np.float32(expected), atol=1e-4)
def test_mad_2d_axis_none_mask_none():
for i in range(5):
size1 = np.random.randint(1, 300)
size2 = np.random.randint(1, 300)
mean = np.random.uniform(-1000, 1000)
sigma = np.random.uniform(0, 1000)
a = np.random.normal(mean, sigma, size=(size1, size2))
expected = np.median(np.abs(a.astype(np.float32) - np.median(a.astype(np.float32))))
actual = stats.median_absolute_deviation(a)
np.testing.assert_allclose(actual, np.float32(expected), atol=1e-4)
def test_mad_3d_axis_none_mask_none():
for i in range(5):
size1 = np.random.randint(1, 50)
size2 = np.random.randint(1, 50)
size3 = np.random.randint(1, 50)
mean = np.random.uniform(-1000, 1000)
sigma = np.random.uniform(0, 1000)
a = np.random.normal(mean, sigma, size=(size1, size2, size3))
expected = np.median(np.abs(a.astype(np.float32) - np.median(a.astype(np.float32))))
actual = stats.median_absolute_deviation(a)
np.testing.assert_allclose(actual, np.float32(expected), atol=1e-4)
def test_mad_2d_axis_0_mask_none():
for i in range(5):
size1 = np.random.randint(5, 300)
size2 = np.random.randint(1, 300)
mean = np.random.uniform(-1000, 1000)
sigma = np.random.uniform(0, 1000)
a = np.random.normal(mean, sigma, size=(size1, size2))
expected = np.median(np.abs(a.astype(np.float32) - np.median(a.astype(np.float32), axis=0)), axis=0)
actual = stats.median_absolute_deviation(a, axis=0)
np.testing.assert_allclose(actual, expected.astype(np.float32), atol=1e-4)
def test_mad_2d_axis_1_mask_none():
for i in range(5):
size1 = np.random.randint(1, 300)
size2 = np.random.randint(5, 300)
mean = np.random.uniform(-1000, 1000)
sigma = np.random.uniform(0, 1000)
a = np.random.normal(mean, sigma, size=(size1, size2))
expected = np.median(np.abs(a.astype(np.float32).T - np.median(a.astype(np.float32), axis=1)).T, axis=1)
actual = stats.median_absolute_deviation(a, axis=1)
np.testing.assert_allclose(actual, expected.astype(np.float32), atol=1e-4)
def test_mad_3d_axis_0_mask_none():
for i in range(5):
size1 = np.random.randint(5, 50)
size2 = np.random.randint(1, 50)
size3 = np.random.randint(1, 50)
mean = np.random.uniform(-1000, 1000)
sigma = np.random.uniform(0, 1000)
a = np.random.normal(mean, sigma, size=(size1, size2, size3))
expected = np.median(np.abs(a.astype(np.float32) - np.median(a.astype(np.float32), axis=0)), axis=0)
actual = stats.median_absolute_deviation(a, axis=0)
np.testing.assert_allclose(actual, expected.astype(np.float32), atol=1e-4)
def test_mad_3d_axis_1_mask_none():
for i in range(5):
size1 = np.random.randint(1, 50)
size2 = np.random.randint(5, 50)
size3 = np.random.randint(1, 50)
mean = np.random.uniform(-1000, 1000)
sigma = np.random.uniform(0, 1000)
a = np.random.normal(mean, sigma, size=(size1, size2, size3))
expected = np.median(np.abs(a.astype(np.float32) - np.median(a.astype(np.float32), axis=1).reshape(size1, 1, size3)), axis=1)
actual = stats.median_absolute_deviation(a, axis=1)
np.testing.assert_allclose(actual, expected.astype(np.float32), atol=1e-4)
def test_mad_3d_axis_2_mask_none():
for i in range(5):
size1 = np.random.randint(10, 50)
size2 = np.random.randint(10, 50)
size3 = np.random.randint(10, 50)
mean = np.random.uniform(-1000, 1000)
sigma = np.random.uniform(0, 1000)
a = np.random.normal(mean, sigma, size=(size1, size2, size3))
b = a.copy()
expected = np.median(np.abs(a.astype(np.float32) - np.median(a.astype(np.float32), axis=2).reshape(size1, size2, 1)), axis=2)
actual = stats.median_absolute_deviation(b, axis=2)
np.testing.assert_allclose(actual, expected, rtol=1e-5)
def test_mad_axis_none_mask():
for i in range(25):
size = np.random.randint(1, 10000)
mean = np.random.uniform(-1000, 1000)
sigma = np.random.uniform(0, 1000)
a = np.random.normal(mean, sigma, size)
value_to_mask = np.random.uniform(0, 0.8)
mask = np.random.uniform(0, 1.0, size) < value_to_mask
a_masked = ma.array(a, mask=mask, dtype=np.float32)
expected = ma.median(ma.array(np.abs(a_masked - ma.median(a_masked)), dtype=np.float32, mask=mask))
actual = stats.median_absolute_deviation(a, mask=mask)
np.testing.assert_allclose(actual, np.float32(expected), atol=1e-4)
def test_mad_2d_axis_none_mask():
for i in range(5):
size1 = np.random.randint(1, 300)
size2 = np.random.randint(1, 300)
mean = np.random.uniform(-1000, 1000)
        sigma = np.random.uniform(0, 1000)
"""Test functions for matrix module
"""
from numpy.testing import (
assert_equal, assert_array_equal, assert_array_max_ulp,
assert_array_almost_equal, assert_raises, assert_
)
from numpy import (
arange, add, fliplr, flipud, zeros, ones, eye, array, diag, histogram2d,
tri, mask_indices, triu_indices, triu_indices_from, tril_indices,
tril_indices_from, vander,
)
import numpy as np
from numpy.core.tests.test_overrides import requires_array_function
def get_mat(n):
data = arange(n)
data = add.outer(data, data)
return data
class TestEye(object):
def test_basic(self):
assert_equal(eye(4),
array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]]))
        assert_equal(eye(4, dtype='f'),
                     array([[1, 0, 0, 0],
                            [0, 1, 0, 0],
                            [0, 0, 1, 0],
                            [0, 0, 0, 1]], 'f'))
import numpy as np
def dist_sphere(r0,the0,phi0,r1,the1,phi1):
# (r0**2 + r1**2 - 2*r0*r1*(np.sin(the0)*np.sin(the1)*np.cos(phi0-phi1) + np.cos(the0)*np.cos(the1)))**.5
a = r0**2 + r1**2
b = 2*r0*r1
    c = np.sin(the0)*np.sin(the1)*np.cos(phi0-phi1) + np.cos(the0)*np.cos(the1)
    # assembled per the law-of-cosines expression in the comment above
    return (a - b*c)**.5
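# Minimal usage sketch (illustrative addition, not in the original file):
def _example_dist_sphere():
    # two points at unit radius, 90 degrees apart on the equator
    d = dist_sphere(1.0, np.pi/2, 0.0, 1.0, np.pi/2, np.pi/2)
    # d -> sqrt(2), the straight-line (chord) distance between the two points
    return d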
# Differential Photometry script written in April 2019 by SKB, MP, KWD for WIYN 0.9m HDI data.
# This script calculates aperture photometry and differential photometry for all stars in an image,
# and takes target positions to pull out differential photometry of the target stars. Comparison
# stars are chosen automatically from the lowest-variability percentile of stars in the image.
# The script is run from a driver Jupyter notebook.
# Initially created by <NAME> as a Jupyter notebook, 2018.
# Turned into modular form by <NAME>, April 2019.
# Modified by <NAME>, <NAME>, <NAME>, April 2019.
# python 2/3 compatibility
from __future__ import print_function
# numerical python
import numpy as np
# file management tools
import glob
import os
# good module for timing tests
import time
# plotting stuff
import matplotlib.pyplot as plt
import matplotlib.cm as cm
# ability to read/write fits files
from astropy.io import fits
# fancy image combination technique
from astropy.stats import sigma_clip
# median absolute deviation: for photometry
from astropy.stats import mad_std
# photometric utilities
from photutils import DAOStarFinder,aperture_photometry, CircularAperture, CircularAnnulus, Background2D, MedianBackground
# periodograms
from astropy.stats import LombScargle
from regions import read_ds9, write_ds9
from astropy.wcs import WCS
import warnings
import pandas as pd
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy.visualization import ZScaleInterval
import numpy.ma as ma
warnings.filterwarnings("ignore")
np.set_printoptions(suppress=True)
def construct_astrometry(hdr_wcs):
'''
construct_astrometry
make the pixel to RA/Dec conversion (and back) from the header of an astrometry.net return
inputs
------------------------------
hdr_wcs : header with astrometry information, typically from astrometry.net
returns
------------------------------
w : the WCS instance
'''
# initialize the World Coordinate System
w = WCS(naxis=2)
# specify the pixel to RA/Dec conversion
w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
w.wcs.cd = np.array([[hdr_wcs['CD1_1'],hdr_wcs['CD1_2']],[hdr_wcs['CD2_1'],hdr_wcs['CD2_2']]])
w.wcs.crpix = [hdr_wcs['CRPIX1'], hdr_wcs['CRPIX2']]
w.wcs.crval = [hdr_wcs['CRVAL1'],hdr_wcs['CRVAL2']]
w.wcs.cunit = [hdr_wcs['CUNIT1'],hdr_wcs['CUNIT2']]
w.wcs.latpole = hdr_wcs['LATPOLE']
#w.wcs.lonpole = hdr_wcs['LONPOLE']
w.wcs.theta0 = hdr_wcs['LONPOLE']
w.wcs.equinox = hdr_wcs['EQUINOX']
# calculate the RA/Dec to pixel conversion
w.wcs.fix()
w.wcs.cdfix()
w.wcs.set()
# return the instance
return w
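# Illustrative usage sketch (the file name is hypothetical): build the WCS from
# a solved image header and convert between pixel and sky coordinates.
#
#   hdr_wcs = fits.getheader('solved_image.fits')
#   w = construct_astrometry(hdr_wcs)
#   ra, dec = w.wcs_pix2world([[512.0, 512.0]], 1)[0]
#   xpix, ypix = w.wcs_world2pix([[ra, dec]], 1)[0]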
def StarFind(imname, FWHM, nsigma):
'''
StarFind
find all stars in a .fits image
inputs
----------
imname: name of .fits image to open.
FWHM: fwhm of stars in field
nsigma: number of sigma above background above which to select sources. (~3 to 4 is a good estimate)
outputs
--------
xpos: x positions of sources
ypos: y positions of sources
nstars: number of stars found in image
'''
#open image
im,hdr=fits.getdata(imname, header=True)
im = np.array(im).astype('float')
#determine background
bkg_sigma = mad_std(im)
print('begin: DAOStarFinder')
daofind = DAOStarFinder(fwhm=FWHM, threshold=nsigma*bkg_sigma, exclude_border=True)
sources = daofind(im)
#x and y positions
xpos = sources['xcentroid']
ypos = sources['ycentroid']
#number of stars found
nstars = len(xpos)
print('found ' + str(nstars) + ' stars')
return xpos, ypos, nstars
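# Illustrative usage sketch (file name and parameter values are made up):
#
#   xpos, ypos, nstars = StarFind('night1_0001.fits', FWHM=4.0, nsigma=3.5)
#
# FWHM should roughly match the stellar FWHM in pixels; nsigma of ~3-4 follows
# the guidance in the docstring above.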
def makeApertures(xpos, ypos, aprad,skybuff, skywidth):
'''
makeApertures
makes a master list of apertures and the annuli
inputs
---------
xpos: list - x positions of stars in image
ypos: list - y positions of stars in image
    aprad: float - aperture radius
    skybuff: float - gap between the aperture and the inner edge of the sky annulus
    skywidth: float - radial width of the sky annulus (annulus spans aprad+skybuff to aprad+skybuff+skywidth)
outputs
--------
apertures: list - list of aperture positions and radius
annulus_apertures: list - list of annuli positions and radius
see: https://photutils.readthedocs.io/en/stable/api/photutils.CircularAperture.html#photutils.CircularAperture
for more details
'''
# make the master list of apertures
apertures = CircularAperture((xpos, ypos), r=aprad)
annulus_apertures = CircularAnnulus((xpos, ypos), r_in=aprad+skybuff, r_out=aprad+skybuff+skywidth)
apers = [apertures, annulus_apertures]
return apertures, annulus_apertures
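# Geometry note (illustrative): with aprad=5, skybuff=3, skywidth=6 the call
#   apertures, annuli = makeApertures(xpos, ypos, 5., 3., 6.)
# gives a 5-pixel source aperture and a sky annulus running from r = 8 to
# r = 14 pixels (annuli.r_in == 8.0, annuli.r_out == 14.0) around each star.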
def apertureArea(apertures):
''' returns the area of the aperture'''
return apertures.area() ### should be apertures
def backgroundArea(back_aperture):
'''returns the area of the annuli'''
return back_aperture.area() ### should be annulus_apertures
def doPhotometry(imglist, xpos, ypos, aprad, skybuff, skywidth,timekey='MJD-OBS',verbose=1):
'''
    doPhotometry
determine the flux for each star from aperture photometry
inputs
-------
imglist: list - list of .fits images
xpos, ypos: lists - lists of x and y positions of stars
    aprad, skybuff, skywidth: floats - aperture radius, annulus gap, and annulus width (see makeApertures)
outputs
-------
Times: list - time stamps of each observation from the .fits header
Photometry: list - aperture photometry flux values found at each xpos, ypos position
'''
#number of images
nimages = len(imglist)
nstars = len(xpos)
print('Found {} images'.format(nimages))
#create lists for timestamps and flux values
Times = np.zeros(nimages)
Photometry = np.zeros((nimages,nstars))
print('making apertures')
#make the apertures around each star
apertures, annulus_apertures = makeApertures(xpos, ypos, aprad, skybuff, skywidth)
#plot apertures
plt.figure(figsize=(12,12))
interval = ZScaleInterval()
vmin, vmax = interval.get_limits(fits.getdata(imglist[0]))
plt.imshow(fits.getdata(imglist[0]), vmin=vmin,vmax=vmax, origin='lower')
apertures.plot(color='white', lw=2)
#annulus_apertures.plot(color='red', lw=2)
plt.title('apertures')
plt.show()
#determine area of apertures
area_of_ap = apertureArea(apertures)
#determine area of annuli
area_of_background = backgroundArea(annulus_apertures)
checknum = np.linspace(0,nimages,10).astype(int)
#go through each image and run aperture photometry
for ind in np.arange(nimages):
if ((ind in checknum) & (verbose==1)):
print('running aperture photometry on image: ', ind )
if (verbose>1):
print('running aperture photometry on image: ', ind )
#open image
data_image, hdr = fits.getdata(imglist[ind], header=True)
#find time stamp and append to list
Times[ind] = hdr[timekey]
#do photometry
phot_table = aperture_photometry(data_image, (apertures,annulus_apertures))
#determine flux: (aperture flux) - [(area of aperture * annuli flux)/area of background ]
flux0 = np.array(phot_table['aperture_sum_0']) - (area_of_ap/area_of_background)*np.array(phot_table['aperture_sum_1'])
#append to list
Photometry[ind,:] = flux0
return Times,Photometry
def doPhotometryError(imglist,xpos, ypos,aprad, skybuff, skywidth, flux0, GAIN=1.3, manual = False, **kwargs):
'''
doPhotometryError
determine error in photometry from background noise
two options:
- use sigma clipping and use whole background
- manually input background box positions as kwargs
inputs
--------
imglist: list - list of .fits images
xpos, ypos: lists - lists of x and y positions of stars
    aprad, skybuff, skywidth: floats - aperture radius, annulus gap, and annulus width (see makeApertures)
flux0: list - aperture photometry found from doPhotometry() function
GAIN: float - average gain
manual: boolean - switch between manually inputting box (True) or using sigma clipping (False)
if True -- must have kwargs
manual = False is default
**kwargs
kwargs[xboxcorner]: float - x edge of box in pixel coords
kwargs[yboxcorner]: float - y edge of box in pixel coords
kwargs[boxsize]: float - size of box in pixel coords
'''
# find number of images in list
nimages = len(imglist)
nstars = len(xpos)
print('Found {} images'.format(nimages))
#make apertures
apertures, annulus_apertures = makeApertures(xpos, ypos, aprad, skybuff, skywidth)
#find areas of apertures and annuli
area_of_ap = apertureArea(apertures)
area_of_background = backgroundArea(annulus_apertures)
checknum = np.linspace(0,nimages,10).astype(int)
#find error in photometry
ePhotometry = np.zeros((nimages,nstars))
for ind in np.arange(nimages):
#open images
im = fits.getdata(imglist[ind])
if ind in checknum:
print('running error analysis on image ', ind)
#determine variance in background
if manual == True: #manual method -- choose back size
skyvar = np.std(im[kwargs['xboxcorner']:kwargs['xboxcorner']+kwargs['boxsize'],kwargs['yboxcorner']:kwargs['yboxcorner']+kwargs['boxsize']])**2.
err1 = skyvar*(area_of_ap)**2./(kwargs['boxsize']*kwargs['boxsize']) # uncertainty in mean sky brightness
if manual == False: #automatic method -- use sigma clipping
filtered_data = sigma_clip(im, sigma=3)
skyvar = np.std(filtered_data)**2.
            err1 = skyvar*(area_of_ap)**2./(im.shape[0]*im.shape[1]) # uncertainty in mean sky brightness (whole-image pixel count)
err2 = area_of_ap * skyvar # scatter in sky values
err3 = flux0[ind]/GAIN # Poisson error
print ('Scatter in sky values: ',err2**0.5,', uncertainty in mean sky brightness: ',err1**0.5)
        # sum sources of error in quadrature
errtot = (err1 + err2 + err3)**0.5
#append to list
ePhotometry[ind,:] = errtot
return ePhotometry
def mask(Photometry, ePhotometry, sn_thresh=3.):
Photometry_mask1 = ma.masked_where(Photometry <= 0, Photometry)
sn = Photometry_mask1 / ePhotometry
Photometry_mask2 = ma.masked_where(sn < sn_thresh, Photometry_mask1)
ePhotometry_mask1 = ma.masked_where(Photometry <= 0, ePhotometry)
sn = Photometry_mask1 / ePhotometry
ePhotometry_mask2 = ma.masked_where(sn < sn_thresh, ePhotometry_mask1)
return Photometry_mask2, ePhotometry_mask2
# detrend all stars
def detrend(idx, Photometry_initial, ePhotometry, nstars, sn_thresh):
'''
detrend
detrend the background for each night so we don't have to worry about changes in background noise levels
inputs
-------
photometry: list - list of flux values from aperture photometry
ephotometry: list - list of flux errors from aperture photometry
nstars: float - number of stars in the field
outputs
--------
finalPhot: list - final aperture photometry of sources with bad sources replaced with nans.
<< this is the list you want to use from now on. >>
cPhotometry: list - detrended aperture photometry
'''
sn = Photometry_initial / ePhotometry
Photometry_mask1 = ma.masked_where(Photometry_initial <= 0, Photometry_initial)
Photometry_mask2 = ma.masked_where(sn < sn_thresh, Photometry_mask1)
#mask out target stars
m = np.zeros_like(Photometry_mask2)
m[:,idx] = 1
Photometry_initial_mask3 = ma.masked_array(Photometry_mask2, m)
med_val = np.median(Photometry_initial_mask3, axis=0)
c = np.zeros_like(Photometry_initial_mask3)
c[:,med_val<=0] = 1
# get median flux value for each star (find percent change)
cPhotometry = ma.masked_array(Photometry_initial_mask3, c)
cPhotometry = cPhotometry / med_val
# do a check for outlier photometry?
for night in np.arange(len(cPhotometry)):
# remove large-scale image-to-image variation to find best stars
cPhotometry[night] = cPhotometry[night] / ma.median(cPhotometry[night])
# eliminate stars with outliers from consideration
cPhotometry_mask = ma.masked_where( ((cPhotometry < 0.5) | (cPhotometry > 1.5)), cPhotometry)
return Photometry_initial_mask3, cPhotometry_mask
def plotPhotometry(Times,cPhotometry):
'''plot detrended photometry'''
plt.figure()
for ind in np.arange(np.shape(cPhotometry)[1]):
plt.scatter(Times-np.nanmin(Times),cPhotometry[:,ind],s=1.,color='black')
# make the ranges a bit more general
plt.xlim(-0.1,1.1*np.max(Times-np.nanmin(Times)))
plt.ylim(np.nanmin(cPhotometry),np.nanmax(cPhotometry))
plt.xlabel('Observation Time [days]')
plt.ylabel('Detrended Flux')
plt.show()
def CaliforniaCoast(Photometry,cPhotometry,comp_num=9,flux_bins=6):
"""
Find the least-variable stars as a function of star brightness*
(it's called California Coast because the plot looks like California and we are looking for edge values: the coast)
inputs
--------------
Photometry : input Photometry catalog
cPhotometry : input detrended Photometry catalog
    flux_bins : (default=6) maximum number of flux bins
    comp_num : (default=9) minimum number of comparison stars to use
outputs
--------------
BinStars : dictionary of stars in each of the flux partitions
LeastVariable : dictionary of least variable stars in each of the flux partitions
"""
tmpX = np.nanmedian(Photometry,axis=0)
tmpY = np.nanstd(cPhotometry, axis=0)
xvals = tmpX[(np.isfinite(tmpX) & np.isfinite(tmpY))]
yvals = tmpY[(np.isfinite(tmpX) & np.isfinite(tmpY))]
kept_vals = np.where((np.isfinite(tmpX) & np.isfinite(tmpY)))[0]
#print('Keep',kept_vals)
# make the bins in flux, equal in percentile
flux_percents = np.linspace(0.,100.,flux_bins)
print('Bin Percentiles to check:',flux_percents)
# make the dictionary to return the best stars
LeastVariable = {}
BinStars = {}
for bin_num in range(0,flux_percents.size-1):
# get the flux boundaries for this bin
min_flux = np.percentile(xvals,flux_percents[bin_num])
max_flux = np.percentile(xvals,flux_percents[bin_num+1])
#print('Min/Max',min_flux,max_flux)
# select the stars meeting the criteria
w = np.where( (xvals >= min_flux) & (xvals < max_flux))[0]
BinStars[bin_num] = kept_vals[w]
# now look at the least variable X stars
nstars = w.size
#print('Number of stars in bin {}:'.format(bin_num),nstars)
# organize stars by flux uncertainty
binStarsX = xvals[w]
binStarsY = yvals[w]
        # lowest-variability (minimum Y) stars in the bin:
lowestY = kept_vals[w[binStarsY.argsort()][0:comp_num]]
#print('Best {} stars in bin {}:'.format(comp_num,bin_num),lowestY)
LeastVariable[bin_num] = lowestY
return BinStars,LeastVariable
def findComparisonStars(Photometry, cPhotometry, accuracy_threshold = 0.2, plot=True,comp_num=6): #0.025
'''
findComparisonStars*
finds stars that are similar over the various nights to use as comparison stars
inputs
--------
Photometry: list - photometric values taken from detrend() function.
cPhotometry: list - detrended photometric values from detrend() function
accuracy_threshold: float - level of accuracy in fluxes between various nights
plot: boolean - True/False plot various stars and highlight comparison stars
outputs
--------
most_accurate: list - list of indices of the locations in Photometry which have the best stars to use as comparisons
'''
BinStars,LeastVariable = CaliforniaCoast(Photometry,cPhotometry,comp_num=comp_num)
star_err = ma.std(cPhotometry, axis=0)
if plot:
xvals = np.log10(ma.median(Photometry,axis=0))
yvals = np.log10(ma.std(cPhotometry, axis=0))
plt.figure()
plt.scatter(xvals,yvals,color='black',s=1.)
plt.xlabel('log Median Flux per star')
plt.ylabel('log De-trended Standard Deviation')
plt.text(np.nanmin(np.log10(ma.median(Photometry,axis=0))),np.nanmin(np.log10(star_err[star_err>0.])),\
'Less Variable',color='red',ha='left',va='bottom')
plt.text(np.nanmax(np.log10(ma.median(Photometry,axis=0))),np.nanmax(np.log10(star_err[star_err>0.])),\
'More Variable',color='red',ha='right',va='top')
for k in LeastVariable.keys():
plt.scatter(xvals[LeastVariable[k]],yvals[LeastVariable[k]],color='red')
# this is the middle key for safety
middle_key = np.array(list(LeastVariable.keys()))[len(LeastVariable.keys())//2]
# but now let's select the brightest one
best_key = np.array(list(LeastVariable.keys()))[-1]
return LeastVariable[best_key]
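# End-to-end usage sketch (file pattern, target index and aperture sizes are
# hypothetical; the call order mirrors the functions defined above):
#
#   imglist = sorted(glob.glob('reduced/*.fits'))
#   xpos, ypos, nstars = StarFind(imglist[0], FWHM=4.0, nsigma=3.5)
#   Times, Photometry = doPhotometry(imglist, xpos, ypos, aprad=5.,
#                                    skybuff=3., skywidth=6.)
#   ePhotometry = doPhotometryError(imglist, xpos, ypos, 5., 3., 6., Photometry)
#   finalPhot, cPhotometry = detrend([0], Photometry, ePhotometry, nstars,
#                                    sn_thresh=3.)
#   most_accurate = findComparisonStars(finalPhot, cPhotometry)
#
# most_accurate then feeds runDifferentialPhotometry() below.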
def runDifferentialPhotometry(photometry, ephotometry, nstars, most_accurate, sn_thresh):
'''
runDifferentialPhotometry
as the name says!
inputs
----------
Photometry: list - list of photometric values from detrend() function
ePhotometry: list - list of photometric error values
nstars: float - number of stars
most_accurate: list - list of indices of non variable comparison stars
outputs
---------
dPhotometry: list - differential photometry list
edPhotometry: list - scaling factors to photometry error
tePhotometry: list - differential photometry error
'''
Photometry = ma.masked_where(photometry <= 0, photometry)
ePhotometry = ma.masked_where(photometry <= 0, ephotometry)
#number of nights of photometry
nimages = len(Photometry)
#range of number of nights
imgindex = np.arange(0,nimages,1)
#create lists for diff photometry
dPhotometry = ma.ones([nimages, len(Photometry[0])])
edPhotometry = ma.ones([nimages, len(Photometry[0])])
eedPhotometry = ma.ones([nimages, len(Photometry[0])])
tePhotometry = ma.ones([nimages, len(Photometry[0])])
checknum = np.linspace(0,nstars,10).astype(int)
for star in np.arange(nstars):
if star in checknum:
print('running differential photometry on star: ', star+1, '/', nstars)
starPhotometry = Photometry[:,star]
starPhotometryerr = ePhotometry[:,star]
#create temporary photometry list for each comparison star
tmp_phot = ma.ones([nimages,len(most_accurate)])
#go through comparison stars and determine differential photometry
for ind, i in enumerate(most_accurate):
#pull out each star's photometry + error Photometry for each night and place in list
compStarPhotometry = Photometry[:,i]
#calculate differential photometry
tmp = starPhotometry*ma.median(compStarPhotometry)/(compStarPhotometry*ma.median(starPhotometry))
tmp_phot[:,ind] = tmp
#median combine differential photometry found with each comparison star for every other star
dPhotometry[:,star] = ma.median(tmp_phot,axis=1)
# apply final scaling factors to the photometric error
        edPhotometry[:,star] = starPhotometryerr*(ma.median(tmp_phot,axis=1))
"""
physical_models_vec.py
A module for material strength behavior to be imported into python scripts for
optimizaton or training emulators. Adapted from strength_models_add_ptw.py
Authors:
<NAME>, <EMAIL>
<NAME>, <EMAIL>
<NAME>, <EMAIL>
"""
import numpy as np
np.seterr(all = 'raise')
#import ipdb
import copy
from math import pi
from scipy.special import erf
## Error Definitions
class ConstraintError(ValueError):
pass
class PTWStressError(FloatingPointError):
pass
## Model Definitions
class BaseModel(object):
"""
Base Class for property Models (flow stress, specific heat, melt, density,
etc.). Must be instantiated as a child of MaterialModel
"""
params = []
consts = []
def value(self, *args):
return None
def update_parameters(self, x):
self.parent.parameters.update_parameters(x, self.params)
return
def __init__(self, parent):
self.parent = parent
return
# Specific Heat Models
class Constant_Specific_Heat(BaseModel):
"""
Constant Specific Heat Model
"""
consts = ['Cv0']
def value(self, *args):
return self.parent.parameters.Cv0
class Linear_Specific_Heat(BaseModel):
"""
Linear Specific Heat Model
"""
consts = ['Cv0', 'T0', 'dCdT']
def value(self, *args):
c0=self.parent.parameters.Cv0
t0=self.parent.parameters.T0
dcdt=self.parent.parameters.dCdT
tnow=self.parent.state.T
cnow=c0+(tnow-t0)*dcdt
return cnow
# Density Models
class Constant_Density(BaseModel):
"""
Constant Density Model
"""
consts = ['rho0']
def value(self, *args):
return self.parent.parameters.rho0 * np.ones(len(self.parent.state.T))
class Linear_Density(BaseModel):
"""
Linear Density Model
"""
consts = ['rho0', 'T0', 'dRhodT']
def value(self, *args):
r0=self.parent.parameters.rho0
t0=self.parent.parameters.T0
drdt=self.parent.parameters.dRhodT
tnow=self.parent.state.T
rnow=r0+drdt*(tnow-t0)
return rnow
# Melt Temperature Models
class Constant_Melt_Temperature(BaseModel):
"""
Constant Melt Temperature Model
"""
consts = ['Tmelt0']
def value(self, *args):
return self.parent.parameters.Tmelt0
class Linear_Melt_Temperature(BaseModel):
"""
Linear Melt Temperature Model
"""
consts=['Tmelt0', 'rho0', 'dTmdRho']
def value(self, *args):
tm0=self.parent.parameters.Tmelt0
rnow=self.parent.state.rho
dtdr=self.parent.parameters.dTmdRho
r0=self.parent.parameters.rho0
tmeltnow=tm0+dtdr*(rnow-r0)
return tmeltnow
class BGP_Melt_Temperature(BaseModel):
consts = ['Tm_0', 'rho_m', 'gamma_1', 'gamma_3', 'q3']
def value(self, *args):
mp = self.parent.parameters
rho = self.parent.state.rho
melt_temp = mp.Tm_0*np.power(rho/mp.rho_m, 1./3.)*np.exp(6*mp.gamma_1*(np.power(mp.rho_m,-1./3.)-np.power(rho,-1./3.))\
+2.*mp.gamma_3/mp.q3*(np.power(mp.rho_m,-mp.q3)-np.power(rho,-mp.q3)))
return melt_temp
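# For reference, BGP_Melt_Temperature.value implements
#   Tm(rho) = Tm_0 * (rho/rho_m)**(1/3)
#             * exp( 6*gamma_1*(rho_m**(-1/3) - rho**(-1/3))
#                    + (2*gamma_3/q3)*(rho_m**(-q3) - rho**(-q3)) )
# written out from the code above; parameter names follow the consts list.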
# Shear Modulus Models
class Constant_Shear_Modulus(BaseModel):
consts = ['G0']
def value(self, *args):
return self.parent.parameters.G0
class Linear_Shear_Modulus(BaseModel):
consts = ['G0', 'rho0', 'dGdRho' ]
def value(self, *args):
g0=self.parent.parameters.G0
rho0=self.parent.parameters.rho0
dgdr=self.parent.parameters.dGdRho
rnow=self.parent.state.rho
gnow=g0+dgdr*(rnow-rho0)
return gnow
class Simple_Shear_Modulus(BaseModel):
consts = ['G0', 'alpha']
def value(self, *args):
mp = self.parent.parameters
temp = self.parent.state.T
tmelt = self.parent.state.Tmelt
return mp.G0 * (1. - mp.alpha * (temp / tmelt))
class BGP_PW_Shear_Modulus(BaseModel):
    # The BGP model provides the cold shear, i.e. the shear modulus at zero temperature as a function of density.
    # PW describes the (linear) temperature dependence of the shear modulus (same dependence as
    # in Simple_Shear_Modulus).
#With these two models combined, we get the shear modulus as a function of density and temperature.
consts = ['G0', 'rho_0', 'gamma_1', 'gamma_2', 'q2', 'alpha']
def value(self, *args):
mp = self.parent.parameters
rho = self.parent.state.rho
temp = self.parent.state.T
tmelt = self.parent.state.Tmelt
cold_shear = mp.G0*np.exp(6.*mp.gamma_1*(np.power(mp.rho_0,-1./3.)-np.power(rho,-1./3.))\
+ 2*mp.gamma_2/mp.q2*(np.power(mp.rho_0,-mp.q2)-np.power(rho,-mp.q2)))
gnow = cold_shear*(1.- mp.alpha* (temp/tmelt))
gnow[np.where(temp >= tmelt)] = 0.
gnow[np.where(gnow < 0)] = 0.
#if temp >= tmelt: gnow = 0.0
#if gnow < 0.0: gnow = 0.0
return gnow
class Stein_Shear_Modulus(BaseModel):
#consts = ['G0', 'sgA', 'sgB']
#assuming constant density and pressure
#so we only include the temperature dependence
consts = ['G0', 'sgB']
eta = 1.0
def value(self, *args):
mp = self.parent.parameters
temp = self.parent.state.T
tmelt = self.parent.state.Tmelt
#just putting this here for completeness
#aterm = a/eta**(1.0/3.0)*pressure
aterm = 0.0
bterm = mp.sgB * (temp - 300.0)
gnow = mp.G0 * (1.0 + aterm - bterm)
#if temp >= tmelt: gnow = 0.0
#if gnow < 0.0: gnow = 0.0
gnow[np.where(temp >= tmelt)] = 0.
gnow[np.where(gnow < 0)] = 0.
return gnow
# Yield Stress Models
class Constant_Yield_Stress(BaseModel):
"""
Constant Yield Stress Model
"""
consts = ['yield_stress']
def value(self, *args):
return self.parent.parameters.yield_stress
def fast_pow(a, b):
"""
    Numpy power is slow; this is faster. Computes a**b elementwise for numpy
    arrays a and b, returning 0 where a <= 0.
"""
cond = a>0
out = a * 0.
out[cond] = np.exp(b[cond] * np.log(a[cond]))
return out
pos = lambda a: (abs(a) + a) / 2 # same as max(0,a)
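# Small sketch (illustrative addition) exercising the two helpers above:
def _example_fast_pow():
    a = np.array([4.0, 0.0, 9.0])
    b = np.array([0.5, 2.0, 0.5])
    y = fast_pow(a, b)              # -> array([2., 0., 3.]); entries with a <= 0 stay 0
    z = pos(np.array([-1.0, 2.0]))  # -> array([0., 2.]), i.e. elementwise max(0, a)
    return y, z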
class JC_Yield_Stress(BaseModel):
params = ['A','B','C','n','m']
consts = ['Tref','edot0','chi']
def value(self, edot):
mp = self.parent.parameters
eps = self.parent.state.strain
t = self.parent.state.T
tmelt = self.parent.state.Tmelt
#th = np.max([(t - mp.Tref) / (tmelt - mp.Tref), 0.])
th = pos((t - mp.Tref) / (tmelt - mp.Tref))
Y = (
(mp.A + mp.B * fast_pow(eps, mp.n)) *
(1. + mp.C * np.log(edot / mp.edot0)) *
(1. - fast_pow(th, mp.m))
)
return Y
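# For reference, JC_Yield_Stress.value is the standard Johnson-Cook flow stress
#   Y = (A + B*eps**n) * (1 + C*ln(edot/edot0)) * (1 - th**m)
# with homologous temperature th = max(0, (T - Tref)/(Tmelt - Tref)), exactly as
# implemented above.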
class PTW_Yield_Stress(BaseModel):
params = ['theta','p','s0','sInf','kappa','lgamma','y0','yInf','y1', 'y2']
consts = ['beta', 'matomic', 'chi']
#@profile
def value(self, edot):
"""
function used to define PTW flow stress model
arguments are:
        - edot: strain rate (in 1/microsecond; converted to 1/s inside value())
        the material state (strain, temperature, melt temperature, shear modulus)
        is taken from self.parent, an instance of the MaterialModel class
returns the flow stress at the current material state
and specified strain rate
"""
mp = self.parent.parameters
eps = self.parent.state.strain
temp = self.parent.state.T
tmelt = self.parent.state.Tmelt
shear = self.parent.state.G
#if (np.any(mp.sInf > mp.s0) or np.any(mp.yInf > mp.y0) or
# np.any(mp.y0 > mp.s0) or np.any(mp.yInf > mp.sInf) or np.any(mp.y1 < mp.s0) or np.any(mp.y2 < mp.beta)):
# raise ConstraintError
good = (
(mp.sInf < mp.s0) * (mp.yInf < mp.y0) * (mp.y0 < mp.s0) *
(mp.yInf < mp.sInf) * (mp.y1 > mp.s0) * (mp.y2 > mp.beta)
)
if np.any(~good):
#return np.array([-999.]*len(good))
raise ConstraintError('PTW bad val')
#convert to 1/s strain rate since PTW rate is in that unit
edot = edot * 1.0E6
t_hom = temp / tmelt
#this one is commented because it is assumed that
#the material state computes the temperature dependence of
#the shear modulus
#shear = shear * (1.0 - mp.alpha * t_hom)
#print("ptw shear is "+str(shear))
afact = (4.0 / 3.0) * pi * mp.rho0 / mp.matomic
#ainv is 1/a where 4/3 pi a^3 is the atomic volume
ainv = afact ** (1.0 / 3.0)
#transverse wave velocity up to units
xfact = np.sqrt ( shear / mp.rho0 )
#PTW characteristic strain rate [ 1/s ]
xiDot = 0.5 * ainv * xfact * pow(6.022E29, 1.0 / 3.0) * 1.0E4
#if np.any(mp.gamma * xiDot / edot <= 0) or np.any(np.isinf(mp.gamma * xiDot / edot)):
# print("bad")
argErf = mp.kappa * t_hom * (mp.lgamma + np.log( xiDot / edot ))
saturation1 = mp.s0 - ( mp.s0 - mp.sInf ) * erf( argErf )
#saturation2 = mp.s0 * np.power( edot / mp.gamma / xiDot , mp.beta )
#saturation2 = mp.s0 * (edot / mp.gamma / xiDot)**mp.beta
saturation2 = mp.s0 * np.exp(mp.beta * (-mp.lgamma + np.log(edot / xiDot)))
#if saturation1 > saturation2:
# tau_s=saturation1 # thermal activation regime
#else:
# tau_s=saturation2 # phonon drag regime
sat_cond = saturation1 > saturation2
#tau_s = sat_cond*saturation1 + (~sat_cond)*saturation2
tau_s = np.copy(saturation2)
tau_s[np.where(sat_cond)] = saturation1[sat_cond]
ayield = mp.y0 - ( mp.y0 - mp.yInf ) * erf( argErf )
#byield = mp.y1 * np.power( mp.gamma * xiDot / edot , -mp.y2 )
#cyield = mp.s0 * np.power( mp.gamma * xiDot / edot , -mp.beta)
byield = mp.y1 * np.exp( -mp.y2*(mp.lgamma + np.log( xiDot / edot )))
cyield = mp.s0 * np.exp( -mp.beta*(mp.lgamma + np.log( xiDot / edot )))
#if byield < cyield:
# dyield = byield # intermediate regime
#else:
# dyield = cyield # phonon drag regime
y_cond = (byield < cyield)
#dyield = y_cond*byield + (~y_cond)*cyield
dyield = np.copy(cyield)
dyield[np.where(y_cond)] = byield[y_cond]
#if ayield > dyield:
# tau_y = ayield # thermal activation regime
#else:
# tau_y = dyield # intermediate or high rate
y_cond2 = ayield > dyield
#tau_y = y_cond2*ayield + (~y_cond2)*dyield
tau_y = np.copy(dyield)
tau_y[np.where(y_cond2)] = ayield[y_cond2]
# if mp.p > 0:
# if tau_s == tau_y:
# scaled_stress = tau_s
# else:
# try:
# eArg1 = mp.p * (tau_s - tau_y) / (mp.s0 - tau_y)
# eArg2 = eps * mp.p * mp.theta / (mp.s0 - tau_y) / (exp(eArg1) - 1.0)
# theLog = log(1.0 - (1.0 - exp(- eArg1)) * exp(-eArg2))
# except (FloatingPointError, OverflowError) as e:
# raise PTWStressError from e
# scaled_stress = (tau_s + ( mp.s0 - tau_y ) * theLog / mp.p )
# else:
# if tau_s > tau_y:
# scaled_stress = ( tau_s - ( tau_s - tau_y )
# * exp( - eps * mp.theta / (tau_s - tau_y) ) )
# else:
# scaled_stress = tau_s
small = 1.0e-10
        scaled_stress = np.copy(tau_s)  # copy so the in-place update below does not also modify tau_s
ind = np.where((mp.p > small) * (np.abs(tau_s - tau_y) > small))
eArg1 = (mp.p * (tau_s - tau_y) / (mp.s0 - tau_y))[ind]
eArg2 = (eps * mp.p * mp.theta)[ind] / (mp.s0 - tau_y)[ind] / (np.exp(eArg1) - 1.0) # eArg1 already subsetted by ind
if (np.any((1.0 - (1.0 - np.exp(- eArg1)) * np.exp(-eArg2)) <= 0) or \
np.any(np.isinf(1.0 - (1.0 - np.exp(- eArg1)) * np.exp(-eArg2)))):
print('bad')
theLog = np.log(1.0 - (1.0 - np.exp(- eArg1)) * np.exp(-eArg2))
scaled_stress[ind] = (tau_s[ind] + ( mp.s0[ind] - tau_y[ind] ) * theLog / mp.p[ind] )
ind2 = np.where((mp.p <= small) * (tau_s>tau_y))
scaled_stress[ind2] = (
+ tau_s[ind2]
- (tau_s - tau_y)[ind2] * np.exp(- eps[ind2] * mp.theta[ind2] / (tau_s - tau_y)[ind2])
)
# should be flow stress in units of Mbar
out = scaled_stress * shear * 2.0
out[np.where(~good)] = -999.
return out
class Stein_Flow_Stress(BaseModel):
params = ['y0', 'a', 'b', 'beta', 'n', 'ymax']
consts = ['G0', 'epsi', 'chi']
def value(self, *args):
mp = self.parent.parameters
temp = self.parent.state.T
tmelt = self.parent.state.Tmelt
shear = self.parent.state.G
eps = self.parent.state.strain
fnow = fast_pow((1.0+mp.beta*(mp.epsi+eps)), mp.n)
cond1 = fnow*mp.y0 > mp.ymax
fnow[cond1] = (mp.ymax/mp.y0)[cond1]
cond2 = temp > tmelt
fnow[cond2] = 0.0
#if fnow*mp.y0 > mp.ymax: fnow = mp.ymax/mp.y0
#if temp > tmelt: fnow = 0.0
return mp.y0*fnow*shear/mp.G0
## Parameters Definition
class ModelParameters(object):
params = []
consts = []
parent = None
def update_parameters(self, x):
        if type(x) is np.ndarray:
            self.__dict__.update({key: val for key, val in zip(self.params, x)})
elif type(x) is dict:
for key in self.params:
self.__dict__[key] = x[key]
elif type(x) is list:
try:
assert(len(x) == len(self.params))
except AssertionError:
print('Incorrect number of parameters!')
raise
for i in range(len(self.params)):
self.__dict__[self.params[i]] = x[i]
else:
raise ValueError('Type {} is not supported.'.format(type(x)))
return
def __init__(self, parent):
self.parent = parent
return
## State Definition
class MaterialState(object):
T = None
Tmelt = None
stress = None
strain = None
G = None
def set_state(self, T = 300., strain = 0., stress = 0.):
self.T = T
self.strain = strain
self.stress = stress
return
def __init__(self, parent, T = 300., strain = 0., stress = 0.):
self.parent = parent
self.set_state(T, strain, stress)
return
## Material Model Definition
class MaterialModel(object):
def __init__(
self,
parameters = ModelParameters,
initial_state = MaterialState,
flow_stress_model = Constant_Yield_Stress,
specific_heat_model = Constant_Specific_Heat,
shear_modulus_model = Constant_Shear_Modulus,
melt_model = Constant_Melt_Temperature,
density_model = Constant_Density,
):
"""
Initialization routine for Material Model. All of the arguments
supplied are classes, which are then instantiated within the function.
The reason for doing this is that then we can pass the MaterialModel
instance to the physical models so that the model's parent can be
declared at instantiation. then the model.value() function can reach
into the parent class to find whatever it needs.
"""
self.parameters = parameters(self)
self.state = initial_state(self)
self.flow_stress = flow_stress_model(self)
self.specific_heat = specific_heat_model(self)
self.shear_modulus = shear_modulus_model(self)
self.melt_model = melt_model(self)
self.density = density_model(self)
params = (
self.flow_stress.params +
self.specific_heat.params +
self.shear_modulus.params +
self.melt_model.params +
self.density.params
)
consts = set(
self.flow_stress.consts +
self.specific_heat.consts +
self.shear_modulus.consts +
self.melt_model.consts +
self.density.consts
)
try:
assert(len(set(params)) == len(params))
except AssertionError:
print('Some Duplicate Parameters between models')
raise
try:
assert(len(set(params).intersection(set(consts))) == 0)
except AssertionError:
print('Duplicate item in parameters and constants')
raise
self.parameters.params = params
self.parameters.consts = consts
return
def get_parameter_list(self,):
"""
The list of parameters used in the model.
This also describes the order of their appearance in the sampling results
"""
return self.parameters.params
def get_constants_list(self,):
"""
List of Constants used in the model
"""
return self.parameters.consts
def update_state(self, edot, dt):
chi = self.parameters.chi
self.state.Cv = self.specific_heat.value()
self.state.rho = self.density.value()
#if we are working with microseconds, then this is a reasonable value
#if we work in seconds, it should be changed to ~1.
edotcrit=1.0e-6
#if edot > edotcrit:
# self.state.T += chi * self.state.stress * edot * dt / (self.state.Cv * self.state.rho)
cond = edot > edotcrit
#if any(cond):
self.state.T = self.state.T + cond * chi * self.state.stress * edot * dt / (self.state.Cv * self.state.rho)
self.state.strain = self.state.strain + edot * dt
self.state.Tmelt = self.melt_model.value()
self.state.G = self.shear_modulus.value()
self.state.stress = self.flow_stress.value(edot)
return
def update_parameters(self, x):
self.parameters.update_parameters(x)
return
def initialize(self, parameters, constants):
"""
Initialize the model at a given set of parameters, constants
"""
try:
self.parameters.__dict__.update(
{key : parameters[key] for key in self.parameters.params},
)
except KeyError:
print('{} missing from list of supplied parameters'.format(
set(self.parameters.params).difference(set(parameters.keys()))
))
raise
try:
self.parameters.__dict__.update(
{key : constants[key] for key in self.parameters.consts},
)
except KeyError:
print('{} missing from list of supplied constants'.format(
set(self.parameters.consts).difference(set(constants.keys()))
))
raise
return
def initialize_state(self, T = 300., stress = 0., strain = 0.):
        self.state.set_state(T=T, strain=strain, stress=stress)  # keyword args: set_state's positional order is (T, strain, stress)
return
def set_history_variables(self, emax, edot, Nhist):
self.emax = emax
self.edot = edot
self.Nhist = Nhist
return
def get_history_variables(self):
return [self.emax, self.edot, self.Nhist]
def compute_state_history(self, strain_history):
strains = strain_history['strains']
times = strain_history['times']
strain_rate = strain_history['strain_rate']
# Nhist = len(strains)
# nrep = len(self.parameters.kappa)
nrep, Nhist = strains.shape # nexp * nhist array
results = np.empty((Nhist, 6, nrep))
state = self.state
self.update_state(strain_rate[:,0], 0.)
#import pdb
#pdb.set_trace()
results[0] = np.array([times[:,0], state.strain, state.stress, state.T, state.G, state.rho]) #np.repeat(state.rho,nrep)])
for i in range(1, Nhist):
self.update_state(strain_rate[:,i-1], times[:,i] - times[:,i-1])
# self.update_state(strain_rate.T[i-1], times.T[i] - times.T[i-1])
# results[i] = [times[i], state.strain, state.stress, state.T, state.G, state.rho]
results[i] = np.array([times[:,i], state.strain, state.stress, state.T, state.G,
state.rho]) #np.repeat(state.rho, nrep)])
return results
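# Minimal usage sketch (illustrative only; the parameter/constant dictionaries are
# placeholders whose keys depend on the sub-models chosen):
#   model = MaterialModel(flow_stress_model=JC_Yield_Stress,
#                         shear_modulus_model=Simple_Shear_Modulus)
#   model.initialize(parameters={...}, constants={...})
#   model.initialize_state(T=300.)
#   results = model.compute_state_history(strain_history)  # dict with 'strains', 'times', 'strain_rate'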
## function to generate strain history to calculate along
## Should probably make this a method of MaterialModel class
def generate_strain_history(emax, edot, Nhist):
tmax = emax / edot
strains = np.linspace(0., emax, Nhist)
nrep=len(edot)
    times = np.empty((nrep, Nhist))
"""
epidemic_helper.py: Helper module to simulate continuous-time stochastic
SIR epidemics.
Copyright © 2018 LCA 4
"""
import time
import bisect
import numpy as np
import pandas as pd
import networkx as nx
import scipy
import scipy.optimize
import scipy as sp
import random as rd
import heapq
import collections
import itertools
import os
import copy
from counterfactual_tpp import sample_counterfactual, combine
from sampling_utils import thinning_T
# from . import maxcut
from settings import DATA_DIR
def sample_seeds(graph, delta, method='data', n_seeds=None, max_date=None, verbose=True):
"""
Extract seeds from the Ebola cases datasets, by choosing either:
* the first `n_seeds`.
* the first seed until the date `max_date`.
For each seed, we then simulate its recovery time and attribute it to a random node in the
corresponding district. We then start the epidemic at the time of infection of the last seed.
Note that some seeds may have already recovered at this time. In this case, they are just
ignored from the simulation altogether.
Arguments:
---------
graph : nx.Graph
The graph of individuals in districts. Nodes must have the attribute `district`.
delta : float
Recovery rate of the epidemic process. Used to sample recovery times of seeds.
n_seeds : int
Number of seeds to sample.
max_date : str
Maximum date to sample seeds (max_date is included in sampling).
method : str ('data' or 'random')
Method to sample the seeds. Can be one of:
- 'data': Use the seeds from the dataset and sample recovery time
- 'random': Sample random seeds along with their recovery time
verbose : bool
Indicate whether or not to print seed generation process.
"""
assert (n_seeds is not None) or (max_date is not None), "Either `n_seeds` or `max_date` must be given"
if method == 'data':
# Load real data
df = pd.read_csv(os.path.join(DATA_DIR, 'ebola', 'rstb20160308_si_001_cleaned.csv'))
if n_seeds:
df = df.sort_values('infection_timestamp').iloc[:n_seeds]
elif max_date:
df = df[df.infection_date <= max_date].sort_values('infection_timestamp')
        # Extract the seed districts
seed_names = list(df['district'])
# Extract district name for each node in the graph
node_names = np.array([u for u, d in graph.nodes(data=True)])
node_districts = np.array([d['district'] for u, d in graph.nodes(data=True)])
# Get last infection time of seeds (this is time zero for the simulation)
last_inf_time = df.infection_timestamp.max()
# Init list of seed events
init_event_list = list()
for _, row in df.iterrows():
inf_time = row['infection_timestamp']
# Sample recovery time
rec_time = inf_time + rd.expovariate(delta) - last_inf_time
# Ignore seed if recovered before time zero
if rec_time > 0:
# Randomly sample one node for each seed in the corresponding district
idx = np.random.choice(np.where(node_districts == row['district'])[0])
node = node_names[idx]
# Add infection event
                # a self-infection (the node is its own infector) marks an initial seed
init_event_list.append([(node, 'inf', node), 0.0]) # Gets infection at the start
# Add recovery event
init_event_list.append([(node, 'rec', None), rec_time])
if verbose:
print(f'Add seed {node} from district {row["district"]} - inf: {0.0}, rec: {rec_time} ')
return init_event_list
elif method == 'random':
if n_seeds is None:
raise ValueError("`n_seeds` must be provided for method `random`")
init_event_list = list()
for _ in range(n_seeds):
            node = np.random.choice(list(graph.nodes()))  # list() needed since np.random.choice cannot index a NodeView
init_event_list.append([(node, 'inf', node), 0.0])
rec_time = rd.expovariate(delta)
init_event_list.append([(node, 'rec', None), rec_time])
return init_event_list
else:
raise ValueError('Invalid method.')
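# Example call (illustrative; `g` is assumed to be a networkx graph whose nodes carry
# a 'district' attribute, as required above):
#   init_events = sample_seeds(g, delta=0.1, method='random', n_seeds=5)
#   # -> list of [(node, 'inf'/'rec', infector), time] entries usable as an init_event_list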
class PriorityQueue(object):
"""
PriorityQueue with O(1) update and deletion of objects
"""
def __init__(self, initial=[], priorities=[]):
self.pq = []
self.entry_finder = {} # mapping of tasks to entries
self.removed = '<removed-task>' # placeholder for a removed task
self.counter = itertools.count() # unique sequence count
assert(len(initial) == len(priorities))
for i in range(len(initial)):
self.push(initial[i], priority=priorities[i])
def push(self, task, priority=0):
"""Add a new task or update the priority of an existing task"""
if task in self.entry_finder:
self.delete(task)
count = next(self.counter)
entry = [priority, count, task]
self.entry_finder[task] = entry
heapq.heappush(self.pq, entry)
def delete(self, task):
"""Mark an existing task as REMOVED. Raise KeyError if not found."""
entry = self.entry_finder.pop(task)
entry[-1] = self.removed
def remove_all_tasks_of_type(self, type):
"""Removes all existing tasks of a specific type (for SIRSimulation)"""
keys = list(self.entry_finder.keys())
for event in keys:
u, type_, v = event
if type_ == type:
self.delete(event)
def pop_priority(self):
"""
Remove and return the lowest priority task with its priority value.
Raise KeyError if empty.
"""
while self.pq:
priority, _, task = heapq.heappop(self.pq)
if task is not self.removed:
del self.entry_finder[task]
return task, priority
raise KeyError('pop from an empty priority queue')
def pop(self):
"""
Remove and return the lowest priority task. Raise KeyError if empty.
"""
task, _ = self.pop_priority()
return task
def priority(self, task):
"""Return priority of task"""
if task in self.entry_finder:
return self.entry_finder[task][0]
else:
raise KeyError('task not in queue')
def __len__(self):
return len(self.entry_finder)
def __str__(self):
return str(self.pq)
def __repr__(self):
return repr(self.pq)
def __setitem__(self, task, priority):
self.push(task, priority=priority)
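# Minimal usage sketch (illustrative only): lower priorities pop first, and pushing
# an existing task simply updates its priority.
#   pq = PriorityQueue()
#   pq.push(('u', 'inf', 'v'), priority=1.5)
#   pq[('u', 'rec', None)] = 0.7      # __setitem__ is an alias for push()
#   pq.pop_priority()                 # -> (('u', 'rec', None), 0.7)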
class ProgressPrinter(object):
"""
Helper object to print relevant information throughout the epidemic
"""
PRINT_INTERVAL = 0.1
_PRINT_MSG = ('{t:.2f} days elapsed '
'| '
'{S:.0f} sus., '
'{I:.0f} inf., '
'{R:.0f} rec., '
'{Tt:.0f} tre ({TI:.2f}% of inf) | '
# 'I(q): {iq} R(q): {rq} T(q): {tq} |q|: {lq} | '
'max_u {max_u:.2e}'
)
_PRINTLN_MSG = ('Epidemic stopped after {t:.2f} days '
'| '
'{S:.0f} sus., '
'{I:.0f} inf., '
'{R:.0f} rec., '
'{Tt:.0f} tre ({TI:.2f}% of inf) | '
# 'I(q): {iq} R(q): {rq} T(q): {tq} |q|: {lq}'
'max_u {max_u:.2e}'
)
def __init__(self, verbose=True):
self.verbose = verbose
self.last_print = time.time()
def print(self, sir_obj, epitime, end='', force=False):
if not self.verbose:
return
if (time.time() - self.last_print > self.PRINT_INTERVAL) or force:
S = np.sum(sir_obj.is_sus)
I = np.sum(sir_obj.is_inf * (1 - sir_obj.is_rec))
R = np.sum(sir_obj.is_rec)
T = np.sum(sir_obj.is_tre)
Tt = np.sum(sir_obj.is_tre)
TI = 100. * T / I if I > 0 else np.nan
iq = sir_obj.infs_in_queue
rq = sir_obj.recs_in_queue
tq = sir_obj.tres_in_queue
lq = len(sir_obj.queue)
print('\r', self._PRINT_MSG.format(t=epitime, S=S, I=I, R=R, Tt=Tt, TI=TI,
max_u=sir_obj.max_total_control_intensity),
sep='', end='', flush=True)
self.last_print = time.time()
def println(self, sir_obj, epitime):
if not self.verbose:
return
S = np.sum(sir_obj.is_sus)
I = np.sum(sir_obj.is_inf * (1 - sir_obj.is_rec))
R = np.sum(sir_obj.is_rec)
T = np.sum(sir_obj.is_tre)
Tt = np.sum(sir_obj.is_tre)
TI = 100. * T / I if I > 0 else np.nan
iq = sir_obj.infs_in_queue
rq = sir_obj.recs_in_queue
tq = sir_obj.tres_in_queue
lq = len(sir_obj.queue)
print('\r', self._PRINTLN_MSG.format(
t=epitime, S=S, I=I, R=R, Tt=Tt, TI=TI,
max_u=sir_obj.max_total_control_intensity),
sep='', end='\n', flush=True)
self.last_print = time.time()
class SimulationSIR(object):
"""
    Simulate continuous-time SIR epidemics with treatment, with exponentially distributed
inter-event times.
The simulation algorithm works by leveraging the Markov property of the model and rejection
sampling. Events are treated in order in a priority queue. An event in the queue is a tuple
    of the form
`(node, event_type, infector_node)`
where elements are as follows:
`node` : is the node where the event occurs,
    `event_type` : is the type of event (i.e. infected 'inf', recovery 'rec', or treatment 'tre')
    `infector_node` : for infections only, the node that caused the infection.
"""
AVAILABLE_LPSOLVERS = ['scipy', 'cvxopt']
def __init__(self, G, beta, gamma, delta, rho, verbose=True):
"""
Init an SIR cascade over a graph
Arguments:
---------
G : networkx.Graph()
Graph over which the epidemic propagates
beta : float
Exponential infection rate (positive)
gamma : float
Reduction in infection rate by treatment
delta : float
Exponential recovery rate (non-negative)
rho : float
Increase in recovery rate by treatment
verbose : bool (default: True)
Indicate the print behavior, if set to False, nothing will be printed
"""
if not isinstance(G, nx.Graph):
raise ValueError('Invalid graph type, must be networkx.Graph')
self.G = G
self.A = sp.sparse.csr_matrix(nx.adjacency_matrix(self.G).toarray())
# Cache the number of nodes
self.n_nodes = len(G.nodes())
self.max_deg = np.max([d for n, d in self.G.degree()])
self.min_deg = np.min([d for n, d in self.G.degree()])
self.idx_to_node = dict(zip(range(self.n_nodes), self.G.nodes()))
self.node_to_idx = dict(zip(self.G.nodes(), range(self.n_nodes)))
# Check parameters
if isinstance(beta, (float, int)) and (beta > 0):
self.beta = beta
else:
raise ValueError("`beta` must be a positive float")
if isinstance(gamma, (float, int)) and (gamma >= 0) and (gamma <= beta):
self.gamma = gamma
else:
raise ValueError(("`gamma` must be a positive float smaller than `beta`"))
if isinstance(delta, (float, int)) and (delta >= 0):
self.delta = delta
else:
raise ValueError("`delta` must be a non-negative float")
if isinstance(rho, (float, int)) and (rho >= 0):
self.rho = rho
else:
raise ValueError("`rho` must be a non-negative float")
# Control pre-computations
self.lrsr_initiated = False # flag for initial LRSR computation
self.mcm_initiated = False # flag for initial MCM computation
# Control statistics
self.max_total_control_intensity = 0.0
# Printer for logging
self._printer = ProgressPrinter(verbose=verbose)
def expo(self, rate):
"""Samples a single exponential random variable."""
return rd.expovariate(rate)
def nodes_at_time(self, status, time):
"""
Get the status of all nodes at a given time
"""
if status == 'S':
return self.inf_occured_at > time
elif status == 'I':
return (self.rec_occured_at > time) * (self.inf_occured_at < time)
elif status == 'T':
return (self.tre_occured_at < time) * (self.rec_occured_at > time)
elif status == 'R':
return self.rec_occured_at < time
else:
raise ValueError('Invalid status.')
def _init_run(self, init_event_list, max_time):
"""
Initialize the run of the epidemic
"""
# Max time of the run
self.max_time = max_time
# Priority queue of events by time
# event invariant is ('node', event, 'node') where the second node is the infector if applicable
self.queue = PriorityQueue()
# Cache the number of ins, recs, tres in the queue
self.infs_in_queue = 0
self.recs_in_queue = 0
self.tres_in_queue = 0
        # Susceptible nodes tracking: is_sus[node]=1 if node is currently susceptible
self.initial_seed = np.zeros(self.n_nodes, dtype='bool')
        self.is_sus = np.ones(self.n_nodes, dtype='bool')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 22 14:05:18 2020
@author: danielfurman
"""
# Densification rate (dp/p)dt versus applied stress (log-log space).
# Uncertainty estimates of the linear slope {n = 1.57 ยฑ 0.22, n = 1.68 ยฑ 0.45,
# n = 3.74 ยฑ 1.02} represent the 95% confidence intervals of each linear
# regression, which are plotted below.
# These rate data also constrain a flow law model (see Firn_notebook.ipynnb)
# by taking the rate-limiting mechanism as dominant.
# Required Libraries:
import numpy as np
import matplotlib.pylab as plt
import pandas as pd
from sklearn.linear_model import LinearRegression
from scipy import stats
paper_table = pd.read_csv('data/paper_table_full.csv', delimiter=',',
header = 'infer')
# log-log linear regression of power law relationship for green series
y = np.array(paper_table['Densification rate'][6:10])
X = np.array(paper_table['applied stress'][6:10])
y = np.log(y)
X = np.log(X)
slope, intercept, r_value, p_value, std_err = stats.linregress(X,y)
reg_conf = 1.96*std_err # 95 percent confidence interval
# reshape for sklearn library
y = y.reshape(-1, 1)
X = X.reshape(-1, 1)
reg = LinearRegression().fit(X, y)
# log-log linear regression of power law relationship for blue series
y = np.array(paper_table['Densification rate'][10:15])
X = np.array(paper_table['applied stress'][10:15])
y = np.log(y)
X = np.log(X)
slope, intercept, r_value, p_value, std_err = stats.linregress(X,y)
reg1_conf = 1.96*std_err # 95 percent confidence interval
# reshape for sklearn library
y = y.reshape(-1, 1)
X = X.reshape(-1, 1)
reg1 = LinearRegression().fit(X, y)
# log-log linear regression of power law relationship for red series
y = np.array(paper_table['Densification rate'][0:6])
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# [0,0] = TN
# [1,1] = TP
# [0,1] = FP
# [1,0] = FN
# cm is a confusion matrix
# Accuracy: (TP + TN) / Total
def accuracy(cm: np.ndarray) -> float:
return (cm[0,0] + cm[1,1]) / cm.sum()
# Precision: TP / (TP + FP)
def precision(cm: np.ndarray) -> float:
return cm[1,1] / (cm[1,1] + cm[0,1])
# False positive rate: FP / N = FP / (FP + TN)
def false_positive(cm: np.ndarray) -> float:
return cm[0,1] / (cm[0,0] + cm[0,1])
# True positive rate: TP / P = TP / (TP + FN)
# Equivalent to sensitivity/recall
def true_positive(cm: np.ndarray) -> float:
return cm[1,1] / (cm[1,0] + cm[1,1])
# F1 score: 2 * precision * recall / (precision + recall)
def f_score(cm: np.ndarray) -> float:
return 2 * precision(cm) * true_positive(cm) / (precision(cm) + true_positive(cm))
# Returns a confusion matrix for labels and predictions
# [[TN, FP],
# [FN, TP]]
def confusion_matrix(y, y_hat):
cm = np.zeros((2, 2))
    np.add.at(cm, (y.astype(int), y_hat.astype(int)), 1)  # rows indexed by true label, columns by prediction
return cm
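# Quick sanity check (illustrative): for y = np.array([0, 1, 1, 0]) and
# y_hat = np.array([0, 1, 0, 0]), confusion_matrix(y, y_hat) = [[2, 0], [1, 1]],
# hence accuracy = 0.75, precision = 1.0, true_positive (recall) = 0.5, f_score = 2/3.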
def visualize_cm(cm):
df_cm = pd.DataFrame(cm, columns=['0', '1'], index=['0', '1'])
df_cm.index.name = 'Actual'
df_cm.columns.name = 'Predicted'
plt.figure(figsize=(5, 3))
sns.heatmap(df_cm, cmap='Blues', annot=True, annot_kws={'size': 16}, fmt='g')
# Returns deep copies of two arrays, shuffled with the same random permutation
def shuffle(x, y):
x_copy = x.copy()
y_copy = y.copy()
rand = np.random.randint(0, 10000)
np.random.seed(rand)
    np.random.shuffle(x_copy)
    np.random.seed(rand)      # reuse the same seed so y is shuffled with the same permutation
    np.random.shuffle(y_copy)
    return x_copy, y_copy
"""
This module contains all the functions needed to preprocess the satellite images
before the shorelines can be extracted. This includes creating a cloud mask and
pansharpening/downsampling the multispectral bands.
Author: <NAME>, Water Research Laboratory, University of New South Wales
"""
# load modules
import os
import numpy as np
import matplotlib.pyplot as plt
import pdb
# image processing modules
import skimage.transform as transform
import skimage.morphology as morphology
import sklearn.decomposition as decomposition
import skimage.exposure as exposure
# other modules
from osgeo import gdal
from pylab import ginput
import pickle
import geopandas as gpd
from shapely import geometry
# CoastSat modules
from coast_corr import SDS_tools
np.seterr(all='ignore') # raise/ignore divisions by 0 and nans
# Main function to preprocess a satellite image (L5,L7,L8 or S2)
def preprocess_single(fn, satname, cloud_mask_issue):
"""
Reads the image and outputs the pansharpened/down-sampled multispectral bands,
the georeferencing vector of the image (coordinates of the upper left pixel),
the cloud mask, the QA band and a no_data image.
For Landsat 7-8 it also outputs the panchromatic band and for Sentinel-2 it
also outputs the 20m SWIR band.
KV WRL 2018
Arguments:
-----------
fn: str or list of str
filename of the .TIF file containing the image. For L7, L8 and S2 this
is a list of filenames, one filename for each band at different
resolution (30m and 15m for Landsat 7-8, 10m, 20m, 60m for Sentinel-2)
satname: str
name of the satellite mission (e.g., 'L5')
cloud_mask_issue: boolean
True if there is an issue with the cloud mask and sand pixels are being masked on the images
Returns:
-----------
im_ms: np.array
3D array containing the pansharpened/down-sampled bands (B,G,R,NIR,SWIR1)
georef: np.array
vector of 6 elements [Xtr, Xscale, Xshear, Ytr, Yshear, Yscale] defining the
coordinates of the top-left pixel of the image
cloud_mask: np.array
2D cloud mask with True where cloud pixels are
im_extra : np.array
2D array containing the 20m resolution SWIR band for Sentinel-2 and the 15m resolution
panchromatic band for Landsat 7 and Landsat 8. This field is empty for Landsat 5.
im_QA: np.array
2D array containing the QA band, from which the cloud_mask can be computed.
im_nodata: np.array
2D array with True where no data values (-inf) are located
"""
#=============================================================================================#
# L5 images
#=============================================================================================#
if satname == 'L5':
# read all bands
data = gdal.Open(fn, gdal.GA_ReadOnly)
georef = np.array(data.GetGeoTransform())
bands = [data.GetRasterBand(k + 1).ReadAsArray() for k in range(data.RasterCount)]
im_ms = np.stack(bands, 2)
# down-sample to 15 m (half of the original pixel size)
nrows = im_ms.shape[0]*2
ncols = im_ms.shape[1]*2
# create cloud mask
im_QA = im_ms[:,:,5]
im_ms = im_ms[:,:,:-1]
cloud_mask = create_cloud_mask(im_QA, satname, cloud_mask_issue)
# resize the image using bilinear interpolation (order 1)
im_ms = transform.resize(im_ms,(nrows, ncols), order=1, preserve_range=True,
mode='constant')
# resize the image using nearest neighbour interpolation (order 0)
cloud_mask = transform.resize(cloud_mask, (nrows, ncols), order=0, preserve_range=True,
mode='constant').astype('bool_')
# adjust georeferencing vector to the new image size
# scale becomes 15m and the origin is adjusted to the center of new top left pixel
georef[1] = 15
georef[5] = -15
georef[0] = georef[0] + 7.5
georef[3] = georef[3] - 7.5
# check if -inf or nan values on any band and eventually add those pixels to cloud mask
im_nodata = np.zeros(cloud_mask.shape).astype(bool)
for k in range(im_ms.shape[2]):
im_inf = np.isin(im_ms[:,:,k], -np.inf)
im_nan = np.isnan(im_ms[:,:,k])
im_nodata = np.logical_or(np.logical_or(im_nodata, im_inf), im_nan)
# check if there are pixels with 0 intensity in the Green, NIR and SWIR bands and add those
# to the cloud mask as otherwise they will cause errors when calculating the NDWI and MNDWI
im_zeros = np.ones(cloud_mask.shape).astype(bool)
for k in [1,3,4]: # loop through the Green, NIR and SWIR bands
im_zeros = np.logical_and(np.isin(im_ms[:,:,k],0), im_zeros)
# add zeros to im nodata
im_nodata = np.logical_or(im_zeros, im_nodata)
# update cloud mask with all the nodata pixels
cloud_mask = np.logical_or(cloud_mask, im_nodata)
# no extra image for Landsat 5 (they are all 30 m bands)
im_extra = []
#=============================================================================================#
# L7 images
#=============================================================================================#
elif satname == 'L7':
# read pan image
fn_pan = fn[0]
data = gdal.Open(fn_pan, gdal.GA_ReadOnly)
georef = np.array(data.GetGeoTransform())
bands = [data.GetRasterBand(k + 1).ReadAsArray() for k in range(data.RasterCount)]
im_pan = np.stack(bands, 2)[:,:,0]
# size of pan image
nrows = im_pan.shape[0]
ncols = im_pan.shape[1]
# read ms image
fn_ms = fn[1]
data = gdal.Open(fn_ms, gdal.GA_ReadOnly)
bands = [data.GetRasterBand(k + 1).ReadAsArray() for k in range(data.RasterCount)]
im_ms = np.stack(bands, 2)
# create cloud mask
im_QA = im_ms[:,:,5]
cloud_mask = create_cloud_mask(im_QA, satname, cloud_mask_issue)
# resize the image using bilinear interpolation (order 1)
im_ms = im_ms[:,:,:5]
im_ms = transform.resize(im_ms,(nrows, ncols), order=1, preserve_range=True,
mode='constant')
# resize the image using nearest neighbour interpolation (order 0)
cloud_mask = transform.resize(cloud_mask, (nrows, ncols), order=0, preserve_range=True,
mode='constant').astype('bool_')
# check if -inf or nan values on any band and eventually add those pixels to cloud mask
im_nodata = np.zeros(cloud_mask.shape).astype(bool)
for k in range(im_ms.shape[2]):
im_inf = np.isin(im_ms[:,:,k], -np.inf)
im_nan = np.isnan(im_ms[:,:,k])
im_nodata = np.logical_or(np.logical_or(im_nodata, im_inf), im_nan)
# check if there are pixels with 0 intensity in the Green, NIR and SWIR bands and add those
# to the cloud mask as otherwise they will cause errors when calculating the NDWI and MNDWI
im_zeros = np.ones(cloud_mask.shape).astype(bool)
for k in [1,3,4]: # loop through the Green, NIR and SWIR bands
            im_zeros = np.logical_and(np.isin(im_ms[:,:,k],0), im_zeros)
#
# Copyright 2019 <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
#
# This file is part of acados.
#
# The 2-Clause BSD License
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.;
#
from acados_template import AcadosOcp, AcadosOcpSolver, AcadosModel
import numpy as np
import scipy.linalg
from linear_mass_model import *
from itertools import product
## SETTINGS:
OBSTACLE = True
SOFTEN_OBSTACLE = False
SOFTEN_TERMINAL = True
INITIALIZE = True
PLOT = False
OBSTACLE_POWER = 2
# an OCP to test the Maratos effect and second order correction
def main():
# run test cases
# all setting
params = {'globalization': ['FIXED_STEP', 'MERIT_BACKTRACKING'], # MERIT_BACKTRACKING, FIXED_STEP
'line_search_use_sufficient_descent' : [0, 1],
'qp_solver' : ['FULL_CONDENSING_HPIPM', 'PARTIAL_CONDENSING_HPIPM', 'FULL_CONDENSING_QPOASES'],
'globalization_use_SOC' : [0, 1] }
keys, values = zip(*params.items())
for combination in product(*values):
setting = dict(zip(keys, combination))
if setting['globalization'] == 'FIXED_STEP' and \
(setting['globalization_use_SOC'] or setting['line_search_use_sufficient_descent']):
# skip some equivalent settings
pass
else:
solve_marathos_ocp(setting)
def solve_marathos_ocp(setting):
globalization = setting['globalization']
line_search_use_sufficient_descent = setting['line_search_use_sufficient_descent']
globalization_use_SOC = setting['globalization_use_SOC']
qp_solver = setting['qp_solver']
# create ocp object to formulate the OCP
ocp = AcadosOcp()
# set model
model = export_linear_mass_model()
ocp.model = model
nx = model.x.size()[0]
nu = model.u.size()[0]
ny = nu
# discretization
Tf = 2
N = 20
shooting_nodes = np.linspace(0, Tf, N+1)
ocp.dims.N = N
# set cost
Q = 2*np.diag([])
R = 2*np.diag([1e1, 1e1])
ocp.cost.W_e = Q
ocp.cost.W = scipy.linalg.block_diag(Q, R)
ocp.cost.cost_type = 'LINEAR_LS'
ocp.cost.cost_type_e = 'LINEAR_LS'
ocp.cost.Vx = np.zeros((ny, nx))
Vu = np.eye((nu))
ocp.cost.Vu = Vu
ocp.cost.yref = np.zeros((ny, ))
# set constraints
Fmax = 5
ocp.constraints.lbu = -Fmax * np.ones((nu,))
ocp.constraints.ubu = +Fmax * np.ones((nu,))
ocp.constraints.idxbu = np.array(range(nu))
x0 = np.array([1e-1, 1.1, 0, 0])
ocp.constraints.x0 = x0
# terminal constraint
x_goal = np.array([0, -1.1, 0, 0])
ocp.constraints.idxbx_e = np.array(range(nx))
ocp.constraints.lbx_e = x_goal
ocp.constraints.ubx_e = x_goal
if SOFTEN_TERMINAL:
ocp.constraints.idxsbx_e = np.array(range(nx))
ocp.cost.zl_e = 1e4 * np.ones(nx)
ocp.cost.zu_e = 1e4 * np.ones(nx)
ocp.cost.Zl_e = 1e6 * np.ones(nx)
ocp.cost.Zu_e = 1e6 * np.ones(nx)
# add obstacle
if OBSTACLE:
obs_rad = 1.0; obs_x = 0.0; obs_y = 0.0;
circle = (obs_x, obs_y, obs_rad)
        ocp.constraints.uh = np.array([100.0])
import warp as wp
import numpy as np
from warp.tests.test_base import *
wp.init()
@wp.kernel
def intersect_tri(v0: wp.vec3,
v1: wp.vec3,
v2: wp.vec3,
u0: wp.vec3,
u1: wp.vec3,
u2: wp.vec3,
result: wp.array(dtype=int)):
tid = wp.tid()
result[0] = wp.intersect_tri_tri(v0, v1, v2, u0, u1, u2)
def test_intersect_tri(test, device):
points_intersect = [wp.vec3(0.0, 0.0, 0.0), wp.vec3(1.0, 0.0, 0.0), wp.vec3(0.0, 0.0, 1.0),
wp.vec3(0.5, -0.5, 0.0), wp.vec3(0.5, -0.5, 1.0), wp.vec3(0.5, 0.5, 0.0)]
points_separated = [wp.vec3(0.0, 0.0, 0.0), wp.vec3(1.0, 0.0, 0.0), wp.vec3(0.0, 0.0, 1.0),
wp.vec3(-0.5, -0.5, 0.0), wp.vec3(-0.5, -0.5, 1.0), wp.vec3(-0.5, 0.5, 0.0)]
result = wp.zeros(1, dtype=int, device=device)
wp.launch(intersect_tri, dim=1, inputs=[*points_intersect, result], device=device)
assert_np_equal(result.numpy(), np.array([1]))
wp.launch(intersect_tri, dim=1, inputs=[*points_separated, result], device=device)
    assert_np_equal(result.numpy(), np.array([0]))
from __future__ import annotations
import numpy as np
import pandas as pd
from sklearn import datasets
from IMLearn.metrics import mean_square_error
from sklearn.model_selection import train_test_split
from IMLearn.model_selection import cross_validate
from IMLearn.learners.regressors import PolynomialFitting, LinearRegression, RidgeRegression
from sklearn.linear_model import Lasso
from utils import *
import plotly.graph_objects as go
from plotly.subplots import make_subplots
MIN_RANGE = -1.2
MAX_RANGE = 2
MAX_DEGREE = 11
def select_polynomial_degree(n_samples: int = 100, noise: float = 5):
"""
Simulate data from a polynomial model and use cross-validation to select the best fitting degree
Parameters
----------
n_samples: int, default=100
Number of samples to generate
noise: float, default = 5
Noise level to simulate in responses
"""
# Question 1 - Generate dataset for model f(x)=(x+3)(x+2)(x+1)(x-1)(x-2) + eps for eps
# Gaussian noise and split into training- and testing portions
x = MIN_RANGE + np.random.rand(n_samples) * (MAX_RANGE - MIN_RANGE)
f_x = (x + 3) * (x + 2) * (x + 1) * (x - 1) * (x - 2)
eps = np.random.randn(n_samples) * noise
dataset = f_x + eps
train_X, test_X, train_y, test_y = train_test_split(x, dataset, train_size=2 / 3)
fig = make_subplots(rows=1, cols=2, subplot_titles=["True (noiseless) Model",
"Train and Test Samples With Noise"])
fig.add_traces([go.Scatter(x=x, y=f_x, mode="markers", showlegend=False),
go.Scatter(x=train_X, y=train_y, mode="markers", name="Train"),
go.Scatter(x=test_X, y=test_y, mode="markers", name="Test")],
rows=[1, 1, 1], cols=[1, 2, 2])
fig.show()
# Question 2 - Perform CV for polynomial fitting with degrees 0,1,...,10
avg_train_err = np.zeros(MAX_DEGREE)
avg_validation_err = np.zeros(MAX_DEGREE)
for d in range(MAX_DEGREE):
avg_train_err[d], avg_validation_err[d] = cross_validate(PolynomialFitting(d), train_X,
train_y, mean_square_error)
x_ = np.arange(MAX_DEGREE)
go.Figure(
[go.Scatter(x=x_, y=avg_train_err, mode="markers+lines", name="Train Error"),
go.Scatter(x=x_, y=avg_validation_err, mode="markers+lines", name="Validation Error")],
layout=go.Layout(title="Average Training and Validation Errors As a Function Of Degrees",
xaxis_title="Degrees", yaxis_title="Avg Errors")).show()
# Question 3 - Using best value of k, fit a k-degree polynomial model and report test error
best_k = int(np.argmin(avg_validation_err))
print(f"The polynomial degree for which the lowest validation error was achieved is: {best_k}.")
p = PolynomialFitting(best_k)
p.fit(train_X, train_y)
best_error = mean_square_error(test_y, p.predict(test_X))
print(f"Test error for best value of k is: {round(best_error, 2)}.\n")
def select_regularization_parameter(n_samples: int = 50, n_evaluations: int = 500):
"""
Using sklearn's diabetes dataset use cross-validation to select the best fitting
regularization parameter values for Ridge and Lasso regressions
Parameters
----------
n_samples: int, default=50
Number of samples to generate
n_evaluations: int, default = 500
Number of regularization parameter values to evaluate for each of the algorithms
"""
# Question 6 - Load diabetes dataset and split into training and testing portions
X, y = datasets.load_diabetes(return_X_y=True)
train_X, test_X, train_y, test_y = train_test_split(X, y, train_size=n_samples)
# Question 7 - Perform CV for different values of the regularization parameter for Ridge and Lasso regressions
lambdas = np.linspace(0.0001, 20, num=n_evaluations)
avg_t_err_ridge = np.zeros(n_evaluations)
    avg_v_err_ridge = np.zeros(n_evaluations)
import numpy as np
import flopy
from flopy.utils.cvfdutil import to_cvfd, gridlist_to_disv_gridprops
def test_tocvfd1():
vertdict = {}
vertdict[0] = [(0, 0), (100, 0), (100, 100), (0, 100), (0, 0)]
vertdict[1] = [(100, 0), (120, 0), (120, 20), (100, 20), (100, 0)]
verts, iverts = to_cvfd(vertdict)
assert 6 in iverts[0]
def test_tocvfd2():
vertdict = {}
vertdict[0] = [(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)]
vertdict[1] = [(1, 0), (3, 0), (3, 2), (1, 2), (1, 0)]
verts, iverts = to_cvfd(vertdict)
assert [1, 4, 5, 6, 2, 1] in iverts
def test_tocvfd3():
# create the nested grid described in the modflow-usg documentation
# outer grid
nlay = 1
nrow = ncol = 7
delr = 100.0 * np.ones(ncol)
delc = 100.0 * np.ones(nrow)
tp = np.zeros((nrow, ncol))
bt = -100.0 * np.ones((nlay, nrow, ncol))
idomain = np.ones((nlay, nrow, ncol))
idomain[:, 2:5, 2:5] = 0
sg1 = flopy.discretization.StructuredGrid(
delr=delr, delc=delc, top=tp, botm=bt, idomain=idomain
)
# inner grid
nlay = 1
nrow = ncol = 9
delr = 100.0 / 3.0 * np.ones(ncol)
delc = 100.0 / 3.0 * np.ones(nrow)
    tp = np.zeros((nrow, ncol))
#!/usr/bin/env python
import os
import re
import subprocess
import sys
import tempfile
import numpy as np
import scipy.signal
from PyQt5 import QtCore, QtWidgets
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg, NavigationToolbar2QT
from matplotlib.figure import Figure
from .extract_energy_ui import Ui_gmx_extract_energy
class CurvesModel(QtCore.QAbstractItemModel):
# columns: name, show in left axis, show in right axis,
def __init__(self, labels):
super().__init__()
self._rows = [{'name': l, 'showonleft': True, 'showonright': False, 'factor': 1.0} for l in labels]
def columnCount(self, parent=None):
return 4
def data(self, index: QtCore.QModelIndex, role=None):
row = self._rows[index.row()]
if index.column() == 0:
if role == QtCore.Qt.DisplayRole:
return row['name']
else:
return None
elif index.column() == 1:
if role == QtCore.Qt.CheckStateRole:
return [QtCore.Qt.Unchecked, QtCore.Qt.Checked][row['showonleft']]
elif index.column() == 2:
if role == QtCore.Qt.CheckStateRole:
return [QtCore.Qt.Unchecked, QtCore.Qt.Checked][row['showonright']]
elif index.column() == 3:
if role == QtCore.Qt.DisplayRole:
return '{:g}'.format(row['factor'])
else:
return None
def flags(self, index: QtCore.QModelIndex):
row = self._rows[index.row()]
if index.column() == 0:
return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemNeverHasChildren | QtCore.Qt.ItemIsSelectable
if index.column() == 1 or index.column() == 2:
return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemNeverHasChildren | QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsSelectable
if index.column() == 3:
return QtCore.Qt.ItemIsEditable | QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemNeverHasChildren | QtCore.Qt.ItemIsSelectable
else:
return QtCore.Qt.NoItemFlags
def index(self, row, col, parent=None):
return self.createIndex(row, col, None)
def rowCount(self, parent=None):
return len(self._rows)
def parent(self, index: QtCore.QModelIndex = None):
return QtCore.QModelIndex()
def setData(self, index: QtCore.QModelIndex, newvalue, role=None):
row = self._rows[index.row()]
if index.column() == 1:
row['showonleft'] = newvalue == QtCore.Qt.Checked
self.dataChanged.emit(self.index(index.row(), 1), self.index(index.row(), 1))
return True
elif index.column() == 2:
row['showonright'] = newvalue == QtCore.Qt.Checked
self.dataChanged.emit(self.index(index.row(), 2), self.index(index.row(), 2))
return True
return False
def headerData(self, column, orientation, role=None):
if orientation != QtCore.Qt.Horizontal:
return None
if role == QtCore.Qt.DisplayRole:
return ['Name', 'Left', 'Right', 'Scaling factor'][column]
def showOnLeft(self, row):
return self._rows[row]['showonleft']
def showOnRight(self, row):
return self._rows[row]['showonright']
def factor(self, row):
return self._rows[row]['factor']
def hideAll(self):
for r in self._rows:
r['showonleft'] = r['showonright'] = False
self.dataChanged.emit(self.index(0, 1), self.index(self.rowCount(), 2))
class StatisticsModel(QtCore.QAbstractItemModel):
# columns: name, mean, median, trend, std, std (pcnt), ptp, ptp (pcnt),
def __init__(self, data, labels, tmin=None, tmax=None):
super().__init__()
self.data = data
self.labels = labels
if tmin is None:
tmin = data[:, 0].min()
if tmax is None:
tmax = data[:, 0].max()
self.tmin = tmin
self.tmax = tmax
def columnCount(self, parent=None):
return 8
def data(self, index: QtCore.QModelIndex, role=None):
datacolumn = index.row() + 1
dataidx = np.logical_and(self.data[:, 0] >= self.tmin, self.data[:, 0] <= self.tmax)
data = self.data[dataidx, datacolumn]
if role != QtCore.Qt.DisplayRole:
return None
if index.column() == 0:
return self.labels[datacolumn]
elif index.column() == 1:
return str(np.mean(data))
elif index.column() == 2:
return str(np.median(data))
elif index.column() == 3:
coeffs = np.polyfit(self.data[dataidx, 0], data, 1)
return str(coeffs[0])
elif index.column() == 4:
            return str(np.std(data))
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import scipy
warnings.filterwarnings('ignore')
import matplotlib
import time#test time
import scipy.signal as signal#fftconvolve
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import caffe
import numpy as np
import os
from PIL import Image #for open image
from skimage.transform import resize
import math # for IPM
size = 1
ratio = 8
def Find_local_maximum(datase):
t = time.time()
window = signal.general_gaussian(51, p=0.5, sig=1)
peak_p = np.zeros([60, 80])
peak_p = np.argmax(datase, 2)
seed_x = [] #seed points for IPM
seed_y = [] #seed points for IPM
print("fft_convolve time 1 :", time.time() - t)
#For better local maximum
peak_p = np.max(datase, 2)
print('shape:',np.shape(peak_p))
t = time.time()
for i in range(0, 60):
peak_row = datase[i, :, :]
if(sum(np.argmax(peak_row, 1)) == 0):
continue
j = 0
while(j < 79):
l = np.array([])
while(np.argmax(peak_row[j]) > 0 and np.argmax(peak_row[j]) == np.argmax(peak_row[j+1]) and j < 79):
l = np.append(l, max(peak_row[j]))
j += 1
j += 1
if(len(l) > 0):
l = np.append(l, max(peak_row[j]))
#middle point
max_idx = j - len(l.tolist()) // 2 - 1
#local maximum
#max_idx = j - np.where(l == max(l))[0]
seed_y.append(max_idx)
seed_x.append(i)
print("Time of fftconvolve 2: ", time.time() - t)
return np.array(seed_y), np.array(seed_x)
mean = np.array([105, 117, 123])
caffe.set_mode_gpu()
caffe.set_device(2)
model_def = "./float/test.prototxt"
model_weights = "./float/trainval.caffemodel"
#load model
net = caffe.Net(model_def, # defines the structure of the model
model_weights, # contains the trained weights
caffe.TEST)
transformer = caffe.io.Transformer({'data': (net.blobs['data'].data).shape})
print((net.blobs['data'].data[0]).shape)
transformer.set_transpose('data', (2,0,1)) # move image channels to outermost dimension
transformer.set_mean('data', mean) # subtract the dataset-mean value in each channel
transformer.set_raw_scale('data', 255) # rescale from [0, 1] to [0, 255]
transformer.set_channel_swap('data', (2,1,0)) # swap channels from RGB to BGR
#caltech-lane
filename = open("./code/test/test_caltech.txt")
test_file = [i.split() for i in filename.readlines()]
img_label = []
TP_sum_nn = np.array([0,0,0])
TP_FP_sum_nn = np.array([0,0,0])
TP_FN_sum_nn = np.array([0,0,0])
TP_sum_total = np.array([0,0,0])
TP_FP_sum_total = np.array([0,0,0])
TP_FN_sum_total = np.array([0,0,0])
fig_num = 0
for file_idx in range(0, len(test_file)):
#path = "/home/chengming/chengming/VPGNet" + test_file[file_idx][0]
path = "./"+test_file[file_idx][0]
print(path)
image = caffe.io.load_image(path)
transformed_image = transformer.preprocess('data', image)
# copy the image data into the memory allocated for the net
net.blobs['data'].data[...] = transformed_image
### perform classification
t0 = time.time()
output = net.forward()
print("Time of NN : ", time.time() - t0)
    datase = np.transpose(output['multi-label'][0],(1,2,0))
import numpy as np
from scipy import stats
def qqplot(data, dist=stats.distributions.norm, binom_n=None):
"""
qqplot of the quantiles of x versus the ppf of a distribution.
Parameters
----------
data : array-like
1d data array
dist : scipy.stats.distribution or string
Compare x against dist. Strings aren't implemented yet. The default
is scipy.stats.distributions.norm
Returns
-------
matplotlib figure.
Examples
--------
>>> import scikits.statsmodels.api as sm
>>> from matplotlib import pyplot as plt
>>> data = sm.datasets.longley.Load()
>>> data.exog = sm.add_constant(data.exog)
>>> mod_fit = sm.OLS(data.endog, data.exog).fit()
>>> res = mod_fit.resid
>>> std_res = (res - res.mean())/res.std()
Import qqplots from the sandbox
>>> from scikits.statsmodels.sandbox.graphics import qqplot
>>> qqplot(std_res)
>>> plt.show()
Notes
-----
Only the default arguments currently work. Depends on matplotlib.
"""
try:
from matplotlib import pyplot as plt
    except ImportError:
raise ImportError("matplotlib not installed")
if isinstance(dist, str):
raise NotImplementedError
names_dist = {}
names_dist.update({"norm_gen" : "Normal"})
plotname = names_dist[dist.__class__.__name__]
x = np.array(data, copy=True)
x.sort()
nobs = x.shape[0]
prob = np.linspace(1./(nobs-1), 1-1./(nobs-1), nobs)
# is the above robust for a few data points?
quantiles = np.zeros_like(x)
for i in range(nobs):
quantiles[i] = stats.scoreatpercentile(x, prob[i]*100)
# estimate shape and location using distribution.fit
# for normal, but will have to be somewhat distribution specific
loc,scale = dist.fit(x)
y = dist.ppf(prob, loc=loc, scale=scale)
# plt.figure()
plt.scatter(y, quantiles)
y_low = np.min((y.min(),quantiles.min()))-.25
y_high = np.max((y.max(),quantiles.max()))+.25
plt.plot([y.min()-.25, y.max()+.25], [y_low, y_high], 'b-')
title = '%s - Quantile Plot' % plotname
plt.title(title)
xlabel = "Quantiles of %s" % plotname
plt.xlabel(xlabel)
ylabel = "%s Quantiles" % "Data"
plt.ylabel(ylabel)
plt.axis([y.min()-.25,y.max()+.25, y_low-.25, y_high+.25])
return plt.gcf()
if __name__ == "__main__":
# sample from t-distribution with 3 degrees of freedom
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
np.random.seed(12345)
    tsample = np.random.standard_t(3, size=1000)
from __future__ import print_function, division, absolute_import
import copy as copylib
import sys
# unittest only added in 3.4 self.subTest()
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import imgaug as ia
import imgaug.augmenters as iaa
from imgaug.testutils import reseed
import imgaug.random as iarandom
NP_VERSION = np.__version__
IS_NP_117_OR_HIGHER = (
NP_VERSION.startswith("2.")
or NP_VERSION.startswith("1.25")
or NP_VERSION.startswith("1.24")
or NP_VERSION.startswith("1.23")
or NP_VERSION.startswith("1.22")
or NP_VERSION.startswith("1.21")
or NP_VERSION.startswith("1.20")
or NP_VERSION.startswith("1.19")
or NP_VERSION.startswith("1.18")
or NP_VERSION.startswith("1.17")
)
class _Base(unittest.TestCase):
def setUp(self):
reseed()
class TestConstants(_Base):
def test_supports_new_np_rng_style_is_true(self):
assert iarandom.SUPPORTS_NEW_NP_RNG_STYLE is IS_NP_117_OR_HIGHER
def test_global_rng(self):
iarandom.get_global_rng() # creates global RNG upon first call
assert iarandom.GLOBAL_RNG is not None
class TestRNG(_Base):
@mock.patch("imgaug.random.normalize_generator_")
def test___init___calls_normalize_mocked(self, mock_norm):
_ = iarandom.RNG(0)
mock_norm.assert_called_once_with(0)
def test___init___with_rng(self):
rng1 = iarandom.RNG(0)
rng2 = iarandom.RNG(rng1)
assert rng2.generator is rng1.generator
@mock.patch("imgaug.random.get_generator_state")
def test_state_getter_mocked(self, mock_get):
mock_get.return_value = "mock"
rng = iarandom.RNG(0)
result = rng.state
assert result == "mock"
mock_get.assert_called_once_with(rng.generator)
@mock.patch("imgaug.random.RNG.set_state_")
def test_state_setter_mocked(self, mock_set):
rng = iarandom.RNG(0)
state = {"foo"}
rng.state = state
mock_set.assert_called_once_with(state)
@mock.patch("imgaug.random.set_generator_state_")
def test_set_state__mocked(self, mock_set):
rng = iarandom.RNG(0)
state = {"foo"}
result = rng.set_state_(state)
assert result is rng
mock_set.assert_called_once_with(rng.generator, state)
@mock.patch("imgaug.random.set_generator_state_")
def test_use_state_of__mocked(self, mock_set):
rng1 = iarandom.RNG(0)
rng2 = mock.MagicMock()
state = {"foo"}
rng2.state = state
result = rng1.use_state_of_(rng2)
assert result == rng1
mock_set.assert_called_once_with(rng1.generator, state)
@mock.patch("imgaug.random.get_global_rng")
def test_is_global__is_global__rng_mocked(self, mock_get):
rng1 = iarandom.RNG(0)
rng2 = iarandom.RNG(rng1.generator)
mock_get.return_value = rng2
assert rng1.is_global_rng() is True
@mock.patch("imgaug.random.get_global_rng")
def test_is_global_rng__is_not_global__mocked(self, mock_get):
rng1 = iarandom.RNG(0)
# different instance with same state/seed should still be viewed as
# different by the method
rng2 = iarandom.RNG(0)
mock_get.return_value = rng2
assert rng1.is_global_rng() is False
@mock.patch("imgaug.random.get_global_rng")
def test_equals_global_rng__is_global__mocked(self, mock_get):
rng1 = iarandom.RNG(0)
rng2 = iarandom.RNG(0)
mock_get.return_value = rng2
assert rng1.equals_global_rng() is True
@mock.patch("imgaug.random.get_global_rng")
def test_equals_global_rng__is_not_global__mocked(self, mock_get):
rng1 = iarandom.RNG(0)
rng2 = iarandom.RNG(1)
mock_get.return_value = rng2
assert rng1.equals_global_rng() is False
@mock.patch("imgaug.random.generate_seed_")
def test_generate_seed__mocked(self, mock_gen):
rng = iarandom.RNG(0)
mock_gen.return_value = -1
seed = rng.generate_seed_()
assert seed == -1
mock_gen.assert_called_once_with(rng.generator)
@mock.patch("imgaug.random.generate_seeds_")
def test_generate_seeds__mocked(self, mock_gen):
rng = iarandom.RNG(0)
mock_gen.return_value = [-1, -2]
seeds = rng.generate_seeds_(2)
assert seeds == [-1, -2]
mock_gen.assert_called_once_with(rng.generator, 2)
@mock.patch("imgaug.random.reset_generator_cache_")
def test_reset_cache__mocked(self, mock_reset):
rng = iarandom.RNG(0)
result = rng.reset_cache_()
assert result is rng
mock_reset.assert_called_once_with(rng.generator)
@mock.patch("imgaug.random.derive_generators_")
def test_derive_rng__mocked(self, mock_derive):
gen = iarandom.convert_seed_to_generator(0)
mock_derive.return_value = [gen]
rng = iarandom.RNG(0)
result = rng.derive_rng_()
assert result.generator is gen
mock_derive.assert_called_once_with(rng.generator, 1)
@mock.patch("imgaug.random.derive_generators_")
def test_derive_rngs__mocked(self, mock_derive):
gen1 = iarandom.convert_seed_to_generator(0)
gen2 = iarandom.convert_seed_to_generator(1)
mock_derive.return_value = [gen1, gen2]
rng = iarandom.RNG(0)
result = rng.derive_rngs_(2)
assert result[0].generator is gen1
assert result[1].generator is gen2
mock_derive.assert_called_once_with(rng.generator, 2)
@mock.patch("imgaug.random.is_generator_equal_to")
def test_equals_mocked(self, mock_equal):
mock_equal.return_value = "foo"
rng1 = iarandom.RNG(0)
rng2 = iarandom.RNG(1)
result = rng1.equals(rng2)
assert result == "foo"
mock_equal.assert_called_once_with(rng1.generator, rng2.generator)
def test_equals_identical_generators(self):
rng1 = iarandom.RNG(0)
rng2 = iarandom.RNG(rng1)
assert rng1.equals(rng2)
def test_equals_with_similar_generators(self):
rng1 = iarandom.RNG(0)
rng2 = iarandom.RNG(0)
assert rng1.equals(rng2)
def test_equals_with_different_generators(self):
rng1 = iarandom.RNG(0)
rng2 = iarandom.RNG(1)
assert not rng1.equals(rng2)
def test_equals_with_advanced_generator(self):
rng1 = iarandom.RNG(0)
rng2 = iarandom.RNG(0)
rng2.advance_()
assert not rng1.equals(rng2)
@mock.patch("imgaug.random.advance_generator_")
def test_advance__mocked(self, mock_advance):
rng = iarandom.RNG(0)
result = rng.advance_()
assert result is rng
mock_advance.assert_called_once_with(rng.generator)
@mock.patch("imgaug.random.copy_generator")
def test_copy_mocked(self, mock_copy):
rng1 = iarandom.RNG(0)
rng2 = iarandom.RNG(1)
mock_copy.return_value = rng2.generator
result = rng1.copy()
assert result.generator is rng2.generator
mock_copy.assert_called_once_with(rng1.generator)
@mock.patch("imgaug.random.RNG.copy")
@mock.patch("imgaug.random.RNG.is_global_rng")
def test_copy_unless_global_rng__is_global__mocked(self, mock_is_global,
mock_copy):
rng = iarandom.RNG(0)
mock_is_global.return_value = True
mock_copy.return_value = "foo"
result = rng.copy_unless_global_rng()
assert result is rng
mock_is_global.assert_called_once_with()
assert mock_copy.call_count == 0
@mock.patch("imgaug.random.RNG.copy")
@mock.patch("imgaug.random.RNG.is_global_rng")
def test_copy_unless_global_rng__is_not_global__mocked(self, mock_is_global,
mock_copy):
rng = iarandom.RNG(0)
mock_is_global.return_value = False
mock_copy.return_value = "foo"
result = rng.copy_unless_global_rng()
assert result is "foo"
mock_is_global.assert_called_once_with()
mock_copy.assert_called_once_with()
def test_duplicate(self):
rng = iarandom.RNG(0)
rngs = rng.duplicate(1)
assert rngs == [rng]
def test_duplicate_two_entries(self):
rng = iarandom.RNG(0)
rngs = rng.duplicate(2)
assert rngs == [rng, rng]
@mock.patch("imgaug.random.create_fully_random_generator")
def test_create_fully_random_mocked(self, mock_create):
gen = iarandom.convert_seed_to_generator(0)
mock_create.return_value = gen
rng = iarandom.RNG.create_fully_random()
mock_create.assert_called_once_with()
assert rng.generator is gen
@mock.patch("imgaug.random.derive_generators_")
def test_create_pseudo_random__mocked(self, mock_get):
rng_glob = iarandom.get_global_rng()
rng = iarandom.RNG(0)
mock_get.return_value = [rng.generator]
result = iarandom.RNG.create_pseudo_random_()
assert result.generator is rng.generator
mock_get.assert_called_once_with(rng_glob.generator, 1)
@mock.patch("imgaug.random.polyfill_integers")
def test_integers_mocked(self, mock_func):
mock_func.return_value = "foo"
rng = iarandom.RNG(0)
result = rng.integers(low=0, high=1, size=(1,), dtype="int64",
endpoint=True)
assert result == "foo"
mock_func.assert_called_once_with(
rng.generator, low=0, high=1, size=(1,), dtype="int64",
endpoint=True)
@mock.patch("imgaug.random.polyfill_random")
def test_random_mocked(self, mock_func):
mock_func.return_value = "foo"
rng = iarandom.RNG(0)
out = np.zeros((1,), dtype="float64")
result = rng.random(size=(1,), dtype="float64", out=out)
assert result == "foo"
mock_func.assert_called_once_with(
rng.generator, size=(1,), dtype="float64", out=out)
# TODO: the tests below for the generator methods are all just mock-based; add
# non-mocked versions (a hedged example sketch is appended after this class)
def test_choice_mocked(self):
self._test_sampling_func("choice", a=[1, 2, 3], size=(1,),
replace=False, p=[0.1, 0.2, 0.7])
def test_bytes_mocked(self):
self._test_sampling_func("bytes", length=[10])
def test_shuffle_mocked(self):
mock_gen = mock.MagicMock()
rng = iarandom.RNG(0)
rng.generator = mock_gen
rng.shuffle([1, 2, 3])
mock_gen.shuffle.assert_called_once_with([1, 2, 3])
def test_permutation_mocked(self):
mock_gen = mock.MagicMock()
rng = iarandom.RNG(0)
rng.generator = mock_gen
mock_gen.permutation.return_value = "foo"
result = rng.permutation([1, 2, 3])
assert result == "foo"
mock_gen.permutation.assert_called_once_with([1, 2, 3])
def test_beta_mocked(self):
self._test_sampling_func("beta", a=1.0, b=2.0, size=(1,))
def test_binomial_mocked(self):
self._test_sampling_func("binomial", n=10, p=0.1, size=(1,))
def test_chisquare_mocked(self):
self._test_sampling_func("chisquare", df=2, size=(1,))
def test_dirichlet_mocked(self):
self._test_sampling_func("dirichlet", alpha=0.1, size=(1,))
def test_exponential_mocked(self):
self._test_sampling_func("exponential", scale=1.1, size=(1,))
def test_f_mocked(self):
self._test_sampling_func("f", dfnum=1, dfden=2, size=(1,))
def test_gamma_mocked(self):
self._test_sampling_func("gamma", shape=1, scale=1.2, size=(1,))
def test_geometric_mocked(self):
self._test_sampling_func("geometric", p=0.5, size=(1,))
def test_gumbel_mocked(self):
self._test_sampling_func("gumbel", loc=0.1, scale=1.1, size=(1,))
def test_hypergeometric_mocked(self):
self._test_sampling_func("hypergeometric", ngood=2, nbad=4, nsample=6,
size=(1,))
def test_laplace_mocked(self):
self._test_sampling_func("laplace", loc=0.5, scale=1.5, size=(1,))
def test_logistic_mocked(self):
self._test_sampling_func("logistic", loc=0.5, scale=1.5, size=(1,))
def test_lognormal_mocked(self):
self._test_sampling_func("lognormal", mean=0.5, sigma=1.5, size=(1,))
def test_logseries_mocked(self):
self._test_sampling_func("logseries", p=0.5, size=(1,))
def test_multinomial_mocked(self):
self._test_sampling_func("multinomial", n=5, pvals=0.5, size=(1,))
def test_multivariate_normal_mocked(self):
self._test_sampling_func("multivariate_normal", mean=0.5, cov=1.0,
size=(1,), check_valid="foo", tol=1e-2)
def test_negative_binomial_mocked(self):
self._test_sampling_func("negative_binomial", n=10, p=0.5, size=(1,))
def test_noncentral_chisquare_mocked(self):
self._test_sampling_func("noncentral_chisquare", df=0.5, nonc=1.0,
size=(1,))
def test_noncentral_f_mocked(self):
self._test_sampling_func("noncentral_f", dfnum=0.5, dfden=1.5,
nonc=2.0, size=(1,))
def test_normal_mocked(self):
self._test_sampling_func("normal", loc=0.5, scale=1.0, size=(1,))
def test_pareto_mocked(self):
self._test_sampling_func("pareto", a=0.5, size=(1,))
def test_poisson_mocked(self):
self._test_sampling_func("poisson", lam=1.5, size=(1,))
def test_power_mocked(self):
self._test_sampling_func("power", a=0.5, size=(1,))
def test_rayleigh_mocked(self):
self._test_sampling_func("rayleigh", scale=1.5, size=(1,))
def test_standard_cauchy_mocked(self):
self._test_sampling_func("standard_cauchy", size=(1,))
def test_standard_exponential_np117_mocked(self):
fname = "standard_exponential"
arr = np.zeros((1,), dtype="float16")
args = []
kwargs = {"size": (1,), "dtype": "float16", "method": "foo",
"out": arr}
mock_gen = mock.MagicMock()
getattr(mock_gen, fname).return_value = "foo"
rng = iarandom.RNG(0)
rng.generator = mock_gen
rng._is_new_rng_style = True
result = getattr(rng, fname)(*args, **kwargs)
assert result == "foo"
getattr(mock_gen, fname).assert_called_once_with(*args, **kwargs)
def test_standard_exponential_np116_mocked(self):
fname = "standard_exponential"
arr_out = np.zeros((1,), dtype="float16")
arr_result = np.ones((1,), dtype="float16")
def _side_effect(x):
return arr_result
args = []
kwargs = {"size": (1,), "dtype": "float16", "method": "foo",
"out": arr_out}
kwargs_subcall = {"size": (1,)}
mock_gen = mock.MagicMock()
mock_gen.astype.side_effect = _side_effect
getattr(mock_gen, fname).return_value = mock_gen
rng = iarandom.RNG(0)
rng.generator = mock_gen
rng._is_new_rng_style = False
result = getattr(rng, fname)(*args, **kwargs)
getattr(mock_gen, fname).assert_called_once_with(*args,
**kwargs_subcall)
mock_gen.astype.assert_called_once_with("float16")
assert np.allclose(result, arr_result)
assert np.allclose(arr_out, arr_result)
def test_standard_gamma_np117_mocked(self):
fname = "standard_gamma"
arr = np.zeros((1,), dtype="float16")
args = []
kwargs = {"shape": 1.0, "size": (1,), "dtype": "float16", "out": arr}
mock_gen = mock.MagicMock()
getattr(mock_gen, fname).return_value = "foo"
rng = iarandom.RNG(0)
rng.generator = mock_gen
rng._is_new_rng_style = True
result = getattr(rng, fname)(*args, **kwargs)
assert result == "foo"
getattr(mock_gen, fname).assert_called_once_with(*args, **kwargs)
def test_standard_gamma_np116_mocked(self):
fname = "standard_gamma"
arr_out = np.zeros((1,), dtype="float16")
arr_result = np.ones((1,), dtype="float16")
def _side_effect(x):
return arr_result
args = []
kwargs = {"shape": 1.0, "size": (1,), "dtype": "float16",
"out": arr_out}
kwargs_subcall = {"shape": 1.0, "size": (1,)}
mock_gen = mock.MagicMock()
mock_gen.astype.side_effect = _side_effect
getattr(mock_gen, fname).return_value = mock_gen
rng = iarandom.RNG(0)
rng.generator = mock_gen
rng._is_new_rng_style = False
result = getattr(rng, fname)(*args, **kwargs)
getattr(mock_gen, fname).assert_called_once_with(*args,
**kwargs_subcall)
mock_gen.astype.assert_called_once_with("float16")
assert np.allclose(result, arr_result)
assert np.allclose(arr_out, arr_result)
def test_standard_normal_np117_mocked(self):
fname = "standard_normal"
arr = np.zeros((1,), dtype="float16")
args = []
kwargs = {"size": (1,), "dtype": "float16", "out": arr}
mock_gen = mock.MagicMock()
getattr(mock_gen, fname).return_value = "foo"
rng = iarandom.RNG(0)
rng.generator = mock_gen
rng._is_new_rng_style = True
result = getattr(rng, fname)(*args, **kwargs)
assert result == "foo"
getattr(mock_gen, fname).assert_called_once_with(*args, **kwargs)
def test_standard_normal_np116_mocked(self):
fname = "standard_normal"
arr_out = np.zeros((1,), dtype="float16")
arr_result = np.ones((1,), dtype="float16")
def _side_effect(x):
return arr_result
args = []
kwargs = {"size": (1,), "dtype": "float16", "out": arr_out}
kwargs_subcall = {"size": (1,)}
mock_gen = mock.MagicMock()
mock_gen.astype.side_effect = _side_effect
getattr(mock_gen, fname).return_value = mock_gen
rng = iarandom.RNG(0)
rng.generator = mock_gen
rng._is_new_rng_style = False
result = getattr(rng, fname)(*args, **kwargs)
getattr(mock_gen, fname).assert_called_once_with(*args,
**kwargs_subcall)
mock_gen.astype.assert_called_once_with("float16")
assert np.allclose(result, arr_result)
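# Hedged sketch addressing the TODO above: a non-mocked reproducibility check for one
# of the sampling methods. The test name is illustrative; it only assumes the
# `iarandom` and `np` imports already used throughout this test module.
def test_normal_non_mocked_reproducibility():
    sample_a = iarandom.RNG(0).normal(loc=0.0, scale=1.0, size=(100,))
    sample_b = iarandom.RNG(0).normal(loc=0.0, scale=1.0, size=(100,))
    sample_c = iarandom.RNG(1).normal(loc=0.0, scale=1.0, size=(100,))
    assert np.allclose(sample_a, sample_b)      # identical seeds -> identical draws
    assert not np.allclose(sample_a, sample_c)  # different seeds -> different draws (in practice)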
from sumo.constants import RUN_DEFAULTS
from sumo.modes.run.solver import svd_h_init, svd_si_init, SumoNMF
from sumo.network import MultiplexNet
from sumo.utils import check_matrix_symmetry
import numpy as np
import pytest
def test_svd_si_init():
a = np.random.random((20, 20))
a = (a * a.T) / 2
s = svd_si_init(a, k=3)
assert check_matrix_symmetry(s)
assert s.shape == (3, 3)
a[0, 1], a[1, 0] = 0, 1
with pytest.raises(ValueError):
svd_si_init(a, k=3)
def test_svd_h_init():
a = np.random.random((20, 20))
a = (a * a.T) / 2
h = svd_h_init(a, k=3)
assert h.shape == (20, 3)
h = svd_h_init(a, k=5)
assert h.shape == (20, 5)
a[0, 1], a[1, 0] = 0, 1
with pytest.raises(ValueError):
svd_h_init(a, k=3)
def test_init():
a0 = np.random.random((10, 10))
import torch
from dataloader import MovielensDatasetLoader
from model import NeuralCollaborativeFiltering
import numpy as np
from tqdm import tqdm
from metrics import compute_metrics
import pandas as pd
class MatrixLoader:
def __init__(self, ui_matrix, default=None, seed=0):
np.random.seed(seed)
self.ui_matrix = ui_matrix
self.positives = np.argwhere(self.ui_matrix!=0)
self.negatives = np.argwhere(self.ui_matrix==0)
if default is None:
self.default = np.array([[0, 0]])
'''
Code to plot the cross-dispersion profile of a STIS spectrum
'''
import numpy as np
from matplotlib import pyplot as plt
from astropy.io import fits
filename = 'obrc060o0_flt.fits'
columns = [20, 200, 800]
def plot_cross_dispersion(image, column):
'''
Plot the cross dispersion profile (a column)
Parameters:
-----------
image: 2D array
the image whose column you want to plot
column: int
the index of the column to display
Returns:
----------
Plots of cross-dispersion profile
'''
column_values = image[:,column]
y_pixels = np.arange(len(column_values))
mean_value = np.mean(column_values)
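# Hedged sketch of the plotting steps implied by the docstring above (the original
# function is cut off at this point); the helper name is ours and it reuses the
# `np`/`plt` imports from the top of this script.
def plot_cross_dispersion_sketch(image, column):
    column_values = image[:, column]
    y_pixels = np.arange(len(column_values))
    plt.plot(y_pixels, column_values, label='column {}'.format(column))
    plt.xlabel('Y pixel')
    plt.ylabel('Counts')
    plt.legend()
    plt.show()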
import numpy as np
from hats import *
def permuted_index(n, strata=None):
if strata is None:
perms = np.random.permutation(n)
else:
perms = np.array(range(n))
elems = np.unique(strata)
for elem in elems:
inds = perms[strata == elem]
if len(inds) > 1:
perms[strata == elem] = np.random.permutation(inds)
return perms
def gen_perms(nperms, nobs, strata=None):
perms = np.zeros((nperms, nobs), dtype=np.int)
for i in range(nperms):
perms[i,:] = permuted_index(nobs, strata)
return perms
def add_original_index(perms):
nobs = perms.shape[1]
perms = np.vstack((range(nobs), perms))
return perms
def gower_center(yDis):
n = yDis.shape[0]
I = np.eye(n,n)
uno = np.ones((n,1))
A = -0.5*(yDis**2)
C = I - (1.0/n)*uno.dot(uno.T)
G = C.dot(A).dot(C)
return G
def gower_center_many(dmats):
nobs = int(np.sqrt(dmats.shape[0]))
ntests = dmats.shape[1]
Gs = np.zeros_like(dmats)
for i in range(ntests):
Dmat = dmats[:,i].reshape(nobs,nobs)
Gs[:,i] = gower_center(Dmat).flatten()
return Gs
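# Hedged sanity-check sketch (illustrative helper, not part of the original module):
# Gower-centering the squared Euclidean distance matrix of points X recovers the Gram
# matrix of the column-centred data, i.e. gower_center(D) == Xc.dot(Xc.T).
def _gower_center_sanity_check(n=6, p=3, seed=0):
    rng = np.random.RandomState(seed)
    X = rng.rand(n, p)
    Xc = X - X.mean(axis=0)
    D = np.sqrt(((X[:, None, :] - X[None, :, :]) ** 2).sum(axis=-1))
    return np.allclose(gower_center(D), Xc.dot(Xc.T))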
def gen_h2_perms(x, cols, perms):
nperms = perms.shape[0]
nobs = perms.shape[1]
H2perms = np.zeros((nobs**2, nperms))
for i in range(nperms):
H2 = gen_h2(x, cols, perms[i,:])
H2perms[:,i] = H2.flatten()
return H2perms
def gen_ih_perms(x, cols, perms):
nperms = perms.shape[0]
nobs = perms.shape[1]
I = np.eye(nobs, nobs)
from __future__ import division
import numpy as np
from rampwf.score_types.soft_accuracy import SoftAccuracy
score_matrix_1 = np.array([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
])
score_matrix_2 = np.array([
[1, 0.5, 0],
[0.3, 1, 0.3],
[0, 0.5, 1],
])
y_true_proba_1 = np.array([1, 0, 0])
y_true_proba_2 = np.array([0, 1, 0])
y_true_proba_3 = np.array([0.5, 0.5, 0])
y_proba_1 = np.array([1, 0, 0])
y_proba_2 = np.array([0, 1, 0])
y_proba_3 = np.array([0, 0.1, 0])
y_proba_4 = np.array([-1, 0.1, -2])
y_proba_5 = np.array([0, 0, 0])
def test_soft_accuracy():
score_1 = SoftAccuracy(score_matrix=score_matrix_1)
assert score_1(np.array([y_true_proba_1]), np.array([y_proba_1])) == 1
assert score_1(np.array([y_true_proba_1]), np.array([y_proba_2])) == 0
assert score_1(np.array([y_true_proba_1])
import multiprocessing
import os
import tempfile
import numpy as np
from collections import OrderedDict
import cloudpickle
import time
from rllab.sampler.utils import rollout
from rllab.misc import logger
from curriculum.envs.base import FixedStateGenerator
class FunctionWrapper(object):
"""Wrap a function for use with parallelized map.
"""
def __init__(self, func, *args, **kwargs):
"""Construct the function oject.
Args:
func: a top level function, or a picklable callable object.
*args and **kwargs: Any additional required environment data.
"""
self.func = func
self.args = args
self.kwargs = kwargs
def __call__(self, obj):
if obj is None:
return self.func(*self.args, **self.kwargs)
else:
return self.func(obj, *self.args, **self.kwargs)
def __getstate__(self):
""" Here we overwrite the default pickle protocol to use cloudpickle. """
return dict(
func=cloudpickle.dumps(self.func),
args=cloudpickle.dumps(self.args),
kwargs=cloudpickle.dumps(self.kwargs)
)
def __setstate__(self, d):
self.func = cloudpickle.loads(d['func'])
self.args = cloudpickle.loads(d['args'])
self.kwargs = cloudpickle.loads(d['kwargs'])
def disable_cuda_initializer(*args, **kwargs):
import os
os.environ['THEANO_FLAGS'] = 'device=cpu'
os.environ['CUDA_VISIBLE_DEVICES'] = ''
def parallel_map(func, iterable_object, num_processes=-1):
"""Parallelized map function based on python process
Args:
func: Pickleable callable object that takes one parameter.
iterable_object: An iterable of elements to map the function on.
num_processes: Number of process to use. When num_processes is 1,
no new process will be created.
Returns:
The list resulted in calling the func on all objects in the original list.
"""
if num_processes == 1:
return [func(x) for x in iterable_object]
if num_processes == -1:
from rllab.sampler.stateful_pool import singleton_pool
num_processes = singleton_pool.n_parallel
process_pool = multiprocessing.Pool(
num_processes,
initializer=disable_cuda_initializer
)
results = process_pool.map(func, iterable_object)
process_pool.close()
process_pool.join()
return results
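# Hedged usage sketch for FunctionWrapper + parallel_map (values are illustrative);
# num_processes=1 keeps the call in-process, so no worker pool is spawned.
def _parallel_map_example():
    square_plus = FunctionWrapper(lambda x, offset: x * x + offset, offset=1)
    return parallel_map(square_plus, [1, 2, 3], num_processes=1)  # -> [2, 5, 10]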
def compute_rewards_from_paths(all_paths, key='rewards', as_goal=True, env=None, terminal_eps=0.1):
all_rewards = []
all_states = []
for paths in all_paths:
for path in paths:
if key == 'competence':
#goal = tuple(path['env_infos']['goal'][0])
goal_np_array = np.array(tuple(path['env_infos']['goal'][0]))
start_state = np.array(tuple(env.transform_to_goal_space(path['observations'][0])))
end_state = np.array(tuple(env.transform_to_goal_space(path['observations'][-1])))
final_dist = np.linalg.norm(goal_np_array - end_state)
initial_dist = np.linalg.norm(start_state - goal_np_array)
import numpy as np
import pandas as pd
from ..stats._utils import corr, scale
from .ordi_plot import ordiplot, screeplot
class RedundancyAnalysis():
r"""Compute redundancy analysis, a type of canonical analysis.
Redundancy analysis (RDA) is a principal component analysis on predicted
values :math:`\hat{Y}` obtained by fitting response variables :math:`Y` with
explanatory variables :math:`X` using a multiple regression.
EXPLAIN WHEN TO USE RDA
Parameters
----------
y : pd.DataFrame
:math:`n \times p` response matrix, where :math:`n` is the number
of samples and :math:`p` is the number of features. Its columns
need be dimensionally homogeneous (or you can set `scale_Y=True`).
This matrix is also referred to as the community matrix that
commonly stores information about species abundances
x : pd.DataFrame
:math:`n \times m, n \geq m` matrix of explanatory
variables, where :math:`n` is the number of samples and
:math:`m` is the number of metadata variables. Its columns
need not be standardized, but doing so turns regression
coefficients into standard regression coefficients.
scale_Y : bool, optional
Controls whether the response matrix columns are scaled to
have unit standard deviation. Defaults to `True`.
scaling : int
Scaling type 1 (scaling=1) produces a distance biplot. It focuses on
the ordination of rows (samples) because their transformed
distances approximate their original euclidean
distances. Especially interesting when most explanatory
variables are binary.
Scaling type 2 produces a correlation biplot. It focuses
on the relationships among explained variables (`y`). It
is interpreted like scaling type 1, but taking into
account that distances between objects don't approximate
their euclidean distances.
See more details about distance and correlation biplots in
[1]_, \S 9.1.4.
sample_scores_type : str
Type of sample score to output, either 'lc' or 'wa'.
Returns
-------
Ordination object, Ordonation plot, Screeplot
See Also
--------
ca
cca
Notes
-----
The algorithm is based on [1]_, \S 11.1.
References
----------
.. [1] <NAME>. and Legendre L. 1998. Numerical
Ecology. Elsevier, Amsterdam.
"""
def __init__(self, scale_Y=True, scaling=1, sample_scores_type='wa',
n_permutations = 199, permute_by=[], seed=None):
# initialize the self object
if not isinstance(scale_Y, bool):
raise ValueError("scale_Y must be either True or False.")
if not (scaling == 1 or scaling == 2):
raise ValueError("scaling must be either 1 (distance analysis) or 2 (correlation analysis).")
if not (sample_scores_type == 'wa' or sample_scores_type == 'lc'):
raise ValueError("sample_scores_type must be either 'wa' or 'lc'.")
self.scale_Y = scale_Y
self.scaling = scaling
self.sample_scores_type = sample_scores_type
self.n_permutations = n_permutations
self.permute_by = permute_by
self.seed = seed
def fit(self, X, Y, W=None):
# I use Y as the community matrix and X as the constraining matrix.
# vegan uses the inverse, which is confusing since the response set
# is usually Y and the explanatory set is usually X.
# These steps are numbered as in Legendre and Legendre, Numerical Ecology,
# 3rd edition, section 11.1.3
# 0) Preparation of data
feature_ids = X.columns
sample_ids = X.index # x index and y index should be the same
response_ids = Y.columns
X = X.as_matrix() # Constraining matrix, typically of environmental variables
Y = Y.as_matrix() # Community data matrix
if W is not None:
condition_ids = W.columns
W = W.as_matrix()
q = W.shape[1] # number of covariables (used in permutations)
else:
q=0
# dimensions
n_x, m = X.shape
n_y, p = Y.shape
if n_x == n_y:
n = n_x
else:
raise ValueError("Tables x and y must contain same number of rows.")
# scale
if self.scale_Y:
Y = (Y - Y.mean(axis=0)) / Y.std(axis=0, ddof=1)
X = X - X.mean(axis=0)# / X.std(axis=0, ddof=1)
# Note: Legendre 2011 does not scale X.
# If there is a covariable matrix W, the explanatory matrix X becomes the
# residuals of a regression between X as response and W as explanatory.
if W is not None:
W = (W - W.mean(axis=0))# / W.std(axis=0, ddof=1)
# Note: Legendre 2011 does not scale W.
B_XW = np.linalg.lstsq(W, X)[0]
X_hat = W.dot(B_XW)
X_ = X - X_hat # X is now the residual
else:
X_ = X
B = np.linalg.lstsq(X_, Y)[0]
Y_hat = X_.dot(B)
Y_res = Y - Y_hat # residuals
# 3) Perform a PCA on Y_hat
## perform singular value decomposition.
## eigenvalues can be extracted from u
## eigenvectors can be extracted from vt
u, s, vt = np.linalg.svd(Y_hat, full_matrices=False)
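# Hedged, minimal NumPy sketch of the RDA core steps referenced above (regress the
# centred Y on the centred X, then take an SVD/PCA of the fitted values). This is a
# standalone illustration with a name of our choosing, not the class implementation.
def _rda_core_sketch(Y, X):
    Xc = X - X.mean(axis=0)
    Yc = Y - Y.mean(axis=0)
    B = np.linalg.lstsq(Xc, Yc, rcond=None)[0]   # multivariate regression coefficients
    Y_hat = Xc.dot(B)                            # constrained (fitted) part of Y
    u, s, vt = np.linalg.svd(Y_hat, full_matrices=False)
    canonical_eigenvalues = (s ** 2) / (Y.shape[0] - 1)
    return canonical_eigenvalues, vt.T           # eigenvalues and response loadings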
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 30 13:24:21 2020
updated on Thu Oct 15 17:02:30 2020
@author: <NAME>
"""
#reproducability
from numpy.random import seed
seed(1)
import tensorflow as tf
tf.random.set_seed(1)
import numpy as np
from bayes_opt import BayesianOptimization
from bayes_opt.logger import JSONLogger
from bayes_opt.event import Events
from bayes_opt.util import load_logs
import os
import glob
import pandas as pd
import keras as ks
import datetime
from scipy import stats
from matplotlib import pyplot
from sklearn.preprocessing import MinMaxScaler
import tensorflow as tf
gpus = tf.config.experimental.list_physical_devices('GPU')
def load_RM_GW_and_HYRAS_Data(i):
pathGW = "./GWData"
pathHYRAS = "./MeteoData"
pathconnect = "/"
GWData_list = glob.glob(pathGW+pathconnect+'*.csv');
Well_ID = GWData_list[i]
Well_ID = Well_ID.replace(pathGW+'\\', '')
Well_ID = Well_ID.replace('_GWdata.csv', '')
GWData = pd.read_csv(pathGW+pathconnect+Well_ID+'_GWdata.csv',
parse_dates=['Date'],index_col=0, dayfirst = True,
decimal = '.', sep=',')
HYRASData = pd.read_csv(pathHYRAS+pathconnect+Well_ID+'_HYRASdata.csv',
parse_dates=['Date'],index_col=0, dayfirst = True,
decimal = '.', sep=',')
data = pd.merge(GWData, HYRASData, how='inner', left_index = True, right_index = True)
#introduce GWL t-1 as additional Input
GWData_shift1 = GWData
GWData_shift1.index = GWData_shift1.index.shift(periods = 7, freq = 'D')
GWData_shift1.rename(columns={"GWL": "GWLt-1"},inplace=True)
data = pd.merge(data, GWData_shift1, how='inner', left_index = True, right_index = True)
return data, Well_ID
def split_data(data, GLOBAL_SETTINGS):
dataset = data[(data.index < GLOBAL_SETTINGS["test_start"])] #Testdaten abtrennen
TrainingData = dataset[0:round(0.8 * len(dataset))]
StopData = dataset[round(0.8 * len(dataset))+1:round(0.9 * len(dataset))]
StopData_ext = dataset[round(0.8 * len(dataset))+1-GLOBAL_SETTINGS["seq_length"]:round(0.9 * len(dataset))] #extend data according to dealys/sequence length
OptData = dataset[round(0.9 * len(dataset))+1:]
OptData_ext = dataset[round(0.9 * len(dataset))+1-GLOBAL_SETTINGS["seq_length"]:] #extend data according to dealys/sequence length
TestData = data[(data.index >= GLOBAL_SETTINGS["test_start"]) & (data.index <= GLOBAL_SETTINGS["test_end"])] #Testdaten entsprechend dem angegebenen Testzeitraum
TestData_ext = pd.concat([dataset.iloc[-GLOBAL_SETTINGS["seq_length"]:], TestData], axis=0) # extend Testdata to be able to fill sequence later
return TrainingData, StopData, StopData_ext, OptData, OptData_ext, TestData, TestData_ext
def to_supervised(data, GLOBAL_SETTINGS):
X, Y = list(), list()
# step over the entire history one time step at a time
for i in range(len(data)):
# find the end of this pattern
end_idx = i + GLOBAL_SETTINGS["seq_length"]
# check if we are beyond the dataset
if end_idx >= len(data):
break
# gather input and output parts of the pattern
seq_x, seq_y = data[i:end_idx, 1:], data[end_idx, 0]
X.append(seq_x)
Y.append(seq_y)
return np.array(X), np.array(Y)
def gwmodel(ini,GLOBAL_SETTINGS,X_train, Y_train,X_stop, Y_stop):
# define model
seed(ini)
tf.random.set_seed(ini)
model = ks.models.Sequential()
model.add(ks.layers.LSTM(GLOBAL_SETTINGS["hidden_size"], unit_forget_bias = True,
dropout = GLOBAL_SETTINGS["dropout"]))
model.add(ks.layers.Dense(1, activation='linear'))
optimizer = ks.optimizers.Adam(lr=GLOBAL_SETTINGS["learning_rate"], epsilon=10E-3, clipnorm=GLOBAL_SETTINGS["clip_norm"], clipvalue=GLOBAL_SETTINGS["clip_value"])
model.compile(loss='mse', optimizer=optimizer, metrics=['mse'])
# early stopping
es = ks.callbacks.EarlyStopping(monitor='val_loss', mode='min', verbose=0, patience=5)
# fit network
model.fit(X_train, Y_train, validation_data=(X_stop, Y_stop), epochs=GLOBAL_SETTINGS["epochs"], verbose=0,
batch_size=GLOBAL_SETTINGS["batch_size"], callbacks=[es])
return model
# this is the optimizer function but checks only if paramters are integers and calls real optimizer function
def bayesOpt_function(pp,hiddensize, seqlength, batchsize,rH,T,Tsin):
hiddensize_int = int(hiddensize)
seqlength_int = int(seqlength)
batchsize_int = int(batchsize)
pp = int(pp)
rH = int(round(rH))
T = int(round(T))
Tsin = int(round(Tsin))
return bayesOpt_function_with_discrete_params(pp, hiddensize_int, seqlength_int, batchsize_int, rH, T, Tsin)
#this is the real optimizer function
def bayesOpt_function_with_discrete_params(pp,hiddensize_int, seqlength_int, batchsize_int, rH, T, Tsin):
assert type(hiddensize_int) == int
assert type(seqlength_int) == int
assert type(batchsize_int) == int
assert type(rH) == int
assert type(T) == int
assert type(Tsin) == int
#[...]
# fixed settings for all experiments
GLOBAL_SETTINGS = {
'pp': pp,
'batch_size': batchsize_int,
'clip_norm': True,
'clip_value': 1,
'dropout': 0,
'epochs': 30,
'hidden_size': hiddensize_int,
'learning_rate': 1e-3,
'seq_length': seqlength_int,
'test_start': pd.to_datetime('02012012', format='%d%m%Y'),
'test_end': pd.to_datetime('28122015', format='%d%m%Y')
}
## load data
data, Well_ID = load_RM_GW_and_HYRAS_Data(GLOBAL_SETTINGS["pp"])
# inputs
if rH == 0:
data = data.drop(columns='rH')
if T == 0:
data = data.drop(columns='T')
if Tsin == 0:
data = data.drop(columns='Tsin')
#scale data
scaler = MinMaxScaler(feature_range=(-1, 1))
# scaler = StandardScaler()
scaler_gwl = MinMaxScaler(feature_range=(-1, 1))
scaler_gwl.fit(pd.DataFrame(data['GWL']))
data_n = pd.DataFrame(scaler.fit_transform(data), index=data.index, columns=data.columns)
#split data
TrainingData, StopData, StopData_ext, OptData, OptData_ext, TestData, TestData_ext = split_data(data, GLOBAL_SETTINGS)
TrainingData_n, StopData_n, StopData_ext_n, OptData_n, OptData_ext_n, TestData_n, TestData_ext_n = split_data(data_n, GLOBAL_SETTINGS)
#sequence data
X_train, Y_train = to_supervised(TrainingData_n.values, GLOBAL_SETTINGS)
X_stop, Y_stop = to_supervised(StopData_ext_n.values, GLOBAL_SETTINGS)
X_opt, Y_opt = to_supervised(OptData_ext_n.values, GLOBAL_SETTINGS)
X_test, Y_test = to_supervised(TestData_ext_n.values, GLOBAL_SETTINGS)
# build and train model with different initializations
inimax = 5
optresults_members = np.zeros((len(X_opt), inimax))
for ini in range(inimax):
print("BayesOpt-Iteration {} - ini-Ensemblemember {}".format(len(optimizer.res)+1, ini+1))
# f = open('log_full.txt', "a")
# print("BayesOpt-Iteration {} - ini-Ensemblemember {}".format(len(optimizer.res)+1, ini+1), file = f)
# f.close()
model = gwmodel(ini,GLOBAL_SETTINGS,X_train, Y_train, X_stop, Y_stop)
opt_sim_n = model.predict(X_opt)
opt_sim = scaler_gwl.inverse_transform(opt_sim_n)
optresults_members[:, ini] = opt_sim.reshape(-1,)
opt_sim_median = np.median(optresults_members,axis = 1)
sim = np.asarray(opt_sim_median.reshape(-1,1))
obs = np.asarray(scaler_gwl.inverse_transform(Y_opt.reshape(-1,1)))
err = sim-obs
meanTrainingGWL = np.mean(np.asarray(TrainingData['GWL']))
meanStopGWL = np.mean(np.asarray(StopData['GWL']))
err_nash = obs - np.mean([meanTrainingGWL, meanStopGWL])
r = stats.linregress(sim[:,0], obs[:,0])
print("total elapsed time = {}".format(datetime.datetime.now()-time1))
print("(pp) elapsed time = {}".format(datetime.datetime.now()-time_single))
# f = open('log_full.txt', "a")
# print("elapsed time = {}".format(datetime.datetime.now()-time1), file = f)
# f.close()
return (1 - ((np.sum(err ** 2)) / (np.sum((err_nash) ** 2)))) + r.rvalue ** 2 #NSE+Rยฒ: (max = 2)
def simulate_testset(pp,hiddensize_int, seqlength_int, batchsize_int, rH, T, Tsin):
# fixed settings for all experiments
GLOBAL_SETTINGS = {
'pp': pp,
'batch_size': batchsize_int,
'clip_norm': True,
'clip_value': 1,
'dropout': 0,
'epochs': 30,
'hidden_size': hiddensize_int,
'learning_rate': 1e-3,
'seq_length': seqlength_int,
'test_start': pd.to_datetime('02012012', format='%d%m%Y'),
'test_end': pd.to_datetime('28122015', format='%d%m%Y')
}
## load data
data, Well_ID = load_RM_GW_and_HYRAS_Data(GLOBAL_SETTINGS["pp"])
# inputs
if rH == 0:
data = data.drop(columns='rH')
if T == 0:
data = data.drop(columns='T')
if Tsin == 0:
data = data.drop(columns='Tsin')
#scale data
scaler = MinMaxScaler(feature_range=(-1, 1))
# scaler = StandardScaler()
scaler_gwl = MinMaxScaler(feature_range=(-1, 1))
scaler_gwl.fit(pd.DataFrame(data['GWL']))
data_n = pd.DataFrame(scaler.fit_transform(data), index=data.index, columns=data.columns)
#split data
TrainingData, StopData, StopData_ext, OptData, OptData_ext, TestData, TestData_ext = split_data(data, GLOBAL_SETTINGS)
TrainingData_n, StopData_n, StopData_ext_n, OptData_n, OptData_ext_n, TestData_n, TestData_ext_n = split_data(data_n, GLOBAL_SETTINGS)
#sequence data
X_train, Y_train = to_supervised(TrainingData_n.values, GLOBAL_SETTINGS)
X_stop, Y_stop = to_supervised(StopData_ext_n.values, GLOBAL_SETTINGS)
X_opt, Y_opt = to_supervised(OptData_ext_n.values, GLOBAL_SETTINGS)
X_test, Y_test = to_supervised(TestData_ext_n.values, GLOBAL_SETTINGS)
# build and train model with different initializations
inimax = 10
testresults_members = np.zeros((len(X_test), inimax))
for ini in range(inimax):
model = gwmodel(ini,GLOBAL_SETTINGS,X_train, Y_train, X_stop, Y_stop)
test_sim_n = model.predict(X_test)
test_sim = scaler_gwl.inverse_transform(test_sim_n)
testresults_members[:, ini] = test_sim.reshape(-1,)
test_sim_median = np.median(testresults_members,axis = 1)
# get scores
sim = np.asarray(test_sim_median.reshape(-1,1))
obs = np.asarray(scaler_gwl.inverse_transform(Y_test.reshape(-1,1)))
obs_PI = np.asarray(TestData['GWLt-1']).reshape(-1,1)
err = sim-obs
err_rel = (sim-obs)/(np.max(data['GWL'])-np.min(data['GWL']))
err_nash = obs - np.mean(np.asarray(data['GWL'][(data.index < GLOBAL_SETTINGS["test_start"])]))
err_PI = obs-obs_PI
NSE = 1 - ((np.sum(err ** 2)) / (np.sum((err_nash) ** 2)))
r = stats.linregress(sim[:,0], obs[:,0])
R2 = r.rvalue ** 2
RMSE = np.sqrt(np.mean(err ** 2))
rRMSE = np.sqrt(np.mean(err_rel ** 2)) * 100
Bias = np.mean(err)
rBias = np.mean(err_rel)
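# Hedged sketch of how the global `optimizer` referenced inside bayesOpt_function is
# typically wired up with the bayes_opt package imported above. The bounds and
# iteration counts below are purely illustrative, not the values of the original study.
def _build_optimizer_sketch(log_path='./bayesopt_log.json'):
    pbounds = {'pp': (0, 0.999),          # effectively fixes the well index to 0 here
               'hiddensize': (8, 256), 'seqlength': (1, 52), 'batchsize': (16, 256),
               'rH': (0, 1), 'T': (0, 1), 'Tsin': (0, 1)}
    optimizer = BayesianOptimization(f=bayesOpt_function, pbounds=pbounds, random_state=1)
    optimizer.subscribe(Events.OPTIMIZATION_STEP, JSONLogger(path=log_path))
    optimizer.maximize(init_points=10, n_iter=40)
    return optimizer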
import matplotlib.pyplot as plt
import numpy as np
# Line styles
# 'solid' (default) '-'
# 'dotted' ':'
# 'dashed' '--'
# 'dashdot' '-.'
# 'None' '' or ' '
# Apply Line Styles
ypoints = np.array([3, 8, 1, 10])
import copy
import numpy as np
from char_detection.object_point import data_operator as dt_ope
from image_processing import image_proc
class TranslateAugmentation_9case_ThreshScoreWeightedAve:
"""
Based on the baseline prediction, detect the outermost positions occupied by objects
and shift the image within the margin that lies outside those outermost positions.
The heatmaps, sizes and offsets predicted on the shifted images are shifted back
(inverse shift) and combined by a threshold-score weighted average.
"""
def __init__(self,
ratio_shift_to_gap_w,
ratio_shift_to_gap_h,
iou_threshold,
score_threshold,
score_threshold_for_weight):
self.RATIO_SHIFT_TO_GAP_W = ratio_shift_to_gap_w
self.RATIO_SHIFT_TO_GAP_H = ratio_shift_to_gap_h
self.IOU_THRESHOLD = iou_threshold
self.SCORE_THRESHOLD = score_threshold
self.SCORE_THRESHOLD_FOR_WEIGHT = score_threshold_for_weight
self.SHIFT_UNIT_SIZE = 4
self.__initilization()
return
def __initilization(self):
return
def augment_image(self, images, pred_base_heatmaps, pred_base_sizes, pred_base_offsets):
"""
Args:
images: shape=(num_data, H, W, 1)
pred_heatmaps: shape = (num_data, hm_y, hm_x, num_class)
pred_obj_sizes: shape = (num_data, hm_y, hm_x, num_class * 2), 2=(w, h)
pred_offsets: shape = (num_data, hm_y, hm_x, num_class * 2), 2=(w, h)
"""
image_shape = images.shape[1:]
num_class = pred_base_heatmaps.shape[3]
# outer most positions : [most left x, most up y, most right x, most bottom y] * sample_num
outermost_positions = self.__calc_outermost_position(image_shape, num_class,
pred_base_heatmaps,
pred_base_sizes,
pred_base_offsets)
shifted_imgs = []
# loop of data
for img, outmost_posi in zip(images, outermost_positions):
aug_8img = []
# have no bbox
if len(outmost_posi) == 0:
pass
else:
# shift size
w_shift_1, w_shift_2, h_shift_1, h_shift_2 = self.__calc_shift_size(image_shape, outmost_posi)
# shift image
w_sfts, h_sfts = np.meshgrid([w_shift_1, 0, w_shift_2], [h_shift_1, 0, h_shift_2])
# loop of aug
for w_sft, h_sft in zip(w_sfts.flatten(), h_sfts.flatten()):
# if true, image is not augment (shift=0)
if not(w_sft == 0 and h_sft == 0):
aug_img = image_proc.ImageProcessing.translate(img, w_sft, h_sft, mode='edge')
aug_8img.append(aug_img)
# append
shifted_imgs.append(aug_8img)
shifted_imgs = np.array(shifted_imgs)
return shifted_imgs
def integrate_heatmap_size_offset(self, image,
pred_base_heatmap, pred_base_obj_size, pred_base_offset,
pred_auged_heatmaps, pred_auged_obj_sizes, pred_auged_offsets):
"""
Args:
pred_base_heatmaps: shape = (hm_y, hm_x, num_class)
pred_base_obj_sizes: shape = (hm_y, hm_x, num_class * 2), 2=(w, h)
pred_base_offsets: shape = (hm_y, hm_x, num_class * 2), 2=(w, h)
pred_auged_heatmaps: shape = (num_aug, hm_y, hm_x, num_class)
pred_auged_obj_sizes: shape = (num_aug, hm_y, hm_x, num_class * 2), 2=(w, h)
pred_auged_offsets: shape = (num_aug, hm_y, hm_x, num_class * 2), 2=(w, h)
"""
# inverse shifted
intg_hms, intg_szs, intg_ofss = self.__inv_shift_heatmap_size_offset(image,
pred_base_heatmap, pred_base_obj_size, pred_base_offset,
pred_auged_heatmaps, pred_auged_obj_sizes, pred_auged_offsets)
if len(intg_hms) == 0:
return copy.copy(pred_base_heatmap), copy.copy(pred_base_obj_size), copy.copy(pred_base_offset)
else:
# concatenate [base, auged]
# integrated hm : shape(num_aug+1, H, W, num_class)
# integrated sz : shape(num_aug+1, H, W, num_class*2)
# integrated ofs : shape(num_aug+1, H, W, num_class*2)
intg_hms = np.concatenate([intg_hms, pred_base_heatmap[np.newaxis,:,:,:]], axis=0)
intg_szs = np.concatenate([intg_szs, pred_base_obj_size[np.newaxis,:,:,:]], axis=0)
intg_ofss = np.concatenate([intg_ofss, pred_base_offset[np.newaxis,:,:,:]], axis=0)
# threshold score weight
# thresh_score_w : shape(num_aug+1, H, W, num_class)
#thresh_score_w = np.maximum(intg_hms - self.SCORE_THRESHOLD_FOR_WEIGHT, 0) / (1 - self.SCORE_THRESHOLD_FOR_WEIGHT)
thresh_score_w = np.sign(np.maximum(intg_hms - self.SCORE_THRESHOLD_FOR_WEIGHT, 0)) * intg_hms
thresh_score_w = thresh_score_w / (np.sum(thresh_score_w, axis=0) + 1e-7)
# average with weight
intg_hms = np.sum(thresh_score_w * intg_hms, axis=0)
intg_szs = np.sum(thresh_score_w * intg_szs, axis=0)
intg_ofss = np.sum(thresh_score_w * intg_ofss, axis=0)
return intg_hms, intg_szs, intg_ofss
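# Hedged toy illustration (ours) of the threshold-score weighting used above: scores
# below the threshold get zero weight, the remaining ones are weighted by their score.
def _threshold_score_weights_demo(threshold=0.3):
    scores = np.array([0.1, 0.4, 0.9])               # one score per (augmented) prediction
    w = np.sign(np.maximum(scores - threshold, 0)) * scores
    return w / (np.sum(w) + 1e-7)                    # -> approx. [0.0, 0.308, 0.692]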
def __calc_shift_size(self, image_shape, outmost_posi):
"""
Args:
outmost_posi: [most left x, most up y, most right x, most bottom y]
Returns:
w_shift_to_right, w_shift_to_left, h_shift_to_down, w_shift_to_up
"""
# have no bbox
if len(outmost_posi) == 0:
w_shift_1 = 0
w_shift_2 = 0
h_shift_1 = 0
h_shift_2 = 0
else:
# out of area including object
left_gap = np.maximum(outmost_posi[0], 0)
right_gap = np.maximum(image_shape[1] - outmost_posi[2], 0)
"""
Module: libfmp.c6.c6s2_tempo_analysis
Author: <NAME>, <NAME>
License: The MIT license, https://opensource.org/licenses/MIT
This file is part of the FMP Notebooks (https://www.audiolabs-erlangen.de/FMP)
"""
import numpy as np
import librosa
from scipy import signal
from scipy.interpolate import interp1d
from matplotlib import pyplot as plt
from numba import jit
import IPython.display as ipd
import libfmp.b
import libfmp.c6
@jit(nopython=True)
def compute_tempogram_fourier(x, Fs, N, H, Theta=np.arange(30, 601, 1)):
"""Compute Fourier-based tempogram [FMP, Section 6.2.2]
Notebook: C6/C6S2_TempogramFourier.ipynb
Args:
x (np.ndarray): Input signal
Fs (scalar): Sampling rate
N (int): Window length
H (int): Hop size
Theta (np.ndarray): Set of tempi (given in BPM) (Default value = np.arange(30, 601, 1))
Returns:
X (np.ndarray): Tempogram
T_coef (np.ndarray): Time axis (seconds)
F_coef_BPM (np.ndarray): Tempo axis (BPM)
"""
win = np.hanning(N)
N_left = N // 2
L = x.shape[0]
L_left = N_left
L_right = N_left
L_pad = L + L_left + L_right
# x_pad = np.pad(x, (L_left, L_right), 'constant') # doesn't work with jit
x_pad = np.concatenate((np.zeros(L_left), x, np.zeros(L_right)))
t_pad = np.arange(L_pad)
M = int(np.floor(L_pad - N) / H) + 1
K = len(Theta)
X = np.zeros((K, M), dtype=np.complex_)
for k in range(K):
omega = (Theta[k] / 60) / Fs
exponential = np.exp(-2 * np.pi * 1j * omega * t_pad)
x_exp = x_pad * exponential
for n in range(M):
t_0 = n * H
t_1 = t_0 + N
X[k, n] = np.sum(win * x_exp[t_0:t_1])
T_coef = np.arange(M) * H / Fs
F_coef_BPM = Theta
return X, T_coef, F_coef_BPM
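# Hedged usage sketch for compute_tempogram_fourier (helper name and settings are
# illustrative): an impulse train at 120 BPM should yield a pronounced tempogram peak
# at 120 BPM, plus peaks at integer multiples, as is typical for Fourier tempograms.
def _demo_tempogram_fourier(Fs=100, duration_sec=10):
    x = np.zeros(duration_sec * Fs)
    x[::Fs // 2] = 1.0  # one impulse every 0.5 s -> 120 BPM
    X, T_coef, F_coef_BPM = compute_tempogram_fourier(x, Fs, 5 * Fs, 10)  # 5 s window, hop 10
    magnitude = np.abs(X).mean(axis=1)
    return F_coef_BPM, magnitude  # inspect the magnitude around F_coef_BPM == 120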
def compute_sinusoid_optimal(c, tempo, n, Fs, N, H):
"""Compute windowed sinusoid with optimal phase
Notebook: C6/C6S2_TempogramFourier.ipynb
Args:
c (complex): Coefficient of tempogram (c=X(k,n))
tempo (float): Tempo parameter corresponding to c (tempo=F_coef_BPM[k])
n (int): Frame parameter of c
Fs (scalar): Sampling rate
N (int): Window length
H (int): Hop size
Returns:
kernel (np.ndarray): Windowed sinusoid
t_kernel (np.ndarray): Time axis (samples) of kernel
t_kernel_sec (np.ndarray): Time axis (seconds) of kernel
"""
win = np.hanning(N)
N_left = N // 2
omega = (tempo / 60) / Fs
t_0 = n * H
t_1 = t_0 + N
phase = - np.angle(c) / (2 * np.pi)
t_kernel = np.arange(t_0, t_1)
kernel = win * np.cos(2 * np.pi * (t_kernel*omega - phase))
t_kernel_sec = (t_kernel - N_left) / Fs
return kernel, t_kernel, t_kernel_sec
def plot_signal_kernel(x, t_x, kernel, t_kernel, xlim=None, figsize=(8, 2), title=None):
"""Visualize signal and local kernel
Notebook: C6/C6S2_TempogramFourier.ipynb
Args:
x: Signal
t_x: Time axis of x (given in seconds)
kernel: Local kernel
t_kernel: Time axis of kernel (given in seconds)
xlim: Limits for x-axis (Default value = None)
figsize: Figure size (Default value = (8, 2))
title: Title of figure (Default value = None)
Returns:
fig: Matplotlib figure handle
"""
if xlim is None:
xlim = [t_x[0], t_x[-1]]
fig = plt.figure(figsize=figsize)
plt.plot(t_x, x, 'k')
plt.plot(t_kernel, kernel, 'r')
plt.title(title)
plt.xlim(xlim)
plt.tight_layout()
return fig
# @jit(nopython=True) # not possible because of np.correlate with mode='full'
def compute_autocorrelation_local(x, Fs, N, H, norm_sum=True):
"""Compute local autocorrelation [FMP, Section 6.2.3]
Notebook: C6/C6S2_TempogramAutocorrelation.ipynb
Args:
x (np.ndarray): Input signal
Fs (scalar): Sampling rate
N (int): Window length
H (int): Hop size
norm_sum (bool): Normalizes by the number of summands in local autocorrelation (Default value = True)
Returns:
A (np.ndarray): Time-lag representation
T_coef (np.ndarray): Time axis (seconds)
F_coef_lag (np.ndarray): Lag axis
"""
# L = len(x)
L_left = round(N / 2)
L_right = L_left
x_pad = np.concatenate((np.zeros(L_left), x, np.zeros(L_right)))
L_pad = len(x_pad)
M = int(np.floor(L_pad - N) / H) + 1
A = np.zeros((N, M))
win = np.ones(N)
if norm_sum is True:
lag_summand_num = np.arange(N, 0, -1)
for n in range(M):
t_0 = n * H
t_1 = t_0 + N
x_local = win * x_pad[t_0:t_1]
r_xx = np.correlate(x_local, x_local, mode='full')
r_xx = r_xx[N-1:]
if norm_sum is True:
r_xx = r_xx / lag_summand_num
A[:, n] = r_xx
Fs_A = Fs / H
T_coef = np.arange(A.shape[1]) / Fs_A
F_coef_lag = np.arange(N) / Fs
return A, T_coef, F_coef_lag
def plot_signal_local_lag(x, t_x, local_lag, t_local_lag, lag, xlim=None, figsize=(8, 1.5), title=''):
"""Visualize signal and local lag [FMP, Figure 6.14]
Notebook: C6/C6S2_TempogramAutocorrelation.ipynb
Args:
x: Signal
t_x: Time axis of x (given in seconds)
local_lag: Local lag
t_local_lag: Time axis of kernel (given in seconds)
lag: Lag (given in seconds)
xlim: Limits for x-axis (Default value = None)
figsize: Figure size (Default value = (8, 1.5))
title: Title of figure (Default value = '')
Returns:
fig: Matplotlib figure handle
"""
if xlim is None:
xlim = [t_x[0], t_x[-1]]
fig = plt.figure(figsize=figsize)
plt.plot(t_x, x, 'k:', linewidth=0.5)
plt.plot(t_local_lag, local_lag, 'k', linewidth=3.0)
plt.plot(t_local_lag+lag, local_lag, 'r', linewidth=2)
plt.title(title)
plt.ylim([0, 1.1 * np.max(x)])
plt.xlim(xlim)
plt.tight_layout()
return fig
# @jit(nopython=True)
def compute_tempogram_autocorr(x, Fs, N, H, norm_sum=False, Theta=np.arange(30, 601)):
"""Compute autocorrelation-based tempogram
Notebook: C6/C6S2_TempogramAutocorrelation.ipynb
Args:
x (np.ndarray): Input signal
Fs (scalar): Sampling rate
N (int): Window length
H (int): Hop size
norm_sum (bool): Normalizes by the number of summands in local autocorrelation (Default value = False)
Theta (np.ndarray): Set of tempi (given in BPM) (Default value = np.arange(30, 601))
Returns:
tempogram (np.ndarray): Tempogram tempogram
T_coef (np.ndarray): Time axis T_coef (seconds)
F_coef_BPM (np.ndarray): Tempo axis F_coef_BPM (BPM)
A_cut (np.ndarray): Time-lag representation A_cut (cut according to Theta)
F_coef_lag_cut (np.ndarray): Lag axis F_coef_lag_cut
"""
tempo_min = Theta[0]
tempo_max = Theta[-1]
lag_min = int(np.ceil(Fs * 60 / tempo_max))
lag_max = int(np.ceil(Fs * 60 / tempo_min))
A, T_coef, F_coef_lag = compute_autocorrelation_local(x, Fs, N, H, norm_sum=norm_sum)
A_cut = A[lag_min:lag_max+1, :]
F_coef_lag_cut = F_coef_lag[lag_min:lag_max+1]
F_coef_BPM_cut = 60 / F_coef_lag_cut
F_coef_BPM = Theta
tempogram = interp1d(F_coef_BPM_cut, A_cut, kind='linear',
axis=0, fill_value='extrapolate')(F_coef_BPM)
return tempogram, T_coef, F_coef_BPM, A_cut, F_coef_lag_cut
def compute_cyclic_tempogram(tempogram, F_coef_BPM, tempo_ref=30,
octave_bin=40, octave_num=4):
"""Compute cyclic tempogram
Notebook: C6/C6S2_TempogramCyclic.ipynb
Args:
tempogram (np.ndarray): Input tempogram
F_coef_BPM (np.ndarray): Tempo axis (BPM)
tempo_ref (float): Reference tempo (BPM) (Default value = 30)
octave_bin (int): Number of bins per tempo octave (Default value = 40)
octave_num (int): Number of tempo octaves to be considered (Default value = 4)
Returns:
tempogram_cyclic (np.ndarray): Cyclic tempogram tempogram_cyclic
F_coef_scale (np.ndarray): Tempo axis with regard to scaling parameter
tempogram_log (np.ndarray): Tempogram with logarithmic tempo axis
F_coef_BPM_log (np.ndarray): Logarithmic tempo axis (BPM)
"""
F_coef_BPM_log = tempo_ref * np.power(2, np.arange(0, octave_num*octave_bin) / octave_bin)
import os
import json
import shutil
import datetime
import numpy as np
import pandas as pd
from sklearn.metrics.pairwise import \
euclidean_distances
from floris.utils.tools import farm_config as fconfig
from floris.utils.visualization.wind_resource import winds_pdf
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
# OPTIMIZATION #
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
def wind_speed_dist(type="weibull"):
def weibull_pdf(v, scale, shape):
return (shape / scale) * (v / scale)**(shape - 1) * np.exp(-(v / scale) ** shape)
def weibull_cdf(v, scale, shape):
return 1 - np.exp(-(v / scale) ** shape)
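# Hedged standalone check (illustrative parameter values): the Weibull CDF above is
# the integral of the corresponding PDF, which a quick numerical comparison confirms.
def _weibull_pdf_cdf_check(scale=8.0, shape=2.0, v_max=15.0, n=20000):
    v = np.linspace(0.0, v_max, n)
    pdf = (shape / scale) * (v / scale) ** (shape - 1) * np.exp(-(v / scale) ** shape)
    cdf_closed_form = 1 - np.exp(-(v_max / scale) ** shape)
    return np.isclose(np.trapz(pdf, v), cdf_closed_form, atol=1e-4)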
from numpy import zeros, arange
from math import pi, e
import random
import matplotlib.pyplot as plt
class MBdist():
def maxwell_boltzmann(self, temperature, mass, speed):
mass = mass * 1.6726219*10**(-27)
k = 1.380648*10**(-23)
p = 4*pi*speed**2 * (mass / (2*pi*k*temperature))**(3.0/2.0) * \
e**(-mass*speed**2 /(2*k*temperature))
return p
#prepared the cumulative weights
def prepare(self):
self.z[0] = self.maxwell_boltzmann(self.T, self.m, 0)
for i in range(1, self.L):
self.z[i] = self.z[i-1] + self.maxwell_boltzmann(self.T, self.m, i)
def __init__(self, T, m, L):
self.T = T
self.m = m
self.L = L
self.z = zeros(L)
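# Hedged sketch of how the cumulative weights built in prepare() can be used to draw
# speeds via inverse-transform sampling (assumed intent of the class; the helper name
# and the use of numpy.searchsorted are ours).
def sample_speed_from_cumulative(z):
    import numpy as np  # local import; the module header only pulls zeros/arange from numpy
    u = random.uniform(0.0, z[-1])
    return int(np.searchsorted(z, u))  # index corresponds to the speed in 1 m/s bins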
import numpy as np
import pytest
from pycqed.measurement import measurement_control
from pycqed.measurement.sweep_functions import None_Sweep
import pycqed.measurement.detector_functions as det
from pycqed.instrument_drivers.physical_instruments.dummy_instruments \
import DummyParHolder
from qcodes import station
class TestDetectors:
@classmethod
def setup_class(cls):
cls.station = station.Station()
cls.MC = measurement_control.MeasurementControl(
'MC', live_plot_enabled=False, verbose=False)
cls.MC.station = cls.station
cls.station.add_component(cls.MC)
cls.mock_parabola = DummyParHolder('mock_parabola')
cls.station.add_component(cls.mock_parabola)
def test_function_detector_simple(self):
def dummy_function(val_a, val_b):
return val_a
# Testing input of a simple dict
d = det.Function_Detector(dummy_function, value_names=['a'],
value_units=None,
msmt_kw={'val_a': 5.5, 'val_b': 1})
assert d.value_names == ['a']
assert d.value_units == ['a.u.']
self.MC.set_sweep_function(None_Sweep(sweep_control='soft'))
self.MC.set_sweep_points(np.linspace(0, 10, 10))
self.MC.set_detector_function(d)
np.seterr()
dat = self.MC.run()
# dset = dat["dset"]
# np.testing.assert_array_almost_equal(np.ones(10)*5.5, dset[:, 1])
def test_function_detector_parameter(self):
def dummy_function(val_a, val_b):
return val_a+val_b
# Testing input of a simple dict
x = self.mock_parabola.x
d = det.Function_Detector(dummy_function, value_names=['xvals+1'],
value_units=['s'],
msmt_kw={'val_a': x, 'val_b': 1})
assert d.value_names == ['xvals+1']
assert d.value_units == ['s']
xvals = np.linspace(0, 10, 10)
self.MC.set_sweep_function(self.mock_parabola.x)
self.MC.set_sweep_points(xvals)
self.MC.set_detector_function(d)
dat = self.MC.run()
dset = dat["dset"]
np.testing.assert_array_almost_equal(xvals+1, dset[:, 1])
def test_function_detector_dict_all_keys(self):
def dummy_function(val_a, val_b):
return {'a': val_a, 'b': val_b}
# Testing input of a simple dict
d = det.Function_Detector(dummy_function, value_names=['aa', 'b'],
result_keys=['a', 'b'],
value_units=['s', 's'],
msmt_kw={'val_a': 5.5, 'val_b': 1})
xvals = np.linspace(0, 10, 10)
self.MC.set_sweep_function(self.mock_parabola.x)
self.MC.set_sweep_points(xvals)
self.MC.set_detector_function(d)
dat = self.MC.run()
dset = dat["dset"]
np.testing.assert_array_almost_equal(np.ones(10)*5.5, dset[:, 1])
np.testing.assert_array_almost_equal(np.ones(10)*1, dset[:, 2])
assert np.shape(dset)
#!/usr/bin/env python
"""
PROCESS IDS IMAGES INTO STYLE TAG IMAGES FOR STYLIT.
Background:
------------------------------------------------------------------------------
For some of Creative Flow styles we use Stylit stylization technique
(Fiser et al, SIGGRAPH 2016). One of the inputs is an image tagging styles in
the target images with style tags available in the exemplar. Because we have
a limited number of exemplars and we only randomize colors sometimes, we
typically have fewer style tags than objectids. This script turns objectids
images into images with a specific number of style tags by grouping labels.
This script is also used to create style-tagged image for style exemplars,
where a random number of color variations of original style may be created
programmatically.
"""
import argparse
import glob
import numpy as np
import os
import random
from skimage.io import imread, imsave
from misc_util import generate_unique_colors
def get_unique_colors(img):
return np.unique(img.reshape(-1, img.shape[2]), axis=0)
def read_image(fname):
img = imread(fname).astype(np.uint8)
if len(img.shape) == 2:
img = np.expand_dims(img, axis=2)
if img.shape[2] > 3:
img = img[:,:,0:3]
elif img.shape[2] == 1:
img = np.tile(img, [1,1,3])
return img
class UniqueColors(object):
def __init__(self):
self.colors = []
self.counts = []
self.nimages = 0
self.index = {}
def to_file(self, fname):
with open(fname, 'w') as f:
f.write(' '.join([ ('%0.7f' % x) for x in self.counts ]) + '\n')
f.write(' '.join([ ('%d %d %d' % (x[0], x[1], x[2])) for x in self.colors ]) + '\n')
f.write('%d\n' % self.nimages)
f.write(' '.join([ ('%d %d' % (x[0], x[1])) for x in self.index.items() ]) + '\n')
def from_file(self, fname):
if not os.path.isfile(fname):
raise RuntimeError('File does not exist or empty name: %s' % fname)
with open(fname) as f:
lines = f.readlines()
lines = [x.strip() for x in lines]
counts = [ float(x) for x in lines[0].split() if len(x) > 0 ]
colors = [ int(x) for x in lines[1].split() if len(x) > 0 ]
nimages = int(lines[2])
index = [ int(x) for x in lines[3].split() if len(x) > 0 ]
if len(counts) * 3 != len(colors) or len(counts) * 2 != len(index):
raise RuntimeError('Malformed file: %d counts, %d colors, %d index' %
(len(counts), len(colors), len(index)))
self.counts = counts
self.colors = [np.array([colors[i*3], colors[i*3 + 1], colors[i*3 + 2]],
dtype=np.uint8)
for i in range(len(counts)) ]
self.index = dict([ (index[i*2], index[i*2 + 1])
for i in range(len(counts)) ])
self.nimages = nimages
def __idx(self, color_row):
return (color_row[0] << 16) + (color_row[1] << 8) + color_row[2]
def add_image_colors(self, img):
colors,counts = np.unique(img.reshape(-1, img.shape[2]), axis=0, return_counts=True)
for c in range(colors.shape[0]):
color = colors[c, :]
count = counts[c] / float(img.shape[0] * img.shape[1])
idx = self.__idx(color)
if idx in self.index:
self.counts[self.index[idx]] = (
self.nimages / (self.nimages + 1.0) *
self.counts[self.index[idx]] + count / (self.nimages + 1.0))
else:
self.colors.append(color)
self.counts.append(count / (self.nimages + 1.0))
self.index[idx] = len(self.colors) - 1
self.nimages += 1
def num(self):
return len(self.colors)
def has_black(self):
return self.__idx([0,0,0]) in self.index
def sorted(self, no_black=True):
res = list(zip(self.counts, [x.tolist() for x in self.colors]))
if no_black and self.has_black():
del res[self.index[self.__idx([0,0,0])]]
res.sort(reverse=True)
print('Color usage:')
print(res)
return [x[1] for x in res]
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Processes output object ids for the purpose of applying ' +
'stylit stylization method.')
parser.add_argument(
'--ids_images', action='store', type=str, required=True,
help='Image files with rendered image IDs, a glob; in the case of ' +
'an animated sequence, all frame renders should be input at once, as ' +
'some objects may be hidden in some scenes, causing inconsistencies.')
parser.add_argument(
'--from_src_template', action='store_true', default=False,
help='If set, will assume that ids image is an ids template with ' +
'only one non-zero color and will concatenate copies of this image ' +
'with unique color id set instead.')
parser.add_argument(
'--nids', action='store', type=int, default=-1,
help='Desired number of output IDs; required for --from_src_template and ' +
'in the other case caps the total number of ids created (e.g. if input ' +
'files have fewer total ids, fewer will be present in the outputs.')
parser.add_argument(
'--save_colors_file', action='store', type=str, default='',
help='If true, (and not --from_src_template), will save all the colors ' +
'in a checkpoint and skip analysis of existing colors step next time.')
parser.add_argument(
'--out_dir', action='store', type=str, required=True,
help='Output directory to write to; basename(s) will be same as for ids_images.')
args = parser.parse_args()
input_files = glob.glob(args.ids_images)
if len(input_files) == 0:
raise RuntimeError('No files matched glob %s' % args.ids_images)
ucolors = UniqueColors()
if args.from_src_template:
if len(input_files) > 1:
raise RuntimeError('Cannot process more than one file with --from_src_template')
if args.nids <= 0:
raise RuntimeError('Must specify --nids when running with --from_src_template')
img = read_image(input_files[0])
ucolors.add_image_colors(img)
if not ucolors.has_black() or ucolors.num() != 2:
print(ucolors.colors)
raise RuntimeError(
'Error processing %s with --from_src_template: '
'template must have 2 colors, exactly one black, but '
' %d colors found' %
(args.ids_images, len(ucolors.colors)))
res = np.zeros((img.shape[0], img.shape[1] * args.nids, 3), dtype=np.uint8)
out_colors = generate_unique_colors(args.nids)
for c in range(args.nids):
not_black = (img[:,:,0] != 0) | (img[:,:,1] != 0) | (img[:,:,2] != 0)
img[not_black] = np.array(out_colors[c], dtype=np.uint8)
import matplotlib.pyplot as plt
import numpy as np
import json
'''
This script plots multiple frame-by-frame results for PSNR and SSIM for testing video sequences.
The raw results files are generated from eval_video.m using MATLAB.
'''
### Three sets of results ###
# Loading result text files into JSON format
path1 = "test/vid4/alt_only_cur_downsize_20200916_ff_0.txt"
f1 = open(path1, 'r')
frameData1 = json.load(f1)
path2 = "test/vid4/alt_only_cur_downsize_20200916_obj2_HR_10_20201008.txt"
f2 = open(path2, 'r')
frameData2 = json.load(f2)
path3 = "test/vid4/alt_only_cur_downsize_20200916_info_recycle_ff_0_20201002.txt"
f3 = open(path3, 'r')
frameData3 = json.load(f3)
# Iterate through each video sequence
for (vid1, vid2, vid3) in zip(frameData1, frameData2, frameData3):
# Initialise result arrays
psnr_arr1 = []
ssim_arr1 = []
psnr_arr2 = []
ssim_arr2 = []
psnr_arr3 = []
ssim_arr3 = []
# Do not plot the final average of average result from the test since it is not a video sequence
if vid1 == 'average of average' or vid2 == 'average of average' or vid3 == 'average of average':
continue
#iterate through each frame
for (frames1, frames2, frames3) in zip(frameData1[vid1]['frame'][0],frameData2[vid2]['frame'][0], frameData3[vid3]['frame'][0]):
psnr1 = frameData1[vid1]['frame'][0][frames1][0]
ssim1 = frameData1[vid1]['frame'][0][frames1][1]
psnr_arr1.append(psnr1)
ssim_arr1.append(ssim1)
psnr2 = frameData2[vid2]['frame'][0][frames2][0]
ssim2 = frameData2[vid2]['frame'][0][frames2][1]
psnr_arr2.append(psnr2)
ssim_arr2.append(ssim2)
psnr3 = frameData3[vid3]['frame'][0][frames3][0]
ssim3 = frameData3[vid3]['frame'][0][frames3][1]
psnr_arr3.append(psnr3)
ssim_arr3.append(ssim3)
psnr_arr1 = np.array(psnr_arr1)
ssim_arr1 = np.array(ssim_arr1)
psnr_arr2 = np.array(psnr_arr2)
ssim_arr2 = np.array(ssim_arr2)
import datetime
import logging
import numpy as np
import os.path
from PIL import Image
from scipy import ndimage
from lxml import etree
import time
from mengenali.io import read_image
def classify_number(input_file, order, layers):
cv_image = read_image(input_file)
classify_number_in_memory(cv_image, order, layers)
def classify_number_in_memory(cv_image, order, layers):
input_image = Image.fromarray(cv_image)
input_image = np.array(input_image.getdata()).reshape(input_image.size[0], input_image.size[1])
input_image = input_image.astype(np.float32)
input_image /= input_image.max()
input_image = input_image.reshape((input_image.shape[0], input_image.shape[1], 1))
# run through the layers
first_fully_connected = True
for layer_name, type in order:
if type == 'conv':
input_image = convolve_image_stack(input_image, layers[layer_name])
elif type == 'pool':
input_image = pool_image_stack(input_image, layers[layer_name])
else:
if first_fully_connected:
input_image = np.swapaxes(input_image, 1, 2)
input_image = np.swapaxes(input_image, 0, 1)
input_image = input_image.flatten('C')
first_fully_connected = False
input_image = apply_fully_connected(input_image, layers[layer_name])
#input image now contains the raw network output, apply softmax
input_image = np.exp(input_image)
sum = np.sum(input_image)
out = input_image / sum
return out
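# Hedged note: the exp/sum softmax above can overflow for large raw activations; a
# numerically stable variant (not part of the original pipeline) subtracts the maximum
# before exponentiating, which leaves the result mathematically unchanged.
def _stable_softmax(logits):
    shifted = logits - np.max(logits)
    exps = np.exp(shifted)
    return exps / np.sum(exps)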
def classify_numbers(input_dir, order, layers):
for file_name in os.listdir(input_dir):
input_file = input_dir + "\\" + file_name;
classify_number(input_file, order, layers)
def parse_network(network):
xml_net = etree.parse(network)
# store the layer info
order = []
layers = dict()
for child in xml_net.getroot():
if child.tag == "layer":
tp = child.find('type')
if tp is not None:
logging.info(tp.text)
nm = child.attrib['name']
if tp.text == 'conv':
order.append((nm, tp.text))
layers[nm] = parse_convolution_layer(child)
elif tp.text == 'pool':
order.append((nm, tp.text))
layers[nm] = parse_pool_layer(child)
elif tp.text == 'fc':
order.append((nm, tp.text))
layers[nm] = parse_fully_connected_layer(child)
return order, layers
start_time = time.time()
np.set_printoptions(precision=6)
np.set_printoptions(suppress=True)
"""
..
Copyright (c) 2016-2017, Magni developers.
All rights reserved.
See LICENSE.rst for further information.
Module prodiving performance indicator determination functionality.
Routine listings
----------------
calculate_coherence(Phi, Psi, norm=None)
Calculate the coherence of the Phi Psi matrix product.
calculate_mutual_coherence(Phi, Psi, norm=None)
Calculate the mutual coherence of Phi and Psi.
calculate_relative_energy(Phi, Psi, method=None)
Calculate the relative energy of Phi Psi matrix product atoms.
Notes
-----
For examples of uses of the performance indicators, see the related papers on
predicting/modelling reconstruction quality [1]_, [2]_.
References
----------
.. [1] <NAME>, <NAME>, and <NAME>, "Predicting Reconstruction
Quality within Compressive Sensing for Atomic Force Microscopy," *2015 IEEE
Global Conference on Signal and Information Processing (GlobalSIP)*,
Orlando, FL, 2015, pp. 418-422. doi: 10.1109/GlobalSIP.2015.7418229
.. [2] <NAME>, <NAME>, and <NAME>, "Modelling Reconstruction
Quality of Lissajous Undersampled Atomic Force Microscopy Images," *2016
IEEE 13th International Symposium on Biomedical Imaging (ISBI)*, Prague,
Czech Republic, 2016, pp. 245-248. doi: 10.1109/ISBI.2016.7493255
"""
from __future__ import division
import numpy as np
from magni.utils.validation import decorate_validation as _decorate_validation
from magni.utils.validation import validate_numeric as _numeric
from magni.utils.validation import validate_generic as _generic
def calculate_coherence(Phi, Psi, norm=None):
r"""
Calculate the coherence of the Phi Psi matrix product.
In the context of Compressive Sensing, coherence usually refers to the
maximum absolute correlation between two columns of the Phi Psi matrix
product. This function allows the usage of a different normalised norm
where the infinity-norm yields the usual case.
Parameters
----------
Phi : magni.utils.matrices.Matrix or numpy.ndarray
The measurement matrix.
Psi : magni.utils.matrices.Matrix or numpy.ndarray
The dictionary matrix.
norm : int or float
The normalised norm used for the calculation (the default value is None
which implies that the 0-, 1-, 2-, and infinity-norms are returned).
Returns
-------
coherence : float or dict
The coherence value(s).
Notes
-----
If `norm` is None, the function returns a dict containing the coherence
using the 0-, 1-, 2-, and infinity-norms. Otherwise, the function returns
the coherence using the specified norm.
The coherence is calculated as:
.. math::
\left(\frac{1}{n^2 - n}
\sum_{i = 1}^n \sum_{\substack{j = 1 \\ j \neq i}}^n \left(
\frac{|\Psi_{:, i}^T \Phi^T \Phi \Psi_{:, j}|}
{||\Phi \Psi_{:, i}||_2 ||\Phi \Psi_{:, j}||_2}
\right)^{\text{norm}}\right)^{\frac{1}{\text{norm}}}
where `n` is the number of columns in `Psi`. In the case of the 0-norm,
the coherence is calculated as:
.. math::
\frac{1}{n^2 - n}
\sum_{i = 1}^n \sum_{\substack{j = 1 \\ j \neq i}}^n \mathbf{1}
\left(\frac{|\Psi_{:, i}^T \Phi^T \Phi \Psi_{:, j}|}
{||\Phi \Psi_{:, i}||_2 ||\Phi \Psi_{:, j}||_2}\right)
where :math:`\mathbf{1}(a)` is 1 if `a` is non-zero and 0 otherwise. In the
case of the infinity-norm, the coherence is calculated as:
.. math::
\max_{\substack{i, j \in \{1, \dots, n\} \\ i \neq j}}
\left(\frac{|\Psi_{:, i}^T \Phi^T \Phi \Psi_{:, j}|}
{||\Phi \Psi_{:, i}||_2 ||\Phi \Psi_{:, j}||_2}\right)
Examples
--------
For example,
>>> import numpy as np
>>> import magni
>>> from magni.cs.indicators import calculate_coherence
>>> Phi = np.zeros((5, 9))
>>> Phi[0, 0] = Phi[1, 2] = Phi[2, 4] = Phi[3, 6] = Phi[4, 8] = 1
>>> Psi = magni.imaging.dictionaries.get_DCT((3, 3))
>>> for item in sorted(calculate_coherence(Phi, Psi).items()):
... print('{}-norm: {:.3f}'.format(*item))
0-norm: 0.222
1-norm: 0.141
2-norm: 0.335
inf-norm: 1.000
The above values can be calculated individually by specifying a norm:
>>> for norm in (0, 1, 2, np.inf):
... value = calculate_coherence(Phi, Psi, norm=norm)
... print('{}-norm: {:.3f}'.format(norm, value))
0-norm: 0.222
1-norm: 0.141
2-norm: 0.335
inf-norm: 1.000
"""
@_decorate_validation
def validate_input():
_numeric('Phi', ('integer', 'floating', 'complex'), shape=(-1, -1))
_numeric('Psi', ('integer', 'floating', 'complex'),
shape=(Phi.shape[1], -1))
_numeric('norm', ('integer', 'floating'), range_='[0;inf]',
ignore_none=True)
validate_input()
A = np.zeros((Phi.shape[0], Psi.shape[1]))
e = np.zeros((A.shape[1], 1))
for i in range(A.shape[1]):
e[i] = 1
A[:, i] = Phi.dot(Psi.dot(e)).reshape(-1)
e[i] = 0
M = np.zeros((A.shape[1], A.shape[1]))
PhiT = Phi.T
PsiT = Psi.T
for i in range(A.shape[1]):
M[:, i] = np.abs(PsiT.dot(PhiT.dot(
A[:, i].reshape(-1, 1)))).reshape(-1)
M[i, i] = 0
w = 1 / np.linalg.norm(A, axis=0).reshape(-1, 1)
M = M * w * w.T
if norm is None:
entries = (M.size - M.shape[0])
value = {0: np.sum(M > 1e-9) / entries,
1: np.sum(M) / entries,
2: (np.sum(M**2) / entries)**(1 / 2),
np.inf: np.max(M)}
elif norm == 0:
value = np.sum(M > 1e-9) / (M.size - M.shape[0])
elif norm == np.inf:
value = np.max(M)
else:
value = (np.sum(M**norm) / (M.size - M.shape[0]))**(1 / norm)
return value
def calculate_mutual_coherence(Phi, Psi, norm=None):
r"""
Calculate the mutual coherence of Phi and Psi.
In the context of Compressive Sensing, mutual coherence usually refers to
the maximum absolute correlation between two columns of Phi and Psi. This
function allows the usage of a different normalised norm where the
infinity-norm yields the usual case.
Parameters
----------
Phi : magni.utils.matrices.Matrix or numpy.ndarray
The measurement matrix.
Psi : magni.utils.matrices.Matrix or numpy.ndarray
The dictionary matrix.
norm : int or float
The normalised norm used for the calculation (the default value is None
which implies that the 0-, 1-, 2-, and infinity-norms are returned).
Returns
-------
mutual_coherence : float or dict
The mutual_coherence value(s).
Notes
-----
If `norm` is None, the function returns a dict containing the mutual
coherence using the 0-, 1-, 2-, and infinity-norms. Otherwise, the function
returns the mutual coherence using the specified norm.
The mutual coherence is calculated as:
.. math::
\left(\frac{1}{m n} \sum_{i = 1}^m \sum_{j = 1}^n
|\Phi_{i, :} \Psi_{:, j}|^{\text{norm}}\right)^{\frac{1}{\text{norm}}}
where `m` is the number of rows in `Phi` and `n` is the number of columns
in `Psi`. In the case of the 0-norm, the mutual coherence is calculated as:
.. math::
\frac{1}{m n} \sum_{i = 1}^m \sum_{j = 1}^n \mathbf{1}
(|\Phi_{i, :} \Psi_{:, j}|)
where :math:`\mathbf{1}(a)` is 1 if `a` is non-zero and 0 otherwise. In the
case of the infinity-norm, the mutual coherence is calculated as:
.. math::
\max_{i \in \{1, \dots, m\}, j \in \{1, \dots, n\}}
|\Phi_{i, :} \Psi_{:, j}|
Examples
--------
For example,
>>> import numpy as np
>>> import magni
>>> from magni.cs.indicators import calculate_mutual_coherence
>>> Phi = np.zeros((5, 9))
>>> Phi[0, 0] = Phi[1, 2] = Phi[2, 4] = Phi[3, 6] = Phi[4, 8] = 1
>>> Psi = magni.imaging.dictionaries.get_DCT((3, 3))
>>> for item in sorted(calculate_mutual_coherence(Phi, Psi).items()):
... print('{}-norm: {:.3f}'.format(*item))
0-norm: 0.889
1-norm: 0.298
2-norm: 0.333
inf-norm: 0.667
The above values can be calculated individually by specifying a norm:
>>> for norm in (0, 1, 2, np.inf):
... value = calculate_mutual_coherence(Phi, Psi, norm=norm)
... print('{}-norm: {:.3f}'.format(norm, value))
0-norm: 0.889
1-norm: 0.298
2-norm: 0.333
inf-norm: 0.667
"""
@_decorate_validation
def validate_input():
_numeric('Phi', ('integer', 'floating', 'complex'), shape=(-1, -1))
_numeric('Psi', ('integer', 'floating', 'complex'),
shape=(Phi.shape[1], -1))
_numeric('norm', ('integer', 'floating'), range_='[0;inf]',
ignore_none=True)
validate_input()
M = np.zeros((Phi.shape[0], Psi.shape[1]))
e = np.zeros((Psi.shape[1], 1))
for i in range(M.shape[1]):
e[i] = 1
M[:, i] = np.abs(Phi.dot(Psi.dot(e))).reshape(-1)
e[i] = 0
if norm is None:
value = {0: np.sum(M > 1e-9) / M.size,
1: np.sum(M) / M.size,
2: (np.sum(M**2) / M.size)**(1 / 2),
np.inf: | np.max(M) | numpy.max |
# Mostly based on the code written by <NAME>:
# https://github.com/mrharicot/monodepth/blob/master/utils/evaluation_utils.py
import numpy as np
# import pandas as pd
import os
import cv2
from collections import Counter
import pickle
from scipy.interpolate import LinearNDInterpolator
def compute_errors(gt, pred):
thresh = np.maximum((gt / pred), (pred / gt))
a1 = (thresh < 1.25 ).mean()
a2 = (thresh < 1.25 ** 2).mean()
a3 = (thresh < 1.25 ** 3).mean()
rmse = (gt - pred) ** 2
rmse = np.sqrt(rmse.mean())
rmse_log = (np.log(gt) - np.log(pred)) ** 2
rmse_log = np.sqrt(rmse_log.mean())
abs_rel = np.mean(np.abs(gt - pred) / gt)
sq_rel = np.mean(((gt - pred)**2) / gt)
return abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3
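# A minimal usage sketch of compute_errors with synthetic depths (the values are
# invented for illustration): every prediction here is within 20% of the ground
# truth, so the delta < 1.25 accuracy (a1) comes out as 1.0.
def _compute_errors_demo():
    gt = np.array([2.0, 5.0, 10.0])
    pred = np.array([2.4, 5.0, 9.0])
    abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3 = compute_errors(gt, pred)
    return abs_rel, rmse, a1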
###############################################################################
####################### KITTI
width_to_focal = dict()
width_to_focal[1242] = 721.5377
width_to_focal[1241] = 718.856
width_to_focal[1224] = 707.0493
width_to_focal[1238] = 718.3351
def load_gt_disp_kitti(path):
gt_disparities = []
for i in range(200):
disp = cv2.imread(path + "/training/disp_noc_0/" + str(i).zfill(6) + "_10.png", -1)
disp = disp.astype(np.float32) / 256
gt_disparities.append(disp)
return gt_disparities
def convert_disps_to_depths_kitti(gt_disparities, pred_disparities):
gt_depths = []
pred_depths = []
pred_disparities_resized = []
for i in range(len(gt_disparities)):
gt_disp = gt_disparities[i]
height, width = gt_disp.shape
pred_disp = pred_disparities[i]
pred_disp = width * cv2.resize(pred_disp, (width, height), interpolation=cv2.INTER_LINEAR)
pred_disparities_resized.append(pred_disp)
mask = gt_disp > 0
gt_depth = width_to_focal[width] * 0.54 / (gt_disp + (1.0 - mask))
pred_depth = width_to_focal[width] * 0.54 / pred_disp
gt_depths.append(gt_depth)
pred_depths.append(pred_depth)
return gt_depths, pred_depths, pred_disparities_resized
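# The conversion above relies on the stereo relation depth = focal * baseline / disparity,
# with the 0.54 m KITTI baseline. A short worked example with an invented disparity:
def _disp_to_depth_example():
    focal = width_to_focal[1242]      # focal length for a 1242-px-wide image
    disparity = 50.0                  # hypothetical disparity in pixels
    return focal * 0.54 / disparity   # ~7.79 m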
###############################################################################
####################### EIGEN
def read_text_lines(file_path):
f = open(file_path, 'r')
lines = f.readlines()
f.close()
lines = [l.rstrip() for l in lines]
return lines
def read_file_data(files, data_root):
gt_files = []
gt_calib = []
im_sizes = []
im_files = []
cams = []
num_probs = 0
for filename in files:
filename = filename.split()[0]
splits = filename.split('/')
# camera_id = filename[-1] # 2 is left, 3 is right
date = splits[0]
im_id = splits[4][:10]
file_root = '{}/{}'
im = filename
vel = '{}/{}/velodyne_points/data/{}.bin'.format(splits[0], splits[1], im_id)
if os.path.isfile(data_root + im):
gt_files.append(data_root + vel)
gt_calib.append(data_root + date + '/')
im_sizes.append(cv2.imread(data_root + im).shape[:2])
im_files.append(data_root + im)
cams.append(2)
else:
num_probs += 1
print('{} missing'.format(data_root + im))
# print(num_probs, 'files missing')
return gt_files, gt_calib, im_sizes, im_files, cams
def load_velodyne_points(file_name):
# adapted from https://github.com/hunse/kitti
points = np.fromfile(file_name, dtype=np.float32).reshape(-1, 4)
points[:, 3] = 1.0 # homogeneous
return points
def lin_interp(shape, xyd):
# taken from https://github.com/hunse/kitti
m, n = shape
ij, d = xyd[:, 1::-1], xyd[:, 2]
f = LinearNDInterpolator(ij, d, fill_value=0)
J, I = np.meshgrid(np.arange(n), | np.arange(m) | numpy.arange |
import numpy as np
from scipy import special
from zenquant.ctastrategy import (
CtaTemplate,
StopOrder,
TickData,
BarData,
TradeData,
OrderData,
BarGenerator,
)
from zenquant.trader.constant import (
Status,
Direction,
Offset,
Exchange
)
import lightgbm as lgb
from tzlocal import get_localzone
from datetime import datetime
from zenquant.trader.utility import round_to
from zenquant.feed.data import BarDataFeed
from zenquant.env.observer import Observer
from zenquant.utils.get_indicators_info import get_bar_level_indicator_info
def softmax(x):
exp_x = np.exp(x - | np.max(x) | numpy.max |
#%% [markdown]
## Chapter 2 Lab: Introduction to R (now in Python!)
# Please note that, the purpose of this file is *not* to demonstrate Python's basic functionalities (there are much more comprehensive guides, [like this](https://learnxinyminutes.com/docs/python3/)) but to mirror ISLR's lab in R as much as possible.
### Basic Commands
x = [1, 3, 2, 5]
print(x)
y = [1, 4, 3]
print(y)
#%% [markdown]
#### Get the length of a variable
print(len(x))
print(len(y))
#%% [markdown]
#### Element-wise addition of two lists
##### Pure Python
# Use [map](https://docs.python.org/2/library/functions.html#map) with [operator.add](https://docs.python.org/2/library/operator.html#operator.add) ([source](https://stackoverflow.com/a/18713494/4173146)):
from operator import add
x = [1, 6, 2]
print(list(map(add, x, y)))
#%% [markdown]
# or [zip](https://docs.python.org/2/library/functions.html#zip) with a list comprehension:
print([sum(i) for i in zip(x, y)])
#%% [markdown]
##### Using NumPy (will be faster than pure Python) ([source](https://stackoverflow.com/a/18713494/4173146)):
import numpy as np
x2 = np.array([1, 6, 2])
y2 = np.array([1, 4, 3])
print(x2 + y2)
#%% [markdown]
#### List all the variables
def printvars():
tmp = globals().copy()
[print(k,' : ',v,' type:' , type(v)) for k,v in tmp.items() if not k.startswith('_') and k!='tmp' and k!='In' and k!='Out' and not hasattr(v, '__call__')]
printvars()
#%% [markdown]
#### Clear a variable's content
x = None
print(x)
#%% [markdown]
#### Delete a variable (its reference)
del y
print(y)
#%% [markdown]
#### Delete all varialbes ([source](https://stackoverflow.com/a/53415612/4173146))
for name in dir():
if not name.startswith('_'):
del globals()[name]
for name in dir():
if not name.startswith('_'):
del locals()[name]
print(x2)
print(y2)
#%% [markdown]
# or simply restart the interpreter.
#%% [markdown]
#### Declare matrices ([source](https://stackoverflow.com/questions/6667201/how-to-define-a-two-dimensional-array-in-python))
##### Pure Python
rowCount = 4
colCount = 3
mat = [[0 for x in range(colCount)] for x in range(rowCount)]
print(mat)
#%% [markdown]
# or a shorter version:
mat = [[0] * colCount for i in range(rowCount)]
print(mat)
#%% [markdown]
# However, it is best to use numpy arrays to represent matrices.
import numpy
mat = numpy.zeros((rowCount, colCount))
print(mat)
#%% [markdown]
#### The sqaure root of each element of a vector or matrix (numpy array)
import numpy as np
mat = [[16] * colCount for i in range(rowCount)]
mat = np.asarray(mat)
print(np.sqrt(mat))
#%% [markdown]
#### Generate a vector of random normal variables
# Dimensions are provided as arguements to the numpy function.
#
# For random samples from a Normal distribution with mean *mu* and standard deviation *sigma*, use:
# `sigma * np.random.randn(...) + mu` according to the [documentation](https://docs.scipy.org/doc/numpy-1.16.0/reference/generated/numpy.random.randn.html#numpy.random.randn)
import numpy as np
x = np.random.randn(50)
y = x + ( 0.1 * np.random.randn(50) + 50 )
#%% [markdown]
# To compute the [Pearson correlation coefficient](https://en.wikipedia.org/wiki/Pearson_correlation_coefficient), or simply correlation, between the two vectors:
print(np.corrcoef(x, y))
#%% [markdown]
# To set the seed for random number generation, in Python:
import random
random.seed(0)
#%% [markdown]
# Or in numpy:
np.random.seed(0)
x = np.random.randn(50)
y = x + ( 0.1 * np.random.randn(50) + 50 )
#%% [markdown]
#### To compute the mean, variance, and standard deviation of a vector of numbers:
print(np.mean(x))
print(np.var(x))
print(np.std(x))
print(np.mean(y))
print(np.var(y))
print(np.std(y))
#%% [markdown]
### Graphics
#### Using matplotlib
import numpy as np
import matplotlib.pyplot as plt
| np.random.seed(0) | numpy.random.seed |
import numpy as np
import scipy.special as special
def loopbrz( Ra, I0, Nturns, R, Z ):
# Input
# Ra [m] Loop radius
# I0 [A] Loop current
# Nturns Loop number of turns (windings)
# R [m] Radial coordinate of the point
# Z [m] Axial coordinate of the point
# Output
# Br, Bz [T] Radial and Axial components of B-field at (R,Z)
#
# (Note that singularities are not handled here)
mu0 = 4.0e-7 * np.pi
B0 = mu0/2.0/Ra * I0 * Nturns
alfa = np.absolute(R)/Ra
beta = Z/Ra
gamma = (Z+1.0e-10)/(R+1.0e-10)
Q = (1+alfa)**2 + beta**2
ksq = 4.0 * alfa / Q
asq = alfa * alfa
bsq = beta * beta
Qsp = 1.0/np.pi/np.sqrt(Q)
K = special.ellipk(ksq)
E = special.ellipe(ksq)
Br = gamma * B0*Qsp * ( E * (1+asq+bsq)/(Q-4.0*alfa) - K )
Bz = B0*Qsp * ( E * (1-asq-bsq)/(Q-4.0*alfa) + K )
return Br, Bz
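# A quick plausibility check with invented loop parameters: on the axis (R = 0, Z = 0)
# the field should reduce to the textbook centre value Bz = mu0 * Nturns * I0 / (2 * Ra),
# with Br ~ 0.
def _loopbrz_axis_check():
    Ra, I0, Nturns = 0.1, 10.0, 5
    Br, Bz = loopbrz(Ra, I0, Nturns, 0.0, 0.0)
    Bz_expected = 4.0e-7 * np.pi / (2.0 * Ra) * I0 * Nturns
    return Br, Bz, Bz_expected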
def roto(EulerAngles):
# Classic (proper) Euler Angles (p,t,f)
# with Z-X-Z rotation sequence:
# (psi,z), (theta,x), (phi,z)
# p=psi, t=theta, f=phi angles in [rad]
p=EulerAngles[0]
t=EulerAngles[1]
f=EulerAngles[2]
sp=np.sin(p)
st=np.sin(t)
sf=np.sin(f)
cp=np.cos(p)
ct=np.cos(t)
cf= | np.cos(f) | numpy.cos |
import numpy as np
import time
def hms2dec(h,m,s):
return 15*(h + (m/60) + (s/3600))
def dms2dec(d,m,s):
if d>=0:
return (d + (m/60) + (s/3600))
return (d - (m/60) - (s/3600))
def angular_dist(r1, d1, r2, d2): # remove radians, radians mein hi ghusenge
# r1= np.radians(r1)
# r2= np.radians(r2)
# d1= np.radians(d1)
# d2= np.radians(d2)
a = (np.sin(np.abs(d1 - d2)/2))**2
b = np.cos(d1)*np.cos(d2)*np.sin(np.abs(r1 - r2)/2)**2
d = 2*np.arcsin(np.sqrt(a + b))
return d
def crossmatch(cat1, cat2, max_dist):
max_dist = np.radians(max_dist)
matches = []
no_matches = []
start = time.perf_counter()
cat1 = np.radians(cat1)
cat2 = np.radians(cat2)
ra2s = cat2[:, 0]
dec2s = cat2[:, 1]
id_1 = 0
for i in cat1:
best = (0,0,max_dist+1)
dists = angular_dist(i[0], i[1], ra2s, dec2s)
min_dist = np.min(dists)
id_2 = np.argmin(dists)
best = (id_1,id_2,min_dist)
if best[2]<=max_dist:
matches.append(best)
else:
no_matches.append(id_1)
id_1+=1
return (matches, no_matches, time.perf_counter() - start)
if __name__ == '__main__':
ra1, dec1 = np.radians([180, 30])
cat2 = [[180, 32], [55, 10], [302, -44]]
cat2 = np.radians(cat2)
ra2s, dec2s = cat2[:,0], cat2[:,1]
dists = angular_dist(ra1, dec1, ra2s, dec2s)
print(np.degrees(dists))
cat1 = | np.array([[180, 30], [45, 10], [300, -45]]) | numpy.array |
from __future__ import division
from __future__ import print_function
#TODO: there are multiple implementations of functions like _apply_by_file_index. these should be consolidated into one
#common function that is used and called multiple times. In addition, aggregator and transform functions that are used
#across apply_by_file wrappers should be shared (rather than defined multiple times). We could also call_apply_by_file_index
#"groupby" to conform to the pandas style. e.g. bo.groupby(session) returns a generator whose produces are brain objects
#each of one session. we could then use bo.groupby(session).aggregate(xform) to produce a list of objects, where each is
#comprised of the xform applied to the brain object containing one session worth of data from the original object.
import copy
import os
import numpy.matlib as mat
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import imageio
import nibabel as nib
import hypertools as hyp
import shutil
import warnings
from nilearn import plotting as ni_plt
from nilearn import image
from nilearn.input_data import NiftiMasker
from scipy.stats import kurtosis, zscore, pearsonr
from scipy.spatial.distance import pdist
from scipy.spatial.distance import cdist
from scipy.spatial.distance import squareform
from scipy.special import logsumexp
from scipy import linalg
from scipy.ndimage.interpolation import zoom
try:
from itertools import zip_longest
except:
from itertools import izip_longest as zip_longest
def _std(res=None):
"""
Load a Nifti image of the standard MNI 152 brain at the given resolution
Parameters
----------
res : int or float or None
If int or float: (for cubic voxels) or a list or array of 3D voxel dimensions
If None, returns loaded gray matter masked brain
Returns
----------
results : Nifti1Image
Nifti image of the standard brain
"""
from .nifti import Nifti
from .load import load
std_img = load('std')
if res:
return _resample_nii(std_img, res)
else:
return std_img
def _gray(res=None):
"""
Load a Nifti image of the gray matter masked MNI 152 brain at the given resolution
Parameters
----------
res : int or float or None
If int or float: (for cubic voxels) or a list or array of 3D voxel dimensions
If None, returns loaded gray matter masked brain
Returns
----------
results : Nifti1Image
Nifti image of gray masked brain
"""
from .nifti import Nifti
from .load import load
gray_img = load('gray')
threshold = 100
gray_data = gray_img.get_data()
gray_data[np.isnan(gray_data) | (gray_data < threshold)] = 0
if np.iterable(res) or np.isscalar(res):
return _resample_nii(Nifti(gray_data, gray_img.affine), res)
else:
return Nifti(gray_data, gray_img.affine)
def _resample_nii(x, target_res, precision=5):
"""
Resample a Nifti image to have the given voxel dimensions
Parameters
----------
x : Nifti1Image
Input Nifti image (a nibel Nifti1Image object)
target_res : int or float or None
Int or float (for cubic voxels) or a list or array of 3D voxel dimensions
precision : int
Number of decimal places in affine transformation matrix for resulting image (default: 5)
Returns
----------
results : Nifti1Image
Re-scaled Nifti image
"""
from .nifti import Nifti
if np.any(np.isnan(x.get_data())):
img = x.get_data()
img[np.isnan(img)] = 0.0
x = nib.nifti1.Nifti1Image(img, x.affine)
res = x.header.get_zooms()[0:3]
scale = np.divide(res, target_res).ravel()
target_affine = x.affine
target_affine[0:3, 0:3] /= scale
target_affine = np.round(target_affine, decimals=precision)
# correct for 1-voxel shift
target_affine[0:3, 3] -= np.squeeze(np.multiply(np.divide(target_res, 2.0), np.sign(target_affine[0:3, 3])))
target_affine[0:3, 3] += np.squeeze(np.sign(target_affine[0:3, 3]))
if len(scale) < np.ndim(x.get_data()):
assert np.ndim(x.get_data()) == 4, 'Data must be 3D or 4D'
scale = np.append(scale, x.shape[3])
z = zoom(x.get_data(), scale)
try:
z[z < 1e-5] = np.nan
except:
pass
return Nifti(z, target_affine)
def _apply_by_file_index(bo, xform, aggregator):
"""
Session dependent function application and aggregation
Parameters
----------
bo : Brain object
Contains data
xform : function
The function to apply to the data matrix from each filename
aggregator: function
Function for aggregating results across multiple iterations
Returns
----------
results : numpy ndarray
Array of aggregated results
"""
for idx, session in enumerate(bo.sessions.unique()):
session_xform = xform(bo.get_slice(sample_inds=np.where(bo.sessions == session)[0], inplace=False))
if idx == 0:
results = session_xform
else:
results = aggregator(results, session_xform)
return results
def _kurt_vals(bo):
"""
Function that calculates maximum kurtosis values for each channel
Parameters
----------
bo : Brain object
Contains data
Returns
----------
results: 1D ndarray
Maximum kurtosis across sessions for each channel
"""
sessions = bo.sessions.unique()
results = list(map(lambda s: kurtosis(bo.data[(s==bo.sessions).values]), sessions))
return np.max(np.vstack(results), axis=0)
def _get_corrmat(bo):
"""
Function that calculates the average subject level correlation matrix for brain object across session
Parameters
----------
bo : Brain object
Contains data
Returns
----------
results: 2D np.ndarray
The average correlation matrix across sessions
"""
def aggregate(p, n):
return p + n
def zcorr_xform(bo):
return np.multiply(bo.dur, _r2z(1 - squareform(pdist(bo.get_data().T, 'correlation'))))
summed_zcorrs = _apply_by_file_index(bo, zcorr_xform, aggregate)
#weight each session by recording time
return _z2r(summed_zcorrs / np.sum(bo.dur))
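# _r2z and _z2r are used above but not defined in this excerpt; they are presumably
# the Fisher z-transform and its inverse. A minimal sketch under that assumption
# (the package's own helpers should be preferred where available):
def _r2z_sketch(r):
    # Fisher z-transform of correlation coefficients
    return np.arctanh(r)
def _z2r_sketch(z):
    # inverse Fisher z-transform, back to correlation values
    return np.tanh(z)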
def _z_score(bo):
"""
Function that calculates the average subject level correlation matrix for brain object across session
Parameters
----------
bo : Brain object
Contains data
Returns
----------
results: 2D np.ndarray
The average correlation matrix across sessions
"""
def z_score_xform(bo):
return zscore(bo.get_data())
def vstack_aggregrate(x1, x2):
return | np.vstack((x1, x2)) | numpy.vstack |
import pytest
import numpy as np
import sparse
from sparse import DOK
from sparse._utils import assert_eq
@pytest.mark.parametrize("shape", [(2,), (2, 3), (2, 3, 4)])
@pytest.mark.parametrize("density", [0.1, 0.3, 0.5, 0.7])
def test_random_shape_nnz(shape, density):
s = sparse.random(shape, density, format="dok")
assert isinstance(s, DOK)
assert s.shape == shape
expected_nnz = density * np.prod(shape)
assert np.floor(expected_nnz) <= s.nnz <= np.ceil(expected_nnz)
def test_convert_to_coo():
s1 = sparse.random((2, 3, 4), 0.5, format="dok")
s2 = sparse.COO(s1)
assert_eq(s1, s2)
def test_convert_from_coo():
s1 = sparse.random((2, 3, 4), 0.5, format="coo")
s2 = DOK(s1)
assert_eq(s1, s2)
def test_convert_from_numpy():
x = np.random.rand(2, 3, 4)
s = DOK(x)
assert_eq(x, s)
def test_convert_to_numpy():
s = sparse.random((2, 3, 4), 0.5, format="dok")
x = s.todense()
assert_eq(x, s)
@pytest.mark.parametrize(
"shape, data",
[
(2, {0: 1}),
((2, 3), {(0, 1): 3, (1, 2): 4}),
((2, 3, 4), {(0, 1): 3, (1, 2, 3): 4, (1, 1): [6, 5, 4, 1]}),
],
)
def test_construct(shape, data):
s = DOK(shape, data)
x = np.zeros(shape, dtype=s.dtype)
for c, d in data.items():
x[c] = d
assert_eq(x, s)
@pytest.mark.parametrize("shape", [(2,), (2, 3), (2, 3, 4)])
@pytest.mark.parametrize("density", [0.1, 0.3, 0.5, 0.7])
def test_getitem(shape, density):
s = sparse.random(shape, density, format="dok")
x = s.todense()
for _ in range(s.nnz):
idx = np.random.randint(np.prod(shape))
idx = np.unravel_index(idx, shape)
assert np.isclose(s[idx], x[idx])
@pytest.mark.parametrize(
"shape, index, value",
[
((2,), slice(None), | np.random.rand() | numpy.random.rand |
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
plt.rcParams["font.family"] = 'NanumBarunGothic'
plt.rcParams["font.size"] = 32
mpl.rcParams['axes.unicode_minus'] = False
# Possible actions: hit, stick
HIT = 0  # Request another card (hit).
STICK = 1  # Stop requesting cards (stick).
ACTIONS = [HIT, STICK]
# The player's fixed strategy:
# STICK when the current card sum is 20 or 21, otherwise HIT
POLICY_OF_PLAYER = np.zeros(22, dtype=int)
for i in range(12, 20):
POLICY_OF_PLAYER[i] = HIT
POLICY_OF_PLAYER[20] = POLICY_OF_PLAYER[21] = STICK
# The player's target policy (the policy whose value is estimated in off-policy learning), as a function
def target_policy_player(usable_ace_player, player_sum, dealer_card):
return POLICY_OF_PLAYER[player_sum]
# The player's behavior policy (the policy used to select actions in off-policy learning), as a function
def behavior_policy_player(usable_ace_player, player_sum, dealer_card):
if np.random.binomial(1, 0.5) == 1:
return STICK
return HIT
# The dealer's strategy
POLICY_OF_DEALER = np.zeros(22)
for i in range(12, 17):
POLICY_OF_DEALER[i] = HIT
for i in range(17, 22):
POLICY_OF_DEALER[i] = STICK
# Draw a new card
# Assume the deck is infinite (cards are drawn with replacement)
def get_card():
card = np.random.randint(1, 14)
card = min(card, 10)
return card
# Return the card's value (an ace counts as 11)
def card_value(card_id):
return 11 if card_id == 1 else card_id
# Play one game of blackjack
# @policy_of_player: specifies the policy the player follows
# @initial_state: the given initial state (whether the player holds a usable ace, the player's card sum, the dealer's face-up card)
# @initial_action: the initial action
def play_black_jack(policy_of_player, initial_state=None, initial_action=None):
# =============== Initial blackjack game setup =============== #
# Sum of the player's cards
player_cards_sum = 0
# Stores the experiences (states, actions) the player goes through
player_experience_trajectory = []
# Whether the player counts an ace as 11 (usable ace)
usable_ace_player = False
# Dealer-related variables
dealer_card1 = 0
dealer_card2 = 0
usable_ace_dealer = False
if initial_state is None:
# Generate a random initial state
while player_cards_sum < 12:
# Always choose HIT while the player's card sum is below 12
card = get_card()
player_cards_sum += card_value(card)
# If the player's sum exceeds 21, check for an ace counted as 11 that can be recounted as 1
if player_cards_sum > 21:
assert player_cards_sum == 22
player_cards_sum -= 10
else:
usable_ace_player = usable_ace_player | (1 == card)
# Deal cards to the dealer; assume the first card is face up
dealer_card1 = get_card()
dealer_card2 = get_card()
else:
# Use the specified initial state passed in as an argument
usable_ace_player, player_cards_sum, dealer_card1 = initial_state
dealer_card2 = get_card()
# The game state
state = [usable_ace_player, player_cards_sum, dealer_card1]
# Compute the dealer's card sum
dealer_cards_sum = card_value(dealer_card1) + card_value(dealer_card2)
usable_ace_dealer = 1 in (dealer_card1, dealer_card2)
# If the initial sum exceeds 21 the dealer must hold two aces, so treat one as 1
if dealer_cards_sum > 21:
assert dealer_cards_sum == 22
dealer_cards_sum -= 10
assert dealer_cards_sum <= 21
assert player_cards_sum <= 21
# =============== Blackjack game play =============== #
# Player's turn
while True:
if initial_action is not None:
action = initial_action
initial_action = None
else:
# Choose an action based on the current state
action = policy_of_player(usable_ace_player, player_cards_sum, dealer_card1)
# Track the player's experience trajectory for importance sampling
player_experience_trajectory.append([(usable_ace_player, player_cards_sum, dealer_card1), action])
if action == STICK:
break
elif action == HIT:
new_card = get_card()
# Track the number of aces the player holds
player_ace_count = int(usable_ace_player)
if new_card == 1:
player_ace_count += 1
player_cards_sum += card_value(new_card)
# To avoid going bust, treat an ace as 1 if one is available
while player_cards_sum > 21 and player_ace_count:
player_cards_sum -= 10
player_ace_count -= 1
# Player goes bust
if player_cards_sum > 21:
return state, -1, player_experience_trajectory
assert player_cards_sum <= 21
usable_ace_player = (player_ace_count == 1)
# Dealer's turn
while True:
action = POLICY_OF_DEALER[dealer_cards_sum]
if action == STICK:
break
new_card = get_card()
dealer_ace_count = int(usable_ace_dealer)
if new_card == 1:
dealer_ace_count += 1
dealer_cards_sum += card_value(new_card)
while dealer_cards_sum > 21 and dealer_ace_count:
dealer_cards_sum -= 10
dealer_ace_count -= 1
if dealer_cards_sum > 21:
return state, 1, player_experience_trajectory
usable_ace_dealer = (dealer_ace_count == 1)
# =============== Determine the blackjack game result =============== #
# Compare the player's and dealer's card sums (game result)
assert player_cards_sum <= 21 and dealer_cards_sum <= 21
if player_cards_sum > dealer_cards_sum:
return state, 1, player_experience_trajectory
elif player_cards_sum == dealer_cards_sum:
return state, 0, player_experience_trajectory
else:
return state, -1, player_experience_trajectory
# Monte Carlo method with on-policy sampling
def monte_carlo_on_policy(episodes):
# Track states with and without a usable ace separately
states_usable_ace = np.zeros((10, 10))
states_usable_ace_count = np.ones((10, 10))
states_no_usable_ace = np.zeros((10, 10))
states_no_usable_ace_count = | np.ones((10, 10)) | numpy.ones |
##################################################
# Train a RAW-to-RGB model using training images #
##################################################
import tensorflow as tf
import imageio
import numpy as np
import sys
from datetime import datetime
from load_dataset import load_train_patch, load_val_data
from model import PUNET
import utils
import vgg
# Processing command arguments
dataset_dir, model_dir, result_dir, vgg_dir, dslr_dir, phone_dir,\
arch, LEVEL, inst_norm, num_maps_base, restore_iter, patch_w, patch_h,\
batch_size, train_size, learning_rate, eval_step, num_train_iters, save_mid_imgs = \
utils.process_command_args(sys.argv)
# Defining the size of the input and target image patches
PATCH_WIDTH, PATCH_HEIGHT = patch_w//2, patch_h//2
DSLR_SCALE = float(1) / (2 ** (max(LEVEL,0) - 1))
TARGET_WIDTH = int(PATCH_WIDTH * DSLR_SCALE)
TARGET_HEIGHT = int(PATCH_HEIGHT * DSLR_SCALE)
TARGET_DEPTH = 3
TARGET_SIZE = TARGET_WIDTH * TARGET_HEIGHT * TARGET_DEPTH
| np.random.seed(0) | numpy.random.seed |
import os
import sys
import cv2
import numpy as np
import six.moves.urllib as urllib
import tensorflow as tf
from PIL import Image, ImageDraw, ImageFont
from sqlalchemy.sql.sqltypes import BOOLEAN
from timeit import default_timer as timer
import tarfile
from collections import defaultdict
from distutils.version import StrictVersion
from io import StringIO
from object_detection.utils import label_map_util, ops as utils_ops, visualization_utils as vis_util
from tensorflow.python.framework import graph_util
from tensorflow.python.platform import gfile
from tensorflow.python.tools import optimize_for_inference_lib
if StrictVersion(tf.__version__) < StrictVersion('1.9.0'):
raise ImportError(
'Please upgrade your TensorFlow installation to v1.9.* or later!')
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
def load_model_by_tf_interface(pb_path,num_classes,score_threshold):
input_1 = tf.placeholder(shape=[None, None, 3], name="input_image", dtype=tf.uint8)
new_img_dims = tf.expand_dims(input_1, 0)
# Load a (frozen) Tensorflow model into memory
with tf.gfile.GFile(pb_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='', input_map={"image_tensor:0": new_img_dims})
_generate_graph(score_threshold,num_classes)
def _generate_graph(score_threshold,num_classes):
# Get handles to input and output tensors
tensor_num= tf.get_default_graph().get_tensor_by_name('num_detections:0')
tensor_scores= tf.get_default_graph().get_tensor_by_name('detection_scores:0')
tensor_boxes= tf.get_default_graph().get_tensor_by_name('detection_boxes:0')
tensor_classes= tf.get_default_graph().get_tensor_by_name('detection_classes:0')
#print(tensor_dict)
num_detections= tf.cast(tensor_num[0],tf.int32)
detection_scores=tensor_scores[0]
detection_boxes = tensor_boxes[0]
detection_classes = tf.cast(tensor_classes[0],tf.uint8)
mask=detection_scores >= score_threshold
scores_ =tf.boolean_mask(detection_scores,mask)
boxes_ =tf.boolean_mask(detection_boxes,mask)
classes_ =tf.boolean_mask(detection_classes,mask)
num_=tf.shape(scores_)
# all outputs are float32 numpy arrays, so convert types as appropriate
output_nodes={}
output_nodes['num'] = tf.identity(num_, name="output_num")
output_nodes['classes'] = tf.identity(classes_, name="output_classes")
output_nodes['boxes'] = tf.identity(boxes_, name="output_boxes")
output_nodes['scores'] =tf.identity(scores_, name="output_scores")
def load_model_by_unity_interface(pb_path,num_classes):
with tf.gfile.GFile(pb_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
# Run inference
def get_nodes(sess):
get_output={}
get_input={}
get_input["input_1"] = sess.graph.get_tensor_by_name("input_image:0")
get_output["boxes"] = sess.graph.get_tensor_by_name("output_boxes:0")
get_output["scores"] = sess.graph.get_tensor_by_name("output_scores:0")
get_output["classes"] = sess.graph.get_tensor_by_name("output_classes:0")
get_output["num"] = sess.graph.get_tensor_by_name("output_num:0")
return get_input,get_output
def boxes_reversize(boxes,in_shape,out_shape):
for box in boxes:
box[0] = box[0]*in_shape[1]
box[1] = box[1]*in_shape[0]
box[2] = box[2]*in_shape[1]
box[3] = box[3]*in_shape[0]
def detect(sess,image,get_input,get_output,force_image_resize):
image_data = load_image_into_numpy_array(image)
# the array based representation of the image will be used later in order to prepare the
# result image with boxes and labels on it.
start = timer()
output_dict = sess.run(get_output,
feed_dict={get_input["input_1"]: image_data})
end = timer()
print("detect time %s s" % (end - start))
boxes_reversize(output_dict["boxes"],image.size,image.size)
print(output_dict)
return output_dict
def write_pb( sess,output_pb_dir, output_pb_file,get_input,get_output):
input_nodes = [get_input["input_1"].name.split(":")[0]]
output_nodes = [get_output["boxes"].name.split(":")[0], get_output["scores"].name.split(":")[
0], get_output["classes"].name.split(":")[0],get_output["num"].name.split(":")[0]]
print("input nodes:", input_nodes)
print("output nodes:", output_nodes)
constant_graph = graph_util.convert_variables_to_constants(
sess, sess.graph.as_graph_def(), output_nodes)
optimize_Graph = optimize_for_inference_lib.optimize_for_inference(
constant_graph,
input_nodes, # an array of the input node(s)
output_nodes, # an array of output nodes
tf.float32.as_datatype_enum)
optimize_for_inference_lib.ensure_graph_is_valid(optimize_Graph)
with tf.gfile.GFile(os.path.join(output_pb_dir, output_pb_file), "wb") as f:
f.write(constant_graph.SerializeToString())
def generate_colors(class_names):
import colorsys
# Generate colors for drawing bounding boxes.
hsv_tuples = [(x / len(class_names), 1., 1.)
for x in range(len(class_names))]
colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
colors = list(
map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),colors))
np.random.seed(10101) # Fixed seed for consistent colors across runs.
# Shuffle colors to decorrelate adjacent classes.
np.random.shuffle(colors)
np.random.seed(None) # Reset seed to default.
return colors
def get_class(classes_path_raw):
return label_map_util.create_category_index_from_labelmap(classes_path_raw, use_display_name=True)
def draw( image,class_names,colors, draw_score_threshold, out_boxes, out_scores, out_classes):
font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
thickness = (image.size[0] + image.size[1]) // 300
for i, c in reversed(list(enumerate(out_classes))):
predicted_class = class_names[c]
box = out_boxes[i]
score = out_scores[i]
label = '{} {:.2f}'.format(predicted_class, score)
draw = ImageDraw.Draw(image)
label_size = draw.textsize(label, font)
top, left, bottom, right = box
top = max(0, np.floor(top + 0.5).astype('int32'))
left = max(0, np.floor(left + 0.5).astype('int32'))
bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
print(label, (left, top), (right, bottom))
if top - label_size[1] >= 0:
text_origin = | np.array([left, top - label_size[1]]) | numpy.array |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""test cases for Beta distribution"""
import numpy as np
from scipy import stats
from scipy import special
import mindspore.context as context
import mindspore.nn as nn
import mindspore.nn.probability.distribution as msd
from mindspore import Tensor
from mindspore import dtype
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
class Prob(nn.Cell):
"""
Test class: probability of Beta distribution.
"""
def __init__(self):
super(Prob, self).__init__()
self.b = msd.Beta(np.array([3.0]), np.array([1.0]), dtype=dtype.float32)
def construct(self, x_):
return self.b.prob(x_)
def test_pdf():
"""
Test pdf.
"""
beta_benchmark = stats.beta(np.array([3.0]), np.array([1.0]))
expect_pdf = beta_benchmark.pdf([0.25, 0.75]).astype(np.float32)
pdf = Prob()
output = pdf(Tensor([0.25, 0.75], dtype=dtype.float32))
tol = 1e-6
assert (np.abs(output.asnumpy() - expect_pdf) < tol).all()
class LogProb(nn.Cell):
"""
Test class: log probability of Beta distribution.
"""
def __init__(self):
super(LogProb, self).__init__()
self.b = msd.Beta(np.array([3.0]), np.array([1.0]), dtype=dtype.float32)
def construct(self, x_):
return self.b.log_prob(x_)
def test_log_likelihood():
"""
Test log_pdf.
"""
beta_benchmark = stats.beta(np.array([3.0]), np.array([1.0]))
expect_logpdf = beta_benchmark.logpdf([0.25, 0.75]).astype(np.float32)
logprob = LogProb()
output = logprob(Tensor([0.25, 0.75], dtype=dtype.float32))
tol = 1e-6
assert (np.abs(output.asnumpy() - expect_logpdf) < tol).all()
class KL(nn.Cell):
"""
Test class: kl_loss of Beta distribution.
"""
def __init__(self):
super(KL, self).__init__()
self.b = msd.Beta(np.array([3.0]), np.array([4.0]), dtype=dtype.float32)
def construct(self, x_, y_):
return self.b.kl_loss('Beta', x_, y_)
def test_kl_loss():
"""
Test kl_loss.
"""
concentration1_a = np.array([3.0]).astype(np.float32)
concentration0_a = np.array([4.0]).astype(np.float32)
concentration1_b = np.array([1.0]).astype(np.float32)
concentration0_b = np.array([1.0]).astype(np.float32)
total_concentration_a = concentration1_a + concentration0_a
total_concentration_b = concentration1_b + concentration0_b
log_normalization_a = np.log(special.beta(concentration1_a, concentration0_a))
log_normalization_b = np.log(special.beta(concentration1_b, concentration0_b))
expect_kl_loss = (log_normalization_b - log_normalization_a) \
- (special.digamma(concentration1_a) * (concentration1_b - concentration1_a)) \
- (special.digamma(concentration0_a) * (concentration0_b - concentration0_a)) \
+ (special.digamma(total_concentration_a) * (total_concentration_b - total_concentration_a))
kl_loss = KL()
concentration1 = Tensor(concentration1_b, dtype=dtype.float32)
concentration0 = Tensor(concentration0_b, dtype=dtype.float32)
output = kl_loss(concentration1, concentration0)
tol = 1e-6
assert (np.abs(output.asnumpy() - expect_kl_loss) < tol).all()
class Basics(nn.Cell):
"""
Test class: mean/sd/mode of Beta distribution.
"""
def __init__(self):
super(Basics, self).__init__()
self.b = msd.Beta(np.array([3.0]), np.array([3.0]), dtype=dtype.float32)
def construct(self):
return self.b.mean(), self.b.sd(), self.b.mode()
def test_basics():
"""
Test mean/standard deviation/mode.
"""
basics = Basics()
mean, sd, mode = basics()
beta_benchmark = stats.beta(np.array([3.0]), np.array([3.0]))
expect_mean = beta_benchmark.mean().astype(np.float32)
expect_sd = beta_benchmark.std().astype(np.float32)
expect_mode = [0.5]
tol = 1e-6
assert (np.abs(mean.asnumpy() - expect_mean) < tol).all()
assert (np.abs(mode.asnumpy() - expect_mode) < tol).all()
assert (np.abs(sd.asnumpy() - expect_sd) < tol).all()
class Sampling(nn.Cell):
"""
Test class: sample of Beta distribution.
"""
def __init__(self, shape, seed=0):
super(Sampling, self).__init__()
self.b = msd.Beta(np.array([3.0]), np.array([1.0]), seed=seed, dtype=dtype.float32)
self.shape = shape
def construct(self, concentration1=None, concentration0=None):
return self.b.sample(self.shape, concentration1, concentration0)
def test_sample():
"""
Test sample.
"""
shape = (2, 3)
seed = 10
concentration1 = Tensor([2.0], dtype=dtype.float32)
concentration0 = Tensor([2.0, 2.0, 2.0], dtype=dtype.float32)
sample = Sampling(shape, seed=seed)
output = sample(concentration1, concentration0)
assert output.shape == (2, 3, 3)
class EntropyH(nn.Cell):
"""
Test class: entropy of Beta distribution.
"""
def __init__(self):
super(EntropyH, self).__init__()
self.b = msd.Beta(np.array([3.0]), np.array([1.0]), dtype=dtype.float32)
def construct(self):
return self.b.entropy()
def test_entropy():
"""
Test entropy.
"""
beta_benchmark = stats.beta(np.array([3.0]), np.array([1.0]))
expect_entropy = beta_benchmark.entropy().astype(np.float32)
entropy = EntropyH()
output = entropy()
tol = 1e-6
assert (np.abs(output.asnumpy() - expect_entropy) < tol).all()
class CrossEntropy(nn.Cell):
"""
Test class: cross entropy between Beta distributions.
"""
def __init__(self):
super(CrossEntropy, self).__init__()
self.b = msd.Beta( | np.array([3.0]) | numpy.array |
from numpy.core.fromnumeric import size
import pandas as pd
import numpy as np
import torch
#file = "conceptnet-5.7.0-rel.csv"
def conceptnet_to_dict(csv_path):
data = pd.read_csv(csv_path, delimiter=',')
data['start'] = data['start'].apply(lambda str: str.split("en/")[1].split("/")[0])
data['end'] = data['end'].apply(lambda str: str.split("en/")[1].split("/")[0])
data['relation'] = data['relation'].apply(lambda str: str.split('/r/')[-1])
data['start'] = data['start'].apply(lambda str: str.lower())
data['end'] = data['end'].apply(lambda str: str.lower())
data['relation'] = data['relation'].apply(lambda str: str.lower())
net_dict = {}
for (a,b,c) in zip(data['start'], data['end'], data['relation']):
if a not in net_dict:
#net_dict[(a, b)] = [set([i]), set([c])]
net_dict[a] = {}
net_dict[a][b] = set([c])
elif b not in net_dict[a]:
#net_dict[(a, b)][0].add(i)
#net_dict[(a, b)][1].add(c)
net_dict[a][b] = set([c])
else:
net_dict[a][b].add(c)
for x in net_dict:
for i, j in net_dict[x].items():
net_dict[x][i] = len(j)
return net_dict
def cal_path_reliability(net_dict, cached_dict, key, init_resource = 1, step = 2, allow_loop = True): #BFS algorithm
#print("in cal",net_dict)
flag_set = set() #bfs'flags
if not allow_loop: flag_set.add(key)
if key not in cached_dict: cached_dict[key] = {}
cached_dict[key][key] = init_resource
query = set([key])
bfs_next = set()
while step:
for q in query:
distr = 0
next_step = set()
if q not in net_dict: continue
for word, paths in net_dict[q].items():
if not allow_loop and word in flag_set : continue
else:
distr += paths
next_step.add(word)
resource = cached_dict[key][q]
distrib_res = resource / distr if distr else 0
for word in next_step:
if word not in cached_dict[key]:
cached_dict[key][word] = distrib_res * net_dict[q][word]
else: cached_dict[key][word] += distrib_res * net_dict[q][word]
#print("sss: ",q,word,next_step, cached_dict)
bfs_next = bfs_next.union(next_step)
#print("/n in bfs, step", 2-step, query, bfs_next, flag_set, cached_dict)
if not allow_loop: flag_set = flag_set.union(bfs_next)
query.clear()
query = query.union(bfs_next)
bfs_next.clear()
step -= 1
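# A toy sketch of the data layout and the BFS resource spreading above. The graph is
# invented: net_dict[start][end] holds the number of distinct relations between two
# terms, which is exactly the shape conceptnet_to_dict returns.
def _toy_path_reliability_demo():
    net_dict = {'dog': {'animal': 2, 'bark': 1}, 'animal': {'dog': 1}}
    cached = {}
    cal_path_reliability(net_dict, cached, 'dog', init_resource=1, step=2)
    # cached['dog'] now maps each reachable term to the share of resource it received,
    # e.g. 'animal' gets 2/3 and 'bark' gets 1/3 after the first step.
    return cached['dog']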
def point2point_reliability(net_dict, cached_dict, query_text, key_text, query_map, key_map, qcon, kcon, max_sent_len, steps=2, allow_loop=True,):
# text (B, text)
max_len = 0
kg_score = np.zeros((len(query_map), len(key_map)))
i,j = 0,0
for qword in query_text:
j = 0
#print(cached_dict)
for kword in key_text:
#print(qword, kword)
if kword in net_dict:
if qword not in cached_dict:
cal_path_reliability(net_dict, cached_dict, qword, step=steps, allow_loop=allow_loop)
#input()
flag = key_map[j]
while(j < len(key_map) and flag == key_map[j]):
if kword in cached_dict[qword]: kg_score[i][j] += cached_dict[qword][kword]
j += 1
#print(kg_score, kword, cached_dict)
#print(i, query_map[i])
flag = query_map[i]
while(i < len(query_map) and flag == query_map[i]):
try:
kg_score[i] = kg_score[flag]
except(IndexError):
print(i, flag, kg_score.shape)
i += 1
kg_score1 = np.concatenate((np.zeros((kg_score.shape[0], 1)), kg_score), 1) if len(key_map) < max_sent_len else kg_score
kg_score2 = np.concatenate((np.zeros((1, kg_score1.shape[1])), kg_score1), 0) if len(query_map) < max_sent_len else kg_score1
return kg_score2
def word_segment_map(sent_list):
ret_sent_list = []
word_map_list = []
for i in range(len(sent_list)):
ret_sent_list.append([])
word_map_list.append([])
i = 0
for sent in sent_list:
cur_word = ""
cur_punctuation = ""
for word in sent:
word = word.lower()
#import pdb; pdb.set_trace()
if word[0] == '▁':  # sentencepiece word-start marker
if cur_punctuation != "" and cur_punctuation != cur_word:
ret_sent_list[i][-1] = ret_sent_list[i][-1][:-1]
ret_sent_list[i].append(cur_punctuation)
word_map_list[i][-1] += 1
cur_word = word[1:]
cur_punctuation = ""
word_map_list[i].append(word_map_list[i][-1] + 1 if len(word_map_list[i]) else 0)
ret_sent_list[i].append(cur_word)
elif word.isalnum():
word_map_list[i].append(word_map_list[i][-1] if len(word_map_list[i]) else 0)
ret_sent_list[i][-1] += word
cur_word += word
cur_punctuation = ""
else:
#import pdb;pdb.set_trace()
if cur_punctuation == "":
#print("now1 ", word)
cur_punctuation += word
cur_word += word
word_map_list[i].append(word_map_list[i][-1] if len(word_map_list[i]) else 0)
ret_sent_list[i][-1] += word
elif cur_punctuation == cur_word:
cur_punctuation += word
cur_word += word
ret_sent_list[i].append(word)
word_map_list[i].append(word_map_list[i][-1] + 1 if len(word_map_list[i]) else 0)
else:
#print("now2 ", word, cur_punctuation, word)
ret_sent_list[i][-1] = ret_sent_list[i][-1][:-1]
ret_sent_list[i].append(cur_punctuation)
ret_sent_list[i].append(word)
word_map_list[i][-1] += 1
word_map_list[i].append(word_map_list[i][-1] + 1 if len(word_map_list[i]) else 0)
cur_word = word
cur_punctuation = word
#print("test\n", cur_punctuation, cur_word)
if cur_punctuation != "" and cur_punctuation != cur_word:
ret_sent_list[i][-1] = ret_sent_list[i][-1][:-1]
ret_sent_list[i].append(cur_punctuation)
word_map_list[i][-1] += 1
i += 1
return ret_sent_list, word_map_list
def cal_reliability_tensor(word_map_list, ret_sent_list, content_lengths, net_dict, cached_dict, mem_len, max_sent_len, content):
#print("word len", len(word_map_list))
#import pdb; pdb.set_trace()
reliability_tensor = [] # list, (num of u, Bsz, qlen, klen)
for i in range(len(word_map_list)):
max_len = max(l for l in content_lengths[i])
#print(content_lengths[i], len(word_map_list[i][0]), "sdad")
tensor_for_timestep = []
for bnum in range(len(word_map_list[i])):
mat_for_bnum = | np.zeros((content_lengths[i][bnum], 0)) | numpy.zeros |
import numpy as np
import pytest
import time
from spn.algorithms.Inference import log_likelihood
from spn.structure.Base import Product, Sum
from spn.structure.leaves.parametric.Parametric import Gaussian
from spnc.cpu import CPUCompiler
@pytest.mark.skipif(not CPUCompiler.isVectorizationSupported(), reason="CPU vectorization not supported")
def test_vector_slp_tree():
g0 = Gaussian(mean=0.11, stdev=1, scope=0)
g1 = Gaussian(mean=0.12, stdev=0.75, scope=1)
g2 = Gaussian(mean=0.13, stdev=0.5, scope=2)
g3 = Gaussian(mean=0.14, stdev=0.25, scope=3)
g4 = Gaussian(mean=0.15, stdev=1, scope=4)
g5 = Gaussian(mean=0.16, stdev=0.25, scope=5)
g6 = Gaussian(mean=0.17, stdev=0.5, scope=6)
g7 = Gaussian(mean=0.18, stdev=0.75, scope=7)
g8 = Gaussian(mean=0.19, stdev=1, scope=8)
p0 = Product(children=[g0, g1, g2, g4])
p1 = Product(children=[g3, g4, g4, g5])
p2 = Product(children=[g6, g4, g7, g8])
p3 = Product(children=[g8, g6, g4, g2])
s0 = Sum(children=[g0, g1, g2, p0], weights=[0.25, 0.25, 0.25, 0.25])
s1 = Sum(children=[g3, g4, g5, p1], weights=[0.25, 0.25, 0.25, 0.25])
s2 = Sum(children=[g6, g7, g8, p2], weights=[0.25, 0.25, 0.25, 0.25])
s3 = Sum(children=[g0, g4, g8, p3], weights=[0.25, 0.25, 0.25, 0.25])
spn = Product(children=[s0, s1, s2, s3])
# Randomly sample input values from Gaussian (normal) distributions.
num_samples = 100
inputs = np.column_stack((np.random.normal(loc=0.5, scale=1, size=num_samples),
np.random.normal(loc=0.125, scale=0.25, size=num_samples),
| np.random.normal(loc=0.345, scale=0.24, size=num_samples) | numpy.random.normal |
import numpy as np
from sklearn import datasets
from matplotlib import pyplot as plt
from utils import PCA
from gaussian_mixture_model import GMM
def plot_results(X, y, title = None):
pca = PCA(num_components = 2)
X_transformed = pca.transform(X)
x_coords = X_transformed[:, 0]
y_coords = X_transformed[:, 1]
y = np.array(y).astype(int)
classes = np.unique(y)
cmap = plt.get_cmap('viridis')
colors = [cmap(val) for val in np.linspace(0, 1, len(classes))]
for idx, cls in enumerate(classes):
x_coord = x_coords[y == cls]
y_coord = y_coords[y == cls]
color = colors[idx]
plt.scatter(x_coord, y_coord, color = color)
plt.xlabel('Component I')
plt.ylabel('Component II')
if title is not None:
plt.title(title)
plt.show()
def iris_classification():
print('\nIris classification using GMM\n')
print('Initiating Data Load...')
iris = datasets.load_iris()
X, y = iris.data, iris.target
# X, y = datasets.make_blobs()
size = len(X)
indices = list(range(size))
np.random.shuffle(indices)
X, y = np.array([X[idx] for idx in indices]), | np.array([y[idx] for idx in indices]) | numpy.array |