import numpy as np
import scipy.stats as ss
def dnorm(x, mu, sigma):
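    # add a tiny diagonal jitter so the covariance stays positive definite / invertible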
sigma += np.eye(sigma.shape[0]) * 1e-8
return ss.multivariate_normal.logpdf(x, mu, sigma)
class GMM_EM:
"""
GMM by EM.
Methods:
fit(data, max_iter, threshold): Fit the model to data.
predict(x): Predict cluster labels for x.
"""
def __init__(self, n_clusters):
"""
        Constructor.
Args:
n_clusters(int): number of clusters.
"""
self.n_clusters = n_clusters
self.pi = None
self.mus = [None] * self.n_clusters
self.sigmas = [None] * self.n_clusters
def fit(self, data, max_iter=200, threshold=1e-8):
"""
Fit the model to data.
Args:
data: Array-like, shape (n_samples, n_dim)
max_iter: maximum number of EM steps.
            threshold: convergence threshold to stop iterating.
"""
assert data.ndim == 2
n_data = data.shape[0]
# Initialize
z = self._initialization(data)
self.pi = np.array([np.mean(z == cluster) for cluster in range(self.n_clusters)])
self.mus = [np.mean(data[z == cluster, :], axis=0) for cluster in range(self.n_clusters)]
self.sigmas = [np.cov(data[z == cluster, :].T) for cluster in range(self.n_clusters)]
old_ll = 0
for iter in range(max_iter):
# E-step
log_p = np.array([dnorm(data, mu=self.mus[cluster], sigma=self.sigmas[cluster]) + np.log(self.pi[cluster])
for cluster in range(self.n_clusters)])
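            # log-sum-exp trick: subtract the per-sample max before exponentiating so the
            # responsibilities normalize without underflow; sum_p is each sample's log of
            # the total mixture density (its log-likelihood contribution)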
max_p = np.max(log_p, axis=0)
sum_p = np.log(np.sum(np.exp(log_p - max_p), axis=0)) + max_p
log_p -= sum_p
p = np.exp(log_p)
# M-step
self.pi = np.sum(p, axis=1) / n_data
for cluster in range(self.n_clusters):
effective_size = np.sum(p, axis=1)[cluster]
self.mus[cluster] = np.sum(p[cluster].reshape(-1, 1) * data, axis=0) / effective_size
self.sigmas[cluster] = ((data - self.mus[cluster]).T * p[cluster]) @ (data - self.mus[cluster]) / effective_size
# Calculate (negative) log_likelihood
new_ll = -np.sum(sum_p)
if abs(new_ll-old_ll) <= threshold:
break
else:
old_ll = new_ll
return self
def predict(self, x):
"""
Predict cluster labels for x.
Args:
x: Array-like, shape (n_samples, n_dim)
Return:
Array-like, shape (n_samples, )
"""
log_prob = [dnorm(x, self.mus[cluster], self.sigmas[cluster]) + np.log(self.pi[cluster])
for cluster in range(self.n_clusters)]
log_prob = np.vstack(log_prob)
z = np.argmax(log_prob, axis=0)
return z
def _initialization(self, data, max_iter=50):
"""
Initialization by K-Means.
"""
means = data[np.random.choice(data.shape[0], self.n_clusters, replace=False)] # pick random samples as center
z = np.zeros(data.shape[0])
for iter in range(max_iter):
dist = [np.sum((data - means[cluster]) ** 2, axis=1) for cluster in range(self.n_clusters)]
dist = np.vstack(dist)
            z = np.argmin(dist, axis=0)
            # update each cluster center to the mean of its currently assigned points
            means = np.array([np.mean(data[z == cluster, :], axis=0) for cluster in range(self.n_clusters)])
        return z
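# --- Usage sketch (not from the original snippet; the toy data and cluster count below
# --- are illustrative assumptions) showing how GMM_EM would typically be driven.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    # two well-separated 2-D Gaussian blobs
    blob_a = rng.normal(loc=[0.0, 0.0], scale=0.5, size=(100, 2))
    blob_b = rng.normal(loc=[3.0, 3.0], scale=0.5, size=(100, 2))
    toy_data = np.vstack([blob_a, blob_b])
    model = GMM_EM(n_clusters=2).fit(toy_data)
    labels = model.predict(toy_data)
    print("mixing weights:", model.pi)
    print("cluster sizes:", np.bincount(labels))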
"""
Copyright 2013 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cvxpy.expressions.expression import Expression
from cvxpy.constraints.constraint import Constraint
from cvxpy.expressions.variable import Variable
import cvxpy.interface.matrix_utilities as intf
import numpy
import scipy.sparse as sp
import unittest
import sys
PY35 = sys.version_info >= (3, 5)
class TestMatrices(unittest.TestCase):
""" Unit tests for testing different forms of matrices as constants. """
def assertExpression(self, expr, shape) -> None:
"""Asserts that expr is an Expression with dimension shape.
"""
assert isinstance(expr, Expression) or isinstance(expr, Constraint)
self.assertEqual(expr.shape, shape)
def setUp(self) -> None:
self.a = Variable(name='a')
self.b = Variable(name='b')
self.c = Variable(name='c')
self.x = Variable(2, name='x')
self.y = Variable(3, name='y')
self.z = Variable(2, name='z')
self.A = Variable((2, 2), name='A')
self.B = Variable((2, 2), name='B')
self.C = Variable((3, 2), name='C')
# Test numpy arrays
def test_numpy_arrays(self) -> None:
# Vector
v = numpy.arange(2)
self.assertExpression(self.x + v, (2,))
self.assertExpression(v + self.x, (2,))
self.assertExpression(self.x - v, (2,))
self.assertExpression(v - self.x, (2,))
self.assertExpression(self.x <= v, (2,))
self.assertExpression(v <= self.x, (2,))
self.assertExpression(self.x == v, (2,))
self.assertExpression(v == self.x, (2,))
# Matrix
A = numpy.arange(8).reshape((4, 2))
self.assertExpression(A @ self.x, (4,))
# PSD inequalities.
        A = numpy.ones((2, 2))
import numpy as np
import sys
sys.path.append("../behavioral_analysis")
import segmentation
import pytest
import skimage.filters
from hypothesis import given
import hypothesis.strategies
import hypothesis.extra.numpy
# test functions for simple segmentation based tracking code
def test_im_shape():
im = np.array([[[1, 2], [1, 2]], [[1, 2], [1, 2]]])
with pytest.raises(RuntimeError) as excinfo:
segmentation._check_image_input(im)
excinfo.match("Need to provide an array with shape \(n, m\). Provided array has shape \(2, 2, 2\)")
def test_im_data_type_list():
im = [[1, 2, 3], [1, 2, 3]]
with pytest.raises(RuntimeError) as excinfo:
segmentation._check_image_input(im)
excinfo.match("Need to provide a numpy array, image has type <class 'list'>")
def test_im_data_type_string():
im = '[[1, 2, 3], [1, 2, 3]]'
with pytest.raises(RuntimeError) as excinfo:
segmentation._check_image_input(im)
excinfo.match("Need to provide a numpy array, image has type <class 'str'>")
def test_im_shape_segment():
im = np.array([[[1, 2], [1, 2]], [[1, 2], [1, 2]]])
with pytest.raises(RuntimeError) as excinfo:
segmentation.segment(im)
excinfo.match("Need to provide an array with shape \(n, m\). Provided array has shape \(2, 2, 2\)")
def test_im_data_type_list_segment():
im = [[1, 2, 3], [1, 2, 3]]
with pytest.raises(RuntimeError) as excinfo:
segmentation.segment(im)
excinfo.match("Need to provide a numpy array, image has type <class 'list'>")
def test_im_data_type_string_segment():
im = '[[1, 2, 3], [1, 2, 3]]'
with pytest.raises(RuntimeError) as excinfo:
segmentation.segment(im)
excinfo.match("Need to provide a numpy array, image has type <class 'str'>")
def test_provided_function_callable():
im = np.array([[1, 2, 3], [1, 2, 3]])
with pytest.raises(RuntimeError) as excinfo:
segmentation.segment(im, thresh_func='Hello, world.')
excinfo.match("The provided function is not callable")
def test_provided_function_callable_mat():
im = np.array([[1, 2, 3], [1, 2, 3]])
args = (3,)
assert segmentation._check_function_input(im, skimage.filters.threshold_local, args) == True
def test_provided_function_returns_correct_shape():
im = np.array([[1, 2, 3], [1, 2, 3]])
def bad_func(im):
return(np.array([[1, 2], [1, 2]]))
with pytest.raises(RuntimeError) as excinfo:
segmentation.segment(im, thresh_func=bad_func)
excinfo.match("Array output of the function must have same shape as the image \
the output array has shape \(2, 2\), image has shape \(2, 3\)")
def test_provided_function_returns_correct_types():
im = np.array([[1, 2, 3], [1, 2, 3]])
def bad_func(im):
return('Hello, world!')
with pytest.raises(RuntimeError) as excinfo:
segmentation.segment(im, thresh_func=bad_func)
excinfo.match("The provided function must output a numeric or array \
provided function returns type <class 'str'>")
def test_check_numeric_function():
assert segmentation._check_numeric_types(np.int32(1)) == True
def test_bg_subtract_im_type():
im1 = np.array([[1, 2, 3], [1, 2, 3]])
im2 = np.array([[[1, 2], [1, 2]], [[1, 2], [1, 2]]])
with pytest.raises(RuntimeError) as excinfo:
segmentation.bg_subtract(im1, im2)
excinfo.match("Need to provide an array with shape \(n, m\). Provided array has shape \(2, 2, 2\)")
def test_bg_subtract_im_dims():
im1 = np.array([[1, 2, 3], [1, 2, 3]])
im2 = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]])
with pytest.raises(RuntimeError) as excinfo:
segmentation.bg_subtract(im1, im2)
excinfo.match("The provided images have different dimension \
im1: \(2, 3\), im2: \(3, 3\)")
def test_im_normalization_range():
im = np.array([[1, 2, 3], [1, 2, 3]])
new_im = segmentation.normalize_convert_im(im)
assert new_im.max() == 1
assert new_im.min() == 0
@given(hypothesis.extra.numpy.arrays(dtype=int, shape=(50,50)))
def test_im_normalization_range_int(im):
if np.isclose(im.max(), 0) and np.isclose(im.min(), 0):
with pytest.raises(RuntimeError) as excinfo:
segmentation.normalize_convert_im(im)
excinfo.match("Inputed image is near to zero for all values")
elif np.isclose((im.max() - im.min()), 0):
with pytest.raises(RuntimeError) as excinfo:
segmentation.normalize_convert_im(im)
excinfo.match("Inputed image has nearly the same value for all pixels. Check input")
else:
new_im = segmentation.normalize_convert_im(im)
assert new_im.max() == 1
assert new_im.min() == 0
@given(hypothesis.extra.numpy.arrays(dtype=float, shape=(50,50)))
def test_im_normalization_range_float(im):
if np.isclose(im.max(), 0) and np.isclose(im.min(), 0):
with pytest.raises(RuntimeError) as excinfo:
segmentation.normalize_convert_im(im)
excinfo.match("Inputed image is near to zero for all values")
elif np.any(np.isnan(im)):
with pytest.raises(RuntimeError) as excinfo:
segmentation.normalize_convert_im(im)
excinfo.match("Data contains a nan, decide how to handle missing data")
elif np.any(np.isinf(im)):
with pytest.raises(RuntimeError) as excinfo:
segmentation.normalize_convert_im(im)
excinfo.match("Data contains an np.inf, decide how to handle infinite values")
elif np.isclose((im.max() - im.min()), 0):
with pytest.raises(RuntimeError) as excinfo:
segmentation.normalize_convert_im(im)
excinfo.match("Inputed image has nearly the same value for all pixels. Check input")
else:
new_im = segmentation.normalize_convert_im(im)
assert new_im.max() == 1
assert new_im.min() == 0
@given(hypothesis.extra.numpy.arrays(dtype=np.float128, shape=(50,50)))
def test_im_normalization_range_float128(im):
with pytest.raises(RuntimeError) as excinfo:
segmentation.normalize_convert_im(im)
excinfo.match("Provided image has unsuported type: float128")
def test_im_near_zero():
im = np.array([[0, 0, 0], [0, 0, 0]])
with pytest.raises(RuntimeError) as excinfo:
segmentation.segment(im)
excinfo.match("Inputed image is near to zero for all values")
def test_im_has_nan():
im = np.array([[np.nan, 0, 0], [0, 0, 0]])
with pytest.raises(RuntimeError) as excinfo:
segmentation.segment(im)
excinfo.match("Data contains a nan, decide how to handle missing data")
def test_im_has_inf():
im = np.array([[np.inf, 0, 0], [0, 0, 0]])
with pytest.raises(RuntimeError) as excinfo:
segmentation.segment(im)
excinfo.match("Data contains an np.inf, decide how to handle infinite values")
def test_int_types():
assert segmentation._check_int_types(1.0) == False
assert segmentation._check_int_types(1) == True
assert segmentation._check_int_types(np.int32(1)) == True
    assert segmentation._check_int_types(np.uint64(1)) == True
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from src.env.fp_mining import FrequentPatternMining
from src.env.db_loader import load_db
from src.env.fp_mining import Pattern
@pytest.fixture
def seed():
np.random.seed(0)
@pytest.fixture
def env(seed):
env = FrequentPatternMining(
delta=0.4,
data_path="./data/contextPasquier99.txt",
used=1.0,
head=0.0,
shuffle_db=False,
plus_reward=200,
minus_reward=-1,
max_steps=100,
cache_limit=10000,
)
return env
def test_load_db_1():
"""
Properly load the dataset
"""
transaction, bit_map, b2i_dict, i2b_dict = load_db(
"./data/contextPasquier99.txt", 1.0, 0.0, False,
)
t = np.array([[1, 3, 4], [2, 3, 5], [1, 2, 3, 5], [2, 5], [1, 2, 3, 5]])
bmap = np.array(
[
[1, 0, 1, 1, 0],
[0, 1, 1, 0, 1],
[1, 1, 1, 0, 1],
[0, 1, 0, 0, 1],
[1, 1, 1, 0, 1],
]
)
b2i = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5}
i2b = {1: 0, 2: 1, 3: 2, 4: 3, 5: 4}
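    # bit_map encodes each transaction as a row; column j corresponds to item
    # b2i_dict[j], so e.g. transaction [1, 3, 4] becomes the row [1, 0, 1, 1, 0]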
assert (
(transaction == t).all()
and (bit_map == bmap).all()
and (b2i_dict == b2i)
and (i2b_dict == i2b)
)
def test_load_db_2():
"""
Properly load the dataset(60%)
"""
transaction, bit_map, b2i_dict, i2b_dict = load_db(
"./data/contextPasquier99.txt", 0.6, 0.0, False,
)
t = np.array([[1, 3, 4], [2, 3, 5], [1, 2, 3, 5]])
bmap = np.array([[1, 0, 1, 1, 0], [0, 1, 1, 0, 1], [1, 1, 1, 0, 1]])
b2i = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5}
i2b = {1: 0, 2: 1, 3: 2, 4: 3, 5: 4}
assert (
(transaction == t).all()
and (bit_map == bmap).all()
and (b2i_dict == b2i)
and (i2b_dict == i2b)
)
def test_load_db_3():
"""
Properly load the dataset(the other 40%)
"""
transaction, bit_map, b2i_dict, i2b_dict = load_db(
"./data/contextPasquier99.txt", 0.4, 0.6, False,
)
t = np.array([[2, 5], [1, 2, 3, 5]])
bmap = np.array([[0, 1, 0, 1], [1, 1, 1, 1]])
b2i = {0: 1, 1: 2, 2: 3, 3: 5}
i2b = {1: 0, 2: 1, 3: 2, 5: 3}
assert (
(transaction == t).all()
and (bit_map == bmap).all()
and (b2i_dict == b2i)
and (i2b_dict == i2b)
)
def test_env_arg(env):
assert (env.delta, env.plus_reward, env.minus_reward, env.max_steps) == (
0.4,
200,
-1,
100,
)
def test_env_init(env):
db_total_support = 5
min_sup = 2
each_htwfp_support = np.array([3, 4, 4, 4])
assert (
(env.db_total_support == db_total_support)
and (env.min_sup == min_sup)
and np.allclose(env.each_htwfp_support, each_htwfp_support)
)
def test_setup_db(env):
htwsp_1 = set([1, 2, 3, 5])
b2i_dict = {0: 1, 1: 2, 2: 3, 3: 5}
i2b_dict = {1: 0, 2: 1, 3: 2, 5: 3}
bit_map = np.array(
[[1, 0, 1, 0], [0, 1, 1, 1], [1, 1, 1, 1], [0, 1, 0, 1], [1, 1, 1, 1]]
)
prob_bit_stand = np.array([0.6, 0.8, 0.8, 0.8])
assert (
(env.htwsp_1 == htwsp_1)
and (env.b2i_dict == b2i_dict)
and (env.i2b_dict == i2b_dict)
and np.allclose(env.bit_map, bit_map)
and np.allclose(env.prob_bit_stand, prob_bit_stand)
)
def test_step_1(env):
# empty itemsets = {}
bv = np.array([0, 0, 0, 0])
env.bit_vector = bv
env.step(0)
assert np.allclose(env.bit_vector, np.array([1, 0, 0, 0]))
def test_step_2(env):
# empty itemsets = {1}
bv = np.array([1, 0, 0, 0])
action = 0
env.bit_vector = bv
env.step(action)
assert np.allclose(env.bit_vector, np.array([0, 0, 0, 0]))
def test_step_3(env):
# empty itemsets = {}
bv = np.array([0, 0, 0, 0])
action = 4
env.bit_vector = bv
env.step(action)
assert not (
np.allclose(env.bit_vector, np.array([1, 0, 0, 0]))
or np.allclose(env.bit_vector, np.array([0, 1, 0, 0]))
or np.allclose(env.bit_vector, np.array([0, 0, 1, 0]))
or np.allclose(env.bit_vector, np.array([0, 0, 0, 1]))
)
def test_get_next_state_support_1(env):
# FP itemsets = {1, 3} = sup(3)
bv = np.array([1, 0, 1, 0])
log_db = np.log1p(5)
next_state_support = np.array(
[
np.log1p(4) / log_db,
np.log1p(2) / log_db,
np.log1p(3) / log_db,
            np.log1p(2) / log_db,
        ]
    )
from mpi4py import MPI
import numpy as np
import common
def create_sendcounts(size, count=8192):
# Array describing how many elements to send to each process
sendcounts = []
sendcount = count
for i in range(size):
sendcount = sendcount // 2
sendcounts.append(sendcount)
sendcounts[-1] = count - (sum(sendcounts) - sendcounts[-1])
return sendcounts
def create_displs(sendcounts):
# Array describing the displacements where each segment begins
displs = []
head = 0
for count in sendcounts:
displs.append(head)
head += count
return displs
def create_sendbuf(rank, sendcounts, displs, count=8192):
if rank == 0:
sendbuf = np.arange(count, dtype=np.float32)
else:
        sendbuf = np.array([0], dtype=np.float32)
    return sendbuf
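# --- Usage sketch (assumption, not part of the original snippet): how the helpers above
# --- would typically feed Scatterv; the element count of 8192 is the same illustrative
# --- default used above.
def example_scatterv(count=8192):
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()
    sendcounts = create_sendcounts(size, count)
    displs = create_displs(sendcounts)
    sendbuf = create_sendbuf(rank, sendcounts, displs, count)
    # each rank receives its own variable-length slice of the root's array
    recvbuf = np.empty(sendcounts[rank], dtype=np.float32)
    comm.Scatterv([sendbuf, sendcounts, displs, MPI.FLOAT], recvbuf, root=0)
    return recvbuf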
import unittest
import numpy as np
import solution # your code with logistic regression and evaluation
# should be in solution.py
def data1():
X = np.array([[5.0, 3.6, 1.4, 0.2],
[5.4, 3.9, 1.7, 0.4],
[4.6, 3.4, 1.4, 0.3],
[5.0, 3.4, 1.5, 0.2],
[5.6, 2.9, 3.6, 1.3],
[6.7, 3.1, 4.4, 1.4],
[5.6, 3.0, 4.5, 1.5],
[5.8, 2.7, 4.1, 1.0]])
y = np.array([0, 0, 0, 0, 1, 1, 1, 1])
return X, y
def data2():
X, y = data1()
    return X[:6], y[:6]
class TestLogisticRegression(unittest.TestCase):
def test_h(self):
X, y = data1()
        self.assertAlmostEqual(solution.h(X[0], np.array([0, 0, 0, 0])), 0.5)
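        # A minimal sketch (assumption, not the actual solution.py) of the hypothesis
        # function these tests exercise: the sigmoid of the dot product, so an all-zero
        # weight vector gives h(x, 0) = sigmoid(0) = 0.5 for any x.
        #
        #   def h(x, theta):
        #       return 1.0 / (1.0 + np.exp(-np.dot(x, theta)))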
# data loader
from __future__ import print_function, division
import glob
import torch
from skimage import io, transform, color
import numpy as np
import random
import math
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from PIL import Image
import os
import cv2
#==========================dataset load==========================
class RescaleT(object):
def __init__(self,output_size):
assert isinstance(output_size,(int,tuple))
self.output_size = output_size
def __call__(self,sample):
imidx, image, label = sample['imidx'], sample['image'],sample['label']
h, w = image.shape[:2]
if isinstance(self.output_size,int):
if h > w:
new_h, new_w = self.output_size*h/w,self.output_size
else:
new_h, new_w = self.output_size,self.output_size*w/h
else:
new_h, new_w = self.output_size
new_h, new_w = int(new_h), int(new_w)
# #resize the image to new_h x new_w and convert image from range [0,255] to [0,1]
# img = transform.resize(image,(new_h,new_w),mode='constant')
# lbl = transform.resize(label,(new_h,new_w),mode='constant', order=0, preserve_range=True)
img = transform.resize(image,(self.output_size,self.output_size),mode='constant')
lbl = transform.resize(label,(self.output_size,self.output_size),mode='constant', order=0, preserve_range=True)
return {'imidx':imidx, 'image':img,'label':lbl}
class Rescale(object):
def __init__(self,output_size):
assert isinstance(output_size,(int,tuple))
self.output_size = output_size
def __call__(self,sample):
imidx, image, label = sample['imidx'], sample['image'],sample['label']
if random.random() >= 0.5:
image = image[::-1]
label = label[::-1]
h, w = image.shape[:2]
if isinstance(self.output_size,int):
if h > w:
new_h, new_w = self.output_size*h/w,self.output_size
else:
new_h, new_w = self.output_size,self.output_size*w/h
else:
new_h, new_w = self.output_size
new_h, new_w = int(new_h), int(new_w)
# #resize the image to new_h x new_w and convert image from range [0,255] to [0,1]
img = transform.resize(image,(new_h,new_w),mode='constant')
lbl = transform.resize(label,(new_h,new_w),mode='constant', order=0, preserve_range=True)
return {'imidx':imidx, 'image':img,'label':lbl}
class RandomCrop(object):
def __init__(self,output_size):
assert isinstance(output_size, (int, tuple))
if isinstance(output_size, int):
self.output_size = (output_size, output_size)
else:
assert len(output_size) == 2
self.output_size = output_size
def __call__(self,sample):
imidx, image, label = sample['imidx'], sample['image'], sample['label']
if random.random() >= 0.5:
image = image[::-1]
label = label[::-1]
h, w = image.shape[:2]
new_h, new_w = self.output_size
top = np.random.randint(0, h - new_h)
left = np.random.randint(0, w - new_w)
image = image[top: top + new_h, left: left + new_w]
label = label[top: top + new_h, left: left + new_w]
return {'imidx':imidx,'image':image, 'label':label}
class ToTensor(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, sample):
imidx, image, label = sample['imidx'], sample['image'], sample['label']
tmpImg = np.zeros((image.shape[0],image.shape[1],3))
tmpLbl = np.zeros(label.shape)
image = image/np.max(image)
if(np.max(label)<1e-6):
label = label
else:
label = label/np.max(label)
if image.shape[2]==1:
tmpImg[:,:,0] = (image[:,:,0]-0.485)/0.229
tmpImg[:,:,1] = (image[:,:,0]-0.485)/0.229
tmpImg[:,:,2] = (image[:,:,0]-0.485)/0.229
else:
tmpImg[:,:,0] = (image[:,:,0]-0.485)/0.229
tmpImg[:,:,1] = (image[:,:,1]-0.456)/0.224
tmpImg[:,:,2] = (image[:,:,2]-0.406)/0.225
tmpLbl[:,:,0] = label[:,:,0]
# change the r,g,b to b,r,g from [0,255] to [0,1]
#transforms.Normalize(mean = (0.485, 0.456, 0.406), std = (0.229, 0.224, 0.225))
tmpImg = tmpImg.transpose((2, 0, 1))
tmpLbl = label.transpose((2, 0, 1))
return {'imidx':torch.from_numpy(imidx), 'image': torch.from_numpy(tmpImg), 'label': torch.from_numpy(tmpLbl)}
class ToTensorLab(object):
"""Convert ndarrays in sample to Tensors."""
def __init__(self,flag=0):
self.flag = flag
def __call__(self, sample):
imidx, image, label =sample['imidx'], sample['image'], sample['label']
tmpLbl = np.zeros(label.shape)
        if(np.max(label)<1e-6):
            label = label
        else:
            label = label/np.max(label)
"""
Module of plotting functions.
Each function creates, and optionally saves, a plot of fields
from a ROMS history file.
INPUT: in_dict: a dict with information to pass to the plot, such as:
- fn: text string with the full path name of the history file to plot
- fn_out: text string with full path of output file name
- auto_vlims: a boolean governing how color limits are set
- testing: a boolean for testing (e.g. shorter, faster particle tracking)
OUTPUT: either a screen image or a graphics file
"""
import numpy as np
import xarray as xr
import pickle
from datetime import datetime, timedelta
import pandas as pd
from cmocean import cm
from lo_tools import Lfun, zfun, zrfun
from lo_tools import plotting_functions as pfun
import pinfo
from importlib import reload
reload(pfun)
reload(pinfo)
Ldir = Lfun.Lstart()
if '_mac' in Ldir['lo_env']: # mac version
pass
else: # remote linux version
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
def P_basic(in_dict):
# START
ds = xr.open_dataset(in_dict['fn'])
# find aspect ratio of the map
aa = pfun.get_aa(ds)
# AR is the aspect ratio of the map: Vertical/Horizontal
AR = (aa[3] - aa[2]) / (np.sin(np.pi*aa[2]/180)*(aa[1] - aa[0]))
fs = 14
hgt = 10
pfun.start_plot(fs=fs, figsize=(int(hgt*2.5/AR),int(hgt)))
fig = plt.figure()
# PLOT CODE
vn_list = ['salt', 'temp']
ii = 1
for vn in vn_list:
if in_dict['auto_vlims']:
pinfo.vlims_dict[vn] = ()
ax = fig.add_subplot(1, len(vn_list), ii)
cs = pfun.add_map_field(ax, ds, vn, pinfo.vlims_dict,
cmap=pinfo.cmap_dict[vn], fac=pinfo.fac_dict[vn], vlims_fac=pinfo.range_dict[vn])
fig.colorbar(cs)
pfun.add_coast(ax)
ax.axis(pfun.get_aa(ds))
pfun.dar(ax)
ax.set_title('Surface %s %s' % (pinfo.tstr_dict[vn],pinfo.units_dict[vn]), fontsize=1.2*fs)
ax.set_xlabel('Longitude')
if ii == 1:
ax.set_ylabel('Latitude')
pfun.add_info(ax, in_dict['fn'])
#pfun.add_windstress_flower(ax, ds)
pfun.add_bathy_contours(ax, ds, txt=True)
elif ii == 2:
ax.set_yticklabels([])
pfun.add_velocity_vectors(ax, ds, in_dict['fn'])
ii += 1
#fig.tight_layout()
# FINISH
ds.close()
pfun.end_plot()
if len(str(in_dict['fn_out'])) > 0:
plt.savefig(in_dict['fn_out'])
plt.close()
else:
plt.show()
def P_fancy(in_dict):
# START
ds = xr.open_dataset(in_dict['fn'])
# find aspect ratio of the map
aa = pfun.get_aa(ds)
# AR is the aspect ratio of the map: Vertical/Horizontal
AR = (aa[3] - aa[2]) / (np.sin(np.pi*aa[2]/180)*(aa[1] - aa[0]))
fs = 14
hgt = 10
pfun.start_plot(fs=fs, figsize=(int(hgt*2.5/AR),int(hgt)))
fig = plt.figure()
# PLOT CODE
vn_list = ['salt', 'temp']
ii = 1
for vn in vn_list:
if in_dict['auto_vlims']:
pinfo.vlims_dict[vn] = ()
if vn == 'salt':
cmap = 'jet'
vlims_fac = .5
elif vn == 'temp':
cmap = 'RdYlBu_r'
vlims_fac = 1
ax = fig.add_subplot(1, len(vn_list), ii)
cs = pfun.add_map_field(ax, ds, vn, pinfo.vlims_dict,
cmap=cmap, fac=pinfo.fac_dict[vn], vlims_fac=vlims_fac)
fig.colorbar(cs)
pfun.add_coast(ax)
ax.axis(pfun.get_aa(ds))
pfun.dar(ax)
ax.set_title('Surface %s %s' % (pinfo.tstr_dict[vn],pinfo.units_dict[vn]), fontsize=1.2*fs)
ax.set_xlabel('Longitude')
if ii == 1:
ax.set_ylabel('Latitude')
pfun.add_info(ax, in_dict['fn'])
#pfun.add_windstress_flower(ax, ds)
pfun.add_bathy_contours(ax, ds, txt=True)
elif ii == 2:
ax.set_yticklabels([])
pfun.add_velocity_vectors(ax, ds, in_dict['fn'])
ii += 1
#fig.tight_layout()
# FINISH
ds.close()
pfun.end_plot()
if len(str(in_dict['fn_out'])) > 0:
plt.savefig(in_dict['fn_out'])
plt.close()
else:
plt.show()
def P_dive_vort(in_dict):
# START
ds = xr.open_dataset(in_dict['fn'])
# find aspect ratio of the map
aa = pfun.get_aa(ds)
# AR is the aspect ratio of the map: Vertical/Horizontal
AR = (aa[3] - aa[2]) / (np.sin(np.pi*aa[2]/180)*(aa[1] - aa[0]))
fs = 14
hgt = 10
pfun.start_plot(fs=fs, figsize=(int(hgt*2.5/AR),int(hgt)))
fig = plt.figure()
# create fields
u = ds.u[0,-1,:,:].values
v = ds.v[0,-1,:,:].values
dx = 1/ds.pm.values
dy = 1/ds.pn.values
# dive is on the trimmed rho grid
dive = np.diff(u[1:-1,:], axis=1)/dx[1:-1,1:-1] + np.diff(v[:,1:-1],axis=0)/dy[1:-1,1:-1]
# vort is on the psi grid (plot with lon_rho, lat_rho)
vort = np.diff(v,axis=1)/dx[1:,1:] - np.diff(u,axis=0)/dy[1:,1:]
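    # centered first differences on the C-grid: divergence = du/dx + dv/dy (rho points),
    # vorticity = dv/dx - du/dy (psi points)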
# set color limits
vv = 2*np.nanstd(vort)
# PLOT CODE
if in_dict['auto_vlims']:
pinfo.vlims_dict['vort'] = (-vv, vv)
pinfo.vlims_dict['dive'] = (-vv, vv)
vmin = pinfo.vlims_dict['vort'][0]
vmax = pinfo.vlims_dict['vort'][1]
for ii in [1,2]:
ax = fig.add_subplot(1, 2, ii)
cmap = 'RdYlBu_r'
if ii == 1:
plon, plat = pfun.get_plon_plat(ds.lon_rho[1:-1,1:-1].values, ds.lat_rho[1:-1,1:-1].values)
cs = plt.pcolormesh(plon, plat, dive, cmap=cmap, vmin = vmin, vmax = vmax)
ax.set_title('Surface Divergence $[s^{-1}]$', fontsize=1.2*fs)
elif ii == 2:
cs = plt.pcolormesh(ds.lon_rho.values, ds.lat_rho.values, vort, cmap=cmap, vmin = vmin, vmax = vmax)
ax.set_title('Surface Vorticity $[s^{-1}]$', fontsize=1.2*fs)
fig.colorbar(cs)
pfun.add_coast(ax)
ax.axis(aa)
pfun.dar(ax)
ax.set_xlabel('Longitude')
if ii == 1:
ax.set_ylabel('Latitude')
pfun.add_info(ax, in_dict['fn'])
#pfun.add_windstress_flower(ax, ds)
pfun.add_bathy_contours(ax, ds, txt=True)
elif ii == 2:
pass
#pfun.add_velocity_vectors(ax, ds, in_dict['fn'])
ii += 1
#fig.tight_layout()
# FINISH
ds.close()
pfun.end_plot()
if len(str(in_dict['fn_out'])) > 0:
plt.savefig(in_dict['fn_out'])
plt.close()
else:
plt.show()
def P_dive_vort2(in_dict):
# same as dive_vort but focused on a specific region
# JdF:
aa = [-125, -122.3, 47.8, 48.8]
# START
ds = xr.open_dataset(in_dict['fn'])
# find aspect ratio of the map
# aa = pfun.get_aa(ds)
# AR is the aspect ratio of the map: Vertical/Horizontal
AR = (aa[3] - aa[2]) / (np.sin(np.pi*aa[2]/180)*(aa[1] - aa[0]))
fs = 14
hgt = 6
pfun.start_plot(fs=fs, figsize=(10,10))
fig = plt.figure()
# create fields
u = ds.u[0,-1,:,:].values
v = ds.v[0,-1,:,:].values
dx = 1/ds.pm.values
dy = 1/ds.pn.values
# dive is on the trimmed rho grid
dive = np.diff(u[1:-1,:], axis=1)/dx[1:-1,1:-1] + np.diff(v[:,1:-1],axis=0)/dy[1:-1,1:-1]
# vort is on the psi grid (plot with lon_rho, lat_rho)
vort = np.diff(v,axis=1)/dx[1:,1:] - np.diff(u,axis=0)/dy[1:,1:]
# set color limits
vv = 4*np.nanstd(vort)
# PLOT CODE
if in_dict['auto_vlims']:
pinfo.vlims_dict['vort'] = (-vv, vv)
pinfo.vlims_dict['dive'] = (-vv, vv)
vmin = pinfo.vlims_dict['vort'][0]
vmax = pinfo.vlims_dict['vort'][1]
for ii in [1,2]:
ax = fig.add_subplot(2, 1, ii)
cmap = 'RdYlBu_r'
if ii == 1:
plon, plat = pfun.get_plon_plat(ds.lon_rho[1:-1,1:-1].values, ds.lat_rho[1:-1,1:-1].values)
cs = plt.pcolormesh(plon, plat, dive, cmap=cmap, vmin = vmin, vmax = vmax)
ax.set_title('Surface Divergence $[s^{-1}]$', fontsize=1.2*fs)
elif ii == 2:
cs = plt.pcolormesh(ds.lon_rho.values, ds.lat_rho.values, vort, cmap=cmap, vmin = vmin, vmax = vmax)
ax.set_title('Surface Vorticity $[s^{-1}]$', fontsize=1.2*fs)
fig.colorbar(cs)
pfun.add_coast(ax)
ax.axis(aa)
pfun.dar(ax)
ax.set_ylabel('Latitude')
if ii == 1:
pfun.add_info(ax, in_dict['fn'])
#pfun.add_windstress_flower(ax, ds)
#pfun.add_bathy_contours(ax, ds, txt=True)
elif ii == 2:
ax.set_xlabel('Longitude')
#pfun.add_velocity_vectors(ax, ds, in_dict['fn'])
ii += 1
#fig.tight_layout()
# FINISH
ds.close()
pfun.end_plot()
if len(str(in_dict['fn_out'])) > 0:
plt.savefig(in_dict['fn_out'])
plt.close()
else:
plt.show()
def P_ri(in_dict):
"""
Simplified Richardson number
"""
# START
fs = 10
pfun.start_plot(fs=fs, figsize=(20,10))
fig = plt.figure()
ds = xr.open_dataset(in_dict['fn'])
# PLOT CODE
xrho = ds['lon_rho'][0,:].values
yrho = ds['lat_rho'][:,0].values
# define box
aa = [-123.25, -122.1, 47, 48.75]
ix0 = zfun.find_nearest_ind(xrho, aa[0])
ix1 = zfun.find_nearest_ind(xrho, aa[1])
iy0 = zfun.find_nearest_ind(yrho, aa[2])
iy1 = zfun.find_nearest_ind(yrho, aa[3])
h = ds.h[iy0:iy1, ix0:ix1].values
rho_bot = ds.rho[0, 0, iy0:iy1, ix0:ix1].values
rho_top = ds.rho[0, -1, iy0:iy1, ix0:ix1].values
drho = rho_bot - rho_top
u = ds.ubar[0, iy0:iy1, ix0-1:ix1].values
v = ds.vbar[0, iy0-1:iy1, ix0:ix1].values
u[np.isnan(u)] = 0
v[np.isnan(v)] = 0
uu = (u[:, 1:] + u[:, :-1])/2
vv = (v[1:, :] + v[:-1, :])/2
spd2 = uu**2 + vv**2
spd2[np.isnan(drho)] = np.nan
spd2[spd2 < .001] = .001 # avoid divide by zero errors
# approximate Richardson number
rho0 = ds.rho0.values
g = 9.8
Ri = g * drho * h / (rho0 * spd2)
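    # note: Ri < 0.25 (i.e. 4*Ri < 1) is the classical shear-instability threshold,
    # presumably why 4*Ri is plotted below on a 0-2 color scale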
# psi_grid coordinates
x, y = np.meshgrid(ds.lon_u.values[0,ix0-1:ix1], ds.lat_v.values[iy0-1:iy1,0])
# PLOTTING
plt.close('all')
pfun.start_plot(fs=10, figsize=(18,10))
fig = plt.figure()
xt = [-123.2, -122.2]
yt = [47, 47.5, 48, 48.5]
ax = fig.add_subplot(131)
cs = ax.pcolormesh(x, y, drho, vmin=0, vmax=5, cmap=cm.dense)
fig.colorbar(cs, ax=ax)
pfun.dar(ax)
pfun.add_coast(ax)
ax.axis(aa)
ax.set_title(r'$\Delta\rho\ [kg\ m^{-3}]$')
ax.set_xticks(xt)
ax.set_yticks(yt)
ax = fig.add_subplot(132)
cs = ax.pcolormesh(x, y, np.sqrt(spd2), vmin=0, vmax=2, cmap=cm.speed)
fig.colorbar(cs, ax=ax)
pfun.dar(ax)
pfun.add_coast(ax)
ax.axis(aa)
ax.set_title(r'Speed $[m\ s^{-1}]$')
ax.set_xticks(xt)
ax.set_yticks(yt)
ax.set_yticklabels([])
ax = fig.add_subplot(133)
cs = ax.pcolormesh(x, y, 4*Ri, vmin=0, vmax = 2, cmap='RdYlBu')
fig.colorbar(cs, ax=ax)
pfun.dar(ax)
pfun.add_coast(ax)
ax.axis(aa)
ax.set_title(r'$4 x Ri$')
ax.set_xticks(xt)
ax.set_yticks(yt)
ax.set_yticklabels([])
fig.tight_layout()
# FINISH
ds.close()
pfun.end_plot()
if len(str(in_dict['fn_out'])) > 0:
plt.savefig(in_dict['fn_out'])
plt.close()
else:
plt.show()
def P_Chl_DO(in_dict):
# START
fs = 14
pfun.start_plot(fs=fs, figsize=(14,10))
fig = plt.figure()
ds = xr.open_dataset(in_dict['fn'])
# PLOT CODE
vn_list = ['phytoplankton', 'oxygen']
fs = 14
ii = 1
for vn in vn_list:
if vn == 'phytoplankton':
slev = -1
stext = 'Surface'
elif vn == 'oxygen':
slev = 0
stext = 'Bottom'
if in_dict['auto_vlims']:
pinfo.vlims_dict[vn] = ()
ax = fig.add_subplot(1, len(vn_list), ii)
cs = pfun.add_map_field(ax, ds, vn, pinfo.vlims_dict, slev=slev,
cmap=pinfo.cmap_dict[vn], fac=pinfo.fac_dict[vn],
vlims_fac=pinfo.range_dict[vn], do_mask_edges=True)
fig.colorbar(cs)
pfun.add_coast(ax)
ax.axis(pfun.get_aa(ds))
pfun.dar(ax)
ax.set_title('%s %s %s' % (stext, pinfo.tstr_dict[vn],pinfo.units_dict[vn]), fontsize=1.2*fs)
ax.set_xlabel('Longitude')
pfun.add_bathy_contours(ax, ds, txt=True)
if ii == 1:
ax.set_ylabel('Latitude')
pfun.add_info(ax, in_dict['fn'])
pfun.add_windstress_flower(ax, ds)
ii += 1
fig.tight_layout()
# FINISH
ds.close()
pfun.end_plot()
if len(str(in_dict['fn_out'])) > 0:
plt.savefig(in_dict['fn_out'])
plt.close()
else:
plt.show()
def P_DO_WA_shelf(in_dict):
# Focus on bottom DO on the WA shelf
aa = [-126.1, -123.7, 45.8, 48.8]
xtl = [-126, -125, -124]
ytl = [46, 47, 48]
# START
fs = 14
pfun.start_plot(fs=fs, figsize=(7,10))
fig = plt.figure()
ds = xr.open_dataset(in_dict['fn'])
# PLOT CODE
vn = 'oxygen'
slev = 0
stext = 'Bottom'
if in_dict['auto_vlims']:
pinfo.vlims_dict[vn] = ()
ax = fig.add_subplot(111)
cs = pfun.add_map_field(ax, ds, vn, pinfo.vlims_dict, slev=slev,
cmap=pinfo.cmap_dict[vn], fac=pinfo.fac_dict[vn],
vlims_fac=pinfo.range_dict[vn], do_mask_edges=True)
fig.colorbar(cs)
pfun.add_coast(ax)
ax.axis(aa)
pfun.dar(ax)
ax.set_title('%s %s %s' % (stext, pinfo.tstr_dict[vn],pinfo.units_dict[vn]), fontsize=1.2*fs)
ax.set_xlabel('Longitude')
pfun.add_bathy_contours(ax, ds, txt=False)
ax.set_ylabel('Latitude')
ax.set_xticks(xtl)
ax.set_yticks(ytl)
pfun.add_info(ax, in_dict['fn'], loc='upper_right')
pfun.add_windstress_flower(ax, ds, t_scl=0.5, t_leglen=0.1, center=(.85,.65), fs=12)
# ADD MEAN WINDSTRESS VECTOR
# t_scl: scale windstress vector (smaller to get longer arrows)
# t_leglen: # Pa for wind stress vector legend
fig.tight_layout()
# FINISH
ds.close()
pfun.end_plot()
if len(str(in_dict['fn_out'])) > 0:
plt.savefig(in_dict['fn_out'])
plt.close()
else:
plt.show()
def P_ths(in_dict):
# Plot property-property plots, like theta vs. s
# START
fs = 14
pfun.start_plot(fs=fs, figsize=(10,10))
fig = plt.figure()
ds = xr.open_dataset(in_dict['fn'])
# PLOT CODE
# make a potential density field
import seawater as sw
s0 = 25; s1 = 35
th0 = 0; th1 = 20
SS, TH = np.meshgrid(np.linspace(s0, s1, 50), np.linspace(th0, th1, 50))
SIG = sw.dens0(SS, TH) - 1000
S = zrfun.get_basic_info(in_dict['fn'], only_S=True)
h = ds['h'].values
z = zrfun.get_z(h, 0*h, S, only_rho=True)
s = ds['salt'].values.squeeze()
th = ds['temp'].values.squeeze()
ax = fig.add_subplot(111)
ax.set_xlabel('Salinity')
ax.set_ylabel('Theta (deg C)')
ax.contour(SS, TH, SIG, 20)
nsub = 500
alpha = .1
mask = z > -10
ax.plot(s[mask][::nsub], th[mask][::nsub], '.r', alpha=alpha)
mask = (z < -10) & (z > -200)
ax.plot(s[mask][::nsub], th[mask][::nsub], '.g', alpha=alpha)
mask = z < -200
ax.plot(s[mask][::nsub], th[mask][::nsub], '.b', alpha=alpha)
ax.set_xlim(s0, s1)
ax.set_ylim(th0, th1)
# FINISH
ds.close()
pfun.end_plot()
if len(str(in_dict['fn_out'])) > 0:
plt.savefig(in_dict['fn_out'])
plt.close()
else:
plt.show()
def P_debug(in_dict):
# Focused on debugging
vn_list = ['u', 'v', 'zeta']
do_wetdry = False
# START
fs = 10
pfun.start_plot(fs=fs, figsize=(8*len(vn_list),10))
fig = plt.figure()
ds = xr.open_dataset(in_dict['fn'])
# PLOT CODE
ii = 1
for vn in vn_list:
if 'lon_rho' in ds[vn].coords:
tag = 'rho'
if 'lon_u' in ds[vn].coords:
tag = 'u'
if 'lon_v' in ds[vn].coords:
tag = 'v'
x = ds['lon_'+tag].values
y = ds['lat_'+tag].values
px, py = pfun.get_plon_plat(x,y)
if vn in ['u', 'v']:
v = ds[vn][0,-1,:,:].values
vmin = -2
vmax = 2
cmap='hsv_r'
elif vn == 'zeta':
v = ds[vn][0,:,:].values
h = ds.h.values
mr = ds.mask_rho.values
v[mr==0] = np.nan
h[mr==0] = np.nan
v = v + h
vn = 'depth'
vmin = 2
vmax = 4
cmap='RdYlGn'
else:
v = ds[vn][0, -1,:,:].values
ax = fig.add_subplot(1, len(vn_list), ii)
ax.set_xticks([])
ax.set_yticks([])
cs = ax.pcolormesh(px, py, v, cmap=cmap, vmin=vmin, vmax=vmax)
pfun.add_coast(ax)
ax.axis(pfun.get_aa(ds))
pfun.dar(ax)
if ii == 1:
pfun.add_info(ax, in_dict['fn'], his_num=True)
vmax, vjmax, vimax, vmin, vjmin, vimin = pfun.maxmin(v)
ax.plot(x[vjmax,vimax], y[vjmax,vimax],'*y', mec='k', markersize=15)
ax.plot(x[vjmin,vimin], y[vjmin,vimin],'oy', mec='k', markersize=10)
ax.set_title(('%s ((*)max=%0.1f, (o)min=%0.1f)' % (vn, vmax, vmin)))
ii += 1
# FINISH
ds.close()
pfun.end_plot()
if len(str(in_dict['fn_out'])) > 0:
plt.savefig(in_dict['fn_out'])
plt.close()
else:
plt.show()
def P_layer(in_dict):
# START
fs = 14
pfun.start_plot(fs=fs, figsize=(14,10))
fig = plt.figure()
ds = xr.open_dataset(in_dict['fn'])
# PLOT CODE
vn_list = ['oxygen', 'temp']
z_level = -250
zfull = pfun.get_zfull(ds, in_dict['fn'], 'rho')
ii = 1
for vn in vn_list:
if in_dict['auto_vlims']:
pinfo.vlims_dict[vn] = ()
ax = fig.add_subplot(1, len(vn_list), ii)
laym = pfun.get_laym(ds, zfull, ds['mask_rho'][:], vn, z_level)
v_scaled = pinfo.fac_dict[vn]*laym
vlims = pinfo.vlims_dict[vn]
if len(vlims) == 0:
vlims = pfun.auto_lims(v_scaled)
pinfo.vlims_dict[vn] = vlims
cs = ax.pcolormesh(ds['lon_psi'][:], ds['lat_psi'][:], v_scaled[1:-1,1:-1],
vmin=vlims[0], vmax=vlims[1], cmap=pinfo.cmap_dict[vn])
cb = fig.colorbar(cs)
pfun.add_bathy_contours(ax, ds, txt=True)
pfun.add_coast(ax)
ax.axis(pfun.get_aa(ds))
pfun.dar(ax)
ax.set_xlabel('Longitude')
ax.set_title('%s %s on Z = %d (m)' % (pinfo.tstr_dict[vn], pinfo.units_dict[vn], z_level))
if ii == 1:
pfun.add_info(ax, in_dict['fn'])
ax.set_ylabel('Latitude')
pfun.add_windstress_flower(ax, ds)
if ii == 2:
pfun.add_velocity_vectors(ax, ds, in_dict['fn'], zlev=z_level)
ii += 1
fig.tight_layout()
# FINISH
ds.close()
pfun.end_plot()
if len(str(in_dict['fn_out'])) > 0:
plt.savefig(in_dict['fn_out'])
plt.close()
else:
plt.show()
def P_sect(in_dict):
"""
This plots a map and a section (distance, z), and makes sure
that the color limits are identical. If the color limits are
set automatically then the section is the preferred field for
setting the limits.
I think this works best with -avl False (the default).
"""
# START
fs = 14
pfun.start_plot(fs=fs, figsize=(20,9))
fig = plt.figure()
ds = xr.open_dataset(in_dict['fn'])
# PLOT CODE
vn = 'phytoplankton'
# GET DATA
G, S, T = zrfun.get_basic_info(in_dict['fn'])
# CREATE THE SECTION
# create track by hand
if False:
lon = G['lon_rho']
lat = G['lat_rho']
zdeep = -3500
x = np.linspace(lon.min(), lon.max(), 500)
y = 47 * np.ones(x.shape)
# or read in a section (or list of sections)
else:
tracks_path = Ldir['data'] / 'section_lines'
tracks = ['Line_jdf_v0.p', 'Line_ps_main_v0.p']
zdeep = -300
xx = np.array([])
yy = np.array([])
for track in tracks:
track_fn = tracks_path / track
# get the track to interpolate onto
pdict = pickle.load(open(track_fn, 'rb'))
xx = np.concatenate((xx,pdict['lon_poly']))
yy = np.concatenate((yy,pdict['lat_poly']))
for ii in range(len(xx)-1):
x0 = xx[ii]
x1 = xx[ii+1]
y0 = yy[ii]
y1 = yy[ii+1]
nn = 20
if ii == 0:
x = np.linspace(x0, x1, nn)
y = np.linspace(y0,y1, nn)
else:
x = np.concatenate((x, np.linspace(x0, x1, nn)[1:]))
y = np.concatenate((y, np.linspace(y0, y1, nn)[1:]))
v2, v3, dist, idist0 = pfun.get_section(ds, vn, x, y, in_dict)
# COLOR
# scaled section data
sf = pinfo.fac_dict[vn] * v3['sectvarf']
# now we use the scaled section as the preferred field for setting the
# color limits of both figures in the case -avl True
if in_dict['auto_vlims']:
pinfo.vlims_dict[vn] = pfun.auto_lims(sf)
# PLOTTING
# map with section line
ax = fig.add_subplot(1, 3, 1)
cs = pfun.add_map_field(ax, ds, vn, pinfo.vlims_dict,
cmap=pinfo.cmap_dict[vn], fac=pinfo.fac_dict[vn], do_mask_edges=True)
# fig.colorbar(cs, ax=ax) # It is identical to that of the section
pfun.add_coast(ax)
aaf = [-125.5, -122.1, 46.8, 50.3] # focus domain
ax.axis(aaf)
pfun.dar(ax)
pfun.add_info(ax, in_dict['fn'], loc='upper_right')
ax.set_title('Surface %s %s' % (pinfo.tstr_dict[vn],pinfo.units_dict[vn]))
ax.set_xlabel('Longitude')
ax.set_ylabel('Latitude')
# add section track
ax.plot(x, y, '-r', linewidth=2)
ax.plot(x[idist0], y[idist0], 'or', markersize=5, markerfacecolor='w',
markeredgecolor='r', markeredgewidth=2)
ax.set_xticks([-125, -124, -123])
ax.set_yticks([47, 48, 49, 50])
# section
ax = fig.add_subplot(1, 3, (2, 3))
ax.plot(dist, v2['zbot'], '-k', linewidth=2)
ax.plot(dist, v2['zeta'], '-b', linewidth=1)
ax.set_xlim(dist.min(), dist.max())
ax.set_ylim(zdeep, 5)
# plot section
svlims = pinfo.vlims_dict[vn]
cs = ax.pcolormesh(v3['distf'], v3['zrf'], sf,
vmin=svlims[0], vmax=svlims[1], cmap=pinfo.cmap_dict[vn])
fig.colorbar(cs, ax=ax)
ax.set_xlabel('Distance (km)')
ax.set_ylabel('Z (m)')
ax.set_title('Section %s %s' % (pinfo.tstr_dict[vn],pinfo.units_dict[vn]))
fig.tight_layout()
# FINISH
ds.close()
pfun.end_plot()
if len(str(in_dict['fn_out'])) > 0:
plt.savefig(in_dict['fn_out'])
plt.close()
else:
plt.show()
def P_sect_soundspeed(in_dict):
"""
Soundspeed section plot
"""
import gsw
ds = xr.open_dataset(in_dict['fn'])
# create track by hand
x = np.linspace(-124.85,-124.2, 100) # shelf only
#x = np.linspace(-126,-124.2, 100) # shows SOFAR channel
y = 47 * np.ones(x.shape)
v2, v3, dist, idist0 = pfun.get_section(ds, 'salt', x, y, in_dict)
s = v3['sectvarf']
v2, v3, dist, idist0 = pfun.get_section(ds, 'temp', x, y, in_dict)
th = v3['sectvarf']
X = v3['distf']
Z = v3['zrf']
# adjust Z so surface is at 0
Z = Z - Z[-1,:]
p = gsw.p_from_z(Z, 47)
SA = gsw.SA_from_SP(s, p, -125, 47)
CT = gsw.CT_from_pt(SA, th)
spd = gsw.sound_speed(SA, CT, p)
# START
fs = 14
pfun.start_plot(fs=fs, figsize=(16,9))
fig, axes = plt.subplots(nrows=3, ncols=2)
ax = axes[0,0]
cs = ax.pcolormesh(X, Z, SA, cmap='jet')
fig.colorbar(cs, ax=ax)
ax.text(.95, .05, 'Absolute Salinity', transform=ax.transAxes, ha='right')
ax = axes[1,0]
cs = ax.pcolormesh(X, Z, CT, cmap='jet')
fig.colorbar(cs, ax=ax)
ax.text(.95, .05, 'Conservative Temperature', transform=ax.transAxes, ha='right')
ax = axes[2,0]
cs = ax.pcolormesh(X, Z, spd, cmap='jet')
fig.colorbar(cs, ax=ax)
ax.text(.95, .05, 'Soundspeed [m/s]', transform=ax.transAxes, ha='right')
ax = axes[0,1]
ax.plot(SA,Z, alpha=.2)
ax.text(.05, .05, 'Absolute Salinity', transform=ax.transAxes, ha='left')
ax = axes[1,1]
ax.plot(CT,Z, alpha=.2)
ax.text(.95, .05, 'Conservative Temperature', transform=ax.transAxes, ha='right')
ax = axes[2,1]
ax.plot(spd,Z, alpha=.2)
ax.text(.95, .05, 'Soundspeed [m/s]', transform=ax.transAxes, ha='right')
fig.suptitle(str(in_dict['fn']))
# FINISH
ds.close()
pfun.end_plot()
if len(str(in_dict['fn_out'])) > 0:
plt.savefig(in_dict['fn_out'])
plt.close()
else:
plt.show()
def P_splash(in_dict):
"""
This makes a fancy plot suitable for the landing page of the LiveOcean
website. Eventually I could automate making this new every day.
"""
# START
fs = 14
pfun.start_plot(fs=fs, figsize=(12,9))
fig = plt.figure()
ds = xr.open_dataset(in_dict['fn'])
# PREPARING FIELDS
from PyCO2SYS import CO2SYS
import seawater as sw
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from warnings import filterwarnings
filterwarnings('ignore') # skip a warning from PyCO2SYS
Ldir = Lfun.Lstart()
do_topo = True
# model output
fn = in_dict['fn']
T = zrfun.get_basic_info(fn, only_T=True)
x = ds.lon_psi.values
y = ds.lat_psi.values
th = ds['temp'][0,-1,1:-1,1:-1].values
if do_topo:
# topography
tfn = (Ldir['data'] / 'topo' / 'srtm15' / 'topo15.nc')
tds = xr.open_dataset(tfn)
step = 3
tx = tds['lon'][::step].values
ty = tds['lat'][::step].values
tz = tds['z'][::step,::step].values
tz[tz<0] = np.nan
def get_arag(ds, fn, aa, nlev):
G = zrfun.get_basic_info(fn, only_G=True)
# find indices that encompass region aa
i0 = zfun.find_nearest_ind(G['lon_rho'][0,:], aa[0]) - 1
i1 = zfun.find_nearest_ind(G['lon_rho'][0,:], aa[1]) + 2
j0 = zfun.find_nearest_ind(G['lat_rho'][:,0], aa[2]) - 1
j1 = zfun.find_nearest_ind(G['lat_rho'][:,0], aa[3]) + 2
px = G['lon_psi'][j0:j1-1, i0:i1-1]
py = G['lat_psi'][j0:j1-1, i0:i1-1]
lat = G['lat_rho'][j0:j1,i0:i1] # used in sw.pres
# first extract needed fields and save in v_dict
v_dict = {}
vn_in_list = ['temp', 'salt' , 'rho', 'alkalinity', 'TIC']
for cvn in vn_in_list:
L = ds[cvn][0,nlev,j0:j1,i0:i1].values
v_dict[cvn] = L
# ------------- the CO2SYS steps -------------------------
# create pressure
Ld = G['h'][j0:j1,i0:i1]
Lpres = sw.pres(Ld, lat)
# get in situ temperature from potential temperature
Ltemp = sw.ptmp(v_dict['salt'], v_dict['temp'], 0, Lpres)
        # convert from umol/L to umol/kg using in situ density
Lalkalinity = 1000 * v_dict['alkalinity'] / (v_dict['rho'] + 1000)
Lalkalinity[Lalkalinity < 100] = np.nan
LTIC = 1000 * v_dict['TIC'] / (v_dict['rho'] + 1000)
LTIC[LTIC < 100] = np.nan
CO2dict = CO2SYS(Lalkalinity, LTIC, 1, 2, v_dict['salt'], Ltemp, Ltemp,
Lpres, Lpres, 50, 2, 1, 10, 1, NH3=0.0, H2S=0.0)
# PH = CO2dict['pHout']
# PH = zfun.fillit(PH.reshape((v_dict['salt'].shape)))
ARAG = CO2dict['OmegaARout']
ARAG = ARAG.reshape((v_dict['salt'].shape))
ARAG = ARAG[1:-1, 1:-1]
return px, py, ARAG
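    # OmegaAR is the aragonite saturation state; values below 1 indicate water that is
    # corrosive to aragonite shell-building organisms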
# LARGE MAP
ax = fig.add_subplot(121)
cmap = 'RdYlBu_r'
cs = ax.pcolormesh(x,y,th, cmap=cmap, vmin=11, vmax=20)
# Inset colorbar
cbaxes = inset_axes(ax, width="4%", height="40%", loc='lower left')
fig.colorbar(cs, cax=cbaxes, orientation='vertical')
if do_topo:
cmap = 'gist_earth'
cs = ax.pcolormesh(tx,ty,tz, cmap=cmap, shading='nearest', vmin=-1000, vmax=2000)
pfun.add_coast(ax)
pfun.dar(ax)
ax.axis([-130, -122, 42, 52])
ax.set_xticks([-129, -127, -125, -123])
ax.set_yticks([42, 44, 46, 48, 50, 52])
ax.set_xlabel('Longitude')
ax.set_ylabel('Latitude')
tstr = T['dt'].strftime(Lfun.ds_fmt)
ax.text(.98,.99,'LiveOcean', size=fs*1.2,
ha='right', va='top', weight='bold', transform=ax.transAxes)
ax.text(.98,.95,'Surface water\nTemperature $[^{\circ}C]$\n'+tstr,
ha='right', va='top', weight='bold', transform=ax.transAxes,
bbox=dict(facecolor='w', edgecolor='None',alpha=.5))
# box for Willapa and Grays Harbor
aa = [-124.6, -123.65, 46, 47.2]
nlev = 0
# draw box on the large map
pfun.draw_box(ax, aa, linestyle='-', color='g', alpha=1, linewidth=2, inset=0)
    # SMALL MAP
ax = fig.add_subplot(122)
px, py, ARAG = get_arag(ds, fn, aa, nlev)
cs = ax.pcolormesh(px,py,ARAG, cmap='coolwarm_r', vmin=0, vmax=3)
# Inset colorbar
cbaxes = inset_axes(ax, width="4%", height="40%", loc='lower left')
fig.colorbar(cs, cax=cbaxes, orientation='vertical')
pfun.add_coast(ax)
pfun.dar(ax)
ax.axis(aa)
ax.set_xticks([-124.5, -124])
ax.set_yticks([46, 47])
ax.set_xlabel('Longitude')
ax.text(.98,.99,'Bottom water\nAragonite\nSaturation\nState',
ha='right', va='top', weight='bold', transform=ax.transAxes)
fig.tight_layout()
# FINISH
ds.close()
pfun.end_plot()
if len(str(in_dict['fn_out'])) > 0:
plt.savefig(in_dict['fn_out'])
plt.close()
else:
plt.show()
def P_splash2(in_dict):
"""
This makes a fancy plot suitable for the landing page of the LiveOcean
website. This one is focused on the Salish Sea.
"""
# START
fs = 14
pfun.start_plot(fs=fs, figsize=(12,9))
fig = plt.figure()
ds = xr.open_dataset(in_dict['fn'])
# PREPARING FIELDS
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
Ldir = Lfun.Lstart()
do_topo = True
# model output
fn = in_dict['fn']
T = zrfun.get_basic_info(fn, only_T=True)
x = ds.lon_psi.values
y = ds.lat_psi.values
th = ds['temp'][0,-1,1:-1,1:-1].values
ox = pinfo.fac_dict['oxygen'] * ds['oxygen'][0,0,1:-1,1:-1].values
if do_topo:
# topography
tfn = (Ldir['data'] / 'topo' / 'srtm15' / 'topo15.nc')
tds = xr.open_dataset(tfn)
step = 3
tx = tds['lon'][::step].values
ty = tds['lat'][::step].values
tz = tds['z'][::step,::step].values
tz[tz<0] = np.nan
# LARGE MAP
ax = fig.add_subplot(121)
cmap = 'RdYlBu_r'
cs = ax.pcolormesh(x,y,th, cmap=cmap, vmin=11, vmax=20)
# Inset colorbar
cbaxes = inset_axes(ax, width="5%", height="40%", loc='lower left')
fig.colorbar(cs, cax=cbaxes, orientation='vertical')
if do_topo:
cmap = 'gist_earth'
cs = ax.pcolormesh(tx,ty,tz, cmap=cmap, shading='nearest', vmin=-1000, vmax=2000)
pfun.add_coast(ax)
pfun.dar(ax)
ax.axis([-130, -122, 42, 52])
ax.set_xticks([-129, -127, -125, -123])
ax.set_yticks([42, 44, 46, 48, 50, 52])
ax.set_xlabel('Longitude')
ax.set_ylabel('Latitude')
tstr = T['dt'].strftime(Lfun.ds_fmt)
ax.text(.98,.99,'LiveOcean', size=fs*1.5,
ha='right', va='top', weight='bold', transform=ax.transAxes)
ax.text(.03,.45,'Surface water\nTemperature $[^{\circ}C]$\n'+tstr,
ha='left', va='bottom', weight='bold', transform=ax.transAxes,
bbox=dict(facecolor='w', edgecolor='None',alpha=.5))
# box for Salish Sea
aa = [-125.3, -122.1, 46.8, 50.3]
nlev = 0
# draw box on the large map
pfun.draw_box(ax, aa, linestyle='-', color='g', alpha=1, linewidth=2, inset=0)
fs2 = fs*.9
fs3 = fs*.8
ax.text(-123.072,46.7866,'Washington', size=fs2,
style='italic',ha='center',va='center',rotation=-45)
ax.text(-122.996,44.5788,'Oregon', size=fs2,
style='italic',ha='center',va='center',rotation=-45)
ah = ax.text(-125.3,49.4768,'Vancouver\nIsland', size=fs2,
style='italic',ha='center',va='center',rotation=-45)
ax.text(-126.3,50.2,'Johnstone\nStrait', size=.7*fs2,
style='italic',ha='center',va='center',rotation=-10)
# SMALL MAP
from cmocean import cm
ax = fig.add_subplot(122)
cs = ax.pcolormesh(x,y,ox, cmap=cm.oxy, vmin=0, vmax=10)
# Inset colorbar
cbaxes = inset_axes(ax, width="5%", height="30%", loc='upper right', borderpad=2)
fig.colorbar(cs, cax=cbaxes, orientation='vertical')
pfun.add_coast(ax)
pfun.dar(ax)
ax.axis(aa)
ax.set_xticks([-125, -124, -123])
ax.set_yticks([47, 48, 49, 50])
ax.set_xlabel('Longitude')
ax.text(.84,.95,'Salish Sea\n\nBottom Oxygen\n$[mg\ L^{-1}]$',
ha='right', va='top', weight='bold', transform=ax.transAxes)
# add labels
ax.text(-122.8,49.335,'Fraser\nRiver',size=fs2,
style='italic',ha='center',va='center',rotation=0)
ax.text(-123.7,49.2528,'Strait of Georgia',size=fs2,
style='italic',ha='center',va='center',rotation=-30)
ax.text(-123.5,48.28,'Strait of Juan de Fuca',size=fs2,
style='italic',ha='center',va='center',rotation=0,
color='w')
ax.text(-123.3,47.6143,'Puget\nSound',size=fs2,
style='italic',ha='center',va='center',rotation=+55)
ax.text(-122.3,48.48,'Skagit\nRiver',size=fs3,
style='italic',ha='center',va='center',
bbox=dict(facecolor='w', edgecolor='None',alpha=.5))
ax.text(-123.173,48.44,'Haro\nStrait',size=fs3,
style='italic',ha='center',va='center',
color='w')
fig.tight_layout()
# FINISH
ds.close()
pfun.end_plot()
if len(str(in_dict['fn_out'])) > 0:
plt.savefig(in_dict['fn_out'])
plt.close()
else:
plt.show()
def P_splash3(in_dict):
"""
This makes a fancy plot suitable for the landing page of the LiveOcean
website. This one is focused on the Puget Sound.
"""
# START
fs = 14
pfun.start_plot(fs=fs, figsize=(15,12))
fig = plt.figure()
ds = xr.open_dataset(in_dict['fn'])
# PREPARING FIELDS
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
Ldir = Lfun.Lstart()
do_topo = True
# model output
fn = in_dict['fn']
T = zrfun.get_basic_info(fn, only_T=True)
x,y = pfun.get_plon_plat(ds.lon_rho.values, ds.lat_rho.values)
th = ds['temp'][0,-1,:,:].values
if do_topo:
# topography
tfn = (Ldir['data'] / 'topo' / 'srtm15' / 'topo15.nc')
tds = xr.open_dataset(tfn)
step = 3
tx = tds['lon'][::step].values
ty = tds['lat'][::step].values
tz = tds['z'][::step,::step].values
tz[tz<0] = np.nan
# LARGE MAP
ax = fig.add_subplot(121)
cs = ax.pcolormesh(x,y,th, cmap='RdYlBu_r', vmin=11, vmax=20)
# Inset colorbar
cbaxes = inset_axes(ax, width="5%", height="40%", loc='lower left')
fig.colorbar(cs, cax=cbaxes, orientation='vertical')
if do_topo:
cmap = 'gist_earth'
cs = ax.pcolormesh(tx,ty,tz, cmap='gist_earth', shading='nearest', vmin=-1000, vmax=2000)
pfun.add_coast(ax)
pfun.dar(ax)
ax.axis([-130, -122, 42, 52])
ax.set_xticks([-129, -127, -125, -123])
ax.set_yticks([42, 44, 46, 48, 50, 52])
ax.set_xlabel('Longitude')
ax.set_ylabel('Latitude')
tstr = T['dt'].strftime(Lfun.ds_fmt)
ax.text(.98,.99,'LiveOcean', size=fs*1.5,
ha='right', va='top', weight='bold', transform=ax.transAxes)
ax.text(.03,.45,'Surface water\nTemperature $[^{\circ}C]$\n'+tstr,
ha='left', va='bottom', weight='bold', transform=ax.transAxes,
bbox=dict(facecolor='w', edgecolor='None',alpha=.5))
# box for Puget Sound and the San Juans
aa = [-123.4, -122, 47, 48.8]
nlev = 0
# draw box on the large map
pfun.draw_box(ax, aa, linestyle='-', color='m', alpha=1, linewidth=2, inset=0)
fs2 = fs*.9
fs3 = fs*.8
ax.text(-123.072,46.7866,'Washington', size=fs2,
style='italic',ha='center',va='center',rotation=-45)
ax.text(-122.996,44.5788,'Oregon', size=fs2,
style='italic',ha='center',va='center',rotation=-45)
ah = ax.text(-125.3,49.4768,'Vancouver\nIsland', size=fs2,
style='italic',ha='center',va='center',rotation=-45)
ax.text(-126.3,50.2,'Johnstone\nStrait', size=.7*fs2,
style='italic',ha='center',va='center',rotation=-10)
# SMALL MAP
ax = fig.add_subplot(122)
cs = ax.pcolormesh(x,y,th, cmap='RdYlBu_r', vmin=11, vmax=20)
# Inset colorbar
# cbaxes = inset_axes(ax, width="5%", height="30%", loc='upper right', borderpad=2)
# fig.colorbar(cs, cax=cbaxes, orientation='vertical')
pfun.add_coast(ax)
pfun.dar(ax)
ax.axis(aa)
ax.set_xticks([-123, -122.5, -122])
ax.set_yticks([47, 48])
ax.set_xlabel('Longitude')
ax.text(.03,.5,'Puget Sound &\nSan Juans',
ha='left', va='center', weight='bold', transform=ax.transAxes)
#fig.tight_layout()
# FINISH
ds.close()
pfun.end_plot()
if len(str(in_dict['fn_out'])) > 0:
plt.savefig(in_dict['fn_out'])
plt.close()
else:
plt.show()
def P_superplot_salt(in_dict):
# Plot salinity maps and section, with forcing time-series.
# Super clean design. Updated to avoid need for tide data, which it
# now just gets from the same mooring extraction it uses for wind.
vn = 'salt'
vlims = (28.5, 33) # full map
vlims2 = (22, 32) # PS map
vlims3 = (29, 32) # PS section
cmap = 'Spectral_r'
# get model fields
ds = xr.open_dataset(in_dict['fn'])
gtagex = str(in_dict['fn']).split('/')[-3]
year_str = str(in_dict['fn']).split('/')[-2].split('.')[0][1:]
# get forcing fields
ffn = Ldir['LOo'] / 'extract' / gtagex / 'superplot' / ('forcing_' + gtagex + '_' + year_str + '.p')
fdf = pd.read_pickle(ffn)
fdf['yearday'] = fdf.index.dayofyear - 0.5 # .5 to 364.5
# get section
G, S, T = zrfun.get_basic_info(in_dict['fn'])
# read in a section (or list of sections)
tracks_path = Ldir['data'] / 'section_lines'
tracks = ['Line_ps_main_v0.p']
zdeep = -300
xx = np.array([])
yy = np.array([])
for track in tracks:
track_fn = tracks_path / track
# get the track to interpolate onto
pdict = pickle.load(open(track_fn, 'rb'))
xx = np.concatenate((xx,pdict['lon_poly']))
yy = np.concatenate((yy,pdict['lat_poly']))
for ii in range(len(xx)-1):
x0 = xx[ii]
x1 = xx[ii+1]
y0 = yy[ii]
y1 = yy[ii+1]
nn = 20
if ii == 0:
x = np.linspace(x0, x1, nn)
y = np.linspace(y0,y1, nn)
else:
x = np.concatenate((x, np.linspace(x0, x1, nn)[1:]))
y = np.concatenate((y, np.linspace(y0, y1, nn)[1:]))
v2, v3, dist, idist0 = pfun.get_section(ds, vn, x, y, in_dict)
# PLOTTING
fig = plt.figure(figsize=(17,9))
fs = 18 # fontsize
# Full map
ax = fig.add_subplot(131)
lon = ds['lon_psi'].values
lat = ds['lat_psi'].values
v =ds[vn][0, -1, 1:-1, 1:-1].values
fac=pinfo.fac_dict[vn]
vv = fac * v
vv[:, :6] = np.nan
vv[:6, :] = np.nan
cs = ax.pcolormesh(lon, lat, vv, vmin=vlims[0], vmax=vlims[1], cmap=cmap)
pfun.add_coast(ax)
ax.axis(pfun.get_aa(ds))
pfun.dar(ax)
ax.set_axis_off()
# add a box for the subplot
aa = [-123.5, -122.1, 47.03, 48.8]
pfun.draw_box(ax, aa, color='c', alpha=.5, linewidth=5, inset=.01)
# labels
ax.text(.95, .07, 'LiveOcean\nSalinity\n'
+ datetime.strftime(T['dt'], '%Y'), fontsize=fs, color='k',
transform=ax.transAxes, horizontalalignment='center',
fontweight='bold')
ax.text(.95, .03, datetime.strftime(T['dt'], '%Y.%m.%d'), fontsize=fs*.7, color='k',
transform=ax.transAxes, horizontalalignment='center')
ax.text(.99,.97,'S range\n'+ str(vlims), transform=ax.transAxes,
va='top', ha='right', c='orange', size=.6*fs, weight='bold')
# PS map
ax = fig.add_subplot(132)
cs = ax.pcolormesh(lon, lat, vv, vmin=vlims2[0], vmax=vlims2[1],
cmap=cmap)
#fig.colorbar(cs)
pfun.add_coast(ax)
ax.axis(aa)
pfun.dar(ax)
pfun.draw_box(ax, aa, color='c', alpha=.5, linewidth=5, inset=.01)
ax.set_axis_off()
# add section track
sect_color = 'violet'
n_ai = int(len(x)/6)
n_tn = int(4.5*len(x)/7)
ax.plot(x, y, linestyle='--', color='k', linewidth=2)
ax.plot(x[n_ai], y[n_ai], marker='*', color=sect_color, markersize=14,
markeredgecolor='k')
ax.plot(x[n_tn], y[n_tn], marker='o', color=sect_color, markersize=10,
markeredgecolor='k')
ax.text(.93,.97,'S range\n'+ str(vlims2), transform=ax.transAxes,
va='top', ha='right', c='orange', size=.6*fs, weight='bold')
# Section
ax = fig.add_subplot(433)
ax.plot(dist, v2['zeta']+5, linestyle='--', color='k', linewidth=2)
ax.plot(dist[n_ai], v2['zeta'][n_ai] + 5, marker='*', color=sect_color,
markersize=14, markeredgecolor='k')
ax.plot(dist[n_tn], v2['zeta'][n_tn] + 5, marker='o', color=sect_color,
markersize=10, markeredgecolor='k')
ax.set_xlim(dist.min(), dist.max())
ax.set_ylim(zdeep, 25)
sf = pinfo.fac_dict[vn] * v3['sectvarf']
# plot section
cs = ax.pcolormesh(v3['distf'], v3['zrf'], sf,
vmin=vlims3[0], vmax=vlims3[1], cmap=cmap)
ax.text(.99,.4,'S range\n'+ str(vlims3), transform=ax.transAxes,
va='bottom', ha='right', c='orange', size=.6*fs, weight='bold')
#fig.colorbar(cs)
# labels
ax.text(0, 0, 'SECTION\nPuget Sound', fontsize=fs, color='b',
transform=ax.transAxes)
ax.set_axis_off()
# get the day
tm = T['dt'] # datetime
TM = datetime(tm.year, tm.month, tm.day)
# get yearday
yearday = fdf['yearday'].values
this_yd = fdf.loc[TM, 'yearday']
# Tides
alpha = .4
ax = fig.add_subplot(436)
ax.plot(yearday, fdf['RMS Tide Height (m)'].values, '-k',
lw=3, alpha=alpha)
# time marker
ax.plot(this_yd, fdf.loc[TM, 'RMS Tide Height (m)'],
marker='o', color='r', markersize=7)
# labels
ax.text(1, .05, 'NEAP TIDES', transform=ax.transAxes,
alpha=alpha, fontsize=fs, horizontalalignment='right')
ax.text(1, .85, 'SPRING TIDES', transform=ax.transAxes,
alpha=alpha, fontsize=fs, horizontalalignment='right')
# limits
ax.set_xlim(0,365)
ax.set_ylim(0,1.5)
ax.set_axis_off()
# Wind
alpha=.5
ax = fig.add_subplot(439)
w = fdf['8-day NS Wind Stress (Pa)'].values
wp = w.copy()
wp[w<0] = np.nan
wm = w.copy()
wm[w>0] = np.nan
tt = np.arange(len(w))
ax.fill_between(yearday, wp, y2=0*w, color='g', alpha=alpha)
ax.fill_between(yearday, wm, y2=0*w, color='b', alpha=alpha)
# time marker
ax.plot(this_yd, fdf.loc[TM,'8-day NS Wind Stress (Pa)'],
marker='o', color='r', markersize=7)
# labels
ax.text(0, .85, 'DOWNWELLING WIND', transform=ax.transAxes,
color='g', alpha=alpha, fontsize=fs)
ax.text(0, .05, 'UPWELLING WIND', transform=ax.transAxes,
color='b', alpha=alpha, fontsize=fs)
# limits
ax.set_xlim(0,365)
ax.set_ylim(-.15, .25)
ax.set_axis_off()
# Rivers
alpha = .6
cr = fdf['Columbia R. Flow (1000 m3/s)'].values
fr = fdf['Fraser R. Flow (1000 m3/s)'].values
sr = fdf['Skagit R. Flow (1000 m3/s)'].values
this_yd = fdf.loc[TM, 'yearday']
ax = fig.add_subplot(4,3,12)
ax.fill_between(yearday, cr, 0*yearday, color='orange', alpha=alpha)
ax.fill_between(yearday, fr, 0*yearday, color='violet', alpha=alpha)
ax.fill_between(yearday, sr, 0*yearday, color='brown', alpha=alpha)
# time markers
ax.plot(this_yd, fdf.loc[TM, 'Columbia R. Flow (1000 m3/s)'],
marker='o', color='r', markersize=7)
ax.plot(this_yd, fdf.loc[TM, 'Fraser R. Flow (1000 m3/s)'],
marker='o', color='r', markersize=7)
ax.plot(this_yd, fdf.loc[TM, 'Skagit R. Flow (1000 m3/s)'],
marker='o', color='r', markersize=7)
# labels
ax.text(.9, .85, 'Columbia River', transform=ax.transAxes,
color='orange', fontsize=fs, horizontalalignment='right', alpha=alpha)
ax.text(.9, .70, 'Fraser River', transform=ax.transAxes,
color='violet', fontsize=fs, horizontalalignment='right', alpha=alpha)
ax.text(.9, .55, 'Skagit River', transform=ax.transAxes,
color='brown', fontsize=fs, horizontalalignment='right', alpha=alpha)
# limits
ax.set_xlim(0,365)
ax.set_ylim(-5,20)
ax.set_axis_off()
# Time Axis
clist = ['gray', 'gray', 'gray', 'gray']
if tm.month in [1, 2, 3]:
clist[0] = 'r'
if tm.month in [4, 5, 6]:
clist[1] = 'r'
if tm.month in [7, 8, 9]:
clist[2] = 'r'
if tm.month in [10, 11, 12]:
clist[3] = 'r'
ax.text(0, 0, 'WINTER', transform=ax.transAxes, color=clist[0],
fontsize=fs, horizontalalignment='left', style='italic')
ax.text(.4, 0, 'SPRING', transform=ax.transAxes, color=clist[1],
fontsize=fs, horizontalalignment='center', style='italic')
ax.text(.68, 0, 'SUMMER', transform=ax.transAxes, color=clist[2],
fontsize=fs, horizontalalignment='center', style='italic')
ax.text(1, 0, 'FALL', transform=ax.transAxes, color=clist[3],
fontsize=fs, horizontalalignment='right', style='italic')
fig.tight_layout()
# FINISH
ds.close()
if len(str(in_dict['fn_out'])) > 0:
plt.savefig(in_dict['fn_out'])
plt.close()
else:
plt.show()
def P_superplot_oxygen(in_dict):
# Plot bottom oxygen maps and section, with forcing time-series.
# Super clean design. Updated to avoid need for tide data, which it
# now just gets from the same mooring extraction it uses for wind.
vn = 'oxygen'
vlims = (0, 10) # full map
vlims2 = (0, 10) # PS map
vlims3 = (0, 10) # PS section
from cmocean import cm
cmap = cm.oxy
# get model fields
ds = xr.open_dataset(in_dict['fn'])
gtagex = str(in_dict['fn']).split('/')[-3]
year_str = str(in_dict['fn']).split('/')[-2].split('.')[0][1:]
# get forcing fields
ffn = Ldir['LOo'] / 'extract' / gtagex / 'superplot' / ('forcing_' + gtagex + '_' + year_str + '.p')
fdf = pd.read_pickle(ffn)
fdf['yearday'] = fdf.index.dayofyear - 0.5 # .5 to 364.5
# get section
G, S, T = zrfun.get_basic_info(in_dict['fn'])
# read in a section (or list of sections)
tracks_path = Ldir['data'] / 'section_lines'
tracks = ['Line_HC_thalweg_long.p']
zdeep = -250
xx = np.array([])
yy = np.array([])
for track in tracks:
track_fn = tracks_path / track
# get the track to interpolate onto
pdict = pickle.load(open(track_fn, 'rb'))
xx = np.concatenate((xx,pdict['lon_poly']))
yy = np.concatenate((yy,pdict['lat_poly']))
for ii in range(len(xx)-1):
x0 = xx[ii]
x1 = xx[ii+1]
y0 = yy[ii]
y1 = yy[ii+1]
nn = 20
if ii == 0:
x = np.linspace(x0, x1, nn)
y = np.linspace(y0,y1, nn)
else:
x = np.concatenate((x, np.linspace(x0, x1, nn)[1:]))
y = np.concatenate((y, np.linspace(y0, y1, nn)[1:]))
v2, v3, dist, idist0 = pfun.get_section(ds, vn, x, y, in_dict)
# PLOTTING
fig = plt.figure(figsize=(17,9))
fs = 18 # fontsize
# Full map
ax = fig.add_subplot(131)
lon = ds['lon_psi'].values
lat = ds['lat_psi'].values
    v = ds[vn][0, 0, 1:-1, 1:-1].values
    fac = pinfo.fac_dict[vn]
vv = fac * v
vv[:, :6] = np.nan
vv[:6, :] = np.nan
cs = ax.pcolormesh(lon, lat, vv, vmin=vlims[0], vmax=vlims[1], cmap=cmap)
pfun.add_coast(ax)
ax.axis(pfun.get_aa(ds))
pfun.dar(ax)
ax.set_axis_off()
# add a box for the subplot
aa = [-123.5, -122.1, 47.03, 48.8]
pfun.draw_box(ax, aa, color='c', alpha=.5, linewidth=5, inset=.01)
# labels
ax.text(.95, .07, 'LiveOcean\nBottom Oxygen\n'
+ datetime.strftime(T['dt'], '%Y'), fontsize=fs, color='k',
transform=ax.transAxes, horizontalalignment='center',
fontweight='bold')
ax.text(.95, .03, datetime.strftime(T['dt'], '%Y.%m.%d'), fontsize=fs*.7, color='k',
transform=ax.transAxes, horizontalalignment='center')
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
cbaxes = inset_axes(ax, width="40%", height="4%", loc='upper right', borderpad=2)
cb = fig.colorbar(cs, cax=cbaxes, orientation='horizontal')
cb.ax.tick_params(labelsize=.85*fs)
ax.text(1, .85, r'$[mg\ L^{-1}]$', transform=ax.transAxes, fontsize=fs, ha='right')
# ax.text(.99,.97,'S range\n'+ str(vlims), transform=ax.transAxes,
# va='top', ha='right', c='orange', size=.6*fs, weight='bold')
# PS map
ax = fig.add_subplot(132)
cs = ax.pcolormesh(lon, lat, vv, vmin=vlims2[0], vmax=vlims2[1],
cmap=cmap)
#fig.colorbar(cs)
pfun.add_coast(ax)
ax.axis(aa)
pfun.dar(ax)
pfun.draw_box(ax, aa, color='c', alpha=.5, linewidth=5, inset=.01)
ax.set_axis_off()
# add section track
sect_color = 'violet'
n_ai = int(len(x)/6)
n_tn = int(4.5*len(x)/7)
ax.plot(x, y, linestyle='--', color='k', linewidth=2)
ax.plot(x[n_ai], y[n_ai], marker='*', color=sect_color, markersize=14,
markeredgecolor='k')
ax.plot(x[n_tn], y[n_tn], marker='o', color=sect_color, markersize=10,
markeredgecolor='k')
# ax.text(.93,.97,'S range\n'+ str(vlims2), transform=ax.transAxes,
# va='top', ha='right', c='orange', size=.6*fs, weight='bold')
# Section
ax = fig.add_subplot(433)
ax.plot(dist, v2['zeta']+5, linestyle='--', color='k', linewidth=2)
ax.plot(dist[n_ai], v2['zeta'][n_ai] + 5, marker='*', color=sect_color,
markersize=14, markeredgecolor='k')
ax.plot(dist[n_tn], v2['zeta'][n_tn] + 5, marker='o', color=sect_color,
markersize=10, markeredgecolor='k')
ax.set_xlim(dist.min(), dist.max())
ax.set_ylim(zdeep, 25)
sf = pinfo.fac_dict[vn] * v3['sectvarf']
# plot section
cs = ax.pcolormesh(v3['distf'], v3['zrf'], sf,
vmin=vlims3[0], vmax=vlims3[1], cmap=cmap)
# ax.text(.99,.4,'S range\n'+ str(vlims3), transform=ax.transAxes,
# va='bottom', ha='right', c='orange', size=.6*fs, weight='bold')
#fig.colorbar(cs)
# labels
ax.text(0, 0, 'SECTION\nHood Canal', fontsize=fs, color='b',
transform=ax.transAxes)
ax.set_axis_off()
# get the day
tm = T['dt'] # datetime
TM = datetime(tm.year, tm.month, tm.day)
# get yearday
yearday = fdf['yearday'].values
this_yd = fdf.loc[TM, 'yearday']
# Tides
alpha = .4
ax = fig.add_subplot(436)
ax.plot(yearday, fdf['RMS Tide Height (m)'].values, '-k',
lw=3, alpha=alpha)
# time marker
ax.plot(this_yd, fdf.loc[TM, 'RMS Tide Height (m)'],
marker='o', color='r', markersize=7)
# labels
ax.text(1, .05, 'NEAP TIDES', transform=ax.transAxes,
alpha=alpha, fontsize=fs, horizontalalignment='right')
ax.text(1, .85, 'SPRING TIDES', transform=ax.transAxes,
alpha=alpha, fontsize=fs, horizontalalignment='right')
# limits
ax.set_xlim(0,365)
ax.set_ylim(0,1.5)
ax.set_axis_off()
# Wind
alpha=.5
ax = fig.add_subplot(439)
w = fdf['8-day NS Wind Stress (Pa)'].values
wp = w.copy()
wp[w<0] = np.nan
wm = w.copy()
wm[w>0] = np.nan
tt = np.arange(len(w))
ax.fill_between(yearday, wp, y2=0*w, color='g', alpha=alpha)
ax.fill_between(yearday, wm, y2=0*w, color='b', alpha=alpha)
# time marker
ax.plot(this_yd, fdf.loc[TM,'8-day NS Wind Stress (Pa)'],
marker='o', color='r', markersize=7)
# labels
ax.text(0, .85, 'DOWNWELLING WIND', transform=ax.transAxes,
color='g', alpha=alpha, fontsize=fs)
ax.text(0, .05, 'UPWELLING WIND', transform=ax.transAxes,
color='b', alpha=alpha, fontsize=fs)
# limits
ax.set_xlim(0,365)
ax.set_ylim(-.15, .25)
ax.set_axis_off()
# Rivers
alpha = .6
cr = fdf['Columbia R. Flow (1000 m3/s)'].values
fr = fdf['Fraser R. Flow (1000 m3/s)'].values
sr = fdf['Skagit R. Flow (1000 m3/s)'].values
this_yd = fdf.loc[TM, 'yearday']
ax = fig.add_subplot(4,3,12)
ax.fill_between(yearday, cr, 0*yearday, color='orange', alpha=alpha)
ax.fill_between(yearday, fr, 0*yearday, color='violet', alpha=alpha)
ax.fill_between(yearday, sr, 0*yearday, color='brown', alpha=alpha)
# time markers
ax.plot(this_yd, fdf.loc[TM, 'Columbia R. Flow (1000 m3/s)'],
marker='o', color='r', markersize=7)
ax.plot(this_yd, fdf.loc[TM, 'Fraser R. Flow (1000 m3/s)'],
marker='o', color='r', markersize=7)
ax.plot(this_yd, fdf.loc[TM, 'Skagit R. Flow (1000 m3/s)'],
marker='o', color='r', markersize=7)
# labels
ax.text(.9, .85, 'Columbia River', transform=ax.transAxes,
color='orange', fontsize=fs, horizontalalignment='right', alpha=alpha)
ax.text(.9, .70, 'Fraser River', transform=ax.transAxes,
color='violet', fontsize=fs, horizontalalignment='right', alpha=alpha)
ax.text(.9, .55, 'Skagit River', transform=ax.transAxes,
color='brown', fontsize=fs, horizontalalignment='right', alpha=alpha)
# limits
ax.set_xlim(0,365)
ax.set_ylim(-5,20)
ax.set_axis_off()
# Time Axis
clist = ['gray', 'gray', 'gray', 'gray']
if tm.month in [1, 2, 3]:
clist[0] = 'r'
if tm.month in [4, 5, 6]:
clist[1] = 'r'
if tm.month in [7, 8, 9]:
clist[2] = 'r'
if tm.month in [10, 11, 12]:
clist[3] = 'r'
ax.text(0, 0, 'WINTER', transform=ax.transAxes, color=clist[0],
fontsize=fs, horizontalalignment='left', style='italic')
ax.text(.4, 0, 'SPRING', transform=ax.transAxes, color=clist[1],
fontsize=fs, horizontalalignment='center', style='italic')
ax.text(.68, 0, 'SUMMER', transform=ax.transAxes, color=clist[2],
fontsize=fs, horizontalalignment='center', style='italic')
ax.text(1, 0, 'FALL', transform=ax.transAxes, color=clist[3],
fontsize=fs, horizontalalignment='right', style='italic')
fig.tight_layout()
# FINISH
ds.close()
if len(str(in_dict['fn_out'])) > 0:
plt.savefig(in_dict['fn_out'])
plt.close()
else:
plt.show()
def P_superplot_chl(in_dict):
# Plot phytoplankton maps and section, with forcing time-series.
# Super clean design. Updated to avoid need for tide data, which it
# now just gets from the same mooring extraction it uses for wind.
vn = 'phytoplankton'
vlims = (0, 25) # full map
vlims2 = (0, 25) # PS map
vlims3 = (0, 25) # PS section
cmap = 'Spectral_r'
# get model fields
ds = xr.open_dataset(in_dict['fn'])
gtagex = str(in_dict['fn']).split('/')[-3]
year_str = str(in_dict['fn']).split('/')[-2].split('.')[0][1:]
# get forcing fields
ffn = Ldir['LOo'] / 'extract' / gtagex / 'superplot' / ('forcing_' + gtagex + '_' + year_str + '.p')
fdf = pd.read_pickle(ffn)
fdf['yearday'] = fdf.index.dayofyear - 0.5 # .5 to 364.5
# get section
G, S, T = zrfun.get_basic_info(in_dict['fn'])
# read in a section (or list of sections)
tracks_path = Ldir['data'] / 'section_lines'
tracks = ['Line_ps_main_v0.p']
zdeep = -300
xx = np.array([])
yy = np.array([])
for track in tracks:
track_fn = tracks_path / track
# get the track to interpolate onto
pdict = pickle.load(open(track_fn, 'rb'))
xx = np.concatenate((xx,pdict['lon_poly']))
        yy = np.concatenate((yy,pdict['lat_poly']))
# Copyright (c) 2018, MD2K Center of Excellence
# - Md <NAME> <<EMAIL>; <EMAIL>>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from cerebralcortex.core.datatypes.datastream import DataStream
from cerebralcortex.core.datatypes.datastream import DataPoint
from cerebralcortex.core.datatypes.stream_types import StreamTypes
from core.computefeature import ComputeFeatureBase
import numpy as np
from datetime import timedelta
import datetime
import traceback
import copy
from sklearn.mixture import GaussianMixture
from typing import List, Callable, Any
feature_class_name = 'PhoneScreenTouchFeaturesAllApp'
class PhoneScreenTouchFeaturesAllApp(ComputeFeatureBase):
"""
    Compute all phone touch-screen features that need all days of a user's data and therefore cannot be parallelized.
"""
def get_filtered_data(self, data: List[DataPoint],
admission_control: Callable[[Any], bool] = None) -> List[DataPoint]:
"""
Return the filtered list of DataPoints according to the admission control provided
:param List(DataPoint) data: Input data list
:param Callable[[Any], bool] admission_control: Admission control lambda function, which accepts the sample and
returns a bool based on the data sample validity
:return: Filtered list of DataPoints
:rtype: List(DataPoint)
"""
if admission_control is None:
return data
filtered_data = []
for d in data:
if admission_control(d.sample):
filtered_data.append(d)
elif type(d.sample) is list and len(d.sample) == 1 and admission_control(d.sample[0]):
d.sample = d.sample[0]
filtered_data.append(d)
return filtered_data
def get_data_by_stream_name(self, stream_name: str, user_id: str, day: str,
localtime: bool=True) -> List[DataPoint]:
"""
Combines data from multiple streams data of same stream based on stream name.
:param str stream_name: Name of the stream
:param str user_id: UUID of the stream owner
:param str day: The day (YYYYMMDD) on which to operate
:param bool localtime: The way to structure time, True for operating in participant's local time, False for UTC
:return: Combined stream data if there are multiple stream id
:rtype: List(DataPoint)
"""
stream_ids = self.CC.get_stream_id(user_id, stream_name)
data = []
for stream in stream_ids:
if stream is not None:
ds = self.CC.get_stream(stream['identifier'], user_id=user_id, day=day, localtime=localtime)
if ds is not None:
if ds.data is not None:
data += ds.data
if len(stream_ids)>1:
data = sorted(data, key=lambda x: x.start_time)
return data
def inter_event_time_list(self, data: List[DataPoint]) -> List[float]:
"""
Helper function to compute inter-event times
:param List(DataPoint) data: A list of DataPoints
:return: Time deltas between DataPoints in seconds
:rtype: list(float)
"""
if not data:
return None
last_end = data[0].end_time
ret = []
flag = False
for cd in data:
if flag == False:
flag = True
continue
dif = cd.start_time - last_end
ret.append(max(0, dif.total_seconds()))
last_end = max(last_end, cd.end_time)
return list(filter(lambda x: x != 0.0, ret))
def get_screen_touch_variance_hourly(self, data: List[DataPoint], typing_episodes: List) -> List[DataPoint]:
"""
This method returns hourly variance of time between two consecutive touch in a typing episode. In case of
multiple typing episode, variance is calculated for each typing episode and combined using standard formula
to combine multiple variances.
:param List(DataPoint) data: screen touch stream data points
:param List(Tuple) typing_episodes: (start_time, end_time) for each item in the list, the starting and end time
of a typing episode
:return: A list of variances for each hour (if there is input data for this hour) of a day.
:rtype: List(DataPoint)
"""
if len(data) <= 1:
return None
combined_data = copy.deepcopy(data)
for s in combined_data:
s.end_time = s.start_time
new_data = []
tmp_time = copy.deepcopy(combined_data[0].start_time)
tmp_time = tmp_time.replace(hour=0, minute=0, second=0, microsecond=0)
for h in range(0, 24):
datalist = []
start = tmp_time.replace(hour=h)
end = start + datetime.timedelta(minutes=59)
for d in combined_data:
if start <= d.start_time <= end or start <= d.end_time <= end:
datalist.append(d)
if len(datalist) <= 1:
continue
            # use independent lists; [[]] * n would alias the same list object n times
            splitted_data = [[] for _ in range(len(typing_episodes))]
for i, ep in enumerate(typing_episodes):
for d in datalist:
if ep[0]<= d.start_time and d.end_time <= ep[1]:
splitted_data[i].append(d)
splitted_data = list(filter(lambda x: len(x)>1, splitted_data))
if not splitted_data:
continue
episode_data = list(map(self.inter_event_time_list, splitted_data))
            Xc = np.mean(episode_data)
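            # The rest of this method is truncated in this copy. A minimal sketch of the
            # "combine multiple variances" step described in the docstring (an assumption,
            # not the original code): with per-episode sizes n_j, means m_j, variances v_j
            # and grand mean Xc (for ragged episode lists, taken over the concatenated
            # values), the pooled variance is
            #   var_total = sum_j n_j * (v_j + (m_j - Xc)**2) / sum_j n_j
            # e.g.:
            #   counts = np.array([len(ep) for ep in episode_data])
            #   means = np.array([np.mean(ep) for ep in episode_data])
            #   variances = np.array([np.var(ep) for ep in episode_data])
            #   var_total = np.sum(counts * (variances + (means - Xc)**2)) / np.sum(counts)
            # The result would then be appended to new_data as the hourly DataPoint.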
import gym
import random
import numpy as np
from collections import deque
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout
def create_model(state_size, action_size, training=False, weights_file=None):
model = Sequential()
model.add(Dense(24, input_dim=state_size, activation='relu'))
if training:
model.add(Dropout(0.2))
model.add(Dense(24, activation='relu'))
model.add(Dense(action_size, activation='linear'))
model.compile(loss='mse', optimizer=keras.optimizers.Adam(lr=0.001))
if not training:
model.load_weights(weights_file)
return model
def main():
    # To change the time limit or other default configs that ship with the environment,
    # see: https://github.com/openai/gym/issues/463
env = gym.make('CartPole-v1')
state_size = env.observation_space.shape[0]
action_size = env.action_space.n
# take 32 random samples from game play history
batch_size = 32
game_history = deque(maxlen=3000)
model = create_model(state_size, action_size, training=True, weights_file="dql.h5")
epsilon = 1.0
eps_min = 0.02
eps_decay = 0.994
for i in range(1000):
sc = 0
state = env.reset()
        state = state.reshape(1, -1)  # single-row batch of shape (1, state_size); -1 infers the column count
for j in range(3000):
env.render()
            # Epsilon-greedy exploration: with probability epsilon take a random action
            # to gather experience; otherwise act greedily on the Q-network's prediction.
if np.random.rand() <= epsilon:
action = random.randrange(action_size)
else:
act_values = model.predict(state)
                action = np.argmax(act_values[0])
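            # The remainder of this episode loop is truncated in this copy. A typical
            # DQN continuation (a sketch under assumptions, not the original code):
            #   next_state, reward, done, _ = env.step(action)
            #   game_history.append((state, action, reward, next_state.reshape(1, -1), done))
            #   state = next_state.reshape(1, -1)
            #   if done: break
            # then, once len(game_history) >= batch_size, sample a minibatch, fit the
            # model toward r + gamma * max_a' Q(s', a'), and decay epsilon via
            # epsilon = max(eps_min, epsilon * eps_decay).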
import matplotlib.pyplot as plt
import numpy as np
# Fonts for plot
plt.rc("font", size=14, family="serif", serif="Computer Sans")
plt.rc("text", usetex=True)
ts = np.arange(1, 1001)
kernel1 = np.zeros(len(ts))
kernel1[ts >= 50] += np.exp(-ts[ts >= 50]/576)
kernel1 = kernel1/np.max(kernel1)
kernel2 = np.zeros(len(ts))
kernel2[ts >= 550] += np.exp(-ts[ts >= 550]/576)
import os
import math
import numpy as np
import tensorflow as tf
from concept import Concept
import pdb
np.set_printoptions(precision=5, suppress=True)
class Teacher:
def __init__(self, sess, rl_gamma, boltzman_beta,
belief_var_1d, num_distractors, attributes_size,
message_space_size):
self.sess = sess
self.num_distractors_ = num_distractors
self.attributes_size_ = attributes_size
self.message_space_size_ = message_space_size
self.rl_gamma_ = rl_gamma
self.boltzman_beta_ = boltzman_beta
self.belief_var_1d_ = belief_var_1d
################
# Placeholders #
################
with tf.variable_scope('Teacher'):
self.distractors_ = tf.placeholder(tf.float32, name = 'distractors',
shape = [None, self.num_distractors_, self.attributes_size_])
self.distractors_tensor_ = tf.expand_dims(self.distractors_, 2)
self.message_ = tf.placeholder(tf.float32, shape = [None, self.message_space_size_], name = 'message')
self.teacher_belief_ = tf.placeholder(tf.float32, shape = [None, self.num_distractors_], name = 'teacher_belief')
self.student_belief_ = tf.placeholder(tf.float32, shape = [None, self.num_distractors_], name = 'student_belief')
self.student_belief_spvs_ = tf.placeholder(tf.float32, shape = [None, self.num_distractors_], name = 'student_belief_spvs')
self.q_net_spvs_ = tf.placeholder(tf.float32, shape = [None])
########################
# Belief Update Module #
########################
self.belief_update_opt_ = tf.train.AdamOptimizer(learning_rate = 1e-3)
with tf.variable_scope('Belief_Update'):
self.df1_ = tf.layers.conv2d(self.distractors_tensor_, 3 * self.message_space_size_, kernel_size = [1, 1],
kernel_initializer = tf.random_normal_initializer(mean = 0.0, stddev = 1),
activation = tf.nn.leaky_relu)
self.df2_ = tf.layers.conv2d(self.df1_, 3 * self.message_space_size_, kernel_size = [1, 1],
kernel_initializer = tf.random_normal_initializer(mean = 0.0, stddev = 1e-1),
activation = tf.nn.leaky_relu)
# self.df3_ = tf.layers.conv2d(self.df2_, 1 * self.message_space_size_, kernel_size = [1, 1],
# kernel_initializer = tf.random_normal_initializer(mean = 0.0, stddev = 1e-2),
# activation = None)
self.msg_from_df_1_ = []
for _ in range(self.num_distractors_):
self.msg_from_df_1_.append(tf.layers.conv2d(self.df2_, 2 * self.message_space_size_, kernel_size = [self.num_distractors_, 1],
kernel_initializer = tf.random_normal_initializer(mean = 0.0, stddev = 1e-1),
padding = 'valid', activation = tf.nn.leaky_relu))
self.msg_est_tensor_1_ = tf.concat(self.msg_from_df_1_, axis = 1)
self.msg_from_df_2_ = []
for _ in range(self.num_distractors_):
self.msg_from_df_2_.append(tf.layers.conv2d(self.msg_est_tensor_1_, 1 * self.message_space_size_, kernel_size = [self.num_distractors_, 1],
kernel_initializer = tf.random_normal_initializer(mean = 0.0, stddev = 1e-2),
padding = 'valid', activation = None))
self.msg_est_tensor_2_ = tf.concat(self.msg_from_df_2_, axis = 1)
self.reg_varlist_ = [v for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) if v.name.startswith('Belief')]
#######################
#network belief update#
#######################
self.msg_est_tensor_2d_ = tf.squeeze(self.msg_est_tensor_2_, axis = 2)
self.belief_var_1d_ = tf.exp(tf.Variable(initial_value = self.belief_var_1d_, trainable = True, dtype = tf.float32))
# self.belief_var_ = tf.layers.conv2d(self.msg_est_tensor_3_, 1, kernel_size = [self.num_distractors_, 1],
# kernel_initializer = tf.random_normal_initializer(mean = 0.0, stddev = 1e-3),
# padding = 'valid', activation = None)
# self.belief_var_1d_ = tf.squeeze(self.belief_var_, axis = 2)
self.boltzman_beta_ = tf.Variable(initial_value = self.boltzman_beta_, trainable = False, dtype = tf.float32, name = 'boltzman_beta')
self.msg_indices_ = tf.where(tf.not_equal(self.message_, 0))
self.df_msg_match_ = tf.exp(self.boltzman_beta_ * self.msg_est_tensor_2d_)
self.df_msg_match_norm_ = tf.div_no_nan(self.df_msg_match_, tf.reduce_sum(self.df_msg_match_, axis = 2, keepdims = True))
self.df_msg_2_norm_ = tf.gather_nd(tf.transpose(self.df_msg_match_norm_, perm = [0, 2, 1]),
self.msg_indices_)
#self.df_msg_1_ = tf.multiply(self.dfb_merge_pre_3_, tf.expand_dims(tf.expand_dims(self.message_, 1), 1))
#self.df_msg_2_ = tf.exp(self.boltzman_beta_ * tf.reduce_sum(tf.squeeze(self.df_msg_1_, 2), axis = 2))
#self.df_msg_2_norm_ = tf.nn.relu(self.df_msg_2_ + self.belief_var_1_)
self.belief_pred_1_ = tf.multiply(self.df_msg_2_norm_, self.student_belief_)
self.belief_pred_full_ = tf.concat([self.belief_pred_1_, self.belief_var_1d_ * tf.slice(tf.ones_like(self.belief_pred_1_), [0, 0], [-1, 1])], axis = 1)
#######################
#network belief update#
#######################
'''
######################
#kernel belief update#
######################
self.kernel_columns_ = []
for i in range(self.num_distractors_):
self.df_msg_1_ = tf.multiply(self.msg_est_tensor_2_, tf.expand_dims(tf.expand_dims(self.message_, 1), 1))
self.df_msg_2_ = tf.contrib.layers.fully_connected(tf.layers.flatten(self.df_msg_1_),\
2 * self.num_distractors_, activation_fn = tf.nn.leaky_relu)
self.df_msg_3_ = tf.contrib.layers.fully_connected(self.df_msg_2_,
self.num_distractors_, activation_fn = None)
kernel_column = tf.nn.relu(self.df_msg_3_)
self.kernel_columns_.append(tf.expand_dims(tf.div_no_nan(kernel_column,
tf.reduce_sum(kernel_column, axis = 1, keepdims = True)), -1))
self.kernel_pre_norm_ = tf.no_op()
self.kernel_ = tf.concat(self.kernel_columns_, axis = 2)
print('<Belief Update Kernel Generator Constructed>')
self.belief_pred_ = tf.nn.relu(tf.squeeze(tf.matmul(self.kernel_, tf.expand_dims(self.student_belief_, -1)), -1))
######################
#kernel belief update#
######################
'''
self.belief_pred_full_norm_ = tf.div_no_nan(self.belief_pred_full_, tf.reduce_sum(self.belief_pred_full_, axis = 1, keepdims = True))
self.belief_pred_ = tf.slice(self.belief_pred_full_norm_, [0, 0], [-1, self.num_distractors_])
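            # Net effect: a Bayesian-style update of the student belief,
            #   b_new(d) proportional to b_old(d) * softmax_d(beta * f(m, d)),
            # where f(m, d) is the learned message/distractor compatibility score and an
            # extra "residual" column (weight exp(belief_var_1d)) absorbs leftover
            # probability mass before renormalization over the real distractors.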
self.regularization_ = 1e-4 * tf.add_n([ tf.nn.l2_loss(v) for v in self.reg_varlist_ if 'bias' not in v.name ])
self.cross_entropy_1_ = -1 * tf.reduce_mean(tf.reduce_sum(tf.multiply(self.student_belief_spvs_, tf.math.log(self.belief_pred_)), axis = 1))
self.cross_entropy_2_ = -1 * tf.reduce_mean(tf.reduce_sum(tf.multiply(self.belief_pred_, tf.math.log(self.student_belief_spvs_ + 1e-9)), axis = 1))
self.cross_entropy_ = self.cross_entropy_1_ + self.cross_entropy_2_ + self.regularization_
self.belief_train_varlist_ = [v for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) if v.name.startswith('Belief_Update')]
self.belief_update_varlist_ = [v for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) if v.name.startswith('Belief_Update')]
self.belief_update_train_op_ = self.belief_update_opt_.minimize(self.cross_entropy_, var_list = self.belief_train_varlist_)
self.belief_update_saver_ = tf.train.Saver()
self.belief_update_loader_ = tf.train.Saver(self.belief_update_varlist_)
####################
# Q-network Module #
####################
self.q_net_opt_ = tf.train.AdamOptimizer(learning_rate = 1e-5)
with tf.variable_scope('q_net'):
self.distct_feat_1_ = tf.layers.conv2d(self.distractors_tensor_, 3 * self.message_space_size_, kernel_size = [1, 1],
kernel_initializer = tf.random_normal_initializer(mean = 0.0, stddev = 1e-1),
activation = tf.nn.leaky_relu)
self.distct_feat_2_ = tf.layers.conv2d(self.distct_feat_1_, 2 * self.message_space_size_, kernel_size = [1, 1],
kernel_initializer = tf.random_normal_initializer(mean = 0.0, stddev = 1e-1),
activation = tf.nn.leaky_relu)
self.distct_feat_2_weighted_ = tf.multiply(self.distct_feat_2_, tf.expand_dims(tf.expand_dims(self.belief_pred_, -1), -1))
self.distcts_feat_1_ = []
for _ in range(self.num_distractors_):
self.distcts_feat_1_.append(tf.layers.conv2d(self.distct_feat_2_weighted_, 1 * self.message_space_size_, kernel_size = [self.num_distractors_, 1],
kernel_initializer = tf.random_normal_initializer(mean = 0.0, stddev = 1e-1),
padding = 'valid', activation = tf.nn.leaky_relu))
self.distcts_feat_tensor_1_ = tf.concat(self.distcts_feat_1_, axis = 1)
self.distcts_feat_2_ = []
for _ in range(self.num_distractors_):
self.distcts_feat_2_.append(tf.layers.conv2d(self.distcts_feat_tensor_1_, 1 * self.message_space_size_, kernel_size = [self.num_distractors_, 1],
kernel_initializer = tf.random_normal_initializer(mean = 0.0, stddev = 1e-1),
padding = 'valid', activation = tf.nn.leaky_relu))
self.distcts_feat_tensor_2_ = tf.concat(self.distcts_feat_2_, axis = 1)
            self.custom_activation_ = lambda x: tf.where(tf.math.greater(x, 0), (tf.exp(x) - 1), (-1 * tf.exp(-x) + 1))
self.distcts_feat_3_ = []
for _ in range(self.num_distractors_):
self.distcts_feat_3_.append(tf.layers.conv2d(self.distcts_feat_tensor_2_, 1, kernel_size = [self.num_distractors_, 1],
kernel_initializer = tf.random_normal_initializer(mean = 0.0, stddev = 1e-1),
                                                             padding = 'valid', activation = self.custom_activation_))
self.distcts_feat_tensor_3_ = tf.concat(self.distcts_feat_3_, axis = 1)
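            # Q-value estimate (below): expected per-distractor score under the teacher's
            # belief about the target, plus a fixed penalty (value_param_1_ = -1) on
            # whatever probability mass the predicted student belief places on the
            # residual slot.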
self.value_param_1_ = tf.Variable(initial_value = -1, trainable = False, dtype = tf.float32)
self.value_ = tf.reduce_sum(tf.multiply(tf.squeeze(self.distcts_feat_tensor_3_), self.teacher_belief_), axis = 1) +\
(1 - tf.reduce_sum(self.belief_pred_, axis = 1)) * self.value_param_1_
'''
self.df_b1_ = tf.multiply(tf.squeeze(self.distct_feat_2_, axis = 2), tf.expand_dims(self.teacher_belief_, -1))
self.df_b2_ = tf.multiply(tf.squeeze(self.distct_feat_2_, axis = 2), tf.expand_dims(self.belief_pred_, -1))
self.concat_df_b_ = tf.layers.flatten(tf.concat((self.df_b1_, self.df_b2_), axis = 2))
# self.dfb_merge_pre_ = tf.contrib.layers.fully_connected(tf.reduce_sum(tf.abs(self.df_b1_ - self.df_b2_), axis = 1), 4, activation_fn = tf.nn.leaky_relu,
# weights_initializer = tf.random_normal_initializer(mean = 0.0, stddev = 1e-2))
self.dfb_merge_pre_1_ = tf.contrib.layers.fully_connected(self.concat_df_b_, 6, activation_fn = tf.nn.leaky_relu,
weights_initializer = tf.random_normal_initializer(mean = 0.0, stddev = 1e-2))
self.dfb_merge_pre_2_ = tf.contrib.layers.fully_connected(self.dfb_merge_pre_1_, 4, activation_fn = tf.nn.leaky_relu,
weights_initializer = tf.random_normal_initializer(mean = 0.0, stddev = 1e-2))
self.dfb_merge_ = tf.contrib.layers.fully_connected(self.dfb_merge_pre_2_, 1, activation_fn = None,
weights_initializer = tf.random_normal_initializer(mean = 0.0, stddev = 1e-2))
self.value_ = tf.squeeze(self.dfb_merge_)
'''
# self.dfb_merge_ = tf.reduce_sum(tf.square(self.df_b1_ - self.df_b2_), axis = [1, 2])
# self.value_param_0_ = tf.squeeze(tf.contrib.layers.fully_connected(self.concat_df_b_, 1, activation_fn = None))
# self.value_param_00_ = tf.squeeze(tf.contrib.layers.fully_connected(self.dfb_merge_pre_1_, 1, activation_fn = None))
# self.value_param_000_ = tf.squeeze(tf.contrib.layers.fully_connected(self.dfb_merge_pre_1_, 1, activation_fn = None))
# self.value_param_0000_ = tf.squeeze(tf.contrib.layers.fully_connected(self.dfb_merge_pre_1_, 1, activation_fn = None))
# self.value_param_1_ = tf.Variable(initial_value = -1, trainable = True, dtype = tf.float32)
# self.value_param_2_ = tf.Variable(initial_value = 1, trainable = True, dtype = tf.float32)
# self.value_param_3_ = tf.Variable(initial_value = -1, trainable = True, dtype = tf.float32)
#self.value_param_2_ * tf.exp(self.value_param_1_ * tf.squeeze(self.dfb_merge_)) + self.value_param_3_
#self.value_ = 1 - tf.squeeze(tf.contrib.layers.fully_connected(tf.reduce_sum(self.df_b1_ - self.df_b2_, axis = 2), 1, activation_fn = None))
self.reg_varlist_q_ = [v for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) if v.name.startswith('q_net')]
self.regularization_q_ = 1e-4 * tf.add_n([ tf.nn.l2_loss(v) for v in self.reg_varlist_q_ if 'bias' not in v.name ])
self.q_net_loss_pre_ = tf.square(self.value_ - self.q_net_spvs_)
self.success_mask_ = tf.to_float(tf.math.greater(self.q_net_spvs_, 0.0))
self.fail_mask_ = tf.to_float(tf.math.greater(0.0, self.q_net_spvs_))
self.imbalance_penalty_ = self.success_mask_ + self.fail_mask_ * tf.div_no_nan(tf.reduce_sum(self.success_mask_), tf.reduce_sum(self.fail_mask_))
#self.q_net_loss_ = tf.reduce_mean(self.q_net_loss_pre_ * tf.to_float(self.q_net_loss_pre_ > 0.05) * self.imbalance_penalty_) + self.regularization_q_
self.q_net_loss_ = tf.reduce_mean(self.q_net_loss_pre_ * self.imbalance_penalty_) + self.regularization_q_
self.q_net_varlist_ = [v for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) if v.name.startswith('q_net')]
self.q_net_train_op_ = self.q_net_opt_.minimize(self.q_net_loss_, var_list = self.q_net_varlist_)
self.total_loader_ = tf.train.Saver([v for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) if 'Adam' not in v.name])
self.total_saver_ = tf.train.Saver()
def train_belief_update(self, data_batch):
_, cross_entropy, belief_pred, posterior, likelihood = self.sess.run([self.belief_update_train_op_, self.cross_entropy_, self.belief_pred_, self.belief_pred_1_, self.df_msg_2_norm_],
feed_dict = {self.student_belief_: data_batch['prev_belief'],
self.message_: data_batch['message'],
self.distractors_: data_batch['distractors'],
self.student_belief_spvs_: data_batch['new_belief']})
return cross_entropy, belief_pred, posterior[:10], likelihood[:10]
def pretrain_bayesian_belief_update(self, concept_generator, teacher_pretraining_steps, teacher_pretrain_batch_size,
teacher_pretrain_ckpt_dir, teacher_pretrain_ckpt_name, continue_steps = 0, silent = False):
if not os.path.exists(teacher_pretrain_ckpt_dir):
os.makedirs(teacher_pretrain_ckpt_dir)
ckpt = tf.train.get_checkpoint_state(teacher_pretrain_ckpt_dir)
train_steps = teacher_pretraining_steps
if ckpt:
self.belief_update_loader_.restore(self.sess, ckpt.model_checkpoint_path)
print('Loaded teacher belief update ckpt from %s' % teacher_pretrain_ckpt_dir)
train_steps = continue_steps
else:
print('Cannot loaded teacher belief update ckpt from %s' % teacher_pretrain_ckpt_dir)
accuracies = []
l1_diffs = []
bayesian_wrongs = []
for ts in range(train_steps):
data_batch = concept_generator.generate_batch(teacher_pretrain_batch_size)
cross_entropy, belief_pred, posterior, likelihood = self.train_belief_update(data_batch)
l1_diff = np.sum(abs(belief_pred - data_batch['new_belief']), axis = 1)
correct = (l1_diff <= 5e-2)
bayesian_wrong = np.mean(np.sum((data_batch['new_belief'] == 0) * (belief_pred > 1e-5), axis = 1) > 0)
accuracies.append(np.mean(correct))
l1_diffs.append(np.mean(l1_diff))
bayesian_wrongs.append(bayesian_wrong)
if np.sum(np.isnan(belief_pred)) != 0:
pdb.set_trace()
if ts % 1000 == 0 and not silent:
print('[T%d] batch mean cross entropy: %f, mean accuracies: %f, mean l1: %f, bayesian wrong: %f'\
% (ts + 1, cross_entropy, np.mean(accuracies), np.mean(l1_diffs), np.mean(bayesian_wrongs)))
boltzman_beta, belief_var_1d = self.sess.run([self.boltzman_beta_, self.belief_var_1d_])
print('boltzman_beta: %f, belief_var_1d: %f' % (boltzman_beta, belief_var_1d))
print('new_belief: ')
print(data_batch['new_belief'][:10])
print('prior: ')
print(data_batch['prev_belief'][:10])
print('likelihood: ')
print(likelihood)
print('posterior: ')
print(posterior)
print('predict_belief: ')
print(belief_pred[:10])
if np.mean(accuracies) > 0.9:
#idx = np.random.randint(teacher_pretrain_batch_size)
idx = teacher_pretrain_batch_size
for i in range(idx):
print('\t target:', data_batch['new_belief'][i, :])
print('\t predict', belief_pred[i, :])
accuracies = []
l1_diffs = []
bayesian_wrongs = []
if (ts + 1) % 10000 == 0:
self.belief_update_saver_.save(self.sess, os.path.join(teacher_pretrain_ckpt_dir,
teacher_pretrain_ckpt_name),
global_step = teacher_pretraining_steps)
print('Saved teacher belief update ckpt to %s after %d training'\
% (teacher_pretrain_ckpt_dir, ts))
if train_steps != 0:
self.belief_update_saver_.save(self.sess, os.path.join(teacher_pretrain_ckpt_dir,
teacher_pretrain_ckpt_name),
global_step = teacher_pretraining_steps)
print('Saved teacher belief update ckpt to %s after %d training'\
% (teacher_pretrain_ckpt_dir, train_steps))
def train_q_net(self, data_batch):
_, q_net_loss, value = self.sess.run([self.q_net_train_op_, self.q_net_loss_, self.value_],\
feed_dict = {self.q_net_spvs_: data_batch['target_q'],
self.student_belief_: data_batch['student_belief'],
self.message_: data_batch['message'],
self.distractors_: data_batch['distractors'],
self.teacher_belief_: data_batch['teacher_belief']})
print('Q learning loss: %f' % q_net_loss)
ridx = np.random.randint(value.shape[0])
#print(value[ridx], data_batch['target_q'][ridx])
print('0.8: %f, 0.2: %f' % (np.sum(value * (data_batch['target_q'] == 0.8)) / np.sum(data_batch['target_q'] == 0.8),
np.sum(value * (data_batch['target_q'] == -0.2)) / np.sum(data_batch['target_q'] == -0.2)))
print('Teacher value est:', value[ridx: ridx + 10], data_batch['target_q'][ridx: ridx + 10])
#print(distcts_feat_tensor_3[ridx, :])
return q_net_loss
def get_q_value_for_all_msg(self, teacher_belief, student_belief, embeded_concepts):
all_msg_embeddings = np.identity(self.message_space_size_)
teacher_belief_tile = np.tile(teacher_belief, (self.message_space_size_, 1))
student_belief_tile = np.tile(student_belief, (self.message_space_size_, 1))
embeded_concepts_tile = np.tile(embeded_concepts, (self.message_space_size_, 1, 1))
q_values, belief_pred, distcts_feat_tensor_3, belief_dst, msg_est_tensor = self.sess.run([self.value_, self.belief_pred_, self.distcts_feat_tensor_3_, self.value_, self.msg_est_tensor_2_],
feed_dict = {self.distractors_: embeded_concepts_tile,
self.message_: all_msg_embeddings,
self.teacher_belief_: teacher_belief_tile,
self.student_belief_: student_belief_tile})
return q_values, belief_pred, distcts_feat_tensor_3, belief_dst, msg_est_tensor[0]
def update_net(self, belief_update_tuples, q_learning_tuples, update_term = 'Both'):
debug_structure = {}
belief_update_batch = {}
belief_update_batch['prev_belief'] = []
belief_update_batch['new_belief'] = []
belief_update_batch['message'] = []
belief_update_batch['distractors'] = []
for belief_tuple in belief_update_tuples:
belief_update_batch['distractors'].append(belief_tuple[0])
belief_update_batch['prev_belief'].append(belief_tuple[1])
belief_update_batch['message'].append(belief_tuple[2])
belief_update_batch['new_belief'].append(belief_tuple[3])
for k in belief_update_batch:
            belief_update_batch[k] = np.array(belief_update_batch[k])
#Implementation of the MNIST dataset using CNN
# Architecture :- LeNet
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from util import get_data
def error_rate(a,b):
return np.mean(a!=b)
def y2indicator(y):
N=len(y)
D=len(set(y))
ind = np.zeros((N,D),dtype=np.float32)
for i in range(N):
ind[i,y[i]]=1
return ind
def init_filter(shape, pool_size=(2,2)):
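    # Glorot-style scaling: divide by sqrt(fan_in + fan_out), where fan_out is
    # reduced to account for the subsequent pooling (pool_size).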
W = np.random.randn(*shape)/np.sqrt(np.prod(shape[:-1])+shape[-1]*np.prod(shape[:-2]/np.prod(pool_size)))
return W
#converting the input matrix NxD into a tensor of shape NxWxHxC for tensorflow
def rearrange(X):
N = X.shape[0]
out = np.zeros((N,int(np.sqrt(X.shape[1])),int(np.sqrt(X.shape[1])),1),dtype=np.float32)
for i in range(N):
out[i,:,:,0] = X[i,:].reshape(int(np.sqrt(X.shape[1])),int(np.sqrt(X.shape[1])))
return out
def convpool(X,W,b,padding_type):
conv_out = tf.nn.conv2d(input=X,filter=W,strides=[1,1,1,1],padding=padding_type)
conv_out = tf.nn.bias_add(conv_out, b)
conv_out = tf.nn.relu(conv_out)
out = tf.nn.max_pool(conv_out,ksize=[1,2,2,1],strides=[1,2,2,1],padding="SAME")
out = tf.tanh(out)
return out
#Normalization not done yet
def main():
X,Y = get_data("train")
Xtrain = X[:-1000,]
Ytrain = Y[:-1000,]
Xtest = X[-1000:,]
Ytest = Y[-1000:,]
Xtest = rearrange(Xtest)/255
Ytest_ind = y2indicator(Ytest)
Ytrain_ind = y2indicator(Ytrain)
Xtrain = rearrange(Xtrain)/255
W1_shape=(5,5,1,6)
W1_init = init_filter(W1_shape)
b1_init = np.zeros((W1_shape[-1]),dtype=np.float32)
W2_shape=(5,5,6,16)
W2_init = init_filter(W2_shape)
b2_init = np.zeros((W2_shape[-1]),dtype=np.float32)
M = 500 # Number of hidden layers
K = 10
#Weights for fully connected layer
W3_init = np.random.randn(W2_shape[-1]*5*5,M)/np.sqrt(W2_shape[-1]*5*5 + M)
b3_init = np.zeros(M,dtype=np.float32)
W4_init = np.random.randn(M,K)/np.sqrt(M+K)
    b4_init = np.zeros(K,dtype=np.float32)
"""This module contains functionality for sensitivity analysis in ``UQpy``.
The module currently contains the following classes:
- ``Morris``: Class to compute sensitivity indices based on the Morris method.
"""
from typing import Union, Annotated
from beartype import beartype
from beartype.vale import Is
from UQpy.utilities.Utilities import process_random_state
from UQpy.utilities.ValidationTypes import RandomStateType, PositiveInteger, NumpyFloatArray
from UQpy.distributions import *
from UQpy.run_model.RunModel import RunModel
import numpy as np
from scipy.stats import randint
class MorrisSensitivity:
@beartype
def __init__(
self,
runmodel_object: RunModel,
distributions: Union[JointIndependent, Union[list, tuple]],
n_levels: Annotated[int, Is[lambda x: x >= 3]],
delta: Union[float, int] = None,
random_state: RandomStateType = None,
n_trajectories: PositiveInteger = None,
maximize_dispersion: bool = False,
):
"""
Compute sensitivity indices based on the Morris screening method.
:param runmodel_object: The computational model. It should be of type :class:`.RunModel`. The
         output QoI can be a scalar or a vector of length :code:`ny`; in the latter case the sensitivity indices of
         all :code:`ny` outputs are computed independently.
:param distributions: List of :class:`.Distribution` objects corresponding to each random variable, or
:class:`.JointIndependent` object (multivariate RV with independent marginals).
:param n_levels: Number of levels that define the grid over the hypercube where evaluation points are
sampled. Must be an integer :math:`\ge 3`.
        :param delta: Size of the jump between two consecutive evaluation points; must be a multiple of
         :math:`1/(n\_levels-1)` and lie in :code:`{1/(n_levels-1), ..., 1-1/(n_levels-1)}`.
Default: :math:`delta=\\frac{levels\_number}{2 * (levels\_number-1)}` if `n_levels` is even,
:math:`delta=0.5` if n_levels is odd.
:param random_state: Random seed used to initialize the pseudo-random number generator. Default is :any:`None`.
:param n_trajectories: Number of random trajectories, usually chosen between :math:`5` and :math:`10`.
The number of model evaluations is :code:`n_trajectories * (d+1)`. If None, the `Morris` object is created
but not run (see :py:meth:`run` method)
:param maximize_dispersion: If :any:`True`, generate a large number of design trajectories and keep the ones
that maximize dispersion between all trajectories, allows for a better coverage of the input space.
Default :any:`False`.
"""
# Check RunModel object and distributions
self.runmodel_object = runmodel_object
marginals = (distributions.marginals if isinstance(distributions, JointIndependent) else distributions)
self.icdfs = [getattr(dist, "icdf", None) for dist in marginals]
if any(icdf is None for icdf in self.icdfs):
raise ValueError("At least one of the distributions provided has a None icdf")
self.dimension = len(self.icdfs)
if self.dimension != len(self.runmodel_object.model.var_names):
raise ValueError("The number of distributions provided does not match the number of RunModel variables")
self.n_levels = n_levels
self.delta = delta
self.check_levels_delta()
self.random_state = process_random_state(random_state)
self.maximize_dispersion = maximize_dispersion
self.trajectories_unit_hypercube: NumpyFloatArray = None
"""Trajectories in the unit hypercube, :class:`numpy.ndarray` of shape :code:`(n_trajectories, d+1, d)`"""
self.trajectories_physical_space: NumpyFloatArray = None
"""Trajectories in the physical space, :class:`numpy.ndarray` of shape :code:`(n_trajectories, d+1, d)`"""
self.elementary_effects: NumpyFloatArray = None
"""Elementary effects :math:`EE_{k}`, :class:`numpy.ndarray` of shape :code:`(n_trajectories, d, ny)`."""
self.mustar_indices: NumpyFloatArray = None
"""First Morris sensitivity index :math:`\mu_{k}^{\star}`, :class:`numpy.ndarray` of shape :code:`(d, ny)`"""
self.sigma_indices: NumpyFloatArray = None
"""Second Morris sensitivity index :math:`\sigma_{k}`, :class:`numpy.ndarray` of shape :code:`(d, ny)`"""
if n_trajectories is not None:
self.run(n_trajectories)
def check_levels_delta(self):
# delta should be in {1/(nlevels-1), ..., 1-1/(nlevels-1)}
if (self.delta is None) and (self.n_levels % 2) == 0:
            # delta = n_levels / (2 * (n_levels - 1))
self.delta = self.n_levels / (2 * (self.n_levels - 1))
elif (self.delta is None) and (self.n_levels % 2) == 1:
            self.delta = (1 / 2)  # delta = (n_levels - 1) / (2 * (n_levels - 1))
elif not (isinstance(self.delta, (int, float)) and float(self.delta)
in [float(j / (self.n_levels - 1)) for j in range(1, self.n_levels - 1)]):
raise ValueError("UQpy: delta should be in {1/(nlevels-1), ..., 1-1/(nlevels-1)}")
@beartype
def run(self, n_trajectories: PositiveInteger):
"""
Run the Morris indices evaluation.
The code first sample trajectories in the unit hypercube and transform them to the physical space (see method
:py:meth:`sample_trajectories`), then runs the forward model to compute the elementary effects,
and finally computes the sensitivity indices.
:param n_trajectories: Number of random trajectories. Usually chosen between :math:`5` and :math:`10`.
The number of model evaluations is :code:`n_trajectories * (d+1)`.
"""
# Compute trajectories and elementary effects - append if any already exist
(trajectories_unit_hypercube, trajectories_physical_space,) = \
self.sample_trajectories(n_trajectories=n_trajectories, maximize_dispersion=self.maximize_dispersion,)
elementary_effects = self._compute_elementary_effects(trajectories_physical_space)
self.store_data(elementary_effects, trajectories_physical_space, trajectories_unit_hypercube)
self.mustar_indices, self.sigma_indices = self._compute_indices(self.elementary_effects)
def store_data(
self,
elementary_effects,
trajectories_physical_space,
trajectories_unit_hypercube,
):
if self.elementary_effects is None:
self.elementary_effects = elementary_effects
self.trajectories_unit_hypercube = trajectories_unit_hypercube
self.trajectories_physical_space = trajectories_physical_space
else:
self.elementary_effects = np.concatenate([self.elementary_effects, elementary_effects], axis=0)
self.trajectories_unit_hypercube = np.concatenate([self.trajectories_unit_hypercube,
trajectories_unit_hypercube], axis=0)
self.trajectories_physical_space = np.concatenate([self.trajectories_physical_space,
trajectories_physical_space], axis=0)
@beartype
def sample_trajectories(self, n_trajectories: PositiveInteger, maximize_dispersion: bool = False):
"""
Create the trajectories, first in the unit hypercube then transform them in the physical space.
:param n_trajectories: Number of random trajectories. Usually chosen between :math:`5` and :math:`10`.
The number of model evaluations is :code:`n_trajectories * (d+1)`.
:param maximize_dispersion: If :any:`True`, generate a large number of design trajectories and keep the ones
that maximize dispersion between all trajectories, allows for a better coverage of the input space.
Default :any:`False`.
"""
trajectories_unit_hypercube = []
perms_indices = []
ntrajectories_all = (10 * n_trajectories if maximize_dispersion else 1 * n_trajectories)
for r in range(ntrajectories_all):
if self.random_state is None:
perms = np.random.permutation(self.dimension)
else:
perms = self.random_state.permutation(self.dimension)
initial_state = (1.0 / (self.n_levels - 1) *
randint(low=0, high=int((self.n_levels - 1) * (1 - self.delta) + 1))
.rvs(size=(1, self.dimension), random_state=self.random_state))
trajectory_uh = np.tile(initial_state, [self.dimension + 1, 1])
for count_d, d in enumerate(perms):
trajectory_uh[count_d + 1:, d] = initial_state[0, d] + self.delta
trajectories_unit_hypercube.append(trajectory_uh)
perms_indices.append(perms)
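        # Each trajectory starts at a random grid point and then perturbs one
        # coordinate at a time by delta, following a random permutation of the
        # dimensions. E.g. for d=2, n_levels=4 (delta=2/3), initial point (0, 1/3)
        # and perms=[1, 0], the rows are (0, 1/3) -> (0, 1) -> (2/3, 1).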
trajectories_unit_hypercube = np.array(trajectories_unit_hypercube) # ndarray (r, d+1, d)
# if maximize_dispersion, compute the 'best' trajectories
if maximize_dispersion:
from itertools import combinations
distances = np.zeros((ntrajectories_all, ntrajectories_all))
for r in range(ntrajectories_all):
des_r = np.tile(trajectories_unit_hypercube[r, :, :][np.newaxis, :, :],[self.dimension + 1, 1, 1],)
for r2 in range(r + 1, ntrajectories_all):
des_r2 = np.tile(trajectories_unit_hypercube[r2, :, :][:, np.newaxis, :],
[1, self.dimension + 1, 1],)
distances[r, r2] = np.sum(np.sqrt(np.sum((des_r - des_r2) ** 2, axis=-1)))
# try 20000 combinations of ntrajectories trajectories, keep the one that maximizes the distance
def compute_combi_and_dist():
if self.random_state is None:
combi = np.random.choice(ntrajectories_all, replace=False, size=n_trajectories)
else:
combi = self.random_state.choice(ntrajectories_all, replace=False, size=n_trajectories)
dist_combi = 0.0
for pairs in list(combinations(combi, 2)):
dist_combi += distances[min(pairs), max(pairs)] ** 2
return combi, np.sqrt(dist_combi)
comb_to_keep, dist_comb = compute_combi_and_dist()
for _ in range(1, 20000):
comb, new_dist_comb = compute_combi_and_dist()
if new_dist_comb > dist_comb:
comb_to_keep, dist_comb = comb, new_dist_comb
trajectories_unit_hypercube = np.array([trajectories_unit_hypercube[j] for j in comb_to_keep])
# Avoid 0 and 1 cdf values
trajectories_unit_hypercube[trajectories_unit_hypercube < 0.01] = 0.01
trajectories_unit_hypercube[trajectories_unit_hypercube > 0.99] = 0.99
# Transform to physical space via icdf
trajectories_physical_space = []
for trajectory_uh in trajectories_unit_hypercube:
trajectory_ps = np.zeros_like(trajectory_uh)
for count_d, (design_d, icdf_d) in enumerate(zip(trajectory_uh.T, self.icdfs)):
trajectory_ps[:, count_d] = icdf_d(x=design_d)
trajectories_physical_space.append(trajectory_ps)
trajectories_physical_space = np.array(trajectories_physical_space)
return trajectories_unit_hypercube, trajectories_physical_space
def _compute_elementary_effects(self, trajectories_physical_space):
r, _, d = trajectories_physical_space.shape
# Run the model for all replicates
elementary_effects = []
for samples in trajectories_physical_space:
self.runmodel_object.run(samples=samples, append_samples=False)
qoi = np.array(self.runmodel_object.qoi_list)
el_effect = np.zeros((self.dimension,))
perms = [np.argwhere(bi != 0.0)[0, 0] for bi in (samples[1:] - samples[:-1])]
for count_d, d in enumerate(perms):
el_effect[d] = (qoi[count_d + 1] - qoi[count_d]) / self.delta
elementary_effects.append(el_effect)
        return np.array(elementary_effects)
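    def _compute_indices(self, elementary_effects):
        # The original method is truncated in this copy; a minimal sketch consistent
        # with the mustar_indices / sigma_indices docstrings above (an assumption,
        # not the original code): mu_star is the mean absolute elementary effect
        # across trajectories and sigma its standard deviation.
        mustar_indices = np.mean(np.abs(elementary_effects), axis=0)
        sigma_indices = np.std(elementary_effects, axis=0)
        return mustar_indices, sigma_indices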
from abc import ABC, abstractmethod
import cv2
import numpy as np
import pandas as pd
from skimage.draw import line as raster_line
from .suite import Suite, project_points, compute_pose_error
# delete me
import matplotlib.pyplot as plt
def compute_3d_coordinates(oc, pts, model):
if not len(pts):
return np.empty((0, 3))
colors = oc[pts[:, 1], pts[:, 0]]
if np.any(colors[:, -1] != 255):
raise NotImplementedError("The object coordinate masks have issues")
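    # The RGB channels encode normalized object coordinates; rescale from [0, 255]
    # to the model's bounding box (extent model.size, minimum corner model.min).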
return colors[:, :3] * model.size / 255 + model.min
def draw_lines(lines, img, color):
paths = np.concatenate(
[
np.stack(
raster_line(line[0, 1], line[0, 0], line[1, 1], line[1, 0]), axis=-1
)
for line in lines
]
)
out = img.copy()
out[paths[:, 0], paths[:, 1]] = color
return out
def extract_sift_keypoints(rgb):
gray = cv2.cvtColor(rgb[:, :, :3], cv2.COLOR_RGB2GRAY)
sift = cv2.xfeatures2d.SIFT_create()
detections = sift.detect(gray, None)
# store unique keypoints
keypoints = np.unique(
np.array([kp.pt for kp in detections]).astype(np.uint32), axis=0
)
return keypoints
def extract_line_segments(rgb):
gray = cv2.cvtColor(rgb[:, :, :3], cv2.COLOR_RGB2GRAY)
ld = cv2.line_descriptor.LSDDetector_createLSDDetector()
keylines = ld.detect(gray, 1, 1)
paths = []
idx = []
for i, keyline in enumerate(keylines):
start = np.round(keyline.getStartPoint()).astype(int)
end = np.round(keyline.getEndPoint()).astype(int)
path = np.stack(raster_line(start[1], start[0], end[1], end[0]), axis=-1)
paths.append(path)
idx.append(np.full(len(path), i))
paths = np.concatenate(paths)
idx = np.concatenate(idx)
# ensure max bounds are not overstepped
max_bound = np.array(rgb.shape[:2]) - 1
paths = np.minimum(paths, max_bound)
return paths, idx
def extract_point_correspondences(oid, frame, keypoints, model):
# filter keypoints to object mask and object coordinate data
pts_2d = keypoints[
np.logical_and(
frame["mask"][keypoints[:, 1], keypoints[:, 0]] == oid,
frame["oc"][keypoints[:, 1], keypoints[:, 0], -1] == 255,
)
]
# objects get the corresponding object coordinates
pts_3d = compute_3d_coordinates(frame["oc"], pts_2d, model)
return pts_2d, pts_3d
def extract_line_correspondences(oid, frame, lines, model):
paths, idx = lines
# prune line segments to masks. assume masks are convex
mask = np.logical_and(frame["mask"] == oid, frame["oc"][:, :, -1] == 255)
line_2d = []
for pid in range(idx[-1]):
path = paths[idx == pid]
if not np.any(mask[path[:, 0], path[:, 1]]):
continue
line = np.empty((2, 2), dtype=int)
# clamp at start and at the end
start, end = None, None
for i, (r, c) in enumerate(path):
if mask[r, c]:
line[0] = (c, r)
start = i
break
for i, (r, c) in enumerate(reversed(path)):
if mask[r, c]:
line[1] = (c, r)
end = len(path) - i
break
# Reject very small segments
if end - start < 5:
continue
line_2d.append(line)
line_2d = np.array(line_2d) # array can cope with empty lists
# # debug
# img = draw_lines(line_2d, frame["rgb"], np.array([255, 255, 255], dtype=np.uint8))
# plt.imshow(img); plt.show()
# # objects get the corresponding object coordinates
line_3d = compute_3d_coordinates(
frame["oc"], line_2d.reshape(-1, 2), model
).reshape(-1, 2, 3)
return line_2d, line_3d
class RealSuite(Suite, ABC):
def __init__(self, methods, timed=True):
super().__init__(methods, timed)
self.data = None # dataset placeholder
# Since each dataset has a different number of sequences, frames
# objects per frames and even instance per objects, we need to
# store everything in a flat array and store indexes for each
# instance
self.did = None # datasets
self.sid = None # sequences
self.fid = None # frames
self.oid = None # objects
def init_run(self, data):
self.data = data
self.results = {
"angular": [],
"translation": [],
}
if self.timed:
self.results["time"] = []
# Initialize accumulators
self.did = [] # datasets
self.sid = [] # sequences
self.fid = [] # frames
self.oid = [] # objects
@abstractmethod
def extract_features(self, rgb):
pass
@abstractmethod
def extract_correspondences(self, oid, frame, features, model):
pass
def run(self, data):
self.init_run(data)
# Can we print some progress statistics
n_prog, i_prog = 0, 0
for ds in self.data:
n_prog += len(ds)
print("Progress: 0.00%", end="", flush=True)
# Looping over datasets
for did, ds in enumerate(self.data):
# looping over sequences
for sid, seq in enumerate(ds):
# looping over frames
for frame in seq:
# extract features in each frame
features = self.extract_features(frame["rgb"])
# Iterate through each object in frame
for oid, pose in frame["poses"].items():
# plt.imsave(f'/tmp/images/{seq.name:02d}_{frame["id"]:04d}.m.png', frame["mask"])
# plt.imsave(f'/tmp/images/{seq.name:02d}_{frame["id"]:04d}.o.png', frame["oc"])
mmask = frame["mask"].astype(bool)
moc = frame["oc"][:, :, -1] == 255
iou = np.sum(np.logical_and(mmask, moc)) / np.sum(
np.logical_or(mmask, moc)
)
# there are legit occlusion cases lower than 0.6 iou
if iou < 0.5:
error_msg = "IoU issues between mask and object coordinates"
raise RuntimeError(error_msg)
# extract correspondences
correspondences = self.extract_correspondences(
oid, frame, features, ds.models[str(oid)]
)
# Pre allocate placeholders storing results
nm = len(self.methods)
ang_all = np.full(nm, np.nan)
trans_all = np.full(nm, np.nan)
time_all = np.full(nm, np.nan)
groundtruth = (pose[:, :3], pose[:, -1])
for mid, method in enumerate(self.methods):
# get a pose estimate
(R, t), time_all[mid] = self.estimate_pose(
method, groundtruth, ds.camera.K, **correspondences
)
# Sanitize results
if np.any(np.isnan(R)) or np.any(np.isnan(t)):
continue
# store error results in the object
ang_all[mid], trans_all[mid] = compute_pose_error(
groundtruth, (R, t)
)
# let each method compute the pose compute pose
self.did.append(did)
self.sid.append(sid)
self.fid.append(frame["id"])
self.oid.append(oid)
self.results["angular"].append(ang_all)
self.results["translation"].append(trans_all)
if self.timed:
self.results["time"].append(time_all)
# progress only reported at frame level
i_prog += 1
percent = i_prog * 100 / n_prog
print(f"\rProgress: {percent:>6.2f}%", end="", flush=True)
print("\rProgress: 100.00%", flush=True)
# merge everything together
self.did = np.array(self.did)
self.sid = np.array(self.sid)
self.fid = np.array(self.fid)
self.oid = np.array(self.oid)
self.results["angular"] = np.stack(self.results["angular"])
self.results["translation"] = np.stack(self.results["translation"])
if self.timed:
self.results["time"] = np.stack(self.results["time"])
def _aggregate_results(self):
# build tables for angular error, translation errors, timings and nan counts
angular = []
translation = []
timings = []
nans = []
dids = []
sids = []
# filter out all nans
good_mask = np.logical_not(
np.logical_or.reduce(np.isnan(self.results["angular"]).T)
)
# Looping over datasets
for did, ds in enumerate(self.data):
for sid, seq in enumerate(ds):
dids.append(type(ds).__name__)
# sids.append(str(seq.name))
sids.append(type(ds).seq_names[sid])
mask_with_nans = np.logical_and(self.did == did, self.sid == sid)
mask = np.logical_and(mask_with_nans, good_mask)
angular.append(np.nanmedian(self.results["angular"][mask], axis=0))
translation.append(
np.nanmedian(self.results["translation"][mask], axis=0)
)
nans.append(
np.sum(np.isnan(self.results["angular"][mask_with_nans]), axis=0)
)
if self.timed:
timings.append(np.nanmean(self.results["time"][mask], axis=0))
# last row is over the entire data set
angular.append(np.nanmedian(self.results["angular"][good_mask], axis=0))
translation.append(np.nanmedian(self.results["translation"][good_mask], axis=0))
nans.append(np.sum(np.isnan(self.results["angular"]), axis=0))
if self.timed:
timings.append(np.nanmean(self.results["time"][good_mask], axis=0))
# dids.append("all")
# sids.append("all")
# Aggregate
angular = np.stack(angular)
translation = np.stack(translation)
        timings = np.stack(timings)
from itertools import count
from typing import Any, Dict, List, Tuple
import numpy as np
import pandas as pd
import CosmOrc.gauspar as gauspar
import CosmOrc.orpar as orpar
from CosmOrc.cospar import Jobs
import pysnooper
from yaml import dump, load
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
R = 8.31441
h = 6.626176e-34
kB = 1.380662e-23 # J/K
N0 = 6.022_140_76e23  # 1/mol (Avogadro constant)
PROGRAM_LIST = ('orca', 'gaussian')
class Compound:
_ids = count(1)
# __slots__ = []
def __init__(self,
qm_program: str = 'gaussian',
qm_data: pd.Series = None,
path_to_file: str = None,
linear: bool = False,
atom: bool = False,
name: str = None,
sn: int = 1):
# class instance counter
self.id = next(self._ids)
self.qm_program = qm_program
self.atom = atom
self.sn = sn
self.path_to_file = path_to_file
if name:
self.name = name
else:
self.name = str(self.id)
if path_to_file:
self.file = path_to_file
else:
_msg = 'compound {} is missing a file'.format(self.name)
# logger.error(_msg)
raise TypeError(
"__init__() missing 1 required positional argument: 'path_to_file'"
)
if linear:
self.linear_coefficient = 1.0
else:
self.linear_coefficient = 1.5
try:
if self.qm_program.lower() == 'gaussian':
self.qm_data = gauspar.file_pars(self.file)
elif self.qm_program.lower() == 'orca':
self.qm_data = orpar.file_pars(self.file)
else:
_msg = f'Failed to load "{self.file}"'
# logger.error(_msg)
                print('This QM program is not supported in the current version\n')
# print(PROGRAM_LIST)
raise ValueError
except Exception as err:
_msg = '{} while parsing file {}'.format(repr(err), self.file)
# logger.error(_msg)
raise err
if self.qm_data['natoms'] == 2:
if self.linear_coefficient == 1.5:
print(f'{self.name}:\n')
print(
'Warning! The molecule consists of two atoms, but is marked as non-linear.\n'
)
_msg = f'{self.name} molecule marked as non-linear, but have only 2 atoms'
# logger.warning(_msg)
            # A diatomic molecule has exactly one frequency;
            # make sure a list (not a single value) is returned
self.freqs = np.array([self.qm_data['freq.']])
elif self.qm_data['atom']:
self.freqs = np.array([0])
else:
self.freqs = self.qm_data['freq.']
if not self.atom:
self.vib_temp = np.fromiter(
map(lambda x: 299792458 / (1 / x / 100) * 4.79924466221135e-11,
self.freqs), np.float64)
else:
            # If there are no frequencies, do not try to convert them
self.vib_temp = np.array([0])
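        # Sanity check of the conversion above (wavenumber -> vibrational temperature,
        # Theta_vib = h*c*nu_tilde/kB): for nu_tilde = 1000 cm^-1,
        #   299792458 / (1 / 1000 / 100) * 4.79924466221135e-11 ~= 1438.8 K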
def __repr__(self):
return self.path_to_file
@classmethod
def from_dict(cls, some_dict: dict):
return cls(qm_program=some_dict.get('qm_program', 'gaussian'),
path_to_file=some_dict.get('path_to_file'),
linear=some_dict.get('linear', False),
atom=some_dict.get('atom', False),
name=some_dict.get('name'),
sn=some_dict.get('sn', 1))
@classmethod
def from_series(cls, series: pd.Series, name: str, sn: int = 1):
        print('Please check that the data in the table is correct\n')
try:
return cls(qm_program=series.get('qm_program', 'gaussian'),
linear=series.get('linear', False),
atom=series.get('atom', False),
qm_data=series,
name=name,
sn=sn)
except Exception as err:
# TODO
_msg = f'initialization error in {name}'
# logger.error(_msg)
raise err
def vib_temp_t(self, temperature: np.array) -> np.array:
return np.array(list(map(lambda f: f / temperature, self.vib_temp))).T
def vibrational_enthalpy(self, temperature: np.array,
pressure: np.array) -> np.array:
        _hvs = map(
            lambda x: R * np.sum(self.vib_temp / np.expm1(x) + 0.5 * self.vib_temp),
            self.vib_temp_t(temperature))
df = pd.DataFrame(index=pressure,
columns=temperature,
                          data=[np.fromiter(_hvs, np.float64)] * len(pressure))
return df
def enthalpy(self, temperature: np.array, pressure: np.array) -> np.array:
# WORK
# gaussian
# Hcorr = Etot + kB*T
# kB = 1.380649e-23 J/K (Boltzmann constant)
# Panin`s book
# H(T) = Ht + Hr + Hv + E0 + R*T
        # E0 - total electronic energy
# Ht = Hr = 1.5*R*T for nonlinear molecules
# Hr = R*T for linear molecules
# Hv = N0*h∑(vi/(expUi - 1))
Ht = 1.5 * R * temperature
rt = R * temperature
if self.linear_coefficient == 1:
Hr = rt
else:
Hr = Ht
# Check if compound have only 1 atom
if self.atom:
Hv = 0
Hr = 0
else:
Hv = self.vibrational_enthalpy(temperature=temperature,
pressure=pressure)
Htot = Ht + Hr + rt + self.qm_data.get('scf energy')
df = pd.DataFrame(index=pressure,
columns=temperature,
data=[Htot] * len(pressure))
return df + Hv
def translation_entropy(self, temperature: np.array,
pressure: np.array) -> np.array:
# WORK
# https://mipt.ru/dbmp/utrapload/566/OXF_3-arphlf42s21.pdf
# 3.18 eq
# St = 1.5*R*ln(M) + 2.5*R*ln(T) - R*ln(P) - 9.69
# M - g/mol, T - K, P - atm
Ts = np.array(
list(
map(
lambda p:
(R *
(1.5 * np.log(self.qm_data.get('molecular mass')) + 2.5 *
np.log(temperature) - np.log(p)) - 9.69), pressure)))
df = pd.DataFrame(index=pressure, columns=temperature, data=Ts)
return df
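        # Worked check (N2: M = 28 g/mol, T = 298.15 K, P = 1 atm):
        #   St = 8.31441*(1.5*ln(28) + 2.5*ln(298.15) - ln(1)) - 9.69 ~= 150 J/(mol*K),
        # close to the textbook translational entropy contribution for N2.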
def rotational_entropy(self, temperature: np.array,
pressure: np.array) -> np.array:
# WORK
# Sr = R*(ln(qr) + 1.5) for nonlinear molecules
# Sr = R*(ln(qr) + 1) for linear molecules
# y = (exp(Sr0/R - x)/T0**x)
# qr = y*T**x
srot = self.qm_data.get('Rotational entropy')
# if self.qm_program == 'gaussian':
# srot = self.qm_data.get('Rotational Entropy')
# elif self.qm_program == 'orca':
# srot = self.qm_data[f'{self.sn} s(rot)'] / \
# self.qm_data.get('Temperature', 298.15)
y = np.exp(srot / R - self.linear_coefficient
) / self.qm_data['Temperature']**self.linear_coefficient
qr = y * temperature**self.linear_coefficient
index = pressure
columns = temperature
data = [R * (np.log(qr) + self.linear_coefficient)] * len(pressure)
try:
df = pd.DataFrame(index=index, columns=columns, data=data)
return df
except Exception as e:
error_txt = f'\nError: {e}\n index:{index},\n columns:{columns}\n data:{data}\n'
raise Exception(error_txt)
def vibrational_entropy(self, temperature: np.array,
pressure: np.array) -> np.array:
Ua = self.vib_temp_t(temperature)
svib = map(
lambda x: R * np.sum((x / np.expm1(x)) - np.log(1 - np.exp(-x))),
Ua)
df = pd.DataFrame(index=pressure,
columns=temperature,
                          data=[np.fromiter(svib, np.float64)] * len(pressure))
return df
def total_entropy(self, temperature: np.array,
pressure: np.array) -> np.array:
# Stot = St + Sr + Sv + Se
# Se = R*LnW - const
Se = self.qm_data.get('Electronic Entropy', 0)
if self.atom:
Sv = 0
Sr = 0
else:
Sr = self.rotational_entropy(temperature=temperature,
pressure=pressure)
Sv = self.vibrational_entropy(temperature=temperature,
pressure=pressure)
St = self.translation_entropy(temperature=temperature,
pressure=pressure)
return (St + Sr + Sv + Se)
def gibbs_energy(self, temperature: np.array,
pressure: np.array) -> np.array:
return self.enthalpy(
temperature=temperature,
pressure=pressure) - temperature * self.total_entropy(
temperature=temperature, pressure=pressure)
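# Example usage of Compound (hypothetical file name and options):
#   water = Compound(qm_program='gaussian', path_to_file='h2o_freq.log', name='water')
#   T = np.array([298.15, 398.15]); P = np.array([1.0])
#   G = water.gibbs_energy(temperature=T, pressure=P)  # DataFrame indexed by P, columns T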
class Reaction:
def __init__(self,
reaction: str,
compounds: List[Compound],
condition: Dict[str, np.array] = None,
name: str = None):
if name:
self.name = name
else:
self.name = reaction
if condition:
self.condition = condition
else:
self.condition = {
'temperature': np.array([298.15]),
'pressure': np.array([1])
}
self.reaction = reaction
self.compounds = compounds
self.reaction_dict = self.reaction_pars(reaction=self.reaction,
compounds=self.compounds)
def reaction_pars(self, reaction: str, compounds: List[Compound]
) -> List[Dict[str, Tuple[float, Compound]]]:
# element[0] - reagents, elements[1] - products
# reaction look like '2*A + 3*C = D'
reaction_dict: List[Dict[str, Tuple[float, Compound]]] = []
_compounds_dict: Dict[str, Compound] = {x.name: x for x in compounds}
for element in reaction.split('='):
half_reaction: Dict[str, Tuple[float, Compound]] = {}
for compound in element.split('+'):
# compound look like '2*S'
if '*' in compound:
coefficient = float(compound.split('*')[0].strip())
compound_name = compound.split('*')[1].strip()
half_reaction[compound_name] = (
coefficient, _compounds_dict[compound_name])
else:
compound_name = compound.strip()
half_reaction[compound.strip()] = (
1, _compounds_dict[compound_name])
            # reaction_dict[0] is a dict {compound name: (coefficient, Compound)} for the reagents,
            # reaction_dict[1] is the analogous dict for the reaction products
reaction_dict.append(half_reaction)
return reaction_dict
    # TODO: this function could be replaced entirely with a lambda
def _g_half_reaction(self,
half_reaction: Dict[str, Tuple[float, Compound]],
condition: Dict[str, np.array]) -> pd.DataFrame:
# g = 0
# for compound in half_reaction.keys():
# g += half_reaction[compound][0]*half_reaction[compound][1].gibbs_energy(temperature=condition.get(
# 'temperature', np.array([298.15])), pressure=condition.get('pressure', np.array([1])))
# return g
return sum(
map(
lambda compound: half_reaction[compound][0] * half_reaction[
compound][1].gibbs_energy(temperature=condition.get(
'temperature', np.array([298.15])),
pressure=condition.get(
'pressure', np.array([1]))),
half_reaction.keys()))
def g_reaction(self) -> pd.DataFrame:
reaction_dict = self.reaction_pars(reaction=self.reaction,
compounds=self.compounds)
g_prod = self._g_half_reaction(half_reaction=reaction_dict[1],
condition=self.condition)
g_reag = self._g_half_reaction(half_reaction=reaction_dict[0],
condition=self.condition)
return g_prod - g_reag
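# Example usage of Reaction (hypothetical Compound objects a, b, c named 'A', 'B', 'C'):
#   rxn = Reaction('2*A + B = C', [a, b, c],
#                  condition={'temperature': np.array([298.15, 398.15]),
#                             'pressure': np.array([1])})
#   dG = rxn.g_reaction()  # DataFrame: G(products) - G(reagents) for each T and P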
class Reaction_COSMO(Reaction):
def __init__(self,
reaction: str,
compounds: List[Compound],
cosmo: str,
name: str = None,
ideal: list = None):
self.settings = Jobs(cosmo).settings_df()
self.cdata = Jobs(cosmo).small_df(invert=1,
columns=('Gsolv', 'ln(gamma)', 'Nr'))
p = np.array([1])
t = self.settings.loc['T='].to_numpy()
self.condition = {'temperature': t, 'pressure': p}
super().__init__(reaction=reaction,
compounds=compounds,
name=name,
condition=self.condition)
self.gas_reaction = self.g_reaction().T
if ideal:
self.ideal = ideal
else:
self.ideal = []
def _rtln_half_reaction(self,
half_reaction: Dict[str, Tuple[float, Compound]]):
_ = []
for compound in half_reaction.keys():
# Reaction coefficient
comp_coef = half_reaction[compound][0]
# Compound number in tab file
comp_nr = self.cdata.loc[compound]['Nr'].iloc[0]
# Concentration, = 1 if compound not in setting table
if compound in self.ideal:
comp_x = 1
else:
comp_x = self.settings.loc[str(comp_nr)] if str(
comp_nr) in self.settings.index.values.tolist() else 1
if not isinstance(comp_x, (int, float)):
comp_x.replace(0, 1, inplace=True)
else:
pass
lnx = | np.log(comp_x) | numpy.log |
#!/usr/bin/env python
#
# Copyright (c) 2017 10X Genomics, Inc. All rights reserved.
#
import cellranger.analysis.io as analysis_io
import cellranger.analysis.constants as analysis_constants
import cellranger.h5_constants as h5_constants
import cellranger.io as cr_io
import cellranger.analysis.stats as analysis_stats
import collections
from irlb import irlb
import numpy as np
import os
import tables
# The RUNPCA stage attempts to run the PCA at this threshold, and if that
# fails it reruns at zero. In the event thresholding prevents us from
# returning the requested number of components and we are at this threshold
# value, we throw an exception.
DEFAULT_RUNPCA_THRESHOLD = 2
from sklearn.utils import sparsefuncs
class MatrixRankTooSmallException(Exception):
pass
PCA = collections.namedtuple('PCA', ['transformed_pca_matrix', 'components', 'variance_explained', 'dispersion', 'features_selected'])
def get_original_columns_used(cols_not_removed, cols_used_after_removal):
"""If a matrix is subset down to only have columns indexed by cols_not_removed, and then is further subset to
only contain cols_used_after removal, in that order, than this method returns the index of which columns in the old
matrix correspond the the columns in the new matrix."""
return [cols_not_removed[x] for x in cols_used_after_removal]
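# Example: if columns [2, 5, 7] survived the first subsetting and columns [0, 2] of the
# reduced matrix were kept afterwards, the original column indices are [2, 7]:
#   get_original_columns_used([2, 5, 7], [0, 2])  # -> [2, 7]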
def run_pca(matrix, pca_features=None, pca_bcs=None, n_pca_components=None, random_state=None, min_count_threshold=0):
""" Run a PCA on the matrix using the IRLBA matrix factorization algorithm. Prior to the PCA analysis, the
matrix is modified so that all barcodes/columns have the same counts, and then the counts are transformed
by a log2(1+X) operation.
If desired, only a subset of features (e.g. sample rows) can be selected for PCA analysis. Each feature is ranked
by its dispersion relative to other features that have a similar mean count. The top `pca_features` as ranked by
this method will then be used for the PCA.
One can also select to subset number of barcodes to use (e.g. sample columns), but in this case they are simply
randomly sampled.
Args:
matrix (CountMatrix): The matrix to perform PCA on.
pca_features (int): Number of features to subset from matrix and use in PCA. The top pca_features ranked by
dispersion are used
pca_bcs (int): Number of barcodes to randomly sample for the matrix.
n_pca_components (int): How many PCA components should be used.
random_state (int): The seed for the RNG
min_count_threshold (int): The minimum sum of each row/column for that row/column to be passed to PCA
(this filter is prior to any subsetting that occurs).
Returns:
A PCA object
"""
if random_state is None:
random_state=analysis_constants.RANDOM_STATE
    np.random.seed(random_state)
# Threshold the rows/columns of matrix, will throw error if an empty matrix results.
thresholded_matrix, _, thresholded_features = matrix.select_axes_above_threshold(min_count_threshold)
# If requested, we can subsample some of the barcodes to get a smaller matrix for PCA
pca_bc_indices = np.arange(thresholded_matrix.bcs_dim)
if pca_bcs is None:
pca_bcs = thresholded_matrix.bcs_dim
pca_bc_indices = np.arange(thresholded_matrix.bcs_dim)
elif pca_bcs < thresholded_matrix.bcs_dim:
pca_bc_indices = np.sort(np.random.choice(np.arange(thresholded_matrix.bcs_dim), size=pca_bcs, replace=False))
elif pca_bcs > thresholded_matrix.bcs_dim:
msg = ("You requested {} barcodes but the matrix after thresholding only "
"included {}, so the smaller amount is being used.").format(pca_bcs, thresholded_matrix.bcs_dim)
print(msg)
pca_bcs = thresholded_matrix.bcs_dim
pca_bc_indices = | np.arange(thresholded_matrix.bcs_dim) | numpy.arange |
# Simulation
import numpy as np
import numba
from numba import jit
# standard HMC
def hmc_mh_resample_uni(u_func, du_func, epsilon, nt, m, M, theta_init):
"""
This is a function to realize Hamiltonian Monte Carlo with Metropolis-Hastings
correction in unidimensional cases with resampling procedure.
"""
theta = [theta_init]
r = []
for t in range(nt):
r.append(np.random.normal(0, np.sqrt(M)))
theta0, r0 = theta[-1], r[-1]
r0 = r0 - epsilon/2*du_func(theta0)
for i in range(m):
theta0 = theta0 + epsilon*1/M*r0
r0 = r0 - epsilon*du_func(theta0)
r0 = r0 - epsilon/2*du_func(theta0)
# Metropolis-Hastings correction
u = np.random.uniform()
H1 = u_func(theta0) + 1/2*r0**2*1/M
H2 = u_func(theta[-1]) + 1/2*r[-1]**2*1/M
p = np.exp(H2 - H1)
if u < min(1,p):
theta.append(theta0)
return [theta[:-1], r]
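# Example (standard normal target, so U(x) = x**2/2 and dU(x) = x):
#   theta, r = hmc_mh_resample_uni(lambda x: x**2 / 2, lambda x: x,
#                                  epsilon=0.1, nt=5000, m=20, M=1.0, theta_init=0.0)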
def hmc_nomh_resample_uni(du_func, epsilon, nt, m, M, theta_init):
"""
This is a function to realize Hamiltonian Monte Carlo without Metropolis-Hastings
correction in unidimensional cases with resampling procedure.
"""
theta = [theta_init]
r = []
for t in range(nt):
r.append(np.random.normal(0, np.sqrt(M)))
theta0, r0 = theta[-1], r[-1]
r0 = r0 - epsilon/2*du_func(theta0)
for i in range(m):
theta0 = theta0 + epsilon*1/M*r0
r0 = r0 - epsilon*du_func(theta0)
r0 = r0 - epsilon/2*du_func(theta0)
# No Metropolis-Hastings correction
theta.append(theta0)
return [theta[:-1], r]
def hmc_mh_resample_multi(u_func, du_func, epsilon, nt, m, M, theta_init):
"""
This is a function to realize Hamiltonian Monte Carlo with Metropolis-Hastings
correction in multidimensional cases with resampling procedure.
"""
theta = [theta_init]
r = []
for t in range(nt):
r.append(np.random.multivariate_normal(np.zeros(M.shape[0]), M))
theta0, r0 = theta[-1], r[-1]
r0 = r0 - epsilon/2*du_func(theta0)
for i in range(m):
theta0 = theta0 + epsilon*np.linalg.inv(M)@r0
r0 = r0 - epsilon*du_func(theta0)
r0 = r0 - epsilon/2*du_func(theta0)
# Metropolis-Hastings correction
u = np.random.uniform()
H1 = u_func(theta0) + 1/2*[email protected](M)@r0
H2 = u_func(theta[-1]) + 1/2*r[-1][email protected](M)@r[-1]
p = np.exp(H2 - H1)
if u < min(1,p):
theta.append(theta0)
return [theta[:-1], r]
def hmc_nomh_resample_multi(du_func, epsilon, nt, m, M, theta_init):
"""
This is a function to realize Hamiltonian Monte Carlo without Metropolis-Hastings
correction in unidimensional cases with resampling procedure.
"""
theta = [theta_init]
r = []
for t in range(nt):
r.append(np.random.multivariate_normal(np.zeros(M.shape[0]), M))
theta0, r0 = theta[-1], r[-1]
r0 = r0 - epsilon/2*du_func(theta0)
for i in range(m):
theta0 = theta0 + epsilon*np.linalg.inv(M)@r0
r0 = r0 - epsilon*du_func(theta0)
r0 = r0 - epsilon/2*du_func(theta0)
# No Metropolis-Hastings correction
theta.append(theta0)
return [theta[:-1], r]
def hmc_mh_noresample_uni(u_func, du_func, epsilon, nt, m, M, theta_init, r_init):
"""
This is a function to realize Hamiltonian Monte Carlo with Metropolis-Hastings
correction in unidimensional cases without resampling procedure.
"""
theta = [theta_init]
r = [r_init]
for t in range(nt-1):
theta0, r0 = theta[-1], r[-1]
r0 = r0 - epsilon/2*du_func(theta0)
for i in range(m):
theta0 = theta0 + epsilon*1/M*r0
r0 = r0 - epsilon*du_func(theta0)
r0 = r0 - epsilon/2*du_func(theta0)
# Metropolis-Hastings correction
u = np.random.uniform()
H1 = u_func(theta0) + 1/2*r0**2*1/M
H2 = u_func(theta[-1]) + 1/2*r[-1]**2*1/M
p = np.exp(H2 - H1)
if u < min(1,p):
theta.append(theta0)
r.append(r0)
return [theta, r]
def hmc_nomh_noresample_uni(du_func, epsilon, nt, m, M, theta_init, r_init):
"""
This is a function to realize Hamiltonian Monte Carlo without Metropolis-Hastings
correction in unidimensional cases without resampling procedure.
"""
theta = [theta_init]
r = [r_init]
for t in range(nt-1):
theta0, r0 = theta[-1], r[-1]
r0 = r0 - epsilon/2*du_func(theta0)
for i in range(m):
theta0 = theta0 + epsilon*1/M*r0
r0 = r0 - epsilon*du_func(theta0)
r0 = r0 - epsilon/2*du_func(theta0)
# No Metropolis-Hastings correction
theta.append(theta0)
r.append(r0)
return [theta, r]
def hmc_mh_noresample_multi(u_func, du_func, epsilon, nt, m, M, theta_init, r_init):
"""
This is a function to realize Hamiltonian Monte Carlo with Metropolis-Hastings
correction in multidimensional cases without resampling procedure.
"""
theta = [theta_init]
r = [r_init]
for t in range(nt-1):
theta0, r0 = theta[-1], r[-1]
r0 = r0 - epsilon/2*du_func(theta0)
for i in range(m):
theta0 = theta0 + epsilon*np.linalg.inv(M)@r0
r0 = r0 - epsilon*du_func(theta0)
r0 = r0 - epsilon/2*du_func(theta0)
# Metropolis-Hastings correction
u = np.random.uniform()
H1 = u_func(theta0) + 1/2*[email protected](M)@r0
H2 = u_func(theta[-1]) + 1/2*r[-1][email protected](M)@r[-1]
p = np.exp(H2 - H1)
if u < min(1,p):
theta.append(theta0)
r.append(r0)
return [theta, r]
def hmc_nomh_noresample_multi(du_func, epsilon, nt, m, M, theta_init, r_init):
"""
This is a function to realize Hamiltonian Monte Carlo without Metropolis-Hastings
correction in multidimensional cases without resampling procedure.
"""
theta = [theta_init]
r = [r_init]
for t in range(nt-1):
theta0, r0 = theta[-1], r[-1]
r0 = r0 - epsilon/2*du_func(theta0)
for i in range(m):
theta0 = theta0 + epsilon*np.linalg.inv(M)@r0
r0 = r0 - epsilon*du_func(theta0)
r0 = r0 - epsilon/2*du_func(theta0)
# No Metropolis-Hastings correction
theta.append(theta0)
r.append(r0)
return [theta, r]
def hmc_summarize(u_func, du_func, epsilon, nt, m, M, theta_init, r_init, MH = True, resample = True):
"""
This is a function to realize Hamiltonian Monte Carlo under different conditions.
If theta_init is unidimensional, it needs to be a numeric number.
If theta_init is multidimensional, it needs to be an array.
formula: a function of iteration index t.
"""
if isinstance(theta_init, np.ndarray):
# multidimensional cases
if resample:
# resampling
if MH:
# Metropolis-Hastings correction
return hmc_mh_resample_multi(u_func, du_func, epsilon, nt, m, M, theta_init)
else:
# No Metropolis-Hastings correction
return hmc_nomh_resample_multi(du_func, epsilon, nt, m, M, theta_init)
else:
# no resampling
if MH:
# Metropolis-Hastings correction
return hmc_mh_noresample_multi(u_func, du_func, epsilon, nt, m, M, theta_init, r_init)
else:
# No Metropolis-Hastings correction
return hmc_nomh_noresample_multi(du_func, epsilon, nt, m, M, theta_init, r_init)
else:
# unidimensional cases
if resample:
# resampling
if MH:
# Metropolis-Hastings correction
return hmc_mh_resample_uni(u_func, du_func, epsilon, nt, m, M, theta_init)
else:
# No Metropolis-Hastings correction
return hmc_nomh_resample_uni(du_func, epsilon, nt, m, M, theta_init)
else:
# no resampling
if MH:
# Metropolis-Hastings correction
return hmc_mh_noresample_uni(u_func, du_func, epsilon, nt, m, M, theta_init, r_init)
else:
# No Metropolis-Hastings correction
return hmc_nomh_noresample_uni(du_func, epsilon, nt, m, M, theta_init, r_init)
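# Example dispatch (hypothetical 2D standard Gaussian target): an ndarray theta_init
# selects the multidimensional branch, a scalar theta_init the unidimensional one.
#   u = lambda th: 0.5 * th @ th
#   du = lambda th: th
#   theta, r = hmc_summarize(u, du, epsilon=0.1, nt=2000, m=20, M=np.eye(2),
#                            theta_init=np.zeros(2), r_init=None, MH=True, resample=True)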
# Naive SGHMC
def sghmc_naive_mh_resample_uni(u_hat_func, du_hat_func, epsilon, nt, m, M, V, theta_init, formula):
"""
This is a function to realize Naive Stochastic Gradient Hamiltonian Monte Carlo
with Metropolis-Hastings correction in unidimensional cases with resampling
procedure.
"""
B = 1/2*epsilon*V
theta = [theta_init]
r = []
for t in range(nt):
epsilon0 = max(epsilon, formula(t))
r.append(np.random.normal(0, np.sqrt(M)))
theta0, r0 = theta[-1], r[-1]
for i in range(m):
theta0 = theta0 + epsilon0*1/M*r0
r0 = r0 - epsilon0*du_hat_func(theta0) + np.random.normal(0, np.sqrt(2*B*epsilon0))
# Metropolis-Hastings correction
u = np.random.uniform()
H1 = u_hat_func(theta0) + 1/2*r0**2*1/M
H2 = u_hat_func(theta[-1]) + 1/2*r[-1]**2*1/M
p = np.exp(H2 - H1)
if u < min(1,p):
theta.append(theta0)
return [theta[:-1], r]
def sghmc_naive_nomh_resample_uni(du_hat_func, epsilon, nt, m, M, V, theta_init, formula):
"""
This is a function to realize Naive Stochastic Gradient Hamiltonian Monte Carlo
without Metropolis-Hastings correction in unidimensional cases with resampling
procedure.
"""
B = 1/2*epsilon*V
theta = [theta_init]
r = []
for t in range(nt):
epsilon0 = max(epsilon, formula(t))
r.append(np.random.normal(0, np.sqrt(M)))
theta0, r0 = theta[-1], r[-1]
for i in range(m):
theta0 = theta0 + epsilon0*1/M*r0
r0 = r0 - epsilon0*du_hat_func(theta0) + np.random.normal(0, np.sqrt(2*B*epsilon0))
# No Metropolis-Hastings correction
theta.append(theta0)
return [theta[:-1], r]
def sghmc_naive_mh_resample_multi(u_hat_func, du_hat_func, epsilon, nt, m, M, V, theta_init, formula):
"""
This is a function to realize Naive Stochastic Gradient Hamiltonian Monte Carlo
with Metropolis-Hastings correction in multidimensional cases with resampling
procedure.
"""
B = 1/2*epsilon*V
theta = [theta_init]
r = []
for t in range(nt):
epsilon0 = max(epsilon, formula(t))
r.append(np.random.multivariate_normal(np.zeros(M.shape[0]), M))
theta0, r0 = theta[-1], r[-1]
for i in range(m):
theta0 = theta0 + epsilon0*np.linalg.inv(M)@r0
r0 = r0 - epsilon0*du_hat_func(theta0) + np.random.multivariate_normal(np.zeros(M.shape[0]), 2*epsilon0*B)
# Metropolis-Hastings correction
u = np.random.uniform()
H1 = u_hat_func(theta0) + 1/2*[email protected](M)@r0
H2 = u_hat_func(theta[-1]) + 1/2*r[-1][email protected](M)@r[-1]
p = np.exp(H2 - H1)
if u < min(1,p):
theta.append(theta0)
return [theta[:-1], r]
def sghmc_naive_nomh_resample_multi(du_hat_func, epsilon, nt, m, M, V, theta_init, formula):
"""
This is a function to realize Naive Stochastic Gradient Hamiltonian Monte Carlo
without Metropolis-Hastings correction in multidimensional cases with resampling
procedure.
"""
B = 1/2*epsilon*V
theta = [theta_init]
r = []
for t in range(nt):
epsilon0 = max(epsilon, formula(t))
r.append(np.random.multivariate_normal(np.zeros(M.shape[0]), M))
theta0, r0 = theta[-1], r[-1]
for i in range(m):
theta0 = theta0 + epsilon0*np.linalg.inv(M)@r0
r0 = r0 - epsilon0*du_hat_func(theta0) + np.random.multivariate_normal(np.zeros(M.shape[0]), 2*epsilon0*B)
# No Metropolis-Hastings correction
theta.append(theta0)
return [theta[:-1], r]
def sghmc_naive_mh_noresample_uni(u_hat_func, du_hat_func, epsilon, nt, m, M, V, theta_init, r_init, formula):
"""
This is a function to realize Naive Stochastic Gradient Hamiltonian Monte Carlo
with Metropolis-Hastings correction in unidimensional cases without resampling
procedure.
"""
B = 1/2*epsilon*V
theta = [theta_init]
r = [r_init]
for t in range(nt-1):
epsilon0 = max(epsilon, formula(t))
theta0, r0 = theta[-1], r[-1]
for i in range(m):
theta0 = theta0 + epsilon0*1/M*r0
r0 = r0 - epsilon0*du_hat_func(theta0) + np.random.normal(0, np.sqrt(2*B*epsilon0))
# Metropolis-Hastings correction
u = np.random.uniform()
H1 = u_hat_func(theta0) + 1/2*r0**2*1/M
H2 = u_hat_func(theta[-1]) + 1/2*r[-1]**2*1/M
p = np.exp(H2 - H1)
if u < min(1,p):
theta.append(theta0)
r.append(r0)
return [theta, r]
def sghmc_naive_nomh_noresample_uni(du_hat_func, epsilon, nt, m, M, V, theta_init, r_init, formula):
"""
This is a function to realize Naive Stochastic Gradient Hamiltonian Monte Carlo
without Metropolis-Hastings correction in unidimensional cases without resampling
procedure.
"""
B = 1/2*epsilon*V
theta = [theta_init]
r = [r_init]
for t in range(nt-1):
epsilon0 = max(epsilon, formula(t))
theta0, r0 = theta[-1], r[-1]
for i in range(m):
theta0 = theta0 + epsilon0*1/M*r0
r0 = r0 - epsilon0*du_hat_func(theta0) + np.random.normal(0, np.sqrt(2*B*epsilon0))
# No Metropolis-Hastings correction
theta.append(theta0)
r.append(r0)
return [theta, r]
def sghmc_naive_mh_noresample_multi(u_hat_func, du_hat_func, epsilon, nt, m, M, V, theta_init, r_init, formula):
"""
This is a function to realize Naive Stochastic Gradient Hamiltonian Monte Carlo
with Metropolis-Hastings correction in multidimensional cases without resampling
procedure.
"""
B = 1/2*epsilon*V
theta = [theta_init]
r = [r_init]
for t in range(nt-1):
epsilon0 = max(epsilon, formula(t))
theta0, r0 = theta[-1], r[-1]
for i in range(m):
theta0 = theta0 + epsilon0*np.linalg.inv(M)@r0
r0 = r0 - epsilon0*du_hat_func(theta0) + np.random.multivariate_normal(np.zeros(M.shape[0]), 2*epsilon0*B)
# Metropolis-Hastings correction
u = np.random.uniform()
H1 = u_hat_func(theta0) + 1/2*[email protected](M)@r0
H2 = u_hat_func(theta[-1]) + 1/2*r[-1][email protected](M)@r[-1]
p = np.exp(H2 - H1)
if u < min(1,p):
theta.append(theta0)
r.append(r0)
return [theta, r]
def sghmc_naive_nomh_noresample_multi(du_hat_func, epsilon, nt, m, M, V, theta_init, r_init, formula):
"""
This is a function to realize Naive Stochastic Gradient Hamiltonian Monte Carlo
without Metropolis-Hastings correction in multidimensional cases without resampling
procedure.
"""
B = 1/2*epsilon*V
theta = [theta_init]
r = [r_init]
for t in range(nt-1):
epsilon0 = max(epsilon, formula(t))
theta0, r0 = theta[-1], r[-1]
for i in range(m):
            theta0 = theta0 + epsilon0*np.linalg.inv(M)@r0
r0 = r0 - epsilon0*du_hat_func(theta0) + np.random.multivariate_normal(np.zeros(M.shape[0]), 2*epsilon0*B)
# No Metropolis-Hastings correction
theta.append(theta0)
r.append(r0)
return [theta, r]
def sghmc_naive_summarize(u_hat_func, du_hat_func, epsilon, nt, m, M, V, theta_init, r_init, formula, MH = True, resample = True):
"""
This is a function to realize Naive Stochastic Gradient Hamiltonian Monte Carlo
under different conditions.
If theta_init is unidimensional, it needs to be a numeric number.
If theta_init is multidimensional, it needs to be an array.
formula: a function of iteration index t.
"""
if isinstance(theta_init, np.ndarray):
# multidimensional cases
if resample:
# resampling
if MH:
# Metropolis-Hastings correction
return sghmc_naive_mh_resample_multi(u_hat_func, du_hat_func, epsilon, nt, m, M, V, theta_init, formula)
else:
# No Metropolis-Hastings correction
return sghmc_naive_nomh_resample_multi(du_hat_func, epsilon, nt, m, M, V, theta_init, formula)
else:
# no resampling
if MH:
# Metropolis-Hastings correction
return sghmc_naive_mh_noresample_multi(u_hat_func, du_hat_func, epsilon, nt, m, M, V, theta_init, r_init, formula)
else:
# No Metropolis-Hastings correction
return sghmc_naive_nomh_noresample_multi(du_hat_func, epsilon, nt, m, M, V, theta_init, r_init, formula)
else:
# unidimensional cases
if resample:
# resampling
if MH:
# Metropolis-Hastings correction
return sghmc_naive_mh_resample_uni(u_hat_func, du_hat_func, epsilon, nt, m, M, V, theta_init, formula)
else:
# No Metropolis-Hastings correction
return sghmc_naive_nomh_resample_uni(du_hat_func, epsilon, nt, m, M, V, theta_init, formula)
else:
# no resampling
if MH:
# Metropolis-Hastings correction
return sghmc_naive_mh_noresample_uni(u_hat_func, du_hat_func, epsilon, nt, m, M, V, theta_init, r_init, formula)
else:
# No Metropolis-Hastings correction
return sghmc_naive_nomh_noresample_uni(du_hat_func, epsilon, nt, m, M, V, theta_init, r_init, formula)
# SGHMC
def sghmc_resample_uni(du_hat_func, epsilon, nt, m, M, C, B_hat, theta_init, formula):
"""
This is a function to realize Stochastic Gradient Hamiltonian Monte Carlo in
unidimensional cases with resampling procedure.
"""
du_hat_func = numba.njit(du_hat_func)
formula = numba.njit(formula)
@numba.njit
def jit_du(x):
return du_hat_func(x)
@numba.njit
def jit_formula(x):
return formula(x)
theta = | np.zeros(nt) | numpy.zeros |
import roslib
import sys
import rospy
import cv2
import math
import imutils
import statistics
import numpy as np
from std_msgs.msg import String
from sensor_msgs.msg import Image
from std_msgs.msg import Float64MultiArray, Float64
from cv_bridge import CvBridge, CvBridgeError
from scipy.spatial import distance as dist
class image_converter:
# Defines publisher and subscriber
def __init__(self):
# initialize the node named image_processing
rospy.init_node('image_processing', anonymous=True)
# initialize a publisher to send images from camera1 to a topic named image_topic1
self.image_pub1 = rospy.Publisher("image_topic1", Image, queue_size=1)
self.image_pub2 = rospy.Publisher("image_topic2", Image, queue_size=1)
        #Initialize a publisher to send joint angular positions to a topic called joints_pos
self.joints_pub=rospy.Publisher("joints_pos",Float64MultiArray,queue_size=10)
#initialize a publisher for the robot end effector
self.vision_end_effector_pub=rospy.Publisher("vision_end_effector",Float64MultiArray,queue_size=10)
self.fk_end_effector_pub = rospy.Publisher("fk_end_effector", Float64MultiArray, queue_size=10)
self.actual_target_trajectory_pub = rospy.Publisher("actual_target_trajectory", Float64MultiArray,queue_size=10)
self.vision_target_trajectory_pub = rospy.Publisher("vision_target_trajectory", Float64MultiArray,queue_size=10)
#initialize a publisher for the four angles
self.robot_joint1_pub = rospy.Publisher("/robot/joint1_position_controller/command", Float64, queue_size=10)
self.robot_joint2_pub = rospy.Publisher("/robot/joint2_position_controller/command", Float64, queue_size=10)
self.robot_joint3_pub = rospy.Publisher("/robot/joint3_position_controller/command", Float64, queue_size=10)
self.robot_joint4_pub = rospy.Publisher("/robot/joint4_position_controller/command", Float64, queue_size=10)
#Initialize the publisher for t target
self.target_x_pub = rospy.Publisher("/target/x_position_controller/command", Float64, queue_size=10)
self.target_y_pub = rospy.Publisher("/target/y_position_controller/command", Float64, queue_size=10)
self.target_z_pub = rospy.Publisher("/target/z_position_controller/command", Float64, queue_size=10)
        # initialize a subscriber to receive messages from a topic named /robot/camera1/image_raw and use a callback function to receive data
self.image_sub1 = rospy.Subscriber("/camera1/robot/image_raw", Image, self.callback1)
self.image_sub2 = rospy.Subscriber("/camera2/robot/image_raw", Image, self.callback2)
#initialize a publisher to send desired trajectory
self.time_trajectory = rospy.get_time()
#initialize variables
self.red = np.array([0.0, 0.0, 0.0, 0.0], dtype='float64')
self.green = np.array([0.0, 0.0, 0.0, 0.0], dtype='float64')
self.p2m = np.array([0.0], dtype='float64')
self.joint1 = np.array([0.0], dtype='float64')
self.joint2 = np.array([0.0], dtype='float64')
self.joint3 = np.array([0.0], dtype='float64')
self.joint4 = np.array([0.0], dtype='float64')
# initialize errors
self.time_previous_step = np.array([rospy.get_time()], dtype='float64')
self.time_previous_step2 = np.array([rospy.get_time()], dtype='float64')
# initialize error and derivative of error for trajectory tracking
self.error = np.array([0.0, 0.0,0.0], dtype='float64')
self.error_d = np.array([0.0, 0.0,0.0], dtype='float64')
# initialize the bridge between openCV and ROS
self.bridge = CvBridge()
    # Receive data from camera 1, process it, and publish
def callback1(self, data):
        # Receive the image
try:
self.image1 = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
print(e)
def callback2(self, data):
        # Receive the image
try:
self.image2 = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
print(e)
#Blob detection starts here-------------------------------------------------------
    #Same as 2_1_joint_estimation.py
def detect_red(self,image1, image2):
image_gau_blur1 = cv2.GaussianBlur(image1, (1, 1), 0)
hsv1 = cv2.cvtColor(image_gau_blur1, cv2.COLOR_BGR2HSV)
lower_red1 = np.array([0, 200, 0])
higher_red1 = np.array([0, 255, 255])
red_range1 = cv2.inRange(hsv1, lower_red1, higher_red1)
res_red1 = cv2.bitwise_and(image_gau_blur1, image_gau_blur1, mask=red_range1)
red_s_gray1 = cv2.cvtColor(res_red1, cv2.COLOR_BGR2GRAY)
canny_edge1 = cv2.Canny(red_s_gray1, 30, 70)
contours1, hierarchy1 = cv2.findContours(canny_edge1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
(x1, y1), radius1 = cv2.minEnclosingCircle(contours1[0])
cy, cz1 = (int(x1), int(y1))
radius1 = int(radius1)
image_gau_blur2 = cv2.GaussianBlur(image2, (1, 1), 0)
hsv2 = cv2.cvtColor(image_gau_blur2, cv2.COLOR_BGR2HSV)
lower_red2 = np.array([0, 200, 0])
higher_red2 = np.array([0, 255, 255])
red_range2 = cv2.inRange(hsv2, lower_red2, higher_red2)
res_red2 = cv2.bitwise_and(image_gau_blur2, image_gau_blur2, mask=red_range2)
red_s_gray2 = cv2.cvtColor(res_red2, cv2.COLOR_BGR2GRAY)
canny_edge2 = cv2.Canny(red_s_gray2, 30, 70)
contours2, hierarchy2 = cv2.findContours(canny_edge2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
(x2, y2), radius2 = cv2.minEnclosingCircle(contours2[0])
cx, cz2 = (int(x2), int(y2))
radius2 = int(radius2)
return np.array([cx, cy, cz1, cz2])
def detect_blue(self,image1, image2):
image_gau_blur1 = cv2.GaussianBlur(image1, (1, 1), 0)
hsv1 = cv2.cvtColor(image_gau_blur1, cv2.COLOR_BGR2HSV)
lower_red1 = np.array([70, 0, 0])
higher_red1 = np.array([255, 255, 255])
red_range1 = cv2.inRange(hsv1, lower_red1, higher_red1)
res_red1 = cv2.bitwise_and(image_gau_blur1, image_gau_blur1, mask=red_range1)
red_s_gray1 = cv2.cvtColor(res_red1, cv2.COLOR_BGR2GRAY)
canny_edge1 = cv2.Canny(red_s_gray1, 30, 70)
contours1, hierarchy1 = cv2.findContours(canny_edge1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
(x1, y1), radius1 = cv2.minEnclosingCircle(contours1[0])
cy, cz1 = (int(x1), int(y1))
radius1 = int(radius1)
image_gau_blur2 = cv2.GaussianBlur(image2, (1, 1), 0)
hsv2 = cv2.cvtColor(image_gau_blur2, cv2.COLOR_BGR2HSV)
lower_red2 = np.array([70, 0, 0])
higher_red2 = np.array([255, 255, 255])
red_range2 = cv2.inRange(hsv2, lower_red2, higher_red2)
res_red2 = cv2.bitwise_and(image_gau_blur2, image_gau_blur2, mask=red_range2)
red_s_gray2 = cv2.cvtColor(res_red2, cv2.COLOR_BGR2GRAY)
canny_edge2 = cv2.Canny(red_s_gray2, 30, 70)
contours2, hierarchy2 = cv2.findContours(canny_edge2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
(x2, y2), radius2 = cv2.minEnclosingCircle(contours2[0])
cx, cz2 = (int(x2), int(y2))
radius2 = int(radius2)
return np.array([cx, cy, cz1, cz2])
def detect_green(self,image1, image2):
image_gau_blur1 = cv2.GaussianBlur(image1, (1, 1), 0)
hsv1 = cv2.cvtColor(image_gau_blur1, cv2.COLOR_BGR2HSV)
lower_red1 = np.array([55, 0, 0])
higher_red1 = np.array([100, 255, 255])
red_range1 = cv2.inRange(hsv1, lower_red1, higher_red1)
res_red1 = cv2.bitwise_and(image_gau_blur1, image_gau_blur1, mask=red_range1)
red_s_gray1 = cv2.cvtColor(res_red1, cv2.COLOR_BGR2GRAY)
canny_edge1 = cv2.Canny(red_s_gray1, 30, 70)
contours1, hierarchy1 = cv2.findContours(canny_edge1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
(x1, y1), radius1 = cv2.minEnclosingCircle(contours1[0])
cy, cz1 = (int(x1), int(y1))
radius1 = int(radius1)
image_gau_blur2 = cv2.GaussianBlur(image2, (1, 1), 0)
hsv2 = cv2.cvtColor(image_gau_blur2, cv2.COLOR_BGR2HSV)
lower_red2 = np.array([55, 0, 0])
higher_red2 = np.array([100, 255, 255])
red_range2 = cv2.inRange(hsv2, lower_red2, higher_red2)
res_red2 = cv2.bitwise_and(image_gau_blur2, image_gau_blur2, mask=red_range2)
red_s_gray2 = cv2.cvtColor(res_red2, cv2.COLOR_BGR2GRAY)
canny_edge2 = cv2.Canny(red_s_gray2, 30, 70)
contours2, hierarchy2 = cv2.findContours(canny_edge2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
(x2, y2), radius2 = cv2.minEnclosingCircle(contours2[0])
cx, cz2 = (int(x2), int(y2))
radius2 = int(radius2)
return np.array([cx, cy, cz1, cz2])
def detect_yellow(self,image1, image2):
image_gau_blur1 = cv2.GaussianBlur(image1, (1, 1), 0)
hsv1 = cv2.cvtColor(image_gau_blur1, cv2.COLOR_BGR2HSV)
lower_red1 = np.array([16, 244, 0])
higher_red1 = np.array([51, 255, 255])
red_range1 = cv2.inRange(hsv1, lower_red1, higher_red1)
res_red1 = cv2.bitwise_and(image_gau_blur1, image_gau_blur1, mask=red_range1)
red_s_gray1 = cv2.cvtColor(res_red1, cv2.COLOR_BGR2GRAY)
canny_edge1 = cv2.Canny(red_s_gray1, 30, 70)
contours1, hierarchy1 = cv2.findContours(canny_edge1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
(x1, y1), radius1 = cv2.minEnclosingCircle(contours1[0])
cy, cz1 = (int(x1), int(y1))
radius1 = int(radius1)
image_gau_blur2 = cv2.GaussianBlur(image2, (1, 1), 0)
hsv2 = cv2.cvtColor(image_gau_blur2, cv2.COLOR_BGR2HSV)
lower_red2 = np.array([16, 244, 0])
higher_red2 = np.array([51, 255, 255])
red_range2 = cv2.inRange(hsv2, lower_red2, higher_red2)
res_red2 = cv2.bitwise_and(image_gau_blur2, image_gau_blur2, mask=red_range2)
red_s_gray2 = cv2.cvtColor(res_red2, cv2.COLOR_BGR2GRAY)
canny_edge2 = cv2.Canny(red_s_gray2, 30, 70)
contours2, hierarchy2 = cv2.findContours(canny_edge2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
(x2, y2), radius2 = cv2.minEnclosingCircle(contours2[0])
cx, cz2 = (int(x2), int(y2))
radius2 = int(radius2)
return np.array([cx, cy, cz1, cz2])
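    # The four detect_* methods above differ only in their HSV thresholds. A possible
    # consolidation (a sketch only, not used elsewhere in this file) could look like:
    #
    #   def _detect_colour(image, lower, upper):
    #       blur = cv2.GaussianBlur(image, (1, 1), 0)
    #       hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)
    #       mask = cv2.inRange(hsv, lower, upper)
    #       res = cv2.bitwise_and(blur, blur, mask=mask)
    #       gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
    #       edges = cv2.Canny(gray, 30, 70)
    #       contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    #       (x, y), _ = cv2.minEnclosingCircle(contours[0])
    #       return int(x), int(y)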
def detect_blue_contours(image1):
image_gau_blur1 = cv2.GaussianBlur(image1, (1, 1), 0)
hsv1 = cv2.cvtColor(image_gau_blur1, cv2.COLOR_BGR2HSV)
lower_red1 = np.array([70, 0, 0])
higher_red1 = np.array([255, 255, 255])
red_range1 = cv2.inRange(hsv1, lower_red1, higher_red1)
res_red1 = cv2.bitwise_and(image_gau_blur1, image_gau_blur1, mask=red_range1)
red_s_gray1 = cv2.cvtColor(res_red1, cv2.COLOR_BGR2GRAY)
canny_edge1 = cv2.Canny(red_s_gray1, 30, 70)
contours1, hierarchy1 = cv2.findContours(canny_edge1,cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
return np.array([contours1])
def detect_yellow_contours(image1):
image_gau_blur1 = cv2.GaussianBlur(image1, (1, 1), 0)
hsv1 = cv2.cvtColor(image_gau_blur1, cv2.COLOR_BGR2HSV)
lower_red1 = np.array([16, 244, 0])
higher_red1 = np.array([51, 255, 255])
red_range1 = cv2.inRange(hsv1, lower_red1, higher_red1)
res_red1 = cv2.bitwise_and(image_gau_blur1, image_gau_blur1, mask=red_range1)
red_s_gray1 = cv2.cvtColor(res_red1, cv2.COLOR_BGR2GRAY)
canny_edge1 = cv2.Canny(red_s_gray1, 30, 70)
contours1, hierarchy1 = cv2.findContours(canny_edge1,cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
(x1, y1), radius1 = cv2.minEnclosingCircle(contours1[0])
cy,cz1 = (int(x1), int(y1))
return np.array([contours1])
def get_y1_y2(yellow_contours, blue_contours):
y1 = np.min(yellow_contours, axis = 0)
y1 = y1[0][1]
y1 = y1[:,1]
y2 = np.max(blue_contours, axis = 0)
y2 = y2[0][1]
y2 = y2[:,1]
return y1, y2
def pixelTometer(self, image1,image2):
yellow_contours = detect_yellow_contours(image2)
blue_contours = detect_blue_contours(image2)
y2 = detect_blue(self, image1, image2)
y2 = y2[3]
y1, y2 = get_y1_y2(yellow_contours, blue_contours)
p2m = 2.5/(y1 - y2)
#65 is the best number
return p2m
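        # Example: with the 2.5 m yellow-to-blue link spanning roughly 65 px in the image
        # (the "best number" mentioned above), p2m ~= 2.5 / 65 ~= 0.038 m per pixel.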
#----------------------------------------------------------------------------------------------
#Angle Detection starts here
#This part is same as 2_1_joint_estimation.py
def detect_angles_blob(self,image1,image2):
try:
p=pixelTometer(self,image1,image2)
self.p2m = p
except Exception as e:
p = self.p2m
try:
green = detect_green(self, image1, image2)
self.green = green
except Exception as e:
green = self.green
try:
red = detect_red(self, image1, image2)
self.red = red
except Exception as e:
red = self.red
p=pixelTometer(self,image1,image2)
yellow=p*detect_yellow(self,image1,image2)
blue=p*detect_blue(self,image1,image2)
ja1=0.0
ja2=np.pi/2-np.arctan2((blue[2] - green[2]), (blue[1] - green[1]))
ja3 = np.arctan2((blue[3] - green[3]), (blue[0] - green[0]))-np.pi/2
ja4 = np.arctan2((green[2] - red[2]), -(green[1] - red[1]))-np.pi/2-ja2
return np.array([ja1,ja2,ja3,ja4])
def angle_trajectory(self):
curr_time = np.array([rospy.get_time() - self.time_trajectory])
ja1 = 0.1
ja2 = float((np.pi / 2) * np.sin((np.pi / 15) * curr_time))
ja3 = float((np.pi / 2) * np.sin((np.pi / 18) * curr_time))
ja4 = float((np.pi / 2) * np.sin((np.pi / 20) * curr_time))
return np.array([ja1, ja2, ja3, ja4])
def actual_target_position(self):
curr_time = np.array([rospy.get_time() - self.time_trajectory])
x_d = float((2.5 * np.cos(curr_time * np.pi / 15))+0.5)
y_d = float(2.5 * np.sin(curr_time * np.pi / 15))
z_d = float((1 * np.sin(curr_time * np.pi / 15))+7.0)
return np.array([x_d,y_d,z_d])
#FK starts here--------------------------------------------------------------------------------
#This part is same as 3_1_FK.py
def end_effector_position(self, image1, image2):
try:
p=pixelTometer(self,image1,image2)
self.p2m = p
except Exception as e:
p = self.p2m
yellow_posn = detect_yellow(self,image1, image2)
red_posn = detect_red(self, image1, image2)
yellow_posn[3] = 800 - yellow_posn[3]
red_posn[3] = 800 - red_posn[3]
cx, cy, cz1, cz2 = p * (red_posn - yellow_posn)
ee_posn = np.array([cx, cy, cz2])
ee_posn = np.round(ee_posn,1)
return ee_posn
#Calculate the jacobian
def calculate_jacobian(self,image1,image2):
ja1,ja2,ja3,ja4=detect_angles_blob(self,image1,image2)
jacobian=np.array([[3*np.cos(ja1)*np.sin(ja2)*np.cos(ja3)*np.cos(ja4)
+3.5*np.cos(ja1)*np.sin(ja2)*np.cos(ja3)
-3*np.sin(ja1)*np.cos(ja4)*np.sin(ja3)
-3.5*np.sin(ja1)*np.sin(ja3)
+3*np.cos(ja1)*np.cos(ja2)*np.sin(ja4),
3*np.sin(ja1)*np.cos(ja2)*np.cos(ja3)*np.cos(ja4)
+3.5*np.sin(ja1)*np.cos(ja2)*np.cos(ja3)
-3*np.sin(ja1)*np.sin(ja2)*np.sin(ja4),
-3*np.sin(ja1)*np.sin(ja2)*np.sin(ja3)*np.cos(ja4)
-3.5*np.sin(ja1)*np.sin(ja2)*np.sin(ja3)
+3*np.cos(ja1)*np.cos(ja4)*np.cos(ja3)
+3.5*np.cos(ja1)*np.cos(ja3),
-3*np.sin(ja1)*np.sin(ja2)*np.cos(ja3)*np.sin(ja4)
-3*np.cos(ja1)*np.sin(ja4)*np.sin(ja3)
+3*np.sin(ja1)*np.cos(ja2)*np.cos(ja4)
],
[
3*np.sin(ja1)*np.sin(ja2)*np.cos(ja3)*np.cos(ja4)
+3.5*np.sin(ja1)*np.sin(ja2)*np.cos(ja3)
+3*np.cos(ja1)*np.cos(ja4)*np.sin(ja3)
+3.5*np.cos(ja1)*np.sin(ja3)
+3*np.sin(ja1)*np.cos(ja2)*np.sin(ja4),
-3*np.cos(ja1)*np.cos(ja2)*np.cos(ja3)* | np.cos(ja4) | numpy.cos |
import numpy as np
import pandas as pd
import pdb
import re
from time import time
import json
import random
import os
import model
import paths
from scipy.spatial.distance import pdist, squareform
from scipy.stats import multivariate_normal, invgamma, mode
from scipy.special import gamma
# from scipy.misc import imresize
from functools import partial
from math import ceil
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.preprocessing import MinMaxScaler
# --- to do with loading --- #
def get_samples_and_labels(settings):
"""
Parse settings options to load or generate correct type of data,
perform test/train split as necessary, and reform into 'samples' and 'labels'
dictionaries.
"""
if settings['data_load_from']:
data_path = './experiments/data/' + settings['data_load_from'] + '.data.npy'
print('Loading data from', data_path)
samples, pdf, labels = get_data('load', data_path)
train, vali, test = samples['train'], samples['vali'], samples['test']
train_labels, vali_labels, test_labels = labels['train'], labels['vali'], labels['test']
del samples, labels
else:
# generate the data
data_vars = ['num_samples', 'seq_length', 'num_signals', 'freq_low',
'freq_high', 'amplitude_low', 'amplitude_high', 'scale',
'full_mnist']
data_settings = dict((k, settings[k]) for k in data_vars if k in settings.keys())
samples, pdf, labels = get_data(settings['data'], data_settings)
if 'multivariate_mnist' in settings and settings['multivariate_mnist']:
seq_length = samples.shape[1]
samples = samples.reshape(-1, int(np.sqrt(seq_length)), int(np.sqrt(seq_length)))
if 'normalise' in settings and settings['normalise']: # TODO this is a mess, fix
print("monish")
print(settings['normalise'])
norm = True
else:
norm = False
if labels is None:
train, vali, test = split(samples, [0.6, 0.2, 0.2], normalise=norm)
train_labels, vali_labels, test_labels = None, None, None
else:
train, vali, test, labels_list = split(samples, [0.6, 0.2, 0.2], normalise=norm, labels=labels)
train_labels, vali_labels, test_labels = labels_list
labels = dict()
labels['train'], labels['vali'], labels['test'] = train_labels, vali_labels, test_labels
samples = dict()
samples['train'], samples['vali'], samples['test'] = train, vali, test
# update the settings dictionary to update erroneous settings
# (mostly about the sequence length etc. - it gets set by the data!)
settings['seq_length'] = samples['train'].shape[1]
settings['num_samples'] = samples['train'].shape[0] + samples['vali'].shape[0] + samples['test'].shape[0]
settings['num_signals'] = samples['train'].shape[2]
settings['num_generated_features'] = samples['train'].shape[2]
return samples, pdf, labels
def get_data(data_type, data_options=None):
"""
Helper/wrapper function to get the requested data.
"""
labels = None
pdf = None
if data_type == 'load':
        data_dict = np.load(data_options, allow_pickle=True).item()
samples = data_dict['samples']
pdf = data_dict['pdf']
labels = data_dict['labels']
elif data_type == 'sine':
samples = sine_wave(**data_options)
elif data_type == 'mnist':
if data_options['full_mnist']:
samples, labels = mnist()
else:
#samples, labels = load_resized_mnist_0_5(14)
samples, labels = load_resized_mnist(14) # this is the 0-2 setting
elif data_type == 'gp_rbf':
print(data_options)
samples, pdf = GP(**data_options, kernel='rbf')
elif data_type == 'linear':
samples, pdf = linear(**data_options)
else:
raise ValueError(data_type)
print('Generated/loaded', len(samples), 'samples from data-type', data_type)
return samples, pdf, labels
def get_batch(samples, batch_size, batch_idx, labels=None):
start_pos = batch_idx * batch_size
end_pos = start_pos + batch_size
if labels is None:
return samples[start_pos:end_pos], None
else:
if type(labels) == tuple: # two sets of labels
assert len(labels) == 2
return samples[start_pos:end_pos], labels[0][start_pos:end_pos], labels[1][start_pos:end_pos]
else:
assert type(labels) == np.ndarray
return samples[start_pos:end_pos], labels[start_pos:end_pos]
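# Example: iterate over minibatches of size 28 from the training split
#   n_batches = samples['train'].shape[0] // 28
#   for b in range(n_batches):
#       X_mb, y_mb = get_batch(samples['train'], 28, b, labels=labels['train'])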
def normalise_data(train, vali, test, low=-1, high=1):
""" Apply some sort of whitening procedure
"""
# remember, data is num_samples x seq_length x signals
# whiten each signal - mean 0, std 1
mean = np.mean(np.vstack([train, vali]), axis=(0, 1))
std = np.std(np.vstack([train-mean, vali-mean]), axis=(0, 1))
normalised_train = (train - mean)/std
normalised_vali = (vali - mean)/std
normalised_test = (test - mean)/std
# normalised_data = data - np.nanmean(data, axis=(0, 1))
# normalised_data /= np.std(data, axis=(0, 1))
# # normalise samples to be between -1 and +1
# normalise just using train and vali
# min_val = np.nanmin(np.vstack([train, vali]), axis=(0, 1))
# max_val = np.nanmax(np.vstack([train, vali]), axis=(0, 1))
#
# normalised_train = (train - min_val)/(max_val - min_val)
# normalised_train = (high - low)*normalised_train + low
#
# normalised_vali = (vali - min_val)/(max_val - min_val)
# normalised_vali = (high - low)*normalised_vali + low
#
# normalised_test = (test - min_val)/(max_val - min_val)
# normalised_test = (high - low)*normalised_test + low
return normalised_train, normalised_vali, normalised_test
def scale_data(train, vali, test, scale_range=(-1, 1)):
signal_length = train.shape[1]
num_signals = train.shape[2]
# reshape everything
train_r = train.reshape(-1, signal_length*num_signals)
vali_r = vali.reshape(-1, signal_length*num_signals)
test_r = test.reshape(-1, signal_length*num_signals)
# fit scaler using train, vali
scaler = MinMaxScaler(feature_range=scale_range).fit(np.vstack([train_r, vali_r]))
# scale everything
scaled_train = scaler.transform(train_r).reshape(-1, signal_length, num_signals)
scaled_vali = scaler.transform(vali_r).reshape(-1, signal_length, num_signals)
scaled_test = scaler.transform(test_r).reshape(-1, signal_length, num_signals)
return scaled_train, scaled_vali, scaled_test
def split(samples, proportions, normalise=False, scale=False, labels=None, random_seed=None):
"""
Return train/validation/test split.
"""
if random_seed != None:
random.seed(random_seed)
np.random.seed(random_seed)
    assert np.isclose(np.sum(proportions), 1)
n_total = samples.shape[0]
n_train = ceil(n_total*proportions[0])
n_test = ceil(n_total*proportions[2])
n_vali = n_total - (n_train + n_test)
# permutation to shuffle the samples
shuff = np.random.permutation(n_total)
train_indices = shuff[:n_train]
vali_indices = shuff[n_train:(n_train + n_vali)]
test_indices = shuff[(n_train + n_vali):]
# TODO when we want to scale we can just return the indices
assert len(set(train_indices).intersection(vali_indices)) == 0
assert len(set(train_indices).intersection(test_indices)) == 0
assert len(set(vali_indices).intersection(test_indices)) == 0
# split up the samples
train = samples[train_indices]
vali = samples[vali_indices]
test = samples[test_indices]
# apply the same normalisation scheme to all parts of the split
if normalise:
if scale: raise ValueError(normalise, scale) # mutually exclusive
train, vali, test = normalise_data(train, vali, test)
elif scale:
train, vali, test = scale_data(train, vali, test)
if labels is None:
return train, vali, test
else:
print('Splitting labels...')
if type(labels) == np.ndarray:
train_labels = labels[train_indices]
vali_labels = labels[vali_indices]
test_labels = labels[test_indices]
labels_split = [train_labels, vali_labels, test_labels]
elif type(labels) == dict:
# more than one set of labels! (weird case)
labels_split = dict()
for (label_name, label_set) in labels.items():
train_labels = label_set[train_indices]
vali_labels = label_set[vali_indices]
test_labels = label_set[test_indices]
labels_split[label_name] = [train_labels, vali_labels, test_labels]
else:
raise ValueError(type(labels))
return train, vali, test, labels_split
def make_predict_labels(samples, labels):
""" Given two dictionaries of samples, labels (already normalised, split etc)
append the labels on as additional signals in the data
"""
print('Appending label to samples')
assert not labels is None
if len(labels['train'].shape) > 1:
num_labels = labels['train'].shape[1]
else:
num_labels = 1
seq_length = samples['train'].shape[1]
num_signals = samples['train'].shape[2]
new_samples = dict()
new_labels = dict()
for (k, X) in samples.items():
num_samples = X.shape[0]
lab = labels[k]
# slow code because i am sick and don't want to try to be smart
new_X = np.zeros(shape=(num_samples, seq_length, num_signals + num_labels))
for row in range(num_samples):
new_X[row, :, :] = np.hstack([X[row, :, :], np.array(seq_length*[(2*lab[row]-1).reshape(num_labels)])])
new_samples[k] = new_X
new_labels[k] = None
return new_samples, new_labels
# --- specific data-types --- #
def mnist(randomize=False):
""" Load and serialise """
try:
train = np.load('./experiments/data/mnist_train.npy')
print('Loaded mnist from .npy')
except IOError:
print('Failed to load MNIST data from .npy, loading from csv')
# read from the csv
train = np.loadtxt(open('./experiments/data/mnist_train.csv', 'r'), delimiter=',')
# scale samples from 0 to 1
train[:, 1:] /= 255
# scale from -1 to 1
train[:, 1:] = 2*train[:, 1:] - 1
# save to the npy
np.save('./experiments/data/mnist_train.npy', train)
# the first column is labels, kill them
labels = train[:, 0]
samples = train[:, 1:]
if randomize:
# not needed for GAN experiments...
print('Applying fixed permutation to mnist digits.')
fixed_permutation = | np.random.permutation(28*28) | numpy.random.permutation |
from __future__ import print_function, division
import os
import sys
root_dir = os.path.dirname(sys.path[0])
sys.path.append(root_dir)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from cryoio import mrc
import density, cryoops
import geometry
import cryoem
from notimplemented import correlation
import pyximport; pyximport.install(
setup_args={"include_dirs": np.get_include()}, reload_support=True)
import sincint
def demo(N=128, rad=0.5):
TtoF = sincint.gentrunctofull(N=N, rad=rad)
xy, trunc_xy, truncmask = geometry.gencoords(N, 2, rad, True)
print('shape of TtoF:', TtoF.shape)
print('slice shape:', trunc_xy.shape[0])
trunc_slice = np.arange(trunc_xy.shape[0])
sliced_image = TtoF.dot(trunc_slice).reshape(N, N)
trunc_xy_idx = np.int_(trunc_xy + int(N/2))
# Compare speed for getting slices in this way
new_trunc_slice = sliced_image[trunc_xy_idx[:, 0], trunc_xy_idx[:, 1]]
print('error:', sum(trunc_slice - new_trunc_slice))
pol_trunc_xy = correlation.cart2pol(trunc_xy)
# inside of rad
# sort trunc_xy coordinates
sorted_idx = np.lexsort((pol_trunc_xy[:, 1], pol_trunc_xy[:, 0])) # lexsort; first, sort rho; second, sort theta
sorted_pol_trunc_xy = pol_trunc_xy[sorted_idx]
# reconstuct sorted coordinates into original state
reco_pol_trunc_xy = sorted_pol_trunc_xy[sorted_idx.argsort()]
print('error for reconstructed coordinates:', sum(correlation.pol2cart(reco_pol_trunc_xy) - trunc_xy))
reco_trunc_slice = trunc_slice[sorted_idx.argsort()]
bingo_sliced_image = TtoF.dot(reco_trunc_slice).reshape(N, N)
# outside of rad
xy_outside = xy[~truncmask]
sliced_image_outside_rad = np.zeros((N, N))
sliced_image_outside_rad[~truncmask.reshape(N, N)] = np.arange(xy_outside.shape[0])
pol_xy_outside = correlation.cart2pol(xy_outside)
outside_sorted_idx = np.lexsort((pol_xy_outside[:, 1], pol_xy_outside[:, 0])) # lexsort; first, sort rho; second, sort theta
sorted_pol_xy_outside = pol_xy_outside[outside_sorted_idx]
reco_pol_xy_outside = np.arange(xy_outside.shape[0])[outside_sorted_idx.argsort()]
bingo_sliced_image_outside_rad = np.zeros((N, N))
bingo_sliced_image_outside_rad[~truncmask.reshape(N, N)] = reco_pol_xy_outside
fig, axes = plt.subplots(2, 2)
ax = axes.flatten()
ax[0].imshow(sliced_image)
ax[1].imshow(bingo_sliced_image)
ax[2].imshow(sliced_image_outside_rad)
ax[3].imshow(bingo_sliced_image_outside_rad)
plt.show()
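# Note on np.lexsort((theta, rho)) as used above: the *last* key is the primary sort key,
# so points are ordered by radius first and by angle within each radius. For example:
#   np.lexsort((np.array([2, 1]), np.array([5, 5])))  # -> array([1, 0])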
def compare_interpolation(N=128, rad=1):
_, trunc_xy, _ = geometry.gencoords(N, 2, rad, True)
pol_trunc_xy = correlation.cart2pol(trunc_xy)
sorted_idx = np.lexsort((pol_trunc_xy[:, 1], pol_trunc_xy[:, 0])) # lexsort; first, sort rho; second, sort theta
sorted_pol_trunc_xy = pol_trunc_xy[sorted_idx]
interpolation = ['none', 'nearest', 'nearest_decimal_1', 'nearest_half']
fig, ax = plt.subplots(nrows=len(interpolation), sharex=True)
# fig, ax = plt.subplots()
def round_to(n, precision):
# correction = 0.5 if n >= 0 else -0.5
correction = np.ones_like(n) * 0.5
correction[n < 0] = -0.5
return np.int_(n / precision + correction) * precision
def round_half(n):
return round_to(n, 0.5)
def get_ip_func(ip_method):
if 'none' == ip_method.lower():
return lambda x: x
elif 'nearest' == ip_method.lower():
return np.round
elif 'nearest_decimal_1' == ip_method.lower():
return lambda x: | np.round(x, 1) | numpy.round |
# python -m unittest tests/test_ml_training.py
import copy
import numpy as np
import pandas as pd
import os
import shutil
import unittest
from collections import OrderedDict
from subroutines.exceptions import AlgorithmError, create_generator
from subroutines.train import (
make_separate_subclass_splits, bootstrap_data, make_feat_importance_plots,
check_arguments, RunML
)
class TestClass(unittest.TestCase):
def test_make_separate_subclass_splits(self):
"""
Tests make_separate_subclass_splits in train.py
"""
print('Testing make_separate_subclass_splits')
exp_input_dict = {
1: [['A', 'B', 'C', 'D', 'B', 'A', 'D', 'C', 'C', 'A', 'D', 'B'],
np.array([['A', 'C'], ['B', 'D']], dtype=object)],
2: [np.array([['A', 'B', 'C', 'D'], ['B', 'A', 'D', 'C'], ['C', 'A', 'D', 'B']], dtype=object),
np.array([['A', 'C'], ['B', 'D']], dtype=object)],
3: [np.array(['A', 'B', 'C', 'D', 'B', 'A', 'D', 'C', 'C', np.nan, 'D', 'B'], dtype=object),
np.array([['A', 'C'], ['B', 'D']], dtype=object)],
4: [np.array(['A', 'B', 'C', 'D', 'B', 'A', 'D', 'C', 'C', 'A', 'D', 'B'], dtype=object),
[['A', 'C'], ['B', 'D']]],
5: [np.array(['A', 'B', 'C', 'D', 'B', 'A', 'D', 'C', 'C', 'A', 'D', 'B'], dtype=object),
np.array([[np.nan, 'C'], ['B', 'D']], dtype=object)],
6: [np.array(['A', 'B', 'C', 'D', 'B', 'A', 'D', 'C', 'C', 'A', 'D', 'B'], dtype=object),
np.array([['A', 'C'], ['B', 'A']], dtype=object)],
7: [np.array(['A', 'B', 'C', 'D', 'B', 'A', 'D', 'E', 'C', 'A', 'D', 'B'], dtype=object),
np.array([['A', 'C'], ['B', 'D']], dtype=object)],
8: [np.array(['A', 'B', 'C', 'D', 'B', 'A', 'D', 'E', 'C', 'A', 'D', 'B'], dtype=object),
np.array([['A', 'C'], ['B', 'D'], ['E', 'F']], dtype=object)],
9: [np.array(['A', 'B', 'C', 'D', 'B', 'A', 'D', 'C', 'C', 'A', 'D', 'B'], dtype=object),
np.array([['A', 'C'], ['B', 'D']], dtype=object)]
}
for num in exp_input_dict.keys():
subclasses = exp_input_dict[num][0]
subclass_splits = exp_input_dict[num][1]
if num == 1:
with self.assertRaises(TypeError) as message:
splits = make_separate_subclass_splits(subclasses, subclass_splits)
next(splits)
self.assertEqual(
str(message.exception), 'Expect "subclasses" to be a (1D) '
'array of subclass values'
)
elif num == 2:
with self.assertRaises(ValueError) as message:
splits = make_separate_subclass_splits(subclasses, subclass_splits)
next(splits)
self.assertEqual(
str(message.exception), 'Expect "subclasses" to be a 1D array'
)
elif num == 3:
with self.assertRaises(ValueError) as message:
splits = make_separate_subclass_splits(subclasses, subclass_splits)
next(splits)
self.assertEqual(
str(message.exception), 'NaN value(s) detected in '
'"subclasses" array'
)
elif num == 4:
with self.assertRaises(TypeError) as message:
splits = make_separate_subclass_splits(subclasses, subclass_splits)
next(splits)
self.assertEqual(
str(message.exception), 'Expect "subclass_splits" to be a '
'(2D) array of subclass values'
)
elif num == 5:
with self.assertRaises(ValueError) as message:
splits = make_separate_subclass_splits(subclasses, subclass_splits)
next(splits)
self.assertEqual(
str(message.exception), 'NaN value(s) detected in '
'"subclass_splits" array'
)
elif num == 6:
with self.assertRaises(ValueError) as message:
splits = make_separate_subclass_splits(subclasses, subclass_splits)
next(splits)
self.assertEqual(
str(message.exception), 'Repeated subclass labels detected '
'in "subclass_splits"'
)
elif num == 7:
with self.assertRaises(ValueError) as message:
splits = make_separate_subclass_splits(subclasses, subclass_splits)
next(splits)
self.assertEqual(
str(message.exception), 'Subclass E is found in '
'"subclasses" but not "subclass_splits"'
)
elif num == 8:
with self.assertRaises(ValueError) as message:
splits = make_separate_subclass_splits(subclasses, subclass_splits)
next(splits)
self.assertEqual(
str(message.exception), 'Subclass F is found in '
'"subclass_splits" but not "subclasses"'
)
elif num == 9:
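# Case 9: valid inputs - subclasses A and C map to the first split (indices 0, 2, 5, 7, 8, 9), B and D to the second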
exp_split = (sub_list for sub_list in
[np.array([0, 2, 5, 7, 8, 9]),
np.array([1, 3, 4, 6, 10, 11])])
act_split = make_separate_subclass_splits(subclasses, subclass_splits)
for i, split_1 in enumerate(list(exp_split)):
for j, split_2 in enumerate(list(act_split)):
if i == j:
np.testing.assert_equal(split_1, split_2)
def test_bootstrap_data(self):
"""
Tests bootstrap_data in train.py
"""
print('Testing bootstrap_data')
exp_input_dict = {
1: [[[1.0, 1.5, 1.2], [4.6, 2.3, 2.1], [1.0, 2.2, 1.8],
[1.8, 1.1, 0.6], [0.7, 0.9, 0.7], [4.1, 3.3, 2.6],
[3.4, 2.5, 1.4], [2.7, 2.2, 1.9], [4.0, 4.0, 3.1],
[1.6, 0.5, 1.0]],
np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']),
['1', '2', '3'], True],
2: [np.array([[1.0, 1.5, 1.2], [4.6, 2.3, 2.1], [1.0, 2.2, 1.8],
[1.8, 1.1, 0.6], [0.7, 0.9, 0.7], [4.1, 3.3, 2.6],
[3.4, 2.5, 1.4], [2.7, 2.2, 1.9], [4.0, 4.0, 3.1],
[1.6, 0.5, 1.0]]),
['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'],
['1', '2', '3'], True],
3: [np.array([[1.0, 1.5, 1.2], [4.6, 2.3, 2.1], [1.0, 2.2, 1.8],
[1.8, 1.1, 0.6], [0.7, 0.9, 0.7], [4.1, 3.3, 2.6],
[3.4, 2.5, 1.4], [2.7, 2.2, 1.9], [4.0, 4.0, 3.1],
[1.6, 0.5, 1.0]]),
np.array([['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']]),
['1', '2', '3'], True],
4: [np.array([[1.0, 1.5, 1.2], [4.6, 2.3, 2.1], [1.0, 2.2, 1.8],
[1.8, 1.1, 0.6], [0.7, 0.9, 0.7], [4.1, 3.3, 2.6],
[3.4, 2.5, 1.4], [2.7, 2.2, 1.9], [4.0, 4.0, 3.1],
[1.6, 0.5, 1.0]]),
np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']),
['1', '2', '3'], True],
5: [np.array([[1.0, 1.5, 1.2], [4.6, 2.3, 2.1], [1.0, 2.2, 1.8],
[1.8, 1.1, 0.6], [0.7, 0.9, 0.7], [4.1, 3.3, 2.6],
[3.4, 2.5, 1.4], [2.7, 2.2, 1.9], [4.0, 4.0, 3.1],
[1.6, 0.5, 1.0]]),
np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']),
np.array(['1', '2', '3']), True],
6: [np.array([[1.0, 1.5, 1.2], [4.6, 2.3, 2.1], [1.0, 2.2, 1.8],
[1.8, 1.1, 0.6], [0.7, 0.9, 0.7], [4.1, 3.3, 2.6],
[3.4, 2.5, 1.4], [2.7, 2.2, 1.9], [4.0, 4.0, 3.1],
[1.6, 0.5, 1.0]]),
np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']),
['1', '2', '3', '4'], True],
7: [np.array([[1.0, 1.5, 1.2], [4.6, 2.3, 2.1], [1.0, 2.2, 1.8],
[1.8, 1.1, 0.6], [0.7, 0.9, 0.7], [4.1, 3.3, 2.6],
[3.4, 2.5, 1.4], [2.7, 2.2, 1.9], [4.0, 4.0, 3.1],
[1.6, 0.5, 1.0]]),
np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']),
['1', '2', '3'], 1.0],
8: [np.array([[1.0, 1.5, 1.2], [4.6, 2.3, 2.1], [1.0, 2.2, 1.8],
[1.8, 1.1, 0.6], [0.7, 0.9, 0.7], [4.1, 3.3, 2.6],
[3.4, 2.5, 1.4], [2.7, 2.2, 1.9], [4.0, 4.0, 3.1],
[1.6, 0.5, 1.0]]),
np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']),
['1', '2', '3'], False],
9: [np.array([[1.0, 1.5, 1.2], [4.6, 2.3, 2.1], [1.0, 2.2, 1.8],
[1.8, 1.1, 0.6], [0.7, 0.9, 0.7], [4.1, 3.3, 2.6],
[3.4, 2.5, 1.4], [2.7, 2.2, 1.9], [4.0, 4.0, 3.1],
[1.6, 0.5, 1.0]]),
np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']),
['1', '2', '3'], True]
}
for num in exp_input_dict.keys():
x = exp_input_dict[num][0]
y = exp_input_dict[num][1]
features = exp_input_dict[num][2]
scale = exp_input_dict[num][3]
if num == 1:
with self.assertRaises(TypeError) as message: bootstrap_data(
x, y, features, scale, True
)
self.assertEqual(
str(message.exception), 'Expect "x" to be a (2D) array of x'
' values'
)
if num == 2:
with self.assertRaises(TypeError) as message: bootstrap_data(
x, y, features, scale, True
)
self.assertEqual(
str(message.exception), 'Expect "y" to be a (1D) array of y'
' values'
)
if num == 3:
with self.assertRaises(ValueError) as message: bootstrap_data(
x, y, features, scale, True
)
self.assertEqual(
str(message.exception), 'Expect "y" to be a 1D array of y '
'values'
)
if num == 4:
with self.assertRaises(ValueError) as message: bootstrap_data(
x, y, features, scale, True
)
self.assertEqual(
str(message.exception), 'Different numbers of rows in '
'arrays "x" and "y"'
)
if num == 5:
with self.assertRaises(TypeError) as message: bootstrap_data(
x, y, features, scale, True
)
self.assertEqual(
str(message.exception), 'Expect "features" to be a list'
)
if num == 6:
with self.assertRaises(ValueError) as message: bootstrap_data(
x, y, features, scale, True
)
self.assertEqual(
str(message.exception), 'Expect entries in "features" list '
'to correspond to the columns in "x"'
)
if num == 7:
with self.assertRaises(TypeError) as message: bootstrap_data(
x, y, features, scale, True
)
self.assertEqual(
str(message.exception), 'Expect "scale" to be a Boolean '
'value (either True or False)'
)
if num == 8:
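# Case 8: valid inputs with scale=False - expect rows of "x" (and the matching "y" labels) resampled with replacement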
exp_out_x = pd.DataFrame(
np.array([[1.0, 1.5, 1.2],
[3.4, 2.5, 1.4],
[4.6, 2.3, 2.1],
[1.8, 1.1, 0.6],
[0.7, 0.9, 0.7],
[4.1, 3.3, 2.6],
[4.0, 4.0, 3.1],
[1.0, 1.5, 1.2],
[3.4, 2.5, 1.4],
[4.1, 3.3, 2.6]]),
index=None, columns=features
)
exp_out_y = ['a', 'g', 'b', 'd', 'e', 'f', 'i', 'a', 'g', 'f']
act_out_x, act_out_y = bootstrap_data(x, y, features, scale, True)
pd.testing.assert_frame_equal(exp_out_x, act_out_x)
self.assertEqual(exp_out_y, act_out_y)
if num == 9:
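# Case 9: as case 8 but with scale=True - the bootstrapped values appear to be robust-scaled (median subtracted, then divided by the interquartile range)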
exp_out_x = pd.DataFrame(
np.array([[-0.83478261, -0.5625, -0.15686275],
[0., 0.0625, 0.],
[0.4173913, -0.0625, 0.54901961],
[-0.55652174, -0.8125, -0.62745098],
[-0.93913043, -0.9375, -0.54901961],
[0.24347826, 0.5625, 0.94117647],
[0.20869565, 1., 1.33333333],
[-0.83478261, -0.5625, -0.15686275],
[0., 0.0625, 0.],
[0.24347826, 0.5625, 0.94117647]]),
index=None, columns=features
)
exp_out_y = ['a', 'g', 'b', 'd', 'e', 'f', 'i', 'a', 'g', 'f']
act_out_x, act_out_y = bootstrap_data(x, y, features, scale, True)
pd.testing.assert_frame_equal(exp_out_x, act_out_x)
self.assertEqual(exp_out_y, act_out_y)
def test_make_feat_importance_plots(self):
"""
Tests make_feat_importance_plots in train.py
"""
print('Testing make_feat_importance_plots')
input_feat_importances = {
'Feature_1': [7.8, 8.7, 0.1, 8.1, 0.4],
'Feature_2': [6.4, 0.1, 0.6, 8.3, 5.2],
'Feature_3': [7.1, 8.4, 0.0, 9.3, 2.5],
'Feature_4': [3.4, 2.1, 1.6, 5.6, 9.4],
'Feature_5': [8.5, 3.4, 6.6, 6.4, 9.0],
'Feature_6': [3.5, 4.3, 8.9, 2.3, 4.1],
'Feature_7': [6.5, 8.4, 2.1, 3.2, 7.8],
'Feature_8': [8.2, 4.7, 4.3, 1.0, 4.3],
'Feature_9': [8.2, 5.6, 5.0, 0.8, 0.9],
'Feature_10': [1.9, 4.0, 0.5, 6.0, 7.8]
}
input_results_dir = 'tests/Temp_output'
input_plt_name = 'PlaceHolder'
for num in range(1, 7):
if num == 1:
with self.assertRaises(FileNotFoundError) as message:
make_feat_importance_plots(
input_feat_importances, input_results_dir,
input_plt_name, True
)
self.assertEqual(
str(message.exception),
'Directory {} does not exist'.format(input_results_dir)
)
elif num == 2:
os.mkdir(input_results_dir)
with open('{}/{}_feat_importance_percentiles.svg'.format(
input_results_dir, input_plt_name
), 'w') as f:
f.write('PlaceHolder')
with self.assertRaises(FileExistsError) as message:
make_feat_importance_plots(
input_feat_importances, input_results_dir,
input_plt_name, True
)
self.assertEqual(
str(message.exception),
'File {}/{}_feat_importance_percentiles.svg already exists '
'- please rename this file so it is not overwritten by '
'running this function'.format(input_results_dir, input_plt_name)
)
shutil.rmtree(input_results_dir)
elif num == 3:
os.mkdir(input_results_dir)
with open('{}/{}_feat_importance_all_data.svg'.format(
input_results_dir, input_plt_name
), 'w') as f:
f.write('PlaceHolder')
with self.assertRaises(FileExistsError) as message:
make_feat_importance_plots(
input_feat_importances, input_results_dir,
input_plt_name, True
)
self.assertEqual(
str(message.exception),
'File {}/{}_feat_importance_all_data.svg already exists - '
'please rename this file so it is not overwritten by '
'running this function'.format(input_results_dir, input_plt_name)
)
shutil.rmtree(input_results_dir)
elif num == 4:
os.mkdir(input_results_dir)
with self.assertRaises(TypeError) as message:
make_feat_importance_plots(
pd.DataFrame({}), input_results_dir, input_plt_name, True
)
self.assertEqual(
str(message.exception),
'Expect "feature_importances" to be a dictionary of '
'importance scores'
)
shutil.rmtree(input_results_dir)
elif num == 5:
os.mkdir(input_results_dir)
with self.assertRaises(TypeError) as message:
make_feat_importance_plots(
input_feat_importances, input_results_dir, 1.0, True
)
self.assertEqual(
str(message.exception),
'Expect "plt_name" to a string to append to the start of '
'the names of the saved plots'
)
shutil.rmtree(input_results_dir)
elif num == 6:
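# Case 6: valid inputs - expect a summary dataframe of features ranked by median importance score, plus the raw and percentile values used to draw the plots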
os.mkdir(input_results_dir)
exp_importance_df = pd.DataFrame({
'Feature': ['Feature_1', 'Feature_3', 'Feature_5', 'Feature_7',
'Feature_2', 'Feature_9', 'Feature_8', 'Feature_6',
'Feature_10', 'Feature_4'],
'Score': [7.8, 7.1, 6.6, 6.5, 5.2, 5.0, 4.3, 4.1, 4.0, 3.4],
'Lower conf limit': [0.13, 0.25, 3.7, 2.21, 0.15, 0.81,
1.33, 2.42, 0.64, 1.65],
'Upper conf limit': [8.64, 9.21, 8.95, 8.34, 8.11, 7.94,
7.85, 8.44, 7.62, 9.02]
})
exp_cols = [
'Feature_1', 'Feature_2', 'Feature_3', 'Feature_4', 'Feature_5',
'Feature_6', 'Feature_7', 'Feature_8', 'Feature_9', 'Feature_10'
]
exp_cols_all = [
'Feature_1', 'Feature_1', 'Feature_1', 'Feature_1', 'Feature_1',
'Feature_2', 'Feature_2', 'Feature_2', 'Feature_2', 'Feature_2',
'Feature_3', 'Feature_3', 'Feature_3', 'Feature_3', 'Feature_3',
'Feature_4', 'Feature_4', 'Feature_4', 'Feature_4', 'Feature_4',
'Feature_5', 'Feature_5', 'Feature_5', 'Feature_5', 'Feature_5',
'Feature_6', 'Feature_6', 'Feature_6', 'Feature_6', 'Feature_6',
'Feature_7', 'Feature_7', 'Feature_7', 'Feature_7', 'Feature_7',
'Feature_8', 'Feature_8', 'Feature_8', 'Feature_8', 'Feature_8',
'Feature_9', 'Feature_9', 'Feature_9', 'Feature_9', 'Feature_9',
'Feature_10', 'Feature_10', 'Feature_10', 'Feature_10', 'Feature_10'
]
exp_all_vals = [
7.8, 8.7, 0.1, 8.1, 0.4, 6.4, 0.1, 0.6, 8.3, 5.2, 7.1, 8.4,
0.0, 9.3, 2.5, 3.4, 2.1, 1.6, 5.6, 9.4, 8.5, 3.4, 6.6, 6.4,
9.0, 3.5, 4.3, 8.9, 2.3, 4.1, 6.5, 8.4, 2.1, 3.2, 7.8, 8.2,
4.7, 4.3, 1.0, 4.3, 8.2, 5.6, 5.0, 0.8, 0.9, 1.9, 4.0, 0.5,
6.0, 7.8]
exp_median_vals = [7.8, 5.2, 7.1, 3.4, 6.6, 4.1, 6.5, 4.3, 5.0, 4.0]
exp_lower_conf_limit_vals = [
0.13, 0.15, 0.25, 1.65, 3.7, 2.42, 2.21, 1.33, 0.81, 0.64
]
exp_upper_conf_limit_vals = [
8.64, 8.11, 9.21, 9.02, 8.95, 8.44, 8.34, 7.85, 7.94, 7.62
]
(
act_importance_df, act_cols, act_cols_all, act_all_vals,
act_median_vals, act_lower_conf_limit_vals,
act_upper_conf_limit_vals
) = make_feat_importance_plots(
input_feat_importances, input_results_dir, input_plt_name,
True
)
pd.testing.assert_frame_equal(exp_importance_df, act_importance_df)
self.assertEqual(exp_cols, act_cols)
self.assertEqual(exp_cols_all, act_cols_all)
np.testing.assert_almost_equal(exp_all_vals, act_all_vals, 7)
np.testing.assert_almost_equal(
exp_median_vals, act_median_vals, 7
)
np.testing.assert_almost_equal(
exp_lower_conf_limit_vals, act_lower_conf_limit_vals, 7
)
np.testing.assert_almost_equal(
exp_upper_conf_limit_vals, act_upper_conf_limit_vals, 7
)
shutil.rmtree(input_results_dir)
def test_check_arguments(self):
"""
Tests check_arguments in train.py
"""
print('Testing check_arguments')
# Sets "recognised" parameter values that will not raise an exception
x_train = np.array([])
y_train = np.array([])
train_groups = np.array([])
x_test = np.array([])
y_test = np.array([])
selected_features = []
splits = [(y_train, np.array([]))]
const_split = True
resampling_method = 'no_balancing'
n_components_pca = None
run = 'randomsearch'
fixed_params = {}
tuned_params = {}
train_scoring_metric = 'accuracy'
test_scoring_funcs = {}
n_iter = None
cv_folds_inner_loop = 5
cv_folds_outer_loop = 5
draw_conf_mat = True
plt_name = ''
# "Recognised" parameter values should not raise an exception
output_str = check_arguments(
'PlaceHolder', x_train, y_train, train_groups, x_test, y_test,
selected_features, splits, const_split, resampling_method,
n_components_pca, run, fixed_params, tuned_params,
train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(output_str, 'All checks passed')
# "Unrecognised" parameter values should raise an exception
# Tests x_train type
x_train_str = ''
with self.assertRaises(TypeError) as message: check_arguments(
'PlaceHolder', x_train_str, y_train, train_groups, x_test, y_test,
selected_features, splits, const_split, resampling_method,
n_components_pca, run, fixed_params, tuned_params,
train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'Expect "x_train" to be a numpy array of '
'training data fluorescence readings'
)
# Tests y_train type
y_train_str = ''
with self.assertRaises(TypeError) as message: check_arguments(
'PlaceHolder', x_train, y_train_str, train_groups, x_test, y_test,
selected_features, splits, const_split, resampling_method,
n_components_pca, run, fixed_params, tuned_params,
train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'Expect "y_train" to be a numpy array of '
'training data class labels'
)
# Tests train_groups type
train_groups_str = ''
with self.assertRaises(TypeError) as message: check_arguments(
'PlaceHolder', x_train, y_train, train_groups_str, x_test, y_test,
selected_features, splits, const_split, resampling_method,
n_components_pca, run, fixed_params, tuned_params,
train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'Expect "train_groups" to be a numpy array '
'of training data subclass labels'
)
# Tests x_test type
x_test_str = ''
with self.assertRaises(TypeError) as message: check_arguments(
'PlaceHolder', x_train, y_train, train_groups, x_test_str, y_test,
selected_features, splits, const_split, resampling_method,
n_components_pca, run, fixed_params, tuned_params,
train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'Expect "x_test" to be a numpy array of '
'test data fluorescence readings'
)
# Tests y_test type
y_test_str = ''
with self.assertRaises(TypeError) as message: check_arguments(
'PlaceHolder', x_train, y_train, train_groups, x_test, y_test_str,
selected_features, splits, const_split, resampling_method,
n_components_pca, run, fixed_params, tuned_params,
train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'Expect "y_test" to be a numpy array of '
'test data class labels'
)
# Tests y_train is a 1D array
x_train_array = np.array([[1, 1], [1, 1], [1, 1], [1, 1]])
y_train_array = np.array([[2, 2], [2, 2], [2, 2], [2, 2]])
with self.assertRaises(ValueError) as message: check_arguments(
'PlaceHolder', x_train_array, y_train_array, train_groups, x_test,
y_test, selected_features, splits, const_split, resampling_method,
n_components_pca, run, fixed_params, tuned_params,
train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'Expect "y_train" to be a 1D array'
)
# Tests mismatch in x_train and y_train shape
x_train_array = np.array([[1, 1], [1, 1], [1, 1], [1, 1]])
y_train_array = np.array([2, 2, 2, 2, 2])
with self.assertRaises(ValueError) as message: check_arguments(
'PlaceHolder', x_train_array, y_train_array, train_groups, x_test,
y_test, selected_features, splits, const_split, resampling_method,
n_components_pca, run, fixed_params, tuned_params,
train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'Different number of entries (rows) in '
'"x_train" and "y_train"'
)
# Tests train_groups is a 1D array
x_train_array = np.array([[1, 1], [1, 1], [1, 1], [1, 1]])
y_train_array = np.array([2, 2, 2, 2])
train_groups_array = np.array([[3], [3], [3], [3]])
with self.assertRaises(ValueError) as message: check_arguments(
'PlaceHolder', x_train_array, y_train_array, train_groups_array,
x_test, y_test, selected_features, splits, const_split,
resampling_method, n_components_pca, run, fixed_params,
tuned_params, train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'Expect "train_groups" to be a 1D array'
)
# Tests mismatch in x_train and train_groups shape
x_train_array = np.array([[1, 1], [1, 1], [1, 1], [1, 1]])
y_train_array = np.array([2, 2, 2, 2])
train_groups_array = np.array([3, 3, 3])
with self.assertRaises(ValueError) as message: check_arguments(
'PlaceHolder', x_train_array, y_train_array, train_groups_array,
x_test, y_test, selected_features, splits, const_split,
resampling_method, n_components_pca, run, fixed_params,
tuned_params, train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'Different number of entries (rows) in '
'"x_train" and "train_groups"'
)
# Tests y_test is a 1D array
x_test_array = np.array([[4, 4], [4, 4], [4, 4], [4, 4]])
y_test_array = np.array([[5, 5], [5, 5], [5, 5], [5, 5]])
with self.assertRaises(ValueError) as message: check_arguments(
'PlaceHolder', x_train, y_train, train_groups, x_test_array,
y_test_array, selected_features, splits, const_split,
resampling_method, n_components_pca, run, fixed_params,
tuned_params, train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'Expect "y_test" to be a 1D array'
)
# Tests mismatch in x_test and y_test shape
x_test_array = np.array([[4, 4], [4, 4], [4, 4], [4, 4]])
y_test_array = np.array([5, 5, 5, 5, 5, 5])
with self.assertRaises(ValueError) as message: check_arguments(
'PlaceHolder', x_train, y_train, train_groups, x_test_array,
y_test_array, selected_features, splits, const_split,
resampling_method, n_components_pca, run, fixed_params,
tuned_params, train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'Different number of entries (rows) in '
'"x_test" and "y_test"'
)
# Tests mismatch in x_train and x_test shape
x_train_array = np.array([[1, 1], [1, 1], [1, 1], [1, 1]])
y_train_array = np.array([2, 2, 2, 2])
train_groups_array = np.array([3, 3, 3, 3])
x_test_array = np.array([[4, 4, 4], [4, 4, 4]])
y_test_array = np.array([5, 5])
with self.assertRaises(ValueError) as message: check_arguments(
'PlaceHolder', x_train_array, y_train_array, train_groups_array,
x_test_array, y_test_array, selected_features, splits, const_split,
resampling_method, n_components_pca, run, fixed_params,
tuned_params, train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'Different number of features incorporated '
'in the training and test data'
)
# Tests no NaN in x_train
x_train_array = np.array([[1, np.nan], [1, 1], [1, 1], [1, 1]])
y_train_array = np.array([2, 2, 2, 2])
train_groups_array = np.array([3, 3, 3, 3])
x_test_array = np.array([[4, 4], [4, 4]])
y_test_array = np.array([5, 5])
with self.assertRaises(ValueError) as message: check_arguments(
'PlaceHolder', x_train_array, y_train_array, train_groups_array,
x_test_array, y_test_array, selected_features, splits, const_split,
resampling_method, n_components_pca, run, fixed_params,
tuned_params, train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'NaN value(s) detected in "x_train" data'
)
# Tests no non-numeric entries in x_train
x_train_array = np.array([[1, 1], [1, 'X'], [1, 1], [1, 1]])
y_train_array = np.array([2, 2, 2, 2])
train_groups_array = np.array([3, 3, 3, 3])
x_test_array = np.array([[4, 4], [4, 4]])
y_test_array = np.array([5, 5])
with self.assertRaises(ValueError) as message: check_arguments(
'PlaceHolder', x_train_array, y_train_array, train_groups_array,
x_test_array, y_test_array, selected_features, splits, const_split,
resampling_method, n_components_pca, run, fixed_params,
tuned_params, train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'Non-numeric value(s) in "x_train" - expect'
' all values in "x_train" to be integers / floats'
)
# Tests no NaN in y_train
x_train_array = np.array([[1, 1], [1, 1], [1, 1], [1, 1]])
y_train_array = np.array([2, 2, np.nan, 2])
train_groups_array = np.array([3, 3, 3, 3])
x_test_array = np.array([[4, 4], [4, 4]])
y_test_array = np.array([5, 5])
with self.assertRaises(ValueError) as message: check_arguments(
'PlaceHolder', x_train_array, y_train_array, train_groups_array,
x_test_array, y_test_array, selected_features, splits, const_split,
resampling_method, n_components_pca, run, fixed_params,
tuned_params, train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'NaN value(s) detected in "y_train" data'
)
# Tests no NaN in train_groups
x_train_array = np.array([[1, 1], [1, 1], [1, 1], [1, 1]])
y_train_array = np.array([2, 2, 2, 2])
train_groups_array = np.array([np.nan, 3, 3, 3])
x_test_array = np.array([[4, 4], [4, 4]])
y_test_array = np.array([5, 5])
with self.assertRaises(ValueError) as message: check_arguments(
'PlaceHolder', x_train_array, y_train_array, train_groups_array,
x_test_array, y_test_array, selected_features, splits, const_split,
resampling_method, n_components_pca, run, fixed_params,
tuned_params, train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'NaN value(s) detected in "train_groups" data'
)
# Tests no NaN in x_test
x_train_array = np.array([[1, 1], [1, 1], [1, 1], [1, 1]])
y_train_array = np.array([2, 2, 2, 2])
train_groups_array = np.array([3, 3, 3, 3])
x_test_array = np.array([[np.nan, 4], [4, 4]])
y_test_array = np.array([5, 5])
with self.assertRaises(ValueError) as message: check_arguments(
'PlaceHolder', x_train_array, y_train_array, train_groups_array,
x_test_array, y_test_array, selected_features, splits, const_split,
resampling_method, n_components_pca, run, fixed_params,
tuned_params, train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'NaN value(s) detected in "x_test" data'
)
# Tests no non-numeric values in x_test
x_train_array = np.array([[1, 1], [1, 1], [1, 1], [1, 1]])
y_train_array = np.array([2, 2, 2, 2])
train_groups_array = np.array([3, 3, 3, 3])
x_test_array = np.array([[4, 4], [4, 'X']])
y_test_array = np.array([5, 5])
with self.assertRaises(ValueError) as message: check_arguments(
'PlaceHolder', x_train_array, y_train_array, train_groups_array,
x_test_array, y_test_array, selected_features, splits, const_split,
resampling_method, n_components_pca, run, fixed_params,
tuned_params, train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'Non-numeric value(s) in "x_test" - expect '
'all values in "x_test" to be integers / floats'
)
# Tests no NaN in y_test
x_train_array = np.array([[1, 1], [1, 1], [1, 1], [1, 1]])
y_train_array = np.array([2, 2, 2, 2])
train_groups_array = np.array([3, 3, 3, 3])
x_test_array = np.array([[4, 4], [4, 4]])
y_test_array = np.array([5, np.nan])
with self.assertRaises(ValueError) as message: check_arguments(
'PlaceHolder', x_train_array, y_train_array, train_groups_array,
x_test_array, y_test_array, selected_features, splits, const_split,
resampling_method, n_components_pca, run, fixed_params,
tuned_params, train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'NaN value(s) detected in "y_test" data'
)
# Test selected_features is a list or a positive integer
selected_features_str = 'X'
with self.assertRaises(TypeError) as message: check_arguments(
'PlaceHolder', x_train, y_train, train_groups, x_test, y_test,
selected_features_str, splits, const_split, resampling_method,
n_components_pca, run, fixed_params, tuned_params,
train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'Expect "selected_features" to be either a '
'list of features to retain in the analysis, or an integer number '
'of features (to be selected via permutation analysis)'
)
# Test that an integer value of selected_features is positive
selected_features_str = 0
with self.assertRaises(ValueError) as message: check_arguments(
'PlaceHolder', x_train, y_train, train_groups, x_test, y_test,
selected_features_str, splits, const_split, resampling_method,
n_components_pca, run, fixed_params, tuned_params,
train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'The number of selected_features must be a '
'positive integer'
)
# Test length of selected_features list is less than or equal to the
# number of columns in x_train
selected_features_list = ['X', 'X', 'X']
x_train_array = np.array([[1, 1], [1, 1], [1, 1], [1, 1]])
y_train_array = np.array([2, 2, 2, 2])
train_groups_array = np.array([3, 3, 3, 3])
x_test_array = np.array([[4, 4], [4, 4]])
y_test_array = np.array([5, 5])
with self.assertRaises(ValueError) as message: check_arguments(
'PlaceHolder', x_train_array, y_train_array, train_groups_array,
x_test_array, y_test_array, selected_features_list, splits,
const_split, resampling_method, n_components_pca, run, fixed_params,
tuned_params, train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'There is a greater number of features '
'in "selected_features" than there are columns in the '
'"x_train" input arrays'
)
# Test length of selected_features list is less than or equal to the
# number of columns in x_test (when x_train is not defined)
selected_features_list = ['X', 'X', 'X']
x_train_array = np.array([])
y_train_array = np.array([])
train_groups_array = None
x_test_array = np.array([[4, 4], [4, 4]])
y_test_array = np.array([5, 5])
with self.assertRaises(ValueError) as message: check_arguments(
'PlaceHolder', x_train_array, y_train_array, train_groups_array,
x_test_array, y_test_array, selected_features_list, splits,
const_split, resampling_method, n_components_pca, run, fixed_params,
tuned_params, train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'There is a greater number of features '
'in "selected_features" than there are columns in the "x_test" '
'input arrays'
)
# Tests splits type
x_train_array = np.array([[1, 1], [1, 1], [1, 1], [1, 1]])
y_train_array = np.array([2, 2, 2, 2])
train_groups_array = np.array([3, 3, 3, 3])
x_test_array = np.array([[4, 4], [4, 4]])
y_test_array = np.array([5, 5])
selected_features_list = ['X', 'X']
splits_gen = create_generator(x_train_array.shape[0])
with self.assertRaises(TypeError) as message: check_arguments(
'PlaceHolder', x_train_array, y_train_array, train_groups_array,
x_test_array, y_test_array, selected_features_list, splits_gen,
const_split, resampling_method, n_components_pca, run, fixed_params,
tuned_params, train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'Expect "splits" to be a list of train/test'
' splits'
)
# Tests splits list matches dimensions of x_train
x_train_array = np.array([[1, 1], [1, 1], [1, 1], [1, 1]])
y_train_array = np.array([2, 2, 2, 2])
train_groups_array = np.array([3, 3, 3, 3])
x_test_array = np.array([[4, 4], [4, 4]])
y_test_array = np.array([5, 5])
selected_features_list = ['X', 'X']
splits_list = [(np.array([6, 6, 6]), np.array([6])),
(np.array([]), np.array([6, 6, 6, 6])),
(np.array([6]), np.array([6, 6, 6, 6]))]
with self.assertRaises(ValueError) as message: check_arguments(
'PlaceHolder', x_train_array, y_train_array, train_groups_array,
x_test_array, y_test_array, selected_features_list, splits_list,
const_split, resampling_method, n_components_pca, run, fixed_params,
tuned_params, train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'Size of train test splits generated by '
'"splits" does not match the number of rows in the input array '
'"x_train"'
)
# Tests const_split type
const_split_int = 1
with self.assertRaises(TypeError) as message: check_arguments(
'PlaceHolder', x_train, y_train, train_groups, x_test, y_test,
selected_features, splits, const_split_int, resampling_method,
n_components_pca, run, fixed_params, tuned_params,
train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'Expect "const_split" to be a Boolean (True'
' or False)'
)
# Tests resampling_method is recognised
resampling_method_str = ''
with self.assertRaises(ValueError) as message: check_arguments(
'PlaceHolder', x_train, y_train, train_groups, x_test, y_test,
selected_features, splits, const_split, resampling_method_str,
n_components_pca, run, fixed_params, tuned_params,
train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), '"resampling_method" unrecognised - expect '
'value to be one of the following list entries:\n[\'no_balancing\','
' \'max_sampling\', \'smote\', \'smoteenn\', \'smotetomek\']'
)
# Test n_components_pca is an integer
n_components_pca_str = 2.0
with self.assertRaises(TypeError) as message: check_arguments(
'PlaceHolder', x_train, y_train, train_groups, x_test, y_test,
selected_features, splits, const_split, resampling_method,
n_components_pca_str, run, fixed_params, tuned_params,
train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'Expect "n_components_pca" to be set either'
' to None or to a positive integer value between 1 and the number '
'of features'
)
# Test n_components_pca is an integer in the range of 1 - number of
# features
x_train_array = np.array([[1, 1], [1, 1], [1, 1], [1, 1]])
y_train_array = np.array([2, 2, 2, 2])
train_groups_array = np.array([3, 3, 3, 3])
x_test_array = np.array([[4, 4], [4, 4]])
y_test_array = np.array([5, 5])
selected_features_list = ['X', 'X']
splits_list = [(np.array([6, 6, 6]), np.array([6])),
(np.array([]), np.array([6, 6, 6, 6])),
(np.array([6]), np.array([6, 6, 6]))]
n_components_pca_int = x_train_array.shape[1] + 1
with self.assertRaises(ValueError) as message: check_arguments(
'PlaceHolder', x_train_array, y_train_array, train_groups_array,
x_test_array, y_test_array, selected_features_list, splits_list,
const_split, resampling_method, n_components_pca_int, run,
fixed_params, tuned_params, train_scoring_metric,
test_scoring_funcs, n_iter, cv_folds_inner_loop,
cv_folds_outer_loop, draw_conf_mat, plt_name, True
)
self.assertEqual(
str(message.exception), 'Expect "n_components_pca" to be set either'
' to None or to a positive integer value between 1 and the number '
'of features'
)
# Tests requirement for run to be "randomsearch", "gridsearch" or
# "train" when func_name is set to "run_ml"
x_train_array = np.array([])
y_train_array = np.array([])
train_groups_array = np.array([])
x_test_array = np.array([[4, 4], [4, 4]])
y_test_array = np.array([5, 5])
selected_features_list = ['X', 'X']
splits_list = [(np.array([]), np.array([])),
(np.array([]), np.array([])),
(np.array([]), np.array([]))]
n_components_pca_int = x_test_array.shape[1]
run_str = 'random search'
with self.assertRaises(ValueError) as message: check_arguments(
'run_ml', x_train_array, y_train_array, train_groups_array,
x_test_array, y_test_array, selected_features_list, splits_list,
const_split, resampling_method, n_components_pca_int, run_str,
fixed_params, tuned_params, train_scoring_metric,
test_scoring_funcs, n_iter, cv_folds_inner_loop,
cv_folds_outer_loop, draw_conf_mat, plt_name, True
)
self.assertEqual(
str(message.exception), 'Expect "run" to be set to either '
'"randomsearch", "gridsearch" or "train"'
)
# Tests requirement for run to be "randomsearch" or "gridsearch" when
# func_name is set to "run_nested_CV"
run_str = 'train'
with self.assertRaises(ValueError) as message: check_arguments(
'run_nested_CV', x_train, y_train, train_groups, x_test, y_test,
selected_features, splits, const_split, resampling_method,
n_components_pca, run_str, fixed_params, tuned_params,
train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'Expect "run" to be set to either '
'"randomsearch" or "gridsearch"'
)
# Tests fixed_params type
fixed_params_df = pd.DataFrame({})
with self.assertRaises(TypeError) as message: check_arguments(
'PlaceHolder', x_train, y_train, train_groups, x_test, y_test,
selected_features, splits, const_split, resampling_method,
n_components_pca, run, fixed_params_df, tuned_params,
train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'Expect "fixed_params" to be a dictionary '
'of parameter values with which to run the selected classifier '
'algorithm'
)
# Test tuned_params type
x_train_array = np.array([[1, 1], [1, 1], [1, 1], [1, 1]])
y_train_array = np.array([2, 2, 2, 2])
train_groups_array = np.array([3, 3, 3, 3])
x_test_array = np.array([[4, 4], [4, 4]])
y_test_array = np.array([5, 5])
selected_features_list = ['X', 'X']
splits_list = [(np.array([6, 6, 6]), np.array([6])),
(np.array([]), np.array([6, 6, 6, 6])),
(np.array([6]), np.array([6, 6, 6]))]
n_components_pca_int = x_train_array.shape[1]
run_str = 'train'
fixed_params_dict = {'dual': False}
tuned_params_list = []
with self.assertRaises(TypeError) as message: check_arguments(
'run_ml', x_train_array, y_train_array, train_groups_array,
x_test_array, y_test_array, selected_features_list, splits_list,
const_split, resampling_method, n_components_pca_int, run_str,
fixed_params_dict, tuned_params_list, train_scoring_metric,
test_scoring_funcs, n_iter, cv_folds_inner_loop,
cv_folds_outer_loop, draw_conf_mat, plt_name, True
)
self.assertEqual(
str(message.exception), 'Expect "tuned_params" to be a dictionary '
'of parameter names (keys) and ranges of values to optimise '
'(values) using either random or grid search'
)
# Test train_scoring_metric is a string in the list of recognised
# scoring metrics in sklearn
train_scoring_metric_str = 'mutual_info_score' # Scoring metric used
# for clustering, not classification
with self.assertRaises(ValueError) as message: check_arguments(
'PlaceHolder', x_train, y_train, train_groups, x_test, y_test,
selected_features, splits, const_split, resampling_method,
n_components_pca, run, fixed_params, tuned_params,
train_scoring_metric_str, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), '"train_scoring_metric" not recogised - '
'please specify a string corresponding to the name of the metric '
'you would like to use in the sklearn.metrics module, e.g. '
'"accuracy".\nExpect metric to be in the following list:\n'
'[\'accuracy\', \'balanced_accuracy\', \'top_k_accuracy\', '
'\'average_precision\', \'neg_brier_score\', \'f1\', \'f1_micro\', '
'\'f1_macro\', \'f1_weighted\', \'f1_samples\', \'neg_log_loss\', '
'\'precision\', \'precision_micro\', \'precision_macro\', '
'\'precision_weighted\', \'precision_samples\', \'recall\', '
'\'recall_micro\', \'recall_macro\', \'recall_weighted\', '
'\'recall_samples\', \'jaccard\', \'jaccard_micro\', '
'\'jaccard_macro\', \'jaccard_weighted\', \'jaccard_samples\', '
'\'roc_auc\', \'roc_auc_ovr\', \'roc_auc_ovo\', '
'\'roc_auc_ovr_weighted\', \'roc_auc_ovo_weighted\']'
)
# Test test_scoring_funcs is a dictionary of scoring functions (keys)
# and dictionaries of parameter values to run these functions with
from sklearn.metrics import accuracy_score, jaccard_score, make_scorer
train_scoring_metric_func = make_scorer(accuracy_score)
test_scoring_funcs_dict = {accuracy_score: {'normalize': True},
jaccard_score: {'average': 'weighted'}}
with self.assertRaises(ValueError) as message: check_arguments(
'PlaceHolder', x_train, y_train, train_groups, x_test, y_test,
selected_features, splits, const_split, resampling_method,
n_components_pca, run, fixed_params, tuned_params,
train_scoring_metric_func, test_scoring_funcs_dict, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), 'Scoring function jaccard_score not '
'recognised.\nExpect scoring functions to be in the following '
'list:\n[\'accuracy_score\', \'f1_score\', \'precision_score\', '
'\'recall_score\', \'roc_auc_score\', \'cohen_kappa_score\']'
)
# Test n_iter type is an integer
n_iter_float = 3.0
with self.assertRaises(TypeError) as message: check_arguments(
'PlaceHolder', x_train, y_train, train_groups, x_test, y_test,
selected_features, splits, const_split, resampling_method,
n_components_pca, run, fixed_params, tuned_params,
train_scoring_metric, test_scoring_funcs, n_iter_float,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), '"n_iter" should be set to a positive '
'integer value'
)
# Test n_iter is a positive integer
n_iter_int = -2
with self.assertRaises(ValueError) as message: check_arguments(
'PlaceHolder', x_train, y_train, train_groups, x_test, y_test,
selected_features, splits, const_split, resampling_method,
n_components_pca, run, fixed_params, tuned_params,
train_scoring_metric, test_scoring_funcs, n_iter_int,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat, plt_name,
True
)
self.assertEqual(
str(message.exception), '"n_iter" should be set to a positive '
'integer value'
)
# Test cv_folds_inner_loop type is an integer
cv_folds_inner_loop_dict = OrderedDict()
with self.assertRaises(TypeError) as message: check_arguments(
'PlaceHolder', x_train, y_train, train_groups, x_test, y_test,
selected_features, splits, const_split, resampling_method,
n_components_pca, run, fixed_params, tuned_params,
train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop_dict, cv_folds_outer_loop, draw_conf_mat,
plt_name, True
)
self.assertEqual(
str(message.exception), 'Expect "cv_folds_inner_loop" to be a '
'positive integer value in the range of 2 - 20'
)
# Test cv_folds_inner_loop is an integer in the range of 2 - 20
cv_folds_inner_loop_int = 21
with self.assertRaises(ValueError) as message: check_arguments(
'PlaceHolder', x_train, y_train, train_groups, x_test, y_test,
selected_features, splits, const_split, resampling_method,
n_components_pca, run, fixed_params, tuned_params,
train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop_int, cv_folds_outer_loop, draw_conf_mat,
plt_name, True
)
self.assertEqual(
str(message.exception), 'Expect "cv_folds_inner_loop" to be a '
'positive integer value in the range of 2 - 20'
)
# Test cv_folds_outer_loop type is set to 'loocv' or an integer value
cv_folds_outer_loop_float = 2.3
with self.assertRaises(TypeError) as message: check_arguments(
'PlaceHolder', x_train, y_train, train_groups, x_test, y_test,
selected_features, splits, const_split, resampling_method,
n_components_pca, run, fixed_params, tuned_params,
train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop_float, draw_conf_mat,
plt_name, True
)
self.assertEqual(
str(message.exception), 'Expect "cv_folds_outer_loop" to be set to '
'either "loocv" (leave-one-out cross-validation) or a positive '
'integer in the range of 2 - 20'
)
# Test cv_folds_outer_loop, if a string, is set to 'loocv'
cv_folds_outer_loop_str = ''
with self.assertRaises(ValueError) as message: check_arguments(
'PlaceHolder', x_train, y_train, train_groups, x_test, y_test,
selected_features, splits, const_split, resampling_method,
n_components_pca, run, fixed_params, tuned_params,
train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop_str, draw_conf_mat,
plt_name, True
)
self.assertEqual(
str(message.exception), 'Expect "cv_folds_outer_loop" to be set to '
'either "loocv" (leave-one-out cross-validation) or a positive '
'integer in the range of 2 - 20'
)
# Test cv_folds_outer_loop is an integer in the range of 2 - 20
cv_folds_outer_loop_int = 1
with self.assertRaises(ValueError) as message: check_arguments(
'PlaceHolder', x_train, y_train, train_groups, x_test, y_test,
selected_features, splits, const_split, resampling_method,
n_components_pca, run, fixed_params, tuned_params,
train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop_int, draw_conf_mat,
plt_name, True
)
self.assertEqual(
str(message.exception), 'Expect "cv_folds_outer_loop" to be set to '
'either "loocv" (leave-one-out cross-validation) or a positive '
'integer in the range of 2 - 20'
)
# Test draw_conf_mat type is a Boolean
draw_conf_mat_float = 0.0
with self.assertRaises(TypeError) as message: check_arguments(
'PlaceHolder', x_train, y_train, train_groups, x_test, y_test,
selected_features, splits, const_split, resampling_method,
n_components_pca, run, fixed_params, tuned_params,
train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat_float,
plt_name, True
)
self.assertEqual(
str(message.exception), 'Expect "draw_conf_mat" to be a Boolean '
'value (True or False)'
)
# Test plt_name type is a string
plt_name_bool = False
with self.assertRaises(TypeError) as message: check_arguments(
'PlaceHolder', x_train, y_train, train_groups, x_test, y_test,
selected_features, splits, const_split, resampling_method,
n_components_pca, run, fixed_params, tuned_params,
train_scoring_metric, test_scoring_funcs, n_iter,
cv_folds_inner_loop, cv_folds_outer_loop, draw_conf_mat,
plt_name_bool, True
)
self.assertEqual(
str(message.exception), 'Expect "plt_name" to be a string'
)
# Test passes with more complex default values
from sklearn.metrics import precision_score
x_train_ext = np.array([[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]])
y_train_ext = np.array([2, 2, 2, 2, 2])
train_groups_ext = np.array([3, 3, 3, 3, 3])
x_test_ext = np.array([[4, 4, 4, 4]])
y_test_ext = np.array([5])
selected_features_ext = ['A', 'C', 'B']
splits_ext = [(np.array([6, 6, 6, 6, 6]), np.array([]))]
const_split_ext = False
resampling_method_ext = 'smote'
n_components_pca_ext = 4
run_ext = 'train'
fixed_params_ext = {'randomstate': 0}
tuned_params_ext = {'n_estimators': np.linspace(5, 50, 10)}
train_scoring_metric_ext = 'precision'
test_scoring_funcs_ext = {precision_score: {'average': 'macro'}}
n_iter_ext = 100
cv_folds_inner_loop_ext = 10
cv_folds_outer_loop_ext = 'loocv'
draw_conf_mat_ext = False
plt_name_ext = 'run_ml'
output_str = check_arguments(
'PlaceHolder', x_train_ext, y_train_ext, train_groups_ext,
x_test_ext, y_test_ext, selected_features_ext, splits_ext,
const_split_ext, resampling_method_ext, n_components_pca_ext,
run_ext, fixed_params_ext, tuned_params_ext,
train_scoring_metric_ext, test_scoring_funcs_ext, n_iter_ext,
cv_folds_inner_loop_ext, cv_folds_outer_loop_ext, draw_conf_mat_ext,
plt_name_ext, True
)
self.assertEqual(output_str, 'All checks passed')
def test_class_initialisation(self):
"""
Tests initialisation of RunML class
"""
print('Testing RunML class')
results_dir = 'tests/Temp_output'
fluor_data = pd.DataFrame({})
classes = None
subclasses = None
shuffle = True
# Test recognises that output directory already exists
os.mkdir(results_dir)
with self.assertRaises(FileExistsError) as message:
test_ml_train = RunML(
results_dir, fluor_data, classes, subclasses, shuffle, True
)
self.assertEqual(
str(message.exception),
'Directory {} already found in {}'.format(results_dir, os.getcwd())
)
shutil.rmtree('tests/Temp_output')
# Test "classes" must be None or a list
if os.path.isdir(results_dir):
shutil.rmtree(results_dir)
with self.assertRaises(TypeError) as message:
test_ml_train = RunML(
results_dir, fluor_data, np.array([]), subclasses, shuffle, True
)
self.assertEqual(
str(message.exception),
'Expect "classes" argument to be set either to None or to a list'
)
# Test "subclasses" must be None or a list
if os.path.isdir(results_dir):
shutil.rmtree(results_dir)
with self.assertRaises(TypeError) as message:
test_ml_train = RunML(
results_dir, fluor_data, [], np.array([]), shuffle, True
)
self.assertEqual(
str(message.exception),
'Expect "subclasses" argument to be set either to None or to a list'
)
# Test that if "subclasses" is set to a value other than None, classes
# cannot be set to None
if os.path.isdir(results_dir):
shutil.rmtree(results_dir)
with self.assertRaises(TypeError) as message:
test_ml_train = RunML(
results_dir, fluor_data, classes, np.array([]), shuffle, True
)
self.assertEqual(
str(message.exception),
'If "subclasses" is set to a value other than None, then "classes" '
'must also be set to a value other than None'
)
# Tests that if subclasses list is defined, the entries in the list
# are formatted as "class_subclass"
if os.path.isdir(results_dir):
shutil.rmtree(results_dir)
with self.assertRaises(ValueError) as message:
test_ml_train = RunML(
results_dir, fluor_data, ['A', 'B', 'A', 'B'],
['A_1', 'B_1_', 'A_2', 'B_2'], shuffle, True
)
self.assertEqual(
str(message.exception),
'Entries in subclass list should be formatted as "class_subclass" '
'(in which neither "class" nor "subclass" contains the character '
'"_")'
)
if os.path.isdir(results_dir):
shutil.rmtree(results_dir)
with self.assertRaises(ValueError) as message:
test_ml_train = RunML(
results_dir, fluor_data, ['A', 'B', 'A', 'B'],
['A_1', 'C_1', 'A_2', 'B_2'], shuffle, True
)
self.assertEqual(
str(message.exception),
'Entries in subclass list should be formatted as "class_subclass" '
'(in which neither "class" nor "subclass" contains the character '
'"_")'
)
# Test requires "Analyte" column in fluor_data if classes is set to None
if os.path.isdir(results_dir):
shutil.rmtree(results_dir)
with self.assertRaises(KeyError) as message:
test_ml_train = RunML(
results_dir, fluor_data, classes, subclasses, shuffle, True
)
self.assertEqual(
str(message.exception),
'\'No "Analyte" column detected in input dataframe - if you '
'do not define the "classes" argument, the input dataframe'
' must contain an "Analyte" column\''
)
# Tests that number of entries in "classes" and "subclasses" lists are
# equal to one another and to the number of rows in "fluor_data"
# dataframe
if os.path.isdir(results_dir):
shutil.rmtree(results_dir)
fluor_data_df = pd.DataFrame({'Feature_1': [1, 3, 2, 4],
'Feature_2': [2, 4, 3, 1]})
classes_list = ['A', 'B', 'A', 'B']
subclasses_list = ['A_1', 'B_1', 'A_2', 'B_2']
with self.assertRaises(ValueError) as message:
test_ml_train = RunML(
results_dir, fluor_data_df, ['A', 'B', 'A', 'B', 'A'],
subclasses_list, shuffle, True
)
self.assertEqual(
str(message.exception),
'Mismatch between number of entries in the input dataframe and '
'the "classes" list'
)
if os.path.isdir(results_dir):
shutil.rmtree(results_dir)
with self.assertRaises(ValueError) as message:
test_ml_train = RunML(
results_dir, fluor_data_df, classes_list, ['A_1', 'B_1', 'B_2'],
shuffle, True
)
self.assertEqual(
str(message.exception),
'Mismatch between number of entries in the input dataframe and '
'the "subclasses" list'
)
# Tests that overwriting of "Classes" or "Subclasses" columns in the
# "fluor_data" dataframe is prevented
if os.path.isdir(results_dir):
shutil.rmtree(results_dir)
with self.assertRaises(NameError) as message:
test_ml_train = RunML(
results_dir,
pd.DataFrame({'Feature_1': [1, 3, 2, 4], 'Subclasses': [2, 4, 3, 1]}),
classes_list, subclasses_list, shuffle, True
)
self.assertEqual(
str(message.exception),
'Please rename any columns in input dataframe labelled either '
'"Classes" or "Subclasses", as these columns are added to the '
'dataframe by the code during data processing'
)
# Tests no NaN or non-numeric values in "fluor_data" dataframe
if os.path.isdir(results_dir):
shutil.rmtree(results_dir)
with self.assertRaises(ValueError) as message:
test_ml_train = RunML(
results_dir,
pd.DataFrame({'Feature_1': [1, 3.0, 2, 4], 'Feature_2': [2, 4, np.nan, 1]}),
['A', 'B', 'A', 'B'], [np.nan, np.nan, np.nan, np.nan], shuffle, True
)
self.assertEqual(
str(message.exception), 'NaN detected in input dataframe'
)
if os.path.isdir(results_dir):
shutil.rmtree(results_dir)
with self.assertRaises(ValueError) as message:
test_ml_train = RunML(
results_dir,
pd.DataFrame({'Feature_1': [1, 3.0, 2, '4'], 'Feature_2': [2, 4, 3, 1]}),
['A', 'B', 'A', 'B'], [np.nan, np.nan, np.nan, np.nan], shuffle, True
)
self.assertEqual(
str(message.exception), 'Non-numeric value detected in input dataframe'
)
# Tests no NaN values in "classes" list
if os.path.isdir(results_dir):
shutil.rmtree(results_dir)
with self.assertRaises(ValueError) as message:
test_ml_train = RunML(
results_dir,
pd.DataFrame({'Feature_1': [1, 3.0, 2, 4], 'Feature_2': [2, 4, 3, 1]}),
['A', 'B', 'A', np.nan], [np.nan, np.nan, np.nan, np.nan], shuffle, True
)
self.assertEqual(
str(message.exception), 'NaN detected in class values'
)
# Tests "subclasses" list is not a mixture of NaN and other values
if os.path.isdir(results_dir):
shutil.rmtree(results_dir)
with self.assertRaises(ValueError) as message:
test_ml_train = RunML(
results_dir,
pd.DataFrame({'Feature_1': [1, 3.0, 2, 4], 'Feature_2': [2, 4, 3, 1]}),
['A', 'B', 'A', 'B'], [np.nan, 1.0, np.nan, np.nan], shuffle, True
)
self.assertEqual(
str(message.exception), 'NaN detected in subclass values'
)
# Tests that "shuffle" is a Boolean
if os.path.isdir(results_dir):
shutil.rmtree(results_dir)
with self.assertRaises(TypeError) as message:
test_ml_train = RunML(
results_dir,
pd.DataFrame({'Feature_1': [1, 3.0, 2, 4], 'Feature_2': [2, 4, 3, 1]}),
['A', 'B', 'A', 'B'], [np.nan, np.nan, np.nan, np.nan], [], True
)
self.assertEqual(
str(message.exception),
'Expect "shuffle" to be a Boolean value (True or False)'
)
# Tests object attributes saved by RunML
if os.path.isdir(results_dir):
shutil.rmtree(results_dir)
test_ml_train = RunML(
results_dir,
pd.DataFrame({'Feature_1': [1.0, 3.0, 2.0, 4.0],
'Feature_2': [2.0, 4.0, 3.0, 1.0],
'Analyte': ['A', 'B', 'A', 'B']}),
None, None, False, True
)
# Check self.classes is a numpy array of class labels (dtype=str)
np.testing.assert_equal(np.array(['A', 'B', 'A', 'B']), test_ml_train.classes)
# Check that self.sub_classes is either a numpy array of subclass labels
# (dtype=str) or None
self.assertEqual(None, test_ml_train.sub_classes)
# Check that self.fluor_data is a dataframe
pd.testing.assert_frame_equal(
pd.DataFrame({'Feature_1': [1.0, 3.0, 2.0, 4.0],
'Feature_2': [2.0, 4.0, 3.0, 1.0]}),
test_ml_train.fluor_data
)
# Check that self.x is a numpy array of self.fluor_data
np.testing.assert_equal(np.array([[1.0, 2.0],
[3.0, 4.0],
[2.0, 3.0],
[4.0, 1.0]]),
test_ml_train.x)
# Check that self.y is the same as self.classes
np.testing.assert_equal(test_ml_train.classes, test_ml_train.y)
if os.path.isdir(results_dir):
shutil.rmtree(results_dir)
def test_split_train_test_data_random(self):
"""
Tests split_train_test_data_random in train.py
"""
print('Testing split_train_test_data_random')
results_dir = 'tests/Temp_output'
fluor_data = pd.DataFrame({'Feature_1': [4, 7, 2, 9],
'Feature_2': [6, 6, 2, 6],
'Feature_3': [2, 6, 4, 6]})
classes = ['A', 'B', 'B', 'A']
subclasses = ['A_1', 'B_1', 'B_2', 'A_2']
shuffle = False
test_ml_train = RunML(
results_dir, fluor_data, classes, subclasses, shuffle, True
)
# Default function arguments
x = np.array([[1, 2], [3, 4]])
y = np.array(['a', 'b'])
const_split = True
percent_test = 0.2
# Test x is a numpy array
with self.assertRaises(TypeError) as message:
test_ml_train.split_train_test_data_random(
[[1, 2], [3, 4]], y, const_split, percent_test, True
)
self.assertEqual(
str(message.exception), 'Expect "x" to be an array of x values'
)
# Test y is a numpy array
with self.assertRaises(TypeError) as message:
test_ml_train.split_train_test_data_random(
x, ['a', 'b'], const_split, percent_test, True
)
self.assertEqual(
str(message.exception), 'Expect "y" to be an array of y values'
)
# Test that dimensions of x and y values match
with self.assertRaises(ValueError) as message:
test_ml_train.split_train_test_data_random(
np.array([[1, 2], [3, 4], [5, 6]]), y, const_split,
percent_test, True
)
self.assertEqual(
str(message.exception), 'Mismatch in the dimensions of the input '
'"x" and "y" values'
)
# Test x doesn't contain any NaN values
with self.assertRaises(ValueError) as message:
test_ml_train.split_train_test_data_random(
np.array([[1, np.nan], [3, 4]]), y, const_split, percent_test,
True
)
self.assertEqual(
str(message.exception), 'NaN value(s) detected in "x" data'
)
# Test y doesn't contain any NaN values
with self.assertRaises(ValueError) as message:
test_ml_train.split_train_test_data_random(
x, np.array([np.nan, 'b'], dtype=object), const_split,
percent_test, True
)
self.assertEqual(
str(message.exception), 'NaN value(s) detected in "y" data'
)
# Test const_split is a boolean
with self.assertRaises(TypeError) as message:
test_ml_train.split_train_test_data_random(
x, y, '', True, True
)
self.assertEqual(
str(message.exception), 'Expect "const_split" to be a Boolean (True'
' or False)'
)
# Test percent_test is a float/integer value
with self.assertRaises(TypeError) as message:
test_ml_train.split_train_test_data_random(
x, y, const_split, True, True
)
self.assertEqual(
str(message.exception), '"percent_test" argument should be set to a'
' float in the range 0 - 0.5'
)
# Test percent_test is in the range 0 - 0.5
with self.assertRaises(ValueError) as message:
test_ml_train.split_train_test_data_random(
x, y, const_split, 0.52, True
)
self.assertEqual(
str(message.exception), '"percent_test" argument should be set to a'
' float in the range 0 - 0.5'
)
        # Test stratified k-fold split (the random seed has been fixed so that
        # the split is reproducible within this test, but not when running the
        # code outside of the unit tests)
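        # (A minimal sketch of the assumed behaviour: if the method wraps an
        # sklearn splitter such as train_test_split / StratifiedShuffleSplit
        # with a fixed random_state, e.g.
        #   >>> train_test_split(x, y, test_size=0.2, random_state=<fixed seed>)
        # then the same 4:1 split of the 5 input rows is returned every time,
        # which is what the hard-coded indices below rely on)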
exp_split = [np.array([0, 2, 3, 4]), np.array([1])]
act_split = test_ml_train.split_train_test_data_random(
np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]),
np.array(['a', 'a', 'a', 'a', 'a']), False, 0.2, True
)
np.testing.assert_equal(exp_split[0], act_split[0])
np.testing.assert_equal(exp_split[1], act_split[1])
# Removes directory created by defining RunML object
shutil.rmtree('tests/Temp_output')
def test_split_train_test_data_user_defined(self):
"""
Tests split_train_test_data_user_defined in train.py
"""
print('Testing split_train_test_data_user_defined')
def_results_dir = 'tests/Temp_output'
def_fluor_data = pd.DataFrame({'Feature_1': [4, 7, 2, 9],
'Feature_2': [6, 6, 2, 6],
'Feature_3': [2, 6, 4, 6]})
def_classes = ['A', 'B', 'B', 'A']
def_subclasses = ['A_1', 'B_1', 'B_2', 'A_2']
def_shuffle = False
test_ml_train = RunML(
def_results_dir, def_fluor_data, def_classes, def_subclasses,
def_shuffle, True
)
# Default function arguments
subclasses = np.array([
'Green_Diplomat', 'Black_PGTips', 'Green_Diplomat',
'Black_Dragonfly', 'Black_Yorkshire', 'Green_Dragonfly',
'Black_Dragonfly', 'Green_Clipper', 'Black_PGTips',
'Green_Diplomat', 'Green_Dragonfly', 'Black_PGTips',
'Green_Clipper', 'Green_Diplomat', 'Green_Diplomat',
'Black_Yorkshire', 'Black_Yorkshire', 'Black_PGTips',
'Black_Dragonfly', 'Black_Dragonfly', 'Green_Dragonfly',
'Green_Clipper', 'Black_Dragonfly', 'Black_PGTips'
], dtype=object)
test_subclasses = np.array(
['Green_Dragonfly', 'Black_Yorkshire'], dtype=object
)
# Tests "subclasses" is a numpy array
with self.assertRaises(TypeError) as message:
test_ml_train.split_train_test_data_user_defined(
list(subclasses), test_subclasses
)
self.assertEqual(
str(message.exception), 'Expect "subclasses" to be a (1D) array of '
'subclass values'
)
# Tests "subclasses" is a 1D numpy array
with self.assertRaises(ValueError) as message:
test_ml_train.split_train_test_data_user_defined(
np.array([[subclass] for subclass in list(subclasses)]),
test_subclasses
)
self.assertEqual(
str(message.exception), 'Expect "subclasses" to be a 1D array'
)
# Tests no NaN values in "subclasses"
nan_subclasses = copy.deepcopy(subclasses)
nan_subclasses[15] = np.nan
with self.assertRaises(ValueError) as message:
test_ml_train.split_train_test_data_user_defined(
nan_subclasses, test_subclasses
)
self.assertEqual(
str(message.exception), 'NaN value(s) detected in "subclasses" array'
)
# Tests "test_subclasses" is a numpy array
with self.assertRaises(TypeError) as message:
test_ml_train.split_train_test_data_user_defined(
subclasses, list(test_subclasses)
)
self.assertEqual(
str(message.exception), 'Expect "test_subclasses" argument to be a'
' (1D) array of the subclass values that should be separated out '
'into the test set'
)
# Tests "test_subclasses" is a 1D numpy array
with self.assertRaises(ValueError) as message:
test_ml_train.split_train_test_data_user_defined(
subclasses,
np.array([[test_subclass] for test_subclass in list(test_subclasses)])
)
self.assertEqual(
str(message.exception), 'Expect "test_subclasses" to be a 1D array'
)
# Tests no NaN values in "test_subclasses"
nan_test_subclasses = copy.deepcopy(test_subclasses)
nan_test_subclasses[1] = np.nan
with self.assertRaises(ValueError) as message:
test_ml_train.split_train_test_data_user_defined(
subclasses, nan_test_subclasses
)
self.assertEqual(
str(message.exception),
'NaN value(s) detected in "test_subclasses" array'
)
# Tests that all entries in "test_subclasses" are also included in
# "subclasses"
with self.assertRaises(ValueError) as message:
test_ml_train.split_train_test_data_user_defined(
np.array([subclass for subclass in subclasses
if subclass != 'Black_Yorkshire']),
test_subclasses
)
self.assertEqual(
str(message.exception),
'Not all of the entries in the "test_subclasses" array are found in'
' the "subclasses" array. Expect "test_subclasses" argument to be a'
' (1D) array of the subclass values that should be separated out '
'into the test set'
)
# Tests generation of user-defined split
exp_split = [
np.array([0, 1, 2, 3, 6, 7, 8, 9, 11, 12,
13, 14, 17, 18, 19, 21, 22, 23]),
np.array([4, 5, 10, 15, 16, 20])
]
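        # The expected test set contains the indices of every sample whose
        # subclass is in "test_subclasses" ('Black_Yorkshire' at positions 4,
        # 15 and 16; 'Green_Dragonfly' at positions 5, 10 and 20), and the
        # remaining indices make up the training set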
act_split = test_ml_train.split_train_test_data_user_defined(
subclasses, test_subclasses
)
np.testing.assert_equal(exp_split[0], act_split[0])
np.testing.assert_equal(exp_split[1], act_split[1])
# Removes directory created by defining RunML object
shutil.rmtree('tests/Temp_output')
def test_calc_feature_correlations(self):
"""
Tests calc_feature_correlations in train.py
"""
print('Testing calc_feature_correlations')
def_results_dir = 'tests/Temp_output'
def_fluor_data = pd.DataFrame({'Feature_1': [4, 2, 7, 9],
'Feature_2': [6, 6, 2, 6],
'Feature_3': [8, 6, 4, 2]})
def_classes = ['A', 'B', 'B', 'A']
def_subclasses = ['A_1', 'B_1', 'B_2', 'A_2']
def_shuffle = False
test_ml_train = RunML(
def_results_dir, def_fluor_data, def_classes, def_subclasses,
def_shuffle, True
)
# Default function arguments
fluor_data = None
correlation_coeff = 'kendall'
plt_name = ''
abs_vals = False
# Tests "fluor_data" is a dataframe
with self.assertRaises(TypeError) as message:
test_ml_train.calc_feature_correlations(
def_fluor_data.to_numpy(), correlation_coeff, plt_name,
abs_vals, True
)
self.assertEqual(
str(message.exception), '"fluor_data" should be a dataframe of '
'fluorescence readings'
)
# Tests "fluor_data" contains only integer/float values
test_fluor_data = pd.DataFrame({'Feature_1': [4, 2, 7, 9],
'Feature_2': [6, 6, 2, 6],
'Feature_3': [8, 'x', 4, 2]}, dtype=object)
with self.assertRaises(ValueError) as message:
test_ml_train.calc_feature_correlations(
test_fluor_data, correlation_coeff, plt_name, abs_vals, True
)
self.assertEqual(
str(message.exception), 'Non-numeric value(s) in "fluor_data" - '
'expect all values in "fluor_data" to be integers / floats'
)
# Tests "fluor_data" doesn't contain any NaN values
test_fluor_data = pd.DataFrame({'Feature_1': [4, 2, 7, np.nan],
'Feature_2': [6, 6, 2, 6],
'Feature_3': [8, 6, 4, 2]}, dtype=object)
with self.assertRaises(ValueError) as message:
test_ml_train.calc_feature_correlations(
test_fluor_data, correlation_coeff, plt_name, abs_vals, True
)
self.assertEqual(
str(message.exception), 'NaN value(s) found in "fluor_data"'
)
# Tests "correlation_coefficient" is set to "kendall", "spearman" or
# "pearson"
with self.assertRaises(ValueError) as message:
test_ml_train.calc_feature_correlations(
fluor_data, 'kendal', plt_name, abs_vals, True
)
self.assertEqual(
str(message.exception), 'Value specified for "correlation_coeff" '
'not recognised - should be set to "kendall", "spearman" or '
'"pearson"'
)
# Tests "plt_name" is a string to be appended to the beginning of the
# name of the saved plot
with self.assertRaises(TypeError) as message:
test_ml_train.calc_feature_correlations(
fluor_data, correlation_coeff, 1.0, abs_vals, True
)
self.assertEqual(
str(message.exception), '"plt_name" should be a string value'
)
# Tests "abs_vals" is a Boolean
with self.assertRaises(TypeError) as message:
test_ml_train.calc_feature_correlations(
fluor_data, correlation_coeff, plt_name, [], True
)
self.assertEqual(
str(message.exception), '"abs_vals" should be a Boolean value'
)
# Tests Kendall's Tau correlation coefficient
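        # (The expected matrices below can be reproduced with pandas, which
        # calc_feature_correlations presumably uses under the hood, e.g.
        #   >>> def_fluor_data.corr(method='kendall')
        # and, for the absolute-value test, .abs() applied to the result)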
exp_corr_matrix = pd.DataFrame({
'Feature_1': [1.0, -0.23570226, -0.66666667],
'Feature_2': [-0.23570226, 1.0, 0.23570226],
'Feature_3': [-0.66666667, 0.23570226, 1.0]
})
exp_corr_matrix.index = ['Feature_1', 'Feature_2', 'Feature_3']
act_corr_matrix = test_ml_train.calc_feature_correlations(
fluor_data, 'kendall', plt_name, False, True
)
pd.testing.assert_frame_equal(exp_corr_matrix, act_corr_matrix)
# Tests Spearman's rank correlation coefficient (with absolute readings)
exp_corr_matrix = pd.DataFrame({
'Feature_1': [1.0, 0.25819889, 0.80000000],
'Feature_2': [0.25819889, 1.0, 0.25819889],
'Feature_3': [0.80000000, 0.25819889, 1.0]
})
exp_corr_matrix.index = ['Feature_1', 'Feature_2', 'Feature_3']
act_corr_matrix = test_ml_train.calc_feature_correlations(
fluor_data, 'spearman', plt_name, True, True
)
pd.testing.assert_frame_equal(exp_corr_matrix, act_corr_matrix)
# Tests Pearson's correlation coefficient
exp_corr_matrix = pd.DataFrame({
'Feature_1': [1.0, -0.32163376, -0.8304548],
'Feature_2': [-0.32163376, 1.0, 0.25819889],
'Feature_3': [-0.8304548, 0.25819889, 1.0]
})
exp_corr_matrix.index = ['Feature_1', 'Feature_2', 'Feature_3']
act_corr_matrix = test_ml_train.calc_feature_correlations(
fluor_data, 'pearson', plt_name, False, True
)
pd.testing.assert_frame_equal(exp_corr_matrix, act_corr_matrix)
# Removes directory created by defining RunML object
shutil.rmtree('tests/Temp_output')
def test_calc_feature_importances_kbest(self):
"""
Tests calc_feature_importances_kbest in train.py
"""
print('Testing calc_feature_importances_kbest')
results_dir = 'tests/Temp_output'
fluor_data = pd.DataFrame({'Feature_1': [4, 2, 7, 9],
'Feature_2': [6, 6, 2, 6],
'Feature_3': [8, 6, 4, 2]})
classes = ['A', 'B', 'B', 'A']
subclasses = ['A_1', 'B_1', 'B_2', 'A_2']
shuffle = False
test_ml_train = RunML(
results_dir, fluor_data, classes, subclasses, shuffle, True
)
# Defines function arguments
x = None
y = None
features = None
method_classif = 'f_classif'
num_repeats = 1000
scale = True
plt_name = ''
# Test "x" is a numpy array
with self.assertRaises(TypeError) as message:
test_ml_train.calc_feature_importances_kbest(
fluor_data, y, features, method_classif, num_repeats, scale,
plt_name, True
)
self.assertEqual(
str(message.exception), 'Expect "x" to be a (2D) array of '
'fluorescence readings'
)
# Test "x" is a 2D array
with self.assertRaises(ValueError) as message:
test_ml_train.calc_feature_importances_kbest(
fluor_data.to_numpy().flatten(), y, features, method_classif,
num_repeats, scale, plt_name, True
)
self.assertEqual(
str(message.exception), 'Expect "x" to be a (2D) array of '
'fluorescence readings'
)
# Test "y" is a numpy array
with self.assertRaises(TypeError) as message:
test_ml_train.calc_feature_importances_kbest(
fluor_data.to_numpy(), classes, features, method_classif,
num_repeats, scale, plt_name, True
)
self.assertEqual(
str(message.exception), 'Expect "y" to be a (1D) array of class '
'labels'
)
# Test "y" is a 1D array
with self.assertRaises(ValueError) as message:
test_ml_train.calc_feature_importances_kbest(
x, fluor_data.to_numpy(), features, method_classif, num_repeats,
scale, plt_name, True
)
self.assertEqual(
str(message.exception), 'Expect "y" to be a (1D) array of class '
'labels'
)
# Test "features" is a list
with self.assertRaises(TypeError) as message:
test_ml_train.calc_feature_importances_kbest(
fluor_data.to_numpy(), np.array(classes), fluor_data.columns,
method_classif, num_repeats, scale, plt_name, True
)
self.assertEqual(
str(message.exception), 'Expect "features" to be a list of the '
'column ids in "x"'
)
# Test that dimensions of "x" and "y" match
with self.assertRaises(ValueError) as message:
test_ml_train.calc_feature_importances_kbest(
x, np.array([1, 2, 3]), features, method_classif, num_repeats,
scale, plt_name, True
)
self.assertEqual(
str(message.exception), 'Mismatch between the number of rows in "x"'
' and the number of entries in "y"'
)
# Test that dimensions of "x" and "features" match
with self.assertRaises(ValueError) as message:
test_ml_train.calc_feature_importances_kbest(
x, y, fluor_data.columns.tolist()[:2], method_classif,
num_repeats, scale, plt_name, True
)
self.assertEqual(
str(message.exception), 'Mismatch between the number of columns in '
'"x" and the number of column ids in "features"'
)
# Test "method_classif" is a recognised value
with self.assertRaises(ValueError) as message:
test_ml_train.calc_feature_importances_kbest(
x, y, features, 'fclassif', num_repeats, scale, plt_name,
True
)
self.assertEqual(
str(message.exception), '"method_classif" should be set to either '
'"f_classif" or "mutual_info_classif"'
)
# Test "num_repeats" is an integer
from sklearn.feature_selection import f_classif
with self.assertRaises(TypeError) as message:
test_ml_train.calc_feature_importances_kbest(
x, y, features, f_classif, 1000., scale, plt_name,
True
)
self.assertEqual(
str(message.exception), '"num_repeats" should be set to a positive '
'integer value'
)
# Test "num_repeats" is a positive integer
with self.assertRaises(ValueError) as message:
test_ml_train.calc_feature_importances_kbest(
x, y, features, method_classif, -4, scale, plt_name,
True
)
self.assertEqual(
str(message.exception), '"num_repeats" should be set to a positive '
'integer value'
)
# Test "scale" is a Boolean
with self.assertRaises(TypeError) as message:
test_ml_train.calc_feature_importances_kbest(
x, y, features, method_classif, num_repeats, 1, plt_name,
True
)
self.assertEqual(
str(message.exception), '"scale" should be set to a Boolean value'
)
# Test "plt_name" is a string
with self.assertRaises(TypeError) as message:
test_ml_train.calc_feature_importances_kbest(
x, y, features, method_classif, num_repeats, scale, True,
True
)
self.assertEqual(
str(message.exception), '"plt_name" should be a string value'
)
# Test KBest feature selection
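        # (The expected "Score" values presumably correspond to the f_classif
        # F-statistics of the two features normalised to sum to 1; because the
        # same fixed dataset is scored on each of the 10 repeats, the lower
        # and upper confidence limits collapse onto the score itself)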
exp_importance_df = pd.DataFrame({'Feature': ['Feature_1', 'Feature_2'],
'Score': [0.91984923, 0.08015077],
'Lower conf limit': [0.91984923, 0.08015077],
'Upper conf limit': [0.91984923, 0.08015077]})
exp_feat_importances = OrderedDict({'Feature_1': [0.91984923 for n in range(10)],
'Feature_2': [0.08015077 for n in range(10)]})
(
act_importance_df, act_feat_importances
) = test_ml_train.calc_feature_importances_kbest(
np.array([[2, 2],
[1, 10],
[2, 3],
[1, 1],
[2, 7],
[10, 6],
[7, 2],
[2, 2],
[1, 10],
[10, 6],
[2, 7],
[3, 8]]),
np.array(['A', 'B', 'A', 'B', 'A', 'B', 'A', 'B', 'A', 'B', 'A', 'B']),
['Feature_1', 'Feature_2'], 'f_classif', 10, True, '', True
)
pd.testing.assert_frame_equal(exp_importance_df, act_importance_df)
self.assertEqual(list(exp_feat_importances.keys()),
list(act_feat_importances.keys()))
for key in exp_feat_importances.keys():
np.testing.assert_almost_equal(
exp_feat_importances[key], act_feat_importances[key], 7
)
# Removes directory created by defining RunML object
shutil.rmtree('tests/Temp_output')
def test_calc_feature_importances_tree(self):
"""
Tests calc_feature_importances_tree in train.py
"""
print('Testing calc_feature_importances_tree')
results_dir = 'tests/Temp_output'
fluor_data = pd.DataFrame({'Feature_1': [4, 2, 7, 9],
'Feature_2': [6, 6, 2, 6],
'Feature_3': [8, 6, 4, 2]})
classes = ['A', 'B', 'B', 'A']
subclasses = ['A_1', 'B_1', 'B_2', 'A_2']
shuffle = False
test_ml_train = RunML(
results_dir, fluor_data, classes, subclasses, shuffle, True
)
# Defines function arguments
x = None
y = None
features = None
num_repeats = 1000
scale = True
plt_name = ''
# Test "x" is a numpy array
with self.assertRaises(TypeError) as message:
test_ml_train.calc_feature_importances_tree(
fluor_data, y, features, num_repeats, scale, plt_name, True
)
self.assertEqual(
str(message.exception), 'Expect "x" to be a (2D) array of '
'fluorescence readings'
)
# Test "x" is a 2D array
with self.assertRaises(ValueError) as message:
test_ml_train.calc_feature_importances_tree(
fluor_data.to_numpy().flatten(), y, features, num_repeats,
scale, plt_name, True
)
self.assertEqual(
str(message.exception), 'Expect "x" to be a (2D) array of '
'fluorescence readings'
)
# Test "y" is a numpy array
with self.assertRaises(TypeError) as message:
test_ml_train.calc_feature_importances_tree(
fluor_data.to_numpy(), classes, features, num_repeats, scale,
plt_name, True
)
self.assertEqual(
str(message.exception), 'Expect "y" to be a (1D) array of class '
'labels'
)
# Test "y" is a 1D array
with self.assertRaises(ValueError) as message:
test_ml_train.calc_feature_importances_tree(
x, fluor_data.to_numpy(), features, num_repeats, scale,
plt_name, True
)
self.assertEqual(
str(message.exception), 'Expect "y" to be a (1D) array of class '
'labels'
)
# Test "features" is a list
with self.assertRaises(TypeError) as message:
test_ml_train.calc_feature_importances_tree(
fluor_data.to_numpy(), np.array(classes), fluor_data.columns,
num_repeats, scale, plt_name, True
)
self.assertEqual(
str(message.exception), 'Expect "features" to be a list of the '
'column ids in "x"'
)
# Test that dimensions of "x" and "y" match
with self.assertRaises(ValueError) as message:
test_ml_train.calc_feature_importances_tree(
x, np.array([1, 2, 3]), features, num_repeats, scale, plt_name,
True
)
self.assertEqual(
str(message.exception), 'Mismatch between the number of rows in "x"'
' and the number of entries in "y"'
)
# Test that dimensions of "x" and "features" match
with self.assertRaises(ValueError) as message:
test_ml_train.calc_feature_importances_tree(
x, y, fluor_data.columns.tolist()[:2], num_repeats, scale,
plt_name, True
)
self.assertEqual(
str(message.exception), 'Mismatch between the number of columns in '
'"x" and the number of column ids in "features"'
)
# Test "num_repeats" is an integer
with self.assertRaises(TypeError) as message:
test_ml_train.calc_feature_importances_tree(
x, y, features, 1000., scale, plt_name, True
)
self.assertEqual(
str(message.exception), '"num_repeats" should be set to a positive '
'integer value'
)
# Test "num_repeats" is a positive integer
with self.assertRaises(ValueError) as message:
test_ml_train.calc_feature_importances_tree(
x, y, features, -4, scale, plt_name, True
)
self.assertEqual(
str(message.exception), '"num_repeats" should be set to a positive '
'integer value'
)
# Test "scale" is a Boolean
with self.assertRaises(TypeError) as message:
test_ml_train.calc_feature_importances_tree(
x, y, features, num_repeats, 1, plt_name, True
)
self.assertEqual(
str(message.exception), '"scale" should be set to a Boolean value'
)
# Test "plt_name" is a string
with self.assertRaises(TypeError) as message:
test_ml_train.calc_feature_importances_tree(
x, y, features, num_repeats, scale, True, True
)
self.assertEqual(
str(message.exception), '"plt_name" should be a string value'
)
# Test tree feature selection
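        # (Tree-derived feature importances sum to 1 by construction, hence
        # the expected scores 0.53276046 + 0.46723954 = 1; scale=False is
        # passed so the raw readings are fed to the tree ensemble, and the
        # behaviour is presumably deterministic here, which is why the
        # confidence limits again equal the score)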
exp_importance_df = pd.DataFrame({'Feature': ['Feature_1', 'Feature_2'],
'Score': [0.53276046, 0.46723954],
'Lower conf limit': [0.53276046, 0.46723954],
'Upper conf limit': [0.53276046, 0.46723954]})
exp_feat_importances = OrderedDict({'Feature_1': [0.53276046 for n in range(10)],
'Feature_2': [0.46723954 for n in range(10)]})
(
act_importance_df, act_feat_importances
) = test_ml_train.calc_feature_importances_tree(
np.array([[2, 2],
[1, 10],
[2, 3],
[1, 1],
[2, 7],
[10, 6],
[7, 2],
[2, 2],
[1, 10],
[10, 6],
[2, 7],
[3, 8]]),
np.array(['A', 'B', 'A', 'B', 'A', 'B', 'A', 'B', 'A', 'B', 'A', 'B']),
['Feature_1', 'Feature_2'], 10, False, '', True
)
pd.testing.assert_frame_equal(exp_importance_df, act_importance_df)
self.assertEqual(list(exp_feat_importances.keys()),
list(act_feat_importances.keys()))
for key in exp_feat_importances.keys():
np.testing.assert_almost_equal(
exp_feat_importances[key], act_feat_importances[key], 7
)
# Removes directory created by defining RunML object
shutil.rmtree('tests/Temp_output')
def test_calc_feature_importances_permutation(self):
"""
Tests calc_feature_importances_permutation in train.py
"""
print('Testing calc_feature_importances_permutation')
results_dir = 'tests/Temp_output'
fluor_data = pd.DataFrame({'Feature_1': [4, 2, 7, 9],
'Feature_2': [6, 6, 2, 6],
'Feature_3': [8, 6, 4, 2]})
classes = ['A', 'B', 'B', 'A']
subclasses = ['A_1', 'B_1', 'B_2', 'A_2']
shuffle = False
test_ml_train = RunML(
results_dir, fluor_data, classes, subclasses, shuffle, True
)
# Defines function arguments
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import make_scorer, f1_score
x = None
y = None
features = None
classifier = AdaBoostClassifier
parameters = {'n_estimators': [10, 30, 100, 300, 1000]}
model_metric = 'accuracy'
f1 = make_scorer(f1_score, average='weighted')
num_repeats = 1000
scale = True
plt_name = ''
# Test "x" is a numpy array
with self.assertRaises(TypeError) as message:
test_ml_train.calc_feature_importances_permutation(
fluor_data, y, features, classifier, parameters, model_metric,
num_repeats, scale, plt_name, True
)
self.assertEqual(
str(message.exception), 'Expect "x" to be a (2D) array of '
'fluorescence readings'
)
# Test "x" is a 2D array
with self.assertRaises(ValueError) as message:
test_ml_train.calc_feature_importances_permutation(
fluor_data.to_numpy().flatten(), y, features, classifier,
parameters, model_metric, num_repeats, scale, plt_name, True
)
self.assertEqual(
str(message.exception), 'Expect "x" to be a (2D) array of '
'fluorescence readings'
)
# Test "y" is a numpy array
with self.assertRaises(TypeError) as message:
test_ml_train.calc_feature_importances_permutation(
fluor_data.to_numpy(), classes, features, classifier,
parameters, model_metric, num_repeats, scale, plt_name, True
)
self.assertEqual(
str(message.exception), 'Expect "y" to be a (1D) array of class '
'labels'
)
# Test "y" is a 1D array
with self.assertRaises(ValueError) as message:
test_ml_train.calc_feature_importances_permutation(
x, fluor_data.to_numpy(), features, classifier, parameters,
model_metric, num_repeats, scale, plt_name, True
)
self.assertEqual(
str(message.exception), 'Expect "y" to be a (1D) array of class '
'labels'
)
# Test "features" is a list
with self.assertRaises(TypeError) as message:
test_ml_train.calc_feature_importances_permutation(
fluor_data.to_numpy(), np.array(classes), fluor_data.columns,
classifier, parameters, model_metric, num_repeats, scale,
plt_name, True
)
self.assertEqual(
str(message.exception), 'Expect "features" to be a list of the '
'column ids in "x"'
)
# Test that dimensions of "x" and "y" match
with self.assertRaises(ValueError) as message:
test_ml_train.calc_feature_importances_permutation(
x, np.array([1, 2, 3]), features, classifier, parameters,
model_metric, num_repeats, scale, plt_name, True
)
self.assertEqual(
str(message.exception), 'Mismatch between the number of rows in "x"'
' and the number of entries in "y"'
)
# Test that dimensions of "x" and "features" match
with self.assertRaises(ValueError) as message:
test_ml_train.calc_feature_importances_permutation(
x, y, fluor_data.columns.tolist()[:2], classifier, parameters,
model_metric, num_repeats, scale, plt_name, True
)
self.assertEqual(
str(message.exception), 'Mismatch between the number of columns in '
'"x" and the number of column ids in "features"'
)
# Test "parameters" is a dictionary
from sklearn.ensemble import RandomForestClassifier
with self.assertRaises(TypeError) as message:
test_ml_train.calc_feature_importances_permutation(
x, y, fluor_data.columns.tolist(), RandomForestClassifier, [],
model_metric, num_repeats, scale, plt_name, True
)
self.assertEqual(
str(message.exception), 'Expect "parameters" to be a dictionary of '
'parameter names (keys) and arrays of values to consider for them '
'(values) in a grid search'
)
# Test "model_metrics" is a recognised string
with self.assertRaises(ValueError) as message:
test_ml_train.calc_feature_importances_permutation(
x, y, fluor_data.columns.tolist(), RandomForestClassifier,
OrderedDict(), 'acuracy', num_repeats, scale, plt_name, True
)
self.assertEqual(
str(message.exception), 'Value provided for "model_metric" not '
'recognised - please specify one of the strings in the list below:'
'\n[\'accuracy\', \'balanced_accuracy\', \'top_k_accuracy\', '
'\'average_precision\', \'neg_brier_score\', \'f1\', \'f1_micro\', '
'\'f1_macro\', \'f1_weighted\', \'f1_samples\', \'neg_log_loss\', '
'\'precision\', \'precision_micro\', \'precision_macro\', '
'\'precision_weighted\', \'precision_samples\', \'recall\', '
'\'recall_micro\', \'recall_macro\', \'recall_weighted\', '
'\'recall_samples\', \'jaccard\', \'jaccard_micro\', '
'\'jaccard_macro\', \'jaccard_weighted\', \'jaccard_samples\', '
'\'roc_auc\', \'roc_auc_ovr\', \'roc_auc_ovo\', '
'\'roc_auc_ovr_weighted\', \'roc_auc_ovo_weighted\']'
)
# Test "num_repeats" is an integer
with self.assertRaises(TypeError) as message:
test_ml_train.calc_feature_importances_permutation(
x, y, fluor_data.columns.tolist(), classifier, OrderedDict(),
f1, 1000., scale, plt_name, True
)
self.assertEqual(
str(message.exception), '"num_repeats" should be set to a positive '
'integer value'
)
# Test "num_repeats" is a positive integer
with self.assertRaises(ValueError) as message:
test_ml_train.calc_feature_importances_permutation(
x, y, fluor_data.columns.tolist(), classifier, OrderedDict(),
'precision', 0, scale, plt_name, True
)
self.assertEqual(
str(message.exception), '"num_repeats" should be set to a positive '
'integer value'
)
# Test "scale" is a Boolean
with self.assertRaises(TypeError) as message:
test_ml_train.calc_feature_importances_permutation(
x, y, fluor_data.columns.tolist(), classifier, OrderedDict(),
f1, num_repeats, 1, plt_name, True
)
self.assertEqual(
str(message.exception), '"scale" should be set to a Boolean value'
)
# Test "plt_name" is a string
with self.assertRaises(TypeError) as message:
test_ml_train.calc_feature_importances_permutation(
x, y, fluor_data.columns.tolist(), classifier, OrderedDict(),
model_metric, num_repeats, False, {}, True
)
self.assertEqual(
str(message.exception), '"plt_name" should be a string value'
)
# Test permutation feature selection
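        # (Permutation importance measures the drop in the chosen metric
        # (here 'f1') when a single feature's values are shuffled; the small
        # negative expected scores indicate that shuffling either feature
        # does not degrade the fitted SVC on this toy dataset)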
from sklearn.svm import SVC
svc_params = OrderedDict({
'C': np.logspace(-5, 15, num=41, base=2),
'gamma': np.logspace(-15, 3, num=37, base=2),
'kernel': ['rbf']
})
exp_importance_df = pd.DataFrame({'Feature': ['Feature_1', 'Feature_2'],
'Score': [-0.05714286, -0.06666667],
'Lower conf limit': [-0.05714286, -0.06666667],
'Upper conf limit': [-0.05714286, -0.06666667]})
exp_feat_importances = OrderedDict({'Feature_1': [-0.05714286 for n in range(10)],
'Feature_2': [-0.06666667 for n in range(10)]})
(
act_importance_df, act_feat_importances
) = test_ml_train.calc_feature_importances_permutation(
np.array([[2, 2],
[1, 10],
[2, 3],
[1, 1],
[2, 7],
[10, 6],
[7, 2],
[2, 2],
[1, 10],
[10, 6],
[2, 7],
[3, 8]]),
np.array([0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]),
['Feature_1', 'Feature_2'], SVC, svc_params, 'f1', 10, True,
'', True
)
pd.testing.assert_frame_equal(exp_importance_df, act_importance_df)
self.assertEqual(list(exp_feat_importances.keys()),
list(act_feat_importances.keys()))
for key in exp_feat_importances.keys():
np.testing.assert_almost_equal(
exp_feat_importances[key], act_feat_importances[key], 7
)
# Removes directory created by defining RunML object
shutil.rmtree('tests/Temp_output')
def test_run_pca(self):
"""
Tests run_pca in train.py
"""
print('Testing run_pca')
results_dir = 'tests/Temp_output'
fluor_data = pd.DataFrame({'Feature_1': [4, 2, 7, 9],
'Feature_2': [6, 6, 2, 6],
'Feature_3': [8, 6, 4, 2]})
classes = ['A', 'B', 'B', 'A']
subclasses = ['A_1', 'B_1', 'B_2', 'A_2']
shuffle = False
test_ml_train = RunML(
results_dir, fluor_data, classes, subclasses, shuffle, True
)
# Defines function arguments
x_df = None
scale = True
plt_name = ''
# Test "fluor_data" is a pandas dataframe
with self.assertRaises(TypeError) as message:
test_ml_train.run_pca(fluor_data.to_numpy(), scale, plt_name, True)
self.assertEqual(
str(message.exception), 'Expect "fluor_data" to be dataframe of '
'fluorescence readings'
)
# Test "fluor_data" only contains float and/or integer values
x_df_non_num = pd.DataFrame(
{'A': [1, 2], 'B': [np.nan, 4], 'C': [4, '5']}
)
with self.assertRaises(ValueError) as message:
test_ml_train.run_pca(x_df_non_num, scale, plt_name, True)
self.assertEqual(
str(message.exception), 'Non-numeric value(s) in "fluor_data" - '
'expect all values in "fluor_data" to be integers / floats'
)
# Test "scale" is a Boolean
with self.assertRaises(TypeError) as message:
test_ml_train.run_pca(x_df, 1, plt_name, True)
self.assertEqual(
str(message.exception), '"scale" should be set to a Boolean value'
)
# Test "plt_name" is a string
with self.assertRaises(TypeError) as message:
test_ml_train.run_pca(x_df, False, [], True)
self.assertEqual(
str(message.exception), '"plt_name" should be a string value'
)
# Test PCA calculation
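        # (scale=True is passed, so the readings are presumably centred and
        # scaled with RobustScaler before the PCA fit - this is why
        # RobustScaler().fit_transform() is applied below when reproducing
        # the expected projection)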
from sklearn.preprocessing import RobustScaler
act_model, act_pca_components = test_ml_train.run_pca(
fluor_data, True, '', True
)
exp_transform_x = np.array(
[[-1.18870441, -0.758145263, -0.390925304],
[-1.16720749, -0.579369555, 0.422721778],
[ 3.03636219, -0.179547060, 0.00190310232],
[-0.680450290, 1.51706188, -0.0336995763]]
)
exp_pca_components = pd.DataFrame({
'Component': [1, 2, 3],
'Feature_1': [0.1613685, 0.63232001, -0.75771473],
'Feature_2': [-0.97491947, 0.22137831, -0.02288376],
'Feature_3': [-0.15327175, -0.74240357, -0.65218457]
})
exp_pca_components = exp_pca_components.set_index('Component', drop=True)
        # Fitting, but not transformation, of the data is carried out in
        # run_pca, so the transformation is applied here before comparing
        # against the expected values
np.testing.assert_almost_equal(
exp_transform_x,
act_model.fit_transform(RobustScaler().fit_transform(fluor_data.to_numpy())),
7
)
np.testing.assert_almost_equal(
np.array([0.77693266, 0.20232139, 0.02074595]),
act_model.explained_variance_ratio_, 7
)
pd.testing.assert_frame_equal(exp_pca_components, act_pca_components)
# Removes directory created by defining RunML object
shutil.rmtree('tests/Temp_output')
def test_plot_scatter_on_pca_axes(self):
"""
Tests plot_scatter_on_pca_axes in train.py
"""
print('Testing plot_scatter_on_pca_axes')
results_dir = 'tests/Temp_output'
fluor_data = pd.DataFrame({'Feature_1': [4, 2, 7, 9],
'Feature_2': [6, 6, 2, 6],
'Feature_3': [8, 6, 4, 2]})
classes = ['A', 'B', 'B', 'A']
subclasses = ['A_1', 'B_1', 'B_2', 'A_3']
shuffle = False
test_ml_train = RunML(
results_dir, fluor_data, classes, subclasses, shuffle, True
)
# Defines function arguments
x = None
y = None
sub_list = None
num_dimensions = 2
scale = True
plt_name = ''
# Test "x" is a numpy array
with self.assertRaises(TypeError) as message:
test_ml_train.plot_scatter_on_pca_axes(
fluor_data, y, sub_list, num_dimensions, scale, plt_name, True
)
self.assertEqual(
str(message.exception), 'Expect "x" to be a (2D) array of '
'fluorescence readings'
)
# Test "x" is a 2D array
with self.assertRaises(ValueError) as message:
test_ml_train.plot_scatter_on_pca_axes(
fluor_data.to_numpy().flatten(), y, sub_list, num_dimensions,
scale, plt_name, True
)
self.assertEqual(
str(message.exception), 'Expect "x" to be a (2D) array of '
'fluorescence readings'
)
# Test "y" is a numpy array
with self.assertRaises(TypeError) as message:
test_ml_train.plot_scatter_on_pca_axes(
fluor_data.to_numpy(), classes, sub_list, num_dimensions, scale,
plt_name, True
)
self.assertEqual(
str(message.exception), 'Expect "y" to be a (1D) array of class '
'labels'
)
# Test "y" is a 1D array
with self.assertRaises(ValueError) as message:
test_ml_train.plot_scatter_on_pca_axes(
x, fluor_data.to_numpy(), sub_list, num_dimensions, scale,
plt_name, True
)
self.assertEqual(
str(message.exception), 'Expect "y" to be a (1D) array of class '
'labels'
)
# Test "subclasses" is a numpy array
with self.assertRaises(TypeError) as message:
test_ml_train.plot_scatter_on_pca_axes(
x, fluor_data.to_numpy().flatten()[:4], subclasses,
num_dimensions, scale, plt_name, True
)
self.assertEqual(
str(message.exception), 'Expect "subclasses" to be set either to '
'None, or to a (1D) array of the subclasses present in the dataset'
)
# Test "subclasses" is a 1D array
with self.assertRaises(ValueError) as message:
test_ml_train.plot_scatter_on_pca_axes(
x, np.array(classes), fluor_data.to_numpy(), num_dimensions,
scale, plt_name, True
)
self.assertEqual(
str(message.exception), 'Expect "subclasses" to be set either to '
'None, or to a (1D) array of the subclasses present in the dataset'
)
# Test that dimensions of "x" and "y" match
with self.assertRaises(ValueError) as message:
test_ml_train.plot_scatter_on_pca_axes(
x, np.array([1, 2, 3]), np.array(subclasses), num_dimensions,
scale, plt_name, True
)
self.assertEqual(
str(message.exception), 'Mismatch between the number of rows in "x"'
' and the number of entries in "y"'
)
# Test that dimensions of "x" and "subclasses" match
with self.assertRaises(ValueError) as message:
test_ml_train.plot_scatter_on_pca_axes(
x, y, np.array([1, 2, 3]), num_dimensions, scale, plt_name, True
)
self.assertEqual(
str(message.exception), 'Mismatch between the number of rows in "x"'
' and the number of entries in "subclasses"'
)
# Test "num_dimensions" is equal to 2 or 3
with self.assertRaises(ValueError) as message:
test_ml_train.plot_scatter_on_pca_axes(
x, y, sub_list, 4, scale, plt_name, True
)
self.assertEqual(
str(message.exception), 'Expect "num_dimensions" to be set to 2 or 3'
)
# Test "scale" is a Boolean
with self.assertRaises(TypeError) as message:
test_ml_train.plot_scatter_on_pca_axes(
x, y, sub_list, 3, '', plt_name, True
)
self.assertEqual(
str(message.exception), '"scale" should be a Boolean (True or False)'
)
# Test "plt_name" is a string
with self.assertRaises(TypeError) as message:
test_ml_train.plot_scatter_on_pca_axes(
x, y, sub_list, num_dimensions, scale, [], True
)
self.assertEqual(
str(message.exception), '"plt_name" should be a string value'
)
# Test subclasses contain '_' character only once
with self.assertRaises(ValueError) as message:
test_ml_train.plot_scatter_on_pca_axes(
x, y, np.array(['A_1', 'B_1_', 'B_2', 'A_2_2']), num_dimensions,
scale, plt_name, True
)
self.assertEqual(
str(message.exception), 'Character "_" found in subclass A_2_2 '
'more/less than once'
)
# Test function with defined subclasses
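        # (In the expected output, classes are distinguished by colour
        # ('A' = blue, 'B' = green) and subclasses within a class by marker
        # shape ('o', 'v'), while the reduced coordinates are the first two
        # principal components of the scaled data)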
exp_cat_class_colours = {'A': 'b',
'B': 'g'}
exp_cat_markers = {'A_1': 'o',
'A_3': 'v',
'B_1': 'o',
'B_2': 'v'}
exp_cat_colours = {'A_1': 'b',
'A_3': 'b',
'B_1': 'g',
'B_2': 'g'}
exp_X_reduced = np.array(
[[-1.18870441, -0.758145263],
[-1.16720749, -0.579369555],
[3.03636219, -0.179547060],
[-0.680450290, 1.51706188]]
)
(
act_cat_class_colours, act_cat_markers, act_cat_colours,
act_X_reduced
) = test_ml_train.plot_scatter_on_pca_axes(
x, y, sub_list, num_dimensions, scale, plt_name, True
)
self.assertEqual(exp_cat_class_colours, act_cat_class_colours)
self.assertEqual(exp_cat_markers, act_cat_markers)
self.assertEqual(exp_cat_colours, act_cat_colours)
np.testing.assert_almost_equal(exp_X_reduced, act_X_reduced)
# Test function with no subclasses
shutil.rmtree('tests/Temp_output')
results_dir = 'tests/Temp_output'
fluor_data = pd.DataFrame({'Feature_1': [4, 2, 7, 9],
'Feature_2': [6, 6, 2, 6],
'Feature_3': [8, 6, 4, 2]})
classes = ['A', 'B', 'B', 'A']
subclasses = None
shuffle = False
test_ml_train = RunML(
results_dir, fluor_data, classes, subclasses, shuffle, True
)
exp_cat_class_colours = {'A': 'b',
'B': 'g'}
exp_cat_markers = {'A': 'o',
'B': 'o'}
exp_cat_colours = {'A': 'b',
'B': 'g'}
exp_X_reduced = np.array(
[[-3.18109341, -0.22506071, 1.44221788],
[-3.49530274, -0.42244343, -1.36176368],
[2.37050586, 2.57343631, -0.09015284],
[4.30589029, -1.92593218, 0.00969865]]
)
(
act_cat_class_colours, act_cat_markers, act_cat_colours,
act_X_reduced
) = test_ml_train.plot_scatter_on_pca_axes(
x, y, sub_list, 3, False, plt_name, True
)
self.assertEqual(exp_cat_class_colours, act_cat_class_colours)
self.assertEqual(exp_cat_markers, act_cat_markers)
self.assertEqual(exp_cat_colours, act_cat_colours)
np.testing.assert_almost_equal(exp_X_reduced, act_X_reduced)
# Removes directory created by defining RunML object
shutil.rmtree('tests/Temp_output')
def test_define_fixed_model_params(self):
"""
Tests define_fixed_model_params in train.py
"""
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC, SVC
from sklearn.ensemble import AdaBoostClassifier, AdaBoostRegressor
from sklearn.naive_bayes import GaussianNB
from sklearn.dummy import DummyClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
print('Testing define_fixed_model_params')
results_dir = 'tests/Temp_output'
fluor_data = pd.DataFrame({'Feature_1': [4, 2, 7, 9],
'Feature_2': [6, 6, 2, 6],
'Feature_3': [8, 6, 4, 2]})
classes = ['A', 'B', 'B', 'A']
subclasses = ['A_1', 'B_1', 'B_2', 'A_2']
shuffle = False
test_ml_train = RunML(
results_dir, fluor_data, classes, subclasses, shuffle, True
)
# Test LogisticRegression
exp_params = OrderedDict({'n_jobs': -1, 'max_iter': 1000})
act_params = test_ml_train.define_fixed_model_params(LogisticRegression())
self.assertEqual(exp_params, act_params)
# Test KNeighborsClassifier
exp_params = OrderedDict({'metric': 'minkowski',
'n_jobs': -1})
act_params = test_ml_train.define_fixed_model_params(KNeighborsClassifier())
self.assertEqual(exp_params, act_params)
# Test LinearSVC
exp_params = OrderedDict({'dual': False,
'max_iter': 10000})
act_params = test_ml_train.define_fixed_model_params(LinearSVC())
self.assertEqual(exp_params, act_params)
# Test SVC
exp_params = OrderedDict()
act_params = test_ml_train.define_fixed_model_params(SVC())
self.assertEqual(exp_params, act_params)
# Test AdaBoostClassifier
exp_params = OrderedDict()
act_params = test_ml_train.define_fixed_model_params(AdaBoostClassifier())
self.assertEqual(exp_params, act_params)
# Test LinearDiscriminantAnalysis
exp_params = OrderedDict()
act_params = test_ml_train.define_fixed_model_params(LinearDiscriminantAnalysis())
self.assertEqual(exp_params, act_params)
# Test DummyClassifier
exp_params = OrderedDict({'strategy': 'prior'})
act_params = test_ml_train.define_fixed_model_params(DummyClassifier())
self.assertEqual(exp_params, act_params)
# Test Gaussian Naive Bayes
exp_params = OrderedDict()
act_params = test_ml_train.define_fixed_model_params(GaussianNB())
self.assertEqual(exp_params, act_params)
# Test unexpected classifier
with self.assertRaises(TypeError) as message:
test_ml_train.define_fixed_model_params(AdaBoostRegressor())
self.assertEqual(
str(message.exception),
'Unrecognised value provided for "classifier". Expect "classifier" '
'to be one of:\nsklearn.linear_model.LogisticRegression()\n'
'sklearn.neighbors.KNeighborsClassifier()\n'
'sklearn.svm.LinearSVC()\nsklearn.svm.SVC()\n'
'sklearn.ensemble.AdaBoostClassifier()\n'
'sklearn.naive_bayes.GaussianNB()\n'
'sklearn.discriminant_analysis.LinearDiscriminantAnalysis()\n'
'sklearn.dummy.DummyClassifier()'
)
# Removes directory created by defining RunML object
shutil.rmtree('tests/Temp_output')
def test_define_tuned_model_params(self):
"""
Tests define_tuned_model_params in train.py
"""
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC, SVC
from sklearn.ensemble import AdaBoostClassifier, AdaBoostRegressor
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.dummy import DummyClassifier
print('Testing define_tuned_model_params')
results_dir = 'tests/Temp_output'
fluor_data = pd.DataFrame({'Feature_1': [4, 2, 7, 9],
'Feature_2': [6, 6, 2, 6],
'Feature_3': [8, 6, 4, 2]})
classes = ['A', 'B', 'B', 'A']
subclasses = ['A_1', 'B_1', 'B_2', 'A_2']
shuffle = False
test_ml_train = RunML(
results_dir, fluor_data, classes, subclasses, shuffle, True
)
# Defines function arguments
x_train = fluor_data.to_numpy()
n_folds = 5
# Test "x_train" is a numpy array
with self.assertRaises(TypeError) as message:
test_ml_train.define_tuned_model_params(
LogisticRegression(), fluor_data, n_folds
)
self.assertEqual(
str(message.exception), '"x_train" should be a (2D) array of '
'fluoresence readings'
)
# Test "n_folds" is an integer
with self.assertRaises(TypeError) as message:
test_ml_train.define_tuned_model_params(
LogisticRegression(), x_train, 5.0
)
self.assertEqual(
str(message.exception), '"n_folds" should be set to a positive '
'integer value'
)
# Test n_folds is a positive integer
with self.assertRaises(ValueError) as message:
test_ml_train.define_tuned_model_params(
LogisticRegression(), x_train, 0
)
self.assertEqual(
str(message.exception), '"n_folds" should be set to a positive '
'integer value'
)
# Test LogisticRegression
exp_params = OrderedDict({
'penalty': ['l1', 'l2'],
'solver': ['liblinear', 'sag', 'saga', 'newton-cg', 'lbfgs'],
'multi_class': ['ovr', 'multinomial'],
'C': np.logspace(-3, 5, 17)
})
act_params = test_ml_train.define_tuned_model_params(
LogisticRegression(), x_train, n_folds
)
self.assertEqual(list(exp_params.keys()), list(act_params.keys()))
for exp_key, exp_val in exp_params.items():
act_val = act_params[exp_key]
if type(exp_val) == np.ndarray:
np.testing.assert_equal(exp_val, act_val)
else:
self.assertEqual(exp_val, act_val)
# Test KNeighborsClassifier
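        # (With only 4 training samples and n_folds=5 there are presumably too
        # few samples per training fold to fit k nearest neighbours, hence the
        # AlgorithmError; with n_folds=1 the permissible "n_neighbors" range
        # below is restricted to 2-3 by the small dataset size)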
with self.assertRaises(AlgorithmError) as message:
test_ml_train.define_tuned_model_params(
KNeighborsClassifier(), x_train, n_folds
)
self.assertEqual(
str(message.exception), 'Too few data points in dataset to run k '
'nearest neighbours'
)
exp_params = OrderedDict({
'n_neighbors': np.array([2, 3]),
'weights': ['uniform', 'distance'],
'p': np.array([1, 2])
})
act_params = test_ml_train.define_tuned_model_params(
KNeighborsClassifier(), x_train, 1
)
self.assertEqual(list(exp_params.keys()), list(act_params.keys()))
for exp_key, exp_val in exp_params.items():
act_val = act_params[exp_key]
if type(exp_val) == np.ndarray:
np.testing.assert_equal(exp_val, act_val)
else:
self.assertEqual(exp_val, act_val)
# Test LinearSVC
exp_params = OrderedDict({'C': np.logspace(-5, 15, num=21, base=2)})
act_params = test_ml_train.define_tuned_model_params(
LinearSVC(), x_train, n_folds
)
self.assertEqual(list(exp_params.keys()), list(act_params.keys()))
for exp_key, exp_val in exp_params.items():
act_val = act_params[exp_key]
if type(exp_val) == np.ndarray:
np.testing.assert_equal(exp_val, act_val)
else:
self.assertEqual(exp_val, act_val)
# Test SVC
exp_params = OrderedDict({
'C': np.logspace(-5, 15, num=21, base=2),
'gamma': np.logspace(-15, 3, num=19, base=2),
'kernel': ['rbf']
})
act_params = test_ml_train.define_tuned_model_params(
SVC(), x_train, n_folds
)
self.assertEqual(list(exp_params.keys()), list(act_params.keys()))
for exp_key, exp_val in exp_params.items():
act_val = act_params[exp_key]
if type(exp_val) == np.ndarray:
np.testing.assert_equal(exp_val, act_val)
else:
self.assertEqual(exp_val, act_val)
# Test AdaBoostClassifier
with self.assertRaises(AlgorithmError) as message:
test_ml_train.define_tuned_model_params(
AdaBoostClassifier(), x_train, n_folds
)
self.assertEqual(
str(message.exception), 'Too few data points in dataset to use '
'AdaBoost classifier'
)
exp_params = OrderedDict({
'n_estimators': np.array([int(x) for x in np.logspace(1, 4, 7)])
})
act_params = test_ml_train.define_tuned_model_params(
AdaBoostClassifier(), x_train, 1
)
self.assertEqual(list(exp_params.keys()), list(act_params.keys()))
for exp_key, exp_val in exp_params.items():
act_val = act_params[exp_key]
if type(exp_val) == np.ndarray:
np.testing.assert_equal(exp_val, act_val)
else:
self.assertEqual(exp_val, act_val)
# Test Gaussian Naive Bayes
exp_params = OrderedDict()
act_params = test_ml_train.define_tuned_model_params(
GaussianNB(), x_train, n_folds
)
self.assertEqual(exp_params, act_params)
# Test Linear Discriminant Analysis
exp_params = OrderedDict()
act_params = test_ml_train.define_tuned_model_params(
LinearDiscriminantAnalysis(), x_train, n_folds
)
self.assertEqual(exp_params, act_params)
# Test Dummy Classifier
exp_params = OrderedDict()
act_params = test_ml_train.define_tuned_model_params(
DummyClassifier(), x_train, n_folds
)
self.assertEqual(exp_params, act_params)
# Test unexpected classifier
with self.assertRaises(TypeError) as message:
act_params = test_ml_train.define_tuned_model_params(
AdaBoostRegressor(), x_train, n_folds
)
self.assertEqual(
str(message.exception),
'Unrecognised value provided for "classifier". Expect "classifier" '
'to be one of:\nsklearn.linear_model.LogisticRegression()\n'
'sklearn.neighbors.KNeighborsClassifier()\n'
'sklearn.svm.LinearSVC()\nsklearn.svm.SVC()\n'
'sklearn.ensemble.AdaBoostClassifier()\n'
'sklearn.naive_bayes.GaussianNB()\n'
'sklearn.discriminant_analysis.LinearDiscriminantAnalysis()\n'
'sklearn.dummy.DummyClassifier()'
)
# Removes directory created by defining RunML object
shutil.rmtree('tests/Temp_output')
def test_flag_extreme_params(self):
"""
Tests flag_extreme_params in train.py
"""
print('Testing flag_extreme_params')
results_dir = 'tests/Temp_output'
fluor_data = pd.DataFrame({'Feature_1': [4, 2, 7, 9],
'Feature_2': [6, 6, 2, 6],
'Feature_3': [8, 6, 4, 2]})
classes = ['A', 'B', 'B', 'A']
subclasses = ['A_1', 'B_1', 'B_2', 'A_2']
shuffle = False
test_ml_train = RunML(
results_dir, fluor_data, classes, subclasses, shuffle, True
)
# Defines function arguments
best_params = {'A': 1,
'B': 1.5,
'C': 1.0}
poss_params = OrderedDict({'B': np.array([1.5, 2.5]),
'A': np.array([0, 1, 2, 3]),
'C': np.array([1.0, 1.5, 2.0])})
# Test "best_params" is a dictionary
with self.assertRaises(TypeError) as message:
test_ml_train.flag_extreme_params([], poss_params, True)
self.assertEqual(
str(message.exception), 'Expect "best_params" to be a dictionary of'
' "optimal" parameter values returned after running an algorithm '
'such as RandomizedSearchCV or GridSearchCV'
)
# Test "poss_params" is a dictionary
with self.assertRaises(TypeError) as message:
test_ml_train.flag_extreme_params(best_params, True, True)
self.assertEqual(
str(message.exception), 'Expect "poss_params" to be the dictionary '
'of parameter ranges fed into the optimisation algorithm, such as '
'that returned by define_model_params function'
)
# Test keys in "best_params" and "poss_params" match
with self.assertRaises(ValueError) as message:
test_ml_train.flag_extreme_params({'A': 1}, poss_params, True)
self.assertEqual(
str(message.exception), 'Mismatch in the keys in "best_params" and '
'"poss_params"'
)
        # Test the warning message that should be printed when a parameter
        # value in "best_params" lies at the extreme end of the range
        # specified in "poss_params"
exp_warning = (
'\x1b[31m WARNING: Optimal value selected for C is at the extreme '
'of the range tested \033[0m \nRange tested: [1.0, 1.5, 2.0]\nValue '
'selected: 1.0\n\n'
)
act_warning = test_ml_train.flag_extreme_params(best_params, poss_params, True)
self.assertEqual(exp_warning, act_warning)
# Removes directory created by defining RunML object
shutil.rmtree('tests/Temp_output')
def test_conv_resampling_method(self):
"""
Tests conv_resampling_method in train.py
"""
print('Testing conv_resampling_method')
results_dir = 'tests/Temp_output'
fluor_data = pd.DataFrame({'Feature_1': [4, 2, 7, 9],
'Feature_2': [6, 6, 2, 6],
'Feature_3': [8, 6, 4, 2]})
classes = ['A', 'B', 'B', 'A']
subclasses = ['A_1', 'B_1', 'B_2', 'A_2']
shuffle = False
test_ml_train = RunML(
results_dir, fluor_data, classes, subclasses, shuffle, True
)
# Tests error
with self.assertRaises(ValueError) as message:
test_ml_train.conv_resampling_method('', False)
self.assertEqual(
str(message.exception), 'Resampling method not recognised'
)
# Removes directory created by defining RunML object
shutil.rmtree('tests/Temp_output')
def test_run_randomised_search(self):
"""
Tests run_randomised_search in train.py
"""
print('Testing run_randomised_search')
results_dir = 'tests/Temp_output'
fluor_data = pd.DataFrame({
'Feature_1': [5, 9, 8, 1, 3, 5, 10, 6, 7, 1, 8, 9, 1, 10, 2, 2, 8,
7, 1, 3, 8, 4, 3, 4, 4, 6, 2, 10, 4, 5, 1, 7, 10, 3,
10, 6, 3, 8, 1, 4, 6, 1, 5, 2, 2, 1, 7, 1, 2, 4],
'Feature_2': [9, 9, 7, 9, 6, 4, 7, 4, 2, 9, 7, 9, 7, 6, 4, 10, 8, 1,
5, 4, 3, 3, 4, 3, 1, 4, 9, 6, 7, 10, 4, 6, 9, 2, 7, 4,
3, 5, 7, 10, 1, 5, 3, 7, 2, 5, 10, 2, 2, 5],
'Feature_3': [5, 4, 8, 10, 3, 2, 10, 5, 1, 10, 5, 5, 5, 10, 7, 1, 8,
8, 2, 1, 10, 9, 10, 6, 7, 4, 3, 3, 10, 10, 4, 7, 4, 6,
10, 7, 6, 9, 4, 9, 9, 4, 4, 5, 10, 2, 10, 1, 7, 10]
})
classes = [
'A', 'B', 'B', 'A', 'B', 'B', 'A', 'A', 'B', 'A', 'A', 'B', 'B',
'A', 'A', 'A', 'B', 'B', 'A', 'B', 'A', 'B', 'A', 'A', 'A', 'A',
'A', 'A', 'B', 'A', 'A', 'A', 'B', 'A', 'B', 'A', 'B', 'B', 'A',
'A', 'B', 'B', 'B', 'A', 'A', 'A', 'B', 'B', 'B', 'B'
]
subclasses = [
'A_1', 'B_2', 'B_1', 'A_2', 'B_1', 'B_2', 'A_2', 'A_1', 'B_1',
'A_1', 'A_2', 'B_1', 'B_2', 'A_1', 'A_1', 'A_2', 'B_2', 'B_1',
'A_1', 'B_2', 'A_2', 'B_2', 'A_2', 'A_2', 'A_2', 'A_1', 'A_1',
'A_2', 'B_2', 'A_1', 'A_2', 'A_1', 'B_2', 'A_1', 'B_1', 'A_2',
'B_2', 'B_2', 'A_1', 'A_1', 'B_1', 'B_1', 'B_2', 'A_1', 'A_1',
'A_2', 'B_2', 'B_2', 'B_1', 'B_1'
]
shuffle = False
test_ml_train = RunML(
results_dir, fluor_data, classes, subclasses, shuffle, True
)
# Test random search with PCA
from sklearn.ensemble import AdaBoostClassifier
from sklearn.model_selection import GroupKFold
parameters = {'n_estimators': [3, 10, 30, 100, 300, 1000]}
splits = list(GroupKFold(n_splits=2).split(
X=test_ml_train.x, y=test_ml_train.y,
groups=test_ml_train.sub_classes
))
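        # GroupKFold guarantees that samples sharing a subclass label are never
        # split between the training and validation folds, so hyperparameter
        # tuning is assessed on unseen subclasses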
exp_search_results = {
'params': [{'AdaBoostClassifier__n_estimators': 30},
{'AdaBoostClassifier__n_estimators': 10},
{'AdaBoostClassifier__n_estimators': 300},
{'AdaBoostClassifier__n_estimators': 3}],
'split0_test_score': np.array([0.68, 0.68, 0.68, 0.64]),
'split1_test_score': np.array([0.68, 0.68, 0.6, 0.64]),
'mean_test_score': np.array([0.68, 0.68, 0.64, 0.64]),
'std_test_score': np.array([0., 0., 0.04, 0.]),
'rank_test_score': np.array([1, 1, 3, 3], dtype=np.int32)
}
exp_best_params = {'AdaBoostClassifier__n_estimators': 30}
act_random_search = test_ml_train.run_randomised_search(
test_ml_train.x, test_ml_train.y, test_ml_train.sub_classes,
['Feature_1', 'Feature_3'], AdaBoostClassifier(random_state=1),
splits, True, 'smote', 1, parameters, 'accuracy', 4, True
)
act_search_results = act_random_search.cv_results_
for key in [
'mean_fit_time', 'std_fit_time', 'mean_score_time',
'std_score_time', 'param_AdaBoostClassifier__n_estimators'
]:
            del act_search_results[key]
act_best_params = act_random_search.best_params_
self.assertEqual(list(exp_search_results.keys()),
list(act_search_results.keys()))
for key in exp_search_results.keys():
if type(exp_search_results[key]) == np.ndarray:
np.testing.assert_almost_equal(
exp_search_results[key], act_search_results[key], 7
)
else:
self.assertEqual(
exp_search_results[key], act_search_results[key]
)
self.assertEqual(exp_best_params, act_best_params)
# Test random search without PCA
from sklearn.svm import LinearSVC
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import f1_score, make_scorer
parameters = {'C': np.logspace(-3, 5, num=9, base=10)}
splits = list(StratifiedKFold(n_splits=4, shuffle=True, random_state=1).split(
X=test_ml_train.x, y=test_ml_train.y, groups=test_ml_train.sub_classes
))
scoring_metric = make_scorer(f1_score, average='binary', pos_label='A')
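        # make_scorer wraps f1_score so that the binary F1 score is computed
        # treating class 'A' as the positive label when ranking the candidate
        # "C" values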
exp_search_results = {
'params': [{'LinearSVC__C': 0.001},
{'LinearSVC__C': 0.01},
{'LinearSVC__C': 0.1},
{'LinearSVC__C': 1.0},
{'LinearSVC__C': 10.0},
{'LinearSVC__C': 100.0},
{'LinearSVC__C': 1000.0},
{'LinearSVC__C': 10000.0},
{'LinearSVC__C': 100000.0}],
'split0_test_score': np.array([
0.66666667, 0.66666667, 0.66666667, 0.66666667, 0.66666667,
0.66666667, 0.66666667, 0.66666667, 0.66666667
]),
'split1_test_score': np.array([
0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5
]),
'split2_test_score': np.array([
0.33333333, 0.33333333, 0.57142857, 0.57142857, 0.57142857,
0.57142857, 0.57142857, 0.57142857, 0.57142857
]),
'split3_test_score': np.array([
0.44444444, 0.44444444, 0.6, 0.54545455, 0.54545455, 0.54545455,
0.54545455, 0.54545455, 0.54545455
]),
'mean_test_score': np.array([
0.48611111, 0.48611111, 0.58452381, 0.57088745, 0.57088745,
0.57088745, 0.57088745, 0.57088745, 0.57088745
]),
'std_test_score': np.array([
0.12028131, 0.12028131, 0.05979699, 0.0609217, 0.0609217,
0.0609217, 0.0609217, 0.0609217, 0.0609217
]),
'rank_test_score': np.array([8, 8, 1, 2, 2, 2, 2, 2, 2], dtype=np.int32)
}
exp_best_params = {'LinearSVC__C': 0.1}
act_random_search = test_ml_train.run_randomised_search(
test_ml_train.x, test_ml_train.y, None, ['Feature_1', 'Feature_2'],
LinearSVC(dual=False, random_state=1), splits, True,
'max_sampling', None, parameters, scoring_metric, None, True
)
act_search_results = act_random_search.cv_results_
for key in [
'mean_fit_time', 'std_fit_time', 'mean_score_time',
'std_score_time', 'param_LinearSVC__C'
]:
            del act_search_results[key]
act_best_params = act_random_search.best_params_
self.assertEqual(list(exp_search_results.keys()),
list(act_search_results.keys()))
for key in exp_search_results.keys():
if type(exp_search_results[key]) == np.ndarray:
np.testing.assert_almost_equal(
exp_search_results[key], act_search_results[key], 7
)
else:
self.assertEqual(
exp_search_results[key], act_search_results[key]
)
self.assertEqual(exp_best_params, act_best_params)
# Removes directory created by defining RunML object
shutil.rmtree('tests/Temp_output')
def test_run_grid_search(self):
"""
Tests run_grid_search in train.py
"""
print('Testing run_grid_search')
results_dir = 'tests/Temp_output'
fluor_data = pd.DataFrame({
'Feature_1': [5, 9, 8, 1, 3, 5, 10, 6, 7, 1, 8, 9, 1, 10, 2, 2, 8,
7, 1, 3, 8, 4, 3, 4, 4, 6, 2, 10, 4, 5, 1, 7, 10, 3,
10, 6, 3, 8, 1, 4, 6, 1, 5, 2, 2, 1, 7, 1, 2, 4],
'Feature_2': [9, 9, 7, 9, 6, 4, 7, 4, 2, 9, 7, 9, 7, 6, 4, 10, 8, 1,
5, 4, 3, 3, 4, 3, 1, 4, 9, 6, 7, 10, 4, 6, 9, 2, 7, 4,
3, 5, 7, 10, 1, 5, 3, 7, 2, 5, 10, 2, 2, 5],
'Feature_3': [5, 4, 8, 10, 3, 2, 10, 5, 1, 10, 5, 5, 5, 10, 7, 1, 8,
8, 2, 1, 10, 9, 10, 6, 7, 4, 3, 3, 10, 10, 4, 7, 4, 6,
10, 7, 6, 9, 4, 9, 9, 4, 4, 5, 10, 2, 10, 1, 7, 10]
})
classes = [
'A', 'B', 'B', 'A', 'B', 'B', 'A', 'A', 'B', 'A', 'A', 'B', 'B',
'A', 'A', 'A', 'B', 'B', 'A', 'B', 'A', 'B', 'A', 'A', 'A', 'A',
'A', 'A', 'B', 'A', 'A', 'A', 'B', 'A', 'B', 'A', 'B', 'B', 'A',
'A', 'B', 'B', 'B', 'A', 'A', 'A', 'B', 'B', 'B', 'B'
]
subclasses = [
'A_1', 'B_2', 'B_1', 'A_2', 'B_1', 'B_2', 'A_2', 'A_1', 'B_1',
'A_1', 'A_2', 'B_1', 'B_2', 'A_1', 'A_1', 'A_2', 'B_2', 'B_1',
'A_1', 'B_2', 'A_2', 'B_2', 'A_2', 'A_2', 'A_2', 'A_1', 'A_1',
'A_2', 'B_2', 'A_1', 'A_2', 'A_1', 'B_2', 'A_1', 'B_1', 'A_2',
'B_2', 'B_2', 'A_1', 'A_1', 'B_1', 'B_1', 'B_2', 'A_1', 'A_1',
'A_2', 'B_2', 'B_2', 'B_1', 'B_1'
]
shuffle = False
test_ml_train = RunML(
results_dir, fluor_data, classes, subclasses, shuffle, True
)
# Test grid search with PCA
from sklearn.svm import SVC
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import recall_score, make_scorer
parameters = {'C': np.logspace(-2, 3, num=6, base=10),
'gamma': np.logspace(-4, 1, num=6, base=10)}
splits = list(StratifiedKFold(n_splits=4, shuffle=True, random_state=1).split(
X=test_ml_train.x, y=test_ml_train.y, groups=test_ml_train.sub_classes
))
scoring_metric = make_scorer(recall_score, average='macro')
exp_search_results = {
'params': [{'SVC__C': 0.01, 'SVC__gamma': 0.0001},
{'SVC__C': 0.01, 'SVC__gamma': 0.001},
{'SVC__C': 0.01, 'SVC__gamma': 0.01},
{'SVC__C': 0.01, 'SVC__gamma': 0.1},
{'SVC__C': 0.01, 'SVC__gamma': 1.0},
{'SVC__C': 0.01, 'SVC__gamma': 10.0},
{'SVC__C': 0.1, 'SVC__gamma': 0.0001},
{'SVC__C': 0.1, 'SVC__gamma': 0.001},
{'SVC__C': 0.1, 'SVC__gamma': 0.01},
{'SVC__C': 0.1, 'SVC__gamma': 0.1},
{'SVC__C': 0.1, 'SVC__gamma': 1.0},
{'SVC__C': 0.1, 'SVC__gamma': 10.0},
{'SVC__C': 1.0, 'SVC__gamma': 0.0001},
{'SVC__C': 1.0, 'SVC__gamma': 0.001},
{'SVC__C': 1.0, 'SVC__gamma': 0.01},
{'SVC__C': 1.0, 'SVC__gamma': 0.1},
{'SVC__C': 1.0, 'SVC__gamma': 1.0},
{'SVC__C': 1.0, 'SVC__gamma': 10.0},
{'SVC__C': 10.0, 'SVC__gamma': 0.0001},
{'SVC__C': 10.0, 'SVC__gamma': 0.001},
{'SVC__C': 10.0, 'SVC__gamma': 0.01},
{'SVC__C': 10.0, 'SVC__gamma': 0.1},
{'SVC__C': 10.0, 'SVC__gamma': 1.0},
{'SVC__C': 10.0, 'SVC__gamma': 10.0},
{'SVC__C': 100.0, 'SVC__gamma': 0.0001},
{'SVC__C': 100.0, 'SVC__gamma': 0.001},
{'SVC__C': 100.0, 'SVC__gamma': 0.01},
{'SVC__C': 100.0, 'SVC__gamma': 0.1},
{'SVC__C': 100.0, 'SVC__gamma': 1.0},
{'SVC__C': 100.0, 'SVC__gamma': 10.0},
{'SVC__C': 1000.0, 'SVC__gamma': 0.0001},
{'SVC__C': 1000.0, 'SVC__gamma': 0.001},
{'SVC__C': 1000.0, 'SVC__gamma': 0.01},
{'SVC__C': 1000.0, 'SVC__gamma': 0.1},
{'SVC__C': 1000.0, 'SVC__gamma': 1.0},
{'SVC__C': 1000.0, 'SVC__gamma': 10.0}],
'split0_test_score': np.array([
0.51190476, 0.51190476, 0.51190476, 0.51190476, 0.66666667,
0.45238095, 0.51190476, 0.51190476, 0.51190476, 0.51190476,
0.66666667, 0.45238095, 0.51190476, 0.51190476, 0.51190476,
0.51190476, 0.52380952, 0.53571429, 0.51190476, 0.51190476,
0.51190476, 0.58333333, 0.53571429, 0.63095238, 0.51190476,
0.51190476, 0.51190476, 0.53571429, 0.53571429, 0.64285714,
0.51190476, 0.51190476, 0.58333333, 0.53571429, 0.53571429,
0.55952381
]),
'split1_test_score': np.array([
0.21428571, 0.21428571, 0.14285714, 0.14285714, 0.1547619,
0.42857143, 0.21428571, 0.21428571, 0.14285714, 0.14285714,
0.1547619, 0.42857143, 0.21428571, 0.21428571, 0.14285714,
0.14285714, 0.1547619, 0.60714286, 0.21428571, 0.21428571,
0.14285714, 0.1547619, 0.28571429, 0.60714286, 0.21428571,
0.21428571, 0.07142857, 0.08333333, 0.45238095, 0.53571429,
0.21428571, 0.14285714, 0.1547619, 0.08333333, 0.51190476,
0.53571429
]),
'split2_test_score': np.array([
0.17142857, 0.17142857, 0.17142857, 0.31428571, 0.48571429,
0.51428571, 0.17142857, 0.17142857, 0.17142857, 0.31428571,
0.48571429, 0.51428571, 0.17142857, 0.17142857, 0.17142857,
0.31428571, 0.51428571, 0.51428571, 0.17142857, 0.17142857,
0.17142857, 0.45714286, 0.51428571, 0.51428571, 0.17142857,
0.17142857, 0.24285714, 0.51428571, 0.51428571, 0.51428571,
0.17142857, 0.17142857, 0.45714286, 0.51428571, 0.58571429,
0.58571429
]),
'split3_test_score': np.array([
0.5, 0.5, 0.41666667, 0.5, 0.66666667, 0.41666667, 0.5, 0.5,
0.41666667, 0.5, 0.66666667, 0.41666667, 0.5, 0.5, 0.41666667,
0.5, 0.66666667, 0.41666667, 0.5, 0.5, 0.41666667, 0.5,
0.58333333, 0.33333333, 0.5, 0.5, 0.41666667, 0.66666667, 0.5,
0.41666667, 0.5, 0.5, 0.5, 0.58333333, 0.5, 0.33333333
]),
'mean_test_score': np.array([
0.34940476, 0.34940476, 0.31071429, 0.3672619, 0.49345238,
0.45297619, 0.34940476, 0.34940476, 0.31071429, 0.3672619,
0.49345238, 0.45297619, 0.34940476, 0.34940476, 0.31071429,
0.3672619, 0.46488095, 0.51845238, 0.34940476, 0.34940476,
0.31071429, 0.42380952, 0.4797619 , 0.52142857, 0.34940476,
0.34940476, 0.31071429, 0.45, 0.50059524, 0.52738095,
0.34940476, 0.33154762, 0.42380952, 0.42916667, 0.53333333,
0.50357143
]),
'std_test_score': np.array([
0.15733552, 0.15733552, 0.15754368, 0.15141411, 0.20903199,
0.03766028, 0.15733552, 0.15733552, 0.15754368, 0.15141411,
0.20903199, 0.03766028, 0.15733552, 0.15733552, 0.15754368,
0.15141411, 0.18894755, 0.06808389, 0.15733552, 0.15733552,
0.15754368, 0.16182596, 0.11478685, 0.11701842, 0.15733552,
0.15733552, 0.1684976, 0.21958397, 0.03060126, 0.08036376,
0.15733552, 0.17474775, 0.16182596, 0.20122481, 0.03286232,
0.09986527
]),
'rank_test_score': np.array(
[20, 20, 32, 17, 7, 11, 20, 20, 32, 17, 7, 11, 20, 20, 32, 17,
10, 4, 20, 20, 32, 15, 9, 3, 20, 20, 32, 13, 6, 2, 20, 31, 15,
14, 1, 5], dtype=np.int32)
}
exp_best_params = {'SVC__C': 1000.0,
'SVC__gamma': 1.0}
act_grid_search = test_ml_train.run_grid_search(
test_ml_train.x, test_ml_train.y, test_ml_train.sub_classes,
['Feature_1', 'Feature_2', 'Feature_3'], SVC(random_state=1),
splits, True, 'smotetomek', 1, parameters, scoring_metric, True
)
act_search_results = act_grid_search.cv_results_
for key in [
'mean_fit_time', 'std_fit_time', 'mean_score_time',
'std_score_time', 'param_SVC__C', 'param_SVC__gamma'
]:
del(act_search_results[key])
act_best_params = act_grid_search.best_params_
self.assertEqual(list(exp_search_results.keys()),
list(act_search_results.keys()))
for key in exp_search_results.keys():
if type(exp_search_results[key]) == np.ndarray:
np.testing.assert_almost_equal(
exp_search_results[key], act_search_results[key], 7
)
else:
self.assertEqual(
exp_search_results[key], act_search_results[key]
)
self.assertEqual(exp_best_params, act_best_params)
# Test grid search without PCA
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import GroupKFold
splits = list(GroupKFold(n_splits=2).split(
X=test_ml_train.x, y=test_ml_train.y,
groups=test_ml_train.sub_classes
))
exp_search_results = {
'params': [{}],
'split0_test_score': np.array([0.52]),
            'split1_test_score': np.array([0.4]),
# Dual annealing unit tests implementation.
# Copyright (c) 2018 <NAME> <<EMAIL>>,
# <NAME> <<EMAIL>>
# Author: <NAME>, PMP S.A.
"""
Unit tests for the dual annealing global optimizer
"""
from scipy.optimize import dual_annealing
from scipy.optimize._dual_annealing import VisitingDistribution
from scipy.optimize._dual_annealing import ObjectiveFunWrapper
from scipy.optimize._dual_annealing import EnergyState
from scipy.optimize._dual_annealing import LocalSearchWrapper
from scipy.optimize import rosen, rosen_der
import numpy as np
from numpy.testing import (assert_equal, TestCase, assert_allclose,
assert_array_less)
from pytest import raises as assert_raises
from scipy._lib._util import check_random_state
class TestDualAnnealing(TestCase):
def setUp(self):
        # A function that always returns infinity for initialization tests
self.weirdfunc = lambda x: np.inf
# 2-D bounds for testing function
self.ld_bounds = [(-5.12, 5.12)] * 2
# 4-D bounds for testing function
self.hd_bounds = self.ld_bounds * 4
# Number of values to be generated for testing visit function
self.nbtestvalues = 5000
self.high_temperature = 5230
self.low_temperature = 0.1
self.qv = 2.62
self.seed = 1234
self.rs = check_random_state(self.seed)
self.nb_fun_call = 0
self.ngev = 0
def tearDown(self):
pass
def callback(self, x, f, context):
        # For testing callback mechanism. Should stop for f <= 1.0 as
        # the callback function returns True
if f <= 1.0:
return True
def func(self, x, args=()):
# Using Rastrigin function for performing tests
if args:
shift = args
else:
shift = 0
y = np.sum((x - shift) ** 2 - 10 * np.cos(2 * np.pi * (
x - shift))) + 10 * np.size(x) + shift
self.nb_fun_call += 1
return y
def rosen_der_wrapper(self, x, args=()):
self.ngev += 1
return rosen_der(x, *args)
def test_visiting_stepping(self):
lu = list(zip(*self.ld_bounds))
lower = np.array(lu[0])
upper = np.array(lu[1])
dim = lower.size
vd = VisitingDistribution(lower, upper, self.qv, self.rs)
        values = np.zeros(dim)
import sys
sys.path.append('../')
import config as cf
from word_feature.utils import get_unique_word, read_caption_clean_file, map_w2id
import numpy as np
def load_glove(path):
"""
    Gives you the dict of word and its coefficient
"""
f = open(path, encoding='utf-8')
print("Loading the /{}/ vector".format(path.split('/')[-1]))
embeddings_index = {}
for line in f:
values = line.split()
word = values[0]
        coefs = np.asarray(values[1:], dtype='float32')
"""
"""
import os
import numpy as np
import scipy.misc
import tensorflow as tf
from six.moves import range
from mnist import load_mnist
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('mnist-root-path', None, '')
tf.app.flags.DEFINE_string('ckpt-path', None, '')
tf.app.flags.DEFINE_string('meta-path', None, '')
tf.app.flags.DEFINE_string('result-path', None, '')
def load_datasets():
"""
load mnist
"""
path_root = FLAGS.mnist_root_path
path_train_eigens = os.path.join(path_root, 'train-images-idx3-ubyte.gz')
path_train_labels = os.path.join(path_root, 'train-labels-idx1-ubyte.gz')
path_issue_eigens = os.path.join(path_root, 't10k-images-idx3-ubyte.gz')
path_issue_labels = os.path.join(path_root, 't10k-labels-idx1-ubyte.gz')
datasets = load_mnist(
path_train_eigens, path_train_labels,
path_issue_eigens, path_issue_labels)
all_eigens = np.concatenate(
[datasets['train_eigens'], datasets['issue_eigens']], axis=0)
all_labels = np.concatenate(
[datasets['train_labels'], datasets['issue_labels']], axis=0)
eigens = np.zeros_like(all_eigens[:10])
labels = np.zeros_like(all_labels[:10])
for i in range(10):
i_labels = np.where(all_labels[:, i] == 1.0)[0]
m = np.random.randint(i_labels.size)
n = i_labels[m]
eigens[i] = all_eigens[n]
labels[i] = all_labels[n]
return eigens, labels
def main(_):
"""
"""
eigens, labels = load_datasets()
with tf.Session() as session:
saver = tf.train.import_meta_graph(FLAGS.meta_path)
saver.restore(session, FLAGS.ckpt_path)
graph = tf.get_default_graph()
images_tensor = graph.get_tensor_by_name('images:0')
labels_tensor = graph.get_tensor_by_name('labels:0')
digit_capsules_tensor = graph.get_tensor_by_name('digit_capsules:0')
inserted_digit_capsules_tensor = \
graph.get_tensor_by_name('inserted_digit_capsules:0')
reconstruction_tensor = \
graph.get_tensor_by_name('reconstructions_from_latent:0')
# NOTE: fetch digit capsules of all digits
feeds = {
images_tensor: eigens,
labels_tensor: labels,
}
digit_capsules = session.run(digit_capsules_tensor, feed_dict=feeds)
# prepare masks
        masks = np.zeros((11 * 16, 10, 16))
import numpy as np
import numpy.linalg as LA
from .solve_R1 import problem_R1, Classo_R1, pathlasso_R1
from .solve_R2 import problem_R2, Classo_R2, pathlasso_R2
from .solve_R3 import problem_R3, Classo_R3, pathlasso_R3
from .solve_R4 import problem_R4, Classo_R4, pathlasso_R4
from .path_alg import solve_path, pathalgo_general, h_lambdamax
"""
Classo and pathlasso are the main functions,
they can call every algorithm acording
to the method and formulation required
"""
# can be 'Path-Alg', 'P-PDS' , 'PF-PDS' or 'DR'
def Classo(
matrix,
lam,
typ="R1",
meth="DR",
rho=1.345,
get_lambdamax=False,
true_lam=False,
e=None,
rho_classification=-1.0,
w=None,
intercept=False,
return_sigm=True,
):
if w is not None:
matrices = (matrix[0] / w, matrix[1] / w, matrix[2])
else:
matrices = matrix
X, C, y = matrices
if typ == "R3":
if intercept:
# here we use the fact that for R1 and R3,
            # the intercept is simply beta0 = ybar - np.vdot(Xbar, beta)
            # so by changing the X to X-Xbar and y to y-ybar
            # we can solve the standard problem
Xbar, ybar = np.mean(X, axis=0), np.mean(y)
matrices = (X - Xbar, C, y - ybar)
if meth not in ["Path-Alg", "DR"]:
meth = "DR"
if e is None or e == len(matrices[0]) / 2:
r = 1.0
pb = problem_R3(matrices, meth)
e = len(matrices[0]) / 2
else:
r = np.sqrt(2 * e / len(matrices[0]))
pb = problem_R3((matrices[0] * r, matrices[1], matrices[2] * r), meth)
lambdamax = pb.lambdamax
if true_lam:
beta, s = Classo_R3(pb, lam / lambdamax)
else:
beta, s = Classo_R3(pb, lam)
if intercept:
betaO = ybar - np.vdot(Xbar, beta)
beta = np.array([betaO] + list(beta))
elif typ == "R4":
if meth not in ["Path-Alg", "DR"]:
meth = "DR"
if e is None or e == len(matrices[0]):
r = 1.0
pb = problem_R4(matrices, meth, rho, intercept=intercept)
e = len(matrices[0])
else:
r = np.sqrt(e / len(matrices[0]))
pb = problem_R4(
(matrices[0] * r, matrices[1], matrices[2] * r),
meth,
rho / r,
intercept=intercept,
)
lambdamax = pb.lambdamax
if true_lam:
beta, s = Classo_R4(pb, lam / lambdamax)
else:
beta, s = Classo_R4(pb, lam)
elif typ == "R2":
if meth not in ["Path-Alg", "P-PDS", "PF-PDS", "DR"]:
meth = "ODE"
pb = problem_R2(matrices, meth, rho, intercept=intercept)
lambdamax = pb.lambdamax
if true_lam:
beta = Classo_R2(pb, lam / lambdamax)
else:
beta = Classo_R2(pb, lam)
elif typ == "C2":
assert set(matrices[2]).issubset({1, -1})
lambdamax = h_lambdamax(
matrices, rho_classification, typ="C2", intercept=intercept
)
if true_lam:
out = solve_path(
matrices,
lam / lambdamax,
False,
rho_classification,
"C2",
intercept=intercept,
)
else:
out = solve_path(
matrices, lam, False, rho_classification, "C2", intercept=intercept
)
if intercept:
beta0, beta = out[0][-1], out[1][-1]
beta = np.array([beta0] + list(beta))
else:
beta = out[0][-1]
elif typ == "C1":
assert set(matrices[2]).issubset({1, -1})
lambdamax = h_lambdamax(matrices, 0, typ="C1", intercept=intercept)
if true_lam:
out = solve_path(
matrices, lam / lambdamax, False, 0, "C1", intercept=intercept
)
else:
out = solve_path(matrices, lam, False, 0, "C1", intercept=intercept)
if intercept:
beta0, beta = out[0][-1], out[1][-1]
beta = np.array([beta0] + list(beta))
else:
beta = out[0][-1]
else: # LS
if intercept:
# here we use the fact that for R1 and R3,
            # the intercept is simply beta0 = ybar - np.vdot(Xbar, beta)
            # so by changing the X to X-Xbar and y to y-ybar
            # we can solve the standard problem
Xbar, ybar = np.mean(X, axis=0), np.mean(y)
matrices = (X - Xbar, C, y - ybar)
if meth not in ["Path-Alg", "P-PDS", "PF-PDS", "DR"]:
meth = "DR"
pb = problem_R1(matrices, meth)
lambdamax = pb.lambdamax
if true_lam:
beta = Classo_R1(pb, lam / lambdamax)
else:
beta = Classo_R1(pb, lam)
if intercept:
betaO = ybar - np.vdot(Xbar, beta)
beta = np.array([betaO] + list(beta))
if w is not None:
if intercept:
beta[1:] = beta[1:] / w
else:
beta = beta / w
if typ in ["R3", "R4"] and return_sigm:
if get_lambdamax:
return (lambdamax, beta, s)
else:
return (beta, s)
if get_lambdamax:
return (lambdamax, beta)
else:
return beta
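# Hedged usage sketch (not part of the original source; X, C and y are assumed to be
# numpy arrays holding the design matrix, constraint matrix and response):
# >>> beta = Classo((X, C, y), lam=0.1, typ="R1", meth="DR")
# With typ="R3" or "R4" and return_sigm=True the call returns (beta, sigma) instead,
# and get_lambdamax=True prepends lambdamax to the returned result.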
def pathlasso(
matrix,
lambdas=False,
n_active=0,
lamin=1e-2,
typ="R1",
meth="Path-Alg",
rho=1.345,
true_lam=False,
e=None,
return_sigm=False,
rho_classification=-1.0,
w=None,
intercept=False,
):
Nactive = n_active
if Nactive == 0:
Nactive = False
if type(lambdas) is bool:
lambdas = lamin ** (np.linspace(0.0, 1, 100))
if lambdas[0] < lambdas[-1]:
lambdass = [
lambdas[i] for i in range(len(lambdas) - 1, -1, -1)
] # reverse the list if needed
else:
lambdass = [lambdas[i] for i in range(len(lambdas))]
if w is not None:
matrices = (matrix[0] / w, matrix[1] / w, matrix[2])
else:
matrices = matrix
X, C, y = matrices
if typ == "R2":
pb = problem_R2(matrices, meth, rho, intercept=intercept)
lambdamax = pb.lambdamax
if true_lam:
lambdass = [lamb / lambdamax for lamb in lambdass]
BETA = pathlasso_R2(pb, lambdass, n_active=Nactive)
elif typ == "R3":
if intercept:
            # here we use the fact that for R1 and R3,
            # the intercept is simply beta0 = ybar - np.vdot(Xbar, beta)
            # so by changing the X to X-Xbar and y to y-ybar
            # we can solve the standard problem
Xbar, ybar = np.mean(X, axis=0), np.mean(y)
matrices = (X - Xbar, C, y - ybar)
if e is None or e == len(matrices[0]) / 2:
r = 1.0
pb = problem_R3(matrices, meth)
else:
r = np.sqrt(2 * e / len(matrices[0]))
pb = problem_R3((matrices[0] * r, matrices[1], matrices[2] * r), meth)
lambdamax = pb.lambdamax
if true_lam:
lambdass = [lamb / lambdamax for lamb in lambdass]
BETA, S = pathlasso_R3(pb, lambdass, n_active=Nactive)
S = np.array(S) / r ** 2
        BETA = np.array(BETA)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import copy
import numpy as np
from six.moves import xrange
import tensorflow as tf
import warnings
from . import utils_tf
from . import utils
from cleverhans.compat import reduce_max, reduce_min
from cleverhans.compat import reduce_mean, reduce_sum
from cleverhans.compat import reduce_any
from . import loss as loss_module
_logger = utils.create_logger("cleverhans.attacks.tf")
np_dtype = np.dtype('float32')
tf_dtype = tf.as_dtype('float32')
def ZERO():
  return np.asarray(0., dtype=np_dtype)
"""
Test twisted boundary conditions with an Ising system
"""
import numpy as np
import ed_spins
import ed_geometry as geom
import ed_symmetry as symm
n_phases = 4
phis = np.linspace(0, 2*np.pi, n_phases)
phi1s, phi2s = np.meshgrid(phis, phis)
from . algebraic_system import AlgebraicSystem
import numpy as np
from scipy.sparse.linalg import spsolve as sparseLinearSolve, lgmres
from scipy.sparse import dok_matrix
from numpy.linalg import norm
from .fill_jacobian import fillJacobian
class NewtonSolver(object):
def __init__(self, maxIter=20, tolerance=1e-6, factor=1.0, _callback=None, silent=False):
self.maximumIterations=maxIter
self.tolerance=tolerance
self.iterCallback= _callback
self.factor=factor
self.silent=silent
return
def log(self, msg):
if(not self.silent):
print(msg)
return
def solve(self, system: AlgebraicSystem):
if(system.numberOfEquations() != system.numberOfVariables()):
self.log(f"Number of Equations: {system.numberOfEquations()} Number of Variables: {system.numberOfVariables()}")
raise RuntimeError("Can only solve square systems")
system.createIndex()
system.createSparsityPattern()
delta = np.zeros(system.numberOfVariables())
b = np.zeros(system.numberOfEquations())
n =1e12
e= 1e12
if(self.iterCallback):
self.iterCallback(-1,n,e )
labels=["Iter","Norm","Residual","Flags","Comment"]
comment=''
self.log(f"{labels[0]:<4} {'': <10s} {labels[1]:<12} {'': <10s} {labels[2]:<12} {'': <10s} {labels[3]:<6} {'': <10s} {labels[4]:<12}")
for i in range(self.maximumIterations):
A,b= fillJacobian(system,b)
delta= sparseLinearSolve(A,-b)
comment=''
n=norm(delta)
e=np.amax(b)
flags=["-","-","-","-"]
            if(np.isnan(n)
"""Tests for the mask.py script."""
import pytest
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
import unittest.mock as mock
from deltametrics import cube
from deltametrics import mask
from deltametrics.plan import OpeningAnglePlanform
from deltametrics.sample_data import _get_rcm8_path, _get_golf_path
rcm8_path = _get_rcm8_path()
with pytest.warns(UserWarning):
rcm8cube = cube.DataCube(rcm8_path)
golf_path = _get_golf_path()
golfcube = cube.DataCube(golf_path)
_OAP_0 = OpeningAnglePlanform.from_elevation_data(
golfcube['eta'][-1, :, :],
elevation_threshold=0)
_OAP_05 = OpeningAnglePlanform.from_elevation_data(
golfcube['eta'][-1, :, :],
elevation_threshold=0.5)
@mock.patch.multiple(mask.BaseMask,
__abstractmethods__=set())
class TestBaseMask:
"""
To test the BaseMask, we patch the base job with a filled abstract method
`.run()`.
.. note:: This patch is handled at the class level above!!
"""
fake_input = np.ones((100, 200))
@mock.patch('deltametrics.mask.BaseMask._set_shape_mask')
def test_name_setter(self, patched):
basemask = mask.BaseMask('somename', self.fake_input)
assert basemask.mask_type == 'somename'
patched.assert_called() # this would change the shape
assert basemask.shape is None # so shape is not set
assert basemask._mask is None # so mask is not set
def test_simple_example(self):
basemask = mask.BaseMask('field', self.fake_input)
# make a bunch of assertions
assert np.all(basemask._mask == False)
assert np.all(basemask.integer_mask == 0)
assert basemask._mask is basemask.mask
assert basemask.shape == self.fake_input.shape
def test_trim_mask_length(self):
basemask = mask.BaseMask('field', self.fake_input)
# mock as though the mask were made
basemask._mask = self.fake_input.astype(bool)
assert np.all(basemask.integer_mask == 1)
_l = 5
basemask.trim_mask(length=_l)
assert basemask._mask.dtype == bool
assert np.all(basemask.integer_mask[:_l, :] == 0)
assert np.all(basemask.integer_mask[_l:, :] == 1)
@pytest.mark.xfail(raises=NotImplementedError, strict=True,
reason='Have not implemented pathway.')
def test_trim_mask_cube(self):
basemask = mask.BaseMask('field', self.fake_input)
# mock as though the mask were made
basemask._mask = self.fake_input.astype(bool)
assert np.all(basemask.integer_mask == 1)
basemask.trim_mask(golfcube)
# assert np.all(basemask.integer_mask[:5, :] == 0)
# assert np.all(basemask.integer_mask[5:, :] == 1)
@pytest.mark.xfail(raises=NotImplementedError, strict=True,
reason='Have not implemented pathway.')
def test_trim_mask_noargs(self):
basemask = mask.BaseMask('field', self.fake_input)
# mock as though the mask were made
basemask._mask = self.fake_input.astype(bool)
assert np.all(basemask.integer_mask == 1)
basemask.trim_mask()
# assert np.all(basemask.integer_mask[:5, :] == 0)
# assert np.all(basemask.integer_mask[5:, :] == 1)
def test_trim_mask_axis1_withlength(self):
basemask = mask.BaseMask('field', self.fake_input)
# mock as though the mask were made
basemask._mask = self.fake_input.astype(bool)
assert np.all(basemask.integer_mask == 1)
_l = 5
basemask.trim_mask(axis=0, length=_l)
assert basemask._mask.dtype == bool
assert np.all(basemask.integer_mask[:, :_l] == 0)
assert np.all(basemask.integer_mask[:, _l:] == 1)
def test_trim_mask_diff_True(self):
basemask = mask.BaseMask('field', self.fake_input)
# everything is False (0)
assert np.all(basemask.integer_mask == 0)
_l = 5
basemask.trim_mask(value=True, length=_l)
assert basemask._mask.dtype == bool
        assert np.all(basemask.integer_mask[:_l, :] == 1)
import math
import unittest
import warnings
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
def _uniform(*shape):
return numpy.random.uniform(-1, 1, shape).astype(numpy.float32)
def _full_like(x, val):
xp = cuda.get_array_module(x)
return xp.full_like(x, val)
def _zeros_like(x):
xp = cuda.get_array_module(x)
return xp.zeros_like(x)
def _dot(x, y):
return sum(map(lambda a: a[0] * a[1], zip(x, y)))
class NumericalGradientTest(unittest.TestCase):
eps = None
atol = 1e-3
rtol = 1e-3
def f(self, xs):
return xs[0] ** 2,
def df(self, xs):
return (2 * xs[0],),
def setUp(self):
self.xs = (_uniform(2, 1),)
self.gys = (_uniform(2, 1),)
def check_numerical_grad_one(self, f, df, xs, gys, eps):
dfxs = df(xs)
gys = tuple(0 if gy is None else gy for gy in gys)
# matrix-vector multiplication of dfxs and dys
dx_expect = tuple(map(lambda dfx: _dot(dfx, gys), dfxs))
def func():
return f(xs)
dx_actual = gradient_check.numerical_grad(func, xs, gys, eps)
self.assertEqual(len(dx_expect), len(dx_actual))
for e, a in zip(dx_expect, dx_actual):
testing.assert_allclose(e, a, atol=self.atol, rtol=self.rtol)
def check_numerical_grad(self, f, df, xs, gys, eps=None):
if eps is None:
eps = tuple(10 ** (-i) for i in six.moves.range(2, 5))
elif not isinstance(eps, tuple):
eps = (eps, )
for e in eps:
self.check_numerical_grad_one(f, df, xs, gys, e)
def test_numerical_grad_cpu(self):
self.check_numerical_grad(self.f, self.df, self.xs, self.gys,
eps=self.eps)
@attr.gpu
def test_numerical_grad_gpu(self):
gys = tuple(None if gy is None else cuda.to_gpu(gy)
for gy in self.gys)
self.check_numerical_grad(self.f, self.df,
tuple(map(cuda.to_gpu, self.xs)), gys,
eps=self.eps)
class NumericalGradientTest2(NumericalGradientTest):
def f(self, xs):
return 1,
def df(self, xs):
return (0,),
class NumericalGradientTest3(NumericalGradientTest):
# Too small eps causes cancellation of significant digits
eps = (1e-2, 1e-3)
def f(self, xs):
xp = cuda.get_array_module(*xs)
return xp.exp(xs[0]),
def df(self, xs):
xp = cuda.get_array_module(*xs)
return (xp.exp(xs[0]),),
def setUp(self):
self.xs = (_uniform(2, 1),)
self.gys = (_uniform(2, 1),)
class NumericalGradientTest4(NumericalGradientTest):
atol = 1e-2
rtol = 1e-2
def f(self, xs):
assert len(xs) == 2
return (2 * xs[0] + 3 * xs[1],
4 * xs[0] + 5 * xs[1],
6 * xs[0] + 7 * xs[1])
def df(self, xs):
assert len(xs) == 2
return (
(_full_like(xs[0], 2), _full_like(xs[0], 4), _full_like(xs[0], 6)),
(_full_like(xs[1], 3), _full_like(xs[1], 5), _full_like(xs[1], 7)))
def setUp(self):
self.xs = tuple(_uniform(2, 1) for _ in six.moves.range(2))
self.gys = tuple(_uniform(2, 1) for _ in six.moves.range(3))
class NumericalGradientTest5(NumericalGradientTest4):
def f(self, xs):
assert len(xs) == 2
return (2 * xs[0] + 3 * xs[1],
4 * xs[0] + 5 * xs[1],
6 * xs[0] + 7 * xs[1])
def df(self, xs):
assert len(xs) == 2
return (
(_full_like(xs[0], 2), _zeros_like(xs[0]), _full_like(xs[0], 6)),
(_full_like(xs[1], 3), _zeros_like(xs[1]), _full_like(xs[1], 7)))
def setUp(self):
super(NumericalGradientTest5, self).setUp()
self.gys = (_uniform(2, 1), None, _uniform(2, 1))
class NumericalGradientTest6(NumericalGradientTest):
def setUp(self):
self.xs = (_uniform(2, 1),)
self.gys = (None,)
class NumericalGradientReferenceTest(unittest.TestCase):
def setUp(self):
self.x = _uniform(2, 3)
def check_reference(self, x):
# A returned value and an input refers the same memory.
# See issue #488
def func():
return x,
gx, = gradient_check.numerical_grad(func, (x,), (1,))
testing.assert_allclose(cuda.to_cpu(gx), 1)
def test_reference_cpu(self):
self.check_reference(self.x)
@attr.gpu
def test_reference_gpu(self):
self.check_reference(cuda.to_gpu(self.x))
class NumericalGradientInvalidEps(NumericalGradientTest):
def check_invalid_eps(self, xs, gys, eps):
with self.assertRaises(AssertionError):
self.check_numerical_grad(self.f, self.df, xs, gys, eps)
@condition.retry(3)
def test_numerical_grad_cpu(self):
self.check_invalid_eps(self.xs, self.gys, 0)
self.check_invalid_eps(self.xs, self.gys, -1.0)
@condition.retry(3)
@attr.gpu
def test_numerical_grad_gpu(self):
xs = tuple(map(cuda.to_gpu, self.xs))
gys = tuple(None if gy is None else cuda.to_gpu(gy)
for gy in self.gys)
self.check_invalid_eps(xs, gys, 0)
self.check_invalid_eps(xs, gys, -1.0)
class NumericalGradientInvalidType(unittest.TestCase):
def setUp(self):
self.x = numpy.array(0)
self.y = numpy.array(0)
self.f = lambda: None
@attr.gpu
def test_invalid_inputs(self):
y = cuda.to_gpu(self.y)
with self.assertRaises(RuntimeError):
gradient_check.numerical_grad(self.f, (self.x, y), ())
@attr.gpu
def test_invalid_outputs(self):
y = cuda.to_gpu(self.y)
with self.assertRaises(RuntimeError):
gradient_check.numerical_grad(self.f, (), (self.x, y))
@attr.gpu
def test_invalid_mixed(self):
y = cuda.to_gpu(self.y)
with self.assertRaises(RuntimeError):
gradient_check.numerical_grad(self.f, (self.x,), (y,))
class NumericalGradientEpsTest(unittest.TestCase):
def setUp(self):
self.x = numpy.array(0.0, dtype=numpy.float32)
self.y = numpy.array(1.0, dtype=numpy.float32)
def check_different_eps(self, x, y):
def f():
if -1 < x < 1:
return x.copy(),
elif -2 < x < 2:
return 2 * x,
else:
return 0,
gx, = gradient_check.numerical_grad(f, (x,), (y,), eps=0.5)
self.assertEqual(gx, 1.)
gx, = gradient_check.numerical_grad(f, (x,), (y,), eps=1.5)
self.assertEqual(gx, 2.)
gx, = gradient_check.numerical_grad(f, (x,), (y,), eps=2.5)
self.assertEqual(gx, 0.)
def test_differenct_eps_cpu(self):
self.check_different_eps(self.x, self.y)
@attr.gpu
def test_differenct_eps_gpu(self):
self.check_different_eps(cuda.to_gpu(self.x), cuda.to_gpu(self.y))
default_eps = 1e-3
# `result`: True if `func` is non-differentiable on `x`
@testing.parameterize(*[
{'func': 'zero', 'x': [-100.], 'result': False},
{'func': 'zero', 'x': [100.], 'result': False},
{'func': 'zero', 'x': [0.], 'result': False},
{'func': 'zero', 'x': [default_eps / 10], 'result': False},
{'func': 'zero', 'x': numpy.random.normal(size=(3, 2)), 'result': False},
{'func': 'zero', 'x': numpy.random.normal(size=()), 'result': False},
{'func': 'linear', 'x': [-100.], 'result': False},
{'func': 'linear', 'x': [100.], 'result': False},
{'func': 'linear', 'x': [0.], 'result': False},
{'func': 'linear', 'x': numpy.random.normal(size=(3, 2)), 'result': False},
{'func': 'linear', 'x': numpy.random.normal(size=()), 'result': False},
# (Invalid input domain)
{'func': 'linear', 'x': [numpy.inf], 'result': False,
'ignore_warning': RuntimeWarning},
{'func': 'quadratic', 'x': [-100.], 'result': False},
{'func': 'quadratic', 'x': [100.], 'result': False},
{'func': 'quadratic', 'x': [0.], 'result': False},
{'func': 'cubic', 'x': [-100.], 'result': False},
{'func': 'cubic', 'x': [100.], 'result': False},
{'func': 'cubic', 'x': [0.], 'result': False},
# Too large epsilon
{'func': 'cubic', 'x': [0.], 'eps': 1e-1, 'result': True},
{'func': 'abs', 'x': [0.], 'result': True},
{'func': 'abs', 'x': [[3, 1], [0, 2]], 'result': True},
{'func': 'abs', 'x': [default_eps * 0.8], 'result': True},
{'func': 'abs', 'x': [-default_eps * 0.8], 'result': True},
{'func': 'abs', 'x': [default_eps * 1.2], 'result': False},
{'func': 'abs', 'x': [-default_eps * 1.2], 'result': False},
{'func': 'abs', 'x': [100.], 'result': False},
{'func': 'abs', 'x': [-100.], 'result': False},
{'func': 'step', 'x': [0.], 'result': True},
{'func': 'step', 'x': [default_eps * 0.8], 'result': True},
{'func': 'step', 'x': [-default_eps * 0.8], 'result': True},
{'func': 'step', 'x': [default_eps * 1.2], 'result': False},
{'func': 'step', 'x': [-default_eps * 1.2], 'result': False},
{'func': 'step', 'x': [100.], 'result': False},
{'func': 'step', 'x': [-100.], 'result': False},
{'func': 'clip', 'x': [0.], 'result': True},
{'func': 'clip', 'x': [1.], 'result': True},
{'func': 'clip', 'x': [0.5], 'result': False},
{'func': 'floor', 'x': [0.], 'result': True},
{'func': 'floor', 'x': [100 + default_eps * 0.8], 'result': True},
{'func': 'floor', 'x': [100 - default_eps * 0.8], 'result': True},
{'func': 'floor', 'x': [100 + default_eps * 1.2], 'result': False},
{'func': 'floor', 'x': [100 - default_eps * 1.2], 'result': False},
{'func': 'exp', 'x': [-100], 'result': False},
{'func': 'exp', 'x': [0.], 'result': False},
{'func': 'exp', 'x': [13.], 'result': False},
{'func': 'log', 'x': [100.], 'result': False},
# (Smaller epsilon is required because slope is steep)
{'func': 'log', 'x': [1e-3], 'eps': 1e-6, 'result': False},
{'func': 'log', 'x': [0.], 'result': True,
'ignore_warning': RuntimeWarning},
# (Invalid input domain)
{'func': 'log', 'x': [-10.], 'result': False,
'ignore_warning': RuntimeWarning},
{'func': 'tan', 'x': [default_eps * 1.2], 'result': False},
{'func': 'tan', 'x': [default_eps * 0.8], 'result': False},
{'func': 'tan', 'x': [math.pi / 2], 'result': True},
{'func': 'tan', 'x': [-math.pi / 2], 'result': True},
{'func': 'tan', 'x': [3 * math.pi / 2], 'result': True},
{'func': 'tan', 'x': [3 * math.pi / 2 + default_eps * 0.8],
'result': True},
{'func': 'tan', 'x': [3 * math.pi / 2 - default_eps * 0.8],
'result': True},
# (Smaller epsilon is required because slope is steep)
{'func': 'tan', 'x': [3 * math.pi / 2 + 1e-3], 'eps': 1e-6,
'result': False},
# (Smaller epsilon is required because slope is steep)
{'func': 'tan', 'x': [3 * math.pi / 2 - 1e-3], 'eps': 1e-6,
'result': False},
{'func': 'nan_segment', 'x': [0.], 'result': False},
{'func': 'nan_segment', 'x': [-1.], 'result': True},
{'func': 'nan_segment', 'x': [1.], 'result': True},
])
class NumericalGradientDetectNondifferentiableTest(unittest.TestCase):
def setUp(self):
self.eps = getattr(self, 'eps', default_eps)
self.ignore_warning = getattr(self, 'ignore_warning', None)
def _func_zero(self, x):
xp = cuda.get_array_module(x)
return xp.zeros_like(x),
def _func_linear(self, x):
return 2 * x,
def _func_quadratic(self, x):
return x * x + 2.,
def _func_cubic(self, x):
return -3 * x ** 3 + 2 * x ** 2 + 1,
def _func_abs(self, x):
return abs(x),
def _func_step(self, x):
xp = cuda.get_array_module(x)
y = xp.zeros_like(x)
y[x > 0] = 1
return y,
def _func_clip(self, x):
y = x.clip(0, 1)
return y,
def _func_floor(self, x):
xp = cuda.get_array_module(x)
return xp.floor(x),
def _func_exp(self, x):
xp = cuda.get_array_module(x)
return xp.exp(x),
def _func_log(self, x):
xp = cuda.get_array_module(x)
return xp.log(x),
def _func_tan(self, x):
xp = cuda.get_array_module(x)
return xp.tan(x),
def _func_nan_segment(self, x):
xp = cuda.get_array_module(x)
y = xp.ones_like(x)
y[-1 < x < 1] = numpy.nan
return y,
def check_positive(self, xp, func_name, inputs, eps, nout):
# Should be non-differentiable
func = getattr(self, '_func_{}'.format(func_name))
grad_outputs = [
xp.random.uniform(-1, 1, _.shape).astype(_.dtype) for _ in inputs]
def f():
return func(*inputs) * nout
try:
gradient_check.numerical_grad(
f, inputs, grad_outputs, eps=eps,
detect_nondifferentiable=True)
except gradient_check.NondifferentiableError:
pass
else:
raise AssertionError(
'Function `{}` is expected to be non-differentiable, '
'but determined to be differentiable.\n\n'
'eps: {}\n'
'inputs: {}\n'
'xp: {}\n'
''.format(
func_name, eps, inputs, xp.__name__))
def check_negative(self, xp, func_name, inputs, eps, nout):
# Should be differentiable
func = getattr(self, '_func_{}'.format(func_name))
grad_outputs = [
xp.random.uniform(-1, 1, _.shape).astype(_.dtype) for _ in inputs]
def f():
return func(*inputs) * nout
try:
gradient_check.numerical_grad(
f, inputs, grad_outputs, eps=eps,
detect_nondifferentiable=True)
except gradient_check.NondifferentiableError as e:
raise AssertionError(
'Function `{}` is expected to be differentiable, '
'but determined to be non-differentiable.\n\n'
'eps: {}\n'
'inputs: {}\n'
'xp: {}\n\n'
'{}: {}'
''.format(
func_name, eps, inputs, xp.__name__,
e.__class__.__name__, e))
def check(self, xp, nout):
inputs = [xp.asarray(self.x).astype(numpy.float32)]
with warnings.catch_warnings():
if self.ignore_warning:
warnings.simplefilter('ignore', self.ignore_warning)
if self.result:
self.check_positive(xp, self.func, inputs, self.eps, nout)
else:
self.check_negative(xp, self.func, inputs, self.eps, nout)
def test_cpu(self):
self.check(numpy, 1)
@attr.gpu
def test_gpu(self):
self.check(cuda.cupy, 1)
def test_2_outputs_cpu(self):
self.check(numpy, 2)
@attr.gpu
def test_2_outputs_gpu(self):
self.check(cuda.cupy, 2)
class AssertAllCloseTest(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
self.y = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
def check_identical(self, x):
testing.assert_allclose(x, x, atol=0, rtol=0)
@condition.repeat(5)
def test_identical_cpu(self):
self.check_identical(self.x)
@condition.repeat(5)
@attr.gpu
def test_identical_gpu(self):
self.check_identical(cuda.to_gpu(self.x))
def check_atol(self, x, y):
x_cpu = cuda.to_cpu(x)
y_cpu = cuda.to_cpu(y)
max_abs_diff = numpy.max(numpy.abs(x_cpu - y_cpu))
with self.assertRaises(AssertionError):
testing.assert_allclose(x, y, atol=max_abs_diff - 1, rtol=0)
testing.assert_allclose(x, y, atol=max_abs_diff + 1, rtol=0)
@condition.repeat(5)
def test_atol_cpu(self):
self.check_atol(self.x, self.y)
@condition.repeat(5)
@attr.gpu
def test_atol_gpu(self):
self.check_atol(cuda.to_gpu(self.x), cuda.to_gpu(self.y))
class AssertAllCloseTest2(unittest.TestCase):
def setUp(self):
        self.x = numpy.random.uniform(-1, 1, (2, 3))
#This script contains a library of functions that use various open source statistical and geospatial
#software packages to ease basic raster processing and modeling procedures
#This script was written with funding from a USDA Forest Service Remote Sensing
#Steering Commmittee project that used thermal data to model percent impervious
#This script was written by <NAME> at the Forest Service Remote Sensing Applications Center
#<EMAIL>
###############################################################################
#Import all necessary packages
import shutil, os, subprocess, sys, string, random, math, time, itertools, zipfile,gzip
# import urllib.request, urllib.parse, urllib.error,
##from scipy.optimize import curve_fit
import scipy
from scipy import stats
from multiprocessing import Pool,Process
import multiprocessing
###############################################################################
#Set based on whether R is needed
needs_r = False
###############################################################################
cwd = os.getcwd().replace('\\','/') + '/'
###############################################################################
#Set Python version and home directory
# python_possibilities = {'C:\\Python27\\ArcGIS10.3': [27, 10.3],'C:\\Python27\\ArcGIS10.2': [27, 10.2],'C:\\Python27\\ArcGIS10.1': [27, 10.1],'C:\\Python26\\ArcGIS10.0': [26, 10], 'C:\\Python26': [26, 9.3],'C:\\Python25' : [25, 9.3]}
# for possibility in python_possibilities:
# if os.path.exists(possibility):
# arc_version = python_possibilities[possibility][1]
# python_version = python_possibilities[possibility][0]
# python_dir = possibility
# #break
# ###############################################################################
# #Set up the gdal data and bin environment variable names
# #Set up the gdal data and bin environment variable names
# site_packages_dir = python_dir + '/Lib/site-packages/'
# def setup_gdal_dirs():
# global gdal_bin_dir, gdal_data_dir, gdal, gdal_array, osr, ogr, gdalconst, path
# gdal_data_options = [site_packages_dir + 'gdalwin32-1.6/data', site_packages_dir + 'gdalwin32-1.9/bin/gdal-data']
# gdal_bin_options = [site_packages_dir + 'gdalwin32-1.6/bin', site_packages_dir + 'gdalwin32-1.9/bin']
# gdal_data_dir = ''
# gdal_bin_dir = ''
# for data_option in gdal_data_options:
# if os.path.exists(data_option):
# gdal_data_dir = data_option
# for bin_option in gdal_bin_options:
# if os.path.exists(bin_option):
# gdal_bin_dir = bin_option
# path = os.environ.get('PATH')
# if path[-1] != ';':
# path += ';'
# if gdal_data_dir != '':
# # print 'Updating GDAL_DATA path variable'
# os.putenv('GDAL_DATA', gdal_data_dir)
# if gdal_bin_dir != '':
# # print 'Updating path with GDAL bin location'
# path = path + gdal_bin_dir
# os.putenv('PATH',path)
# #print os.environ.get('PATH')
# os.chdir('c:\\windows\\system32')
# os.chdir(cwd)
# setup_gdal_dirs()
from osgeo import gdal
from osgeo import gdal_array
from osgeo import osr, ogr
from osgeo import gdalconst
###############################################################################
#Let user know what the directories are
#(Arc version does not necessarily mean that Arc is installed)
# print 'Arc version:',arc_version
# print 'Python version:', python_version
# print 'Python dir:', python_dir
# print 'GDAL bin:', gdal_bin_dir
# print 'GDAL data:', gdal_data_dir
# python_version_dec = str(float(python_version)/10)
# python_version = str(python_version)
# admin = False
# #############################################################################
# #Find the program files dir
# program_files_dir_options = ['C:/Program Files (x86)/', 'C:/Program Files/']
# for option in program_files_dir_options:
# if os.path.exists(option):
# program_files_dir = option
# break
# print 'Program files dir:', program_files_dir
#############################################################################
#Set up the gdal directory
gdal_dir = ''
program_files_dir = ''#program_files_dir + 'FWTools2.4.7/bin/'
# if os.path.exists(gdal_dir) == False:
# gdal_dir = cwd + 'FWTools2.4.7/bin/'
# if os.path.exists(python_dir)==False:
# print 'Python version:', python_version, 'Arc version:', arc_version,'does not exist'
# raw_input('Press enter to exit')
# sys.exit()
#############################################################################
#Import some more packages
try:
from tarfile import TarFile
except:
import tarfile
import tarfile
# from tkFileDialog import askopenfilename
# from tkFileDialog import askopenfilenames
# from tkFileDialog import askdirectory
# from tkSimpleDialog import askstring
# from tkMessageBox import showwarning
# import tkMessageBox
# from tkFileDialog import asksaveasfilename
################################################################
#Image format driver dictionary
format_dict = {'.tif': 'GTiff', '.img' : 'HFA', '.jpg' : 'JPEG', '.gif' : 'GIF', '.grid' : 'AAIGrid', '.hdr': 'envi', '': 'envi','.ntf': 'NITF','.vrt':'VRT'}
formats_dict = format_dict
################################################################
#From: https://www.geeksforgeeks.org/python-remove-duplicates-list/
def Remove(duplicate):
final_list = []
for num in duplicate:
if num not in final_list:
final_list.append(num)
return final_list
################################################################
def fast_copy(from_file,to_file):
print(('Copying: '+to_file))
cp = open(from_file,'rb').read()
with open(to_file,'wb') as f:f.write(cp)
#Adapted From: https://www.tutorialspoint.com/How-to-zip-a-folder-recursively-using-Python
def zipdir(in_dir, out_zip):
zipf = zipfile.ZipFile(out_zip, 'w', zipfile.ZIP_DEFLATED,allowZip64=True)
    # zipf is the zipfile handle
for root, dirs, files in os.walk(in_dir):
# print(root,dirs)
if len(files) > 0:
for file in files:
print(('Zipping',os.path.join(root, file).split(in_dir)[1]))
zipf.write(os.path.join(root, file),os.path.join(root, file).split(in_dir)[1])
zipf.close()
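# Hedged usage sketch (paths are hypothetical): recursively zip a folder, storing
# entries relative to the input directory.
# >>> zipdir('C:/some_project/outputs/', 'C:/some_project/outputs.zip')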
def smart_unzip(Zip, base_dir = ''):
z = zipfile.ZipFile(Zip)
if base_dir == '':
base_dir = cwd
if base_dir[-1] != '/':
base_dir += '/'
for f in z.namelist():
fbd = base_dir + f
if f.endswith('/') and os.path.exists(fbd) == False:
print(('Extracting', f))
os.makedirs(fbd)
elif os.path.isdir(f) == False and os.path.exists(fbd) == False:
print(('Extracting', f))
z.extract(f, base_dir)
def smart_zip(files,out_zip):
zf = zipfile.ZipFile(out_zip, mode='w',allowZip64 = True)
for f in files:
print(('Zipping:',base(f)))
zf.write(f,arcname=os.path.basename(f))
zf.close()
################################################################
#Returns Windows version
def get_os_info():
if os.name == 'nt':
ver = os.sys.getwindowsversion()
ver_format = ver[3], ver[0], ver[1]
win_version = {
(1, 4, 0): '95',
(1, 4, 10): '98',
(1, 4, 90): 'ME',
(2, 4, 0): 'NT',
(2, 5, 0): '2000',
(2, 5, 1): 'XP',
(2, 5, 2): '2003',
(2, 6, 1): '7'
}
if ver_format in win_version:
wv = win_version[ver_format]
print(('Windows version:', wv))
return win_version[ver_format]
else:
return '8?'
################################################################
#Define a function that can install various packages over the internet
# def install(package_name, cleanup = False):
# install_packages = {'dbfpy':['http://sourceforge.net/projects/dbfpy/files/dbfpy/2.2.5/dbfpy-2.2.5.win32.exe/download', 'dbfpy-2.2.5.win32.exe'],
# 'numpy': ['http://sourceforge.net/projects/numpy/files/NumPy/1.6.1/numpy-1.6.1-win32-superpack-python'+python_version_dec+'.exe/download','numpy-1.6.1-win32-superpack-python'+python_version_dec+'.exe'],
# 'gdal' : ['http://pypi.python.org/packages/'+python_version_dec+'/G/GDAL/GDAL-1.6.1.win32-py'+python_version_dec+'.exe#md5=5e48c85a9ace1baad77dc26bb42ab4e1','GDAL-1.6.1.win32-py'+python_version_dec+'.exe'],
# 'rpy2' : ['http://pypi.python.org/packages/'+python_version_dec+'/r/rpy2/rpy2-2.0.8.win32-py'+python_version_dec+'.msi#md5=2c8d174862c0d132db0c65777412fe04','rpy2-2.0.8.win32-py'+python_version_dec+'.msi'],
# 'r11' : ['http://cran.r-project.org/bin/windows/base/old/2.11.1/R-2.11.1-win32.exe', 'R-2.11.1-win32.exe'],
# 'r12' : ['http://cran.r-project.org/bin/windows/base/old/2.12.1/R-2.12.1-win.exe', 'R-2.12.1-win32.exe'],
# 'fw_tools' : ['http://home.gdal.org/fwtools/FWTools247.exe', 'FWTools247.exe'],
# 'numexpr' :['https://code.google.com/p/numexpr/downloads/detail?name=numexpr-1.4.2.win32-py'+python_version_dec+'.exe&can=2&q=','numexpr-1.4.2.win32-py'+python_version_dec+'.exe'],
# 'matplotlib' : ['http://sourceforge.net/projects/matplotlib/files/matplotlib/matplotlib-1.1.1/matplotlib-1.1.1.win32-py'+python_version_dec+'.exe/download', 'matplotlib-1.1.1rc.win32-py2.6.exe'],
# 'scipy' : ['http://sourceforge.net/projects/scipy/files/scipy/0.10.1/scipy-0.10.1-win32-superpack-python'+python_version_dec+'.exe', 'scipy-0.10.1-win32-superpack-python'+python_version_dec+'.exe'],
# 'gdalwin32':['http://download.osgeo.org/gdal/win32/1.6/gdalwin32exe160.zip', 'ggdalwin32exe160.zip'],
# 'pywin32' : ['http://sourceforge.net/projects/pywin32/files/pywin32/Build%20217/pywin32-217.win32-py'+python_version_dec+'.exe','pywin32-217.win32-py'+python_version_dec+'.exe'],
# 'pil' : ['http://effbot.org/downloads/PIL-1.1.7.win32-py'+python_version_dec+'.exe', 'PIL-1.1.7.win32-py'+python_version_dec+'.exe'],
# 'basemap' : ['http://sourceforge.net/projects/matplotlib/files/matplotlib-toolkits/basemap-1.0.5/basemap-1.0.5.win32-py'+python_version_dec+'.exe', 'basemap-1.0.5.win32-py'+python_version_dec+'.exe']
# }
# #extensions = {'dbfpy':'.exe','numpy': '.exe','gdal' : '.exe','rpy2' : '.msi'}
# #Finds the url and .exe name from the install_packages dictionary
# url = install_packages[package_name][0]
# exe = cwd + '/'+install_packages[package_name][1]
# #Downloads the executable
# if os.path.exists(exe) == False:
# print(('Downloading', os.path.basename(exe)))
# File = urllib.request.urlretrieve(url, exe)
# #If it's not a zip file, it tries to run it, first as a .exe, and then as .msi
# if os.path.splitext(exe)[1] != '.zip':
# print(('Installing', package_name))
# try:
# call = subprocess.Popen(exe)
# except:
# print(('Running .msi', exe))
# call = subprocess.Popen('msiexec /i ' + os.path.basename(exe))
# call.wait()
# else:
# print ('its a zip')
# smart_unzip(exe)
# if cleanup == True:
# try:
# os.remove(exe)
# except:
# print(('Could not remove:', os.path.basename(exe)))
################################################################
#Function to install any r packages
#Can provide a single name of a library or a list of names of libraries to install
def r_library_installer(lib_list = '', cran = 'local({r <- getOption("repos")\nr["CRAN"] <- "http://cran.stat.ucla.edu"\noptions(repos=r)})', guiable = True):
if lib_list == '':
lib_list = askstring('Message','Please enter an r library to install')
r(cran)
if lib_list != list:
lib_list = [lib_list]
for lib in lib_list:
print(('Installing:', lib))
try:
r('install.packages("' + lib + '")')
except:
print(('Could not install:', lib))
#########################################################################
##
##try:
##
## path = os.environ.get('PATH')
## if path[-1] != ';':
## path += ';'
##
## path = path + gdal_bin_dir
## print path
## os.putenv('GDAL_DATA', gdal_data_dir)
## os.putenv('PATH',path)
## print os.environ.get('GDAL_DATA')
## os.chdir('c:\\windows\\system32')
## from osgeo import gdal
## from osgeo import gdal_array
## from osgeo import osr, ogr
## from osgeo import gdalconst
## os.chdir(cwd)
##
##except:
##
## admin = tkMessageBox.askyesno('Administrator','Are you an administrator?')
## if admin:
## # install('gdalwin32')
## install('gdal')
## install('pywin32')
## install('numpy')
## path = os.environ.get('PATH')
## if path[-1] != ';':
## path += ';'
##
## path = path + python_dir+'\\Lib\\site-packages\\gdalwin32-1.6\\bin'
## os.putenv('GDAL_DATA',python_dir+'\\Lib\\site-packages\\gdalwin32-1.6\\data')
## os.putenv('PATH',path)
##
## try:
## from osgeo import gdal
## from osgeo import gdal_array
## from osgeo import osr, ogr
## except:
## print 'Installation of gdal/osgeo was unsuccessful'
## print 'Please search for GDAL-1.6.1.win32-py'+python_version_dec+'.exe and manually install'
## raw_input('Press enter to exit')
## sys.exit()
## else:
## tkMessageBox.showinfo('Administrator','You must be administrator to install the software.')
## sys.exit()
##################################################################
try:
import numpy
from numpy import numarray
try:
from numpy.numarray import nd_image
except:
no_nd = False
# print 'Cannot use nd_image functions'
except:
install('numpy')
try:
import numpy
from numpy import numarray
from numpy.numarray import nd_image
except:
print('Installation of numpy was unsuccessful')
print('Please search for numpy and manually install')
input('Press enter to exit')
sys.exit()
##################################################################
try:
from dbfpy import dbf
except Exception as e:
# install('dbfpy')
print('Could not import dbfpy')
print(e)
# try:
# from dbfpy import dbf
# print('Successfully installed dbfpy')
# except:
# print('Installation of dbfpy was unsuccessful')
# print('Please search for dbfpy and manually install')
# input('Press enter to exit')
##################################################################
# if os.path.exists(gdal_dir) == False:
# install('fw_tools')
####################################################################
if needs_r:
try:
os.chdir(program_files_dir + '/R/R-2.11.1/bin')
path = os.getenv('PATH')
if path[-1] != ';':
path += ';'
r_home = program_files_dir.replace('/', '\\') + 'R\\R-2.11.1'
win32com_path = python_dir + '\\Lib\\site-packages\\win32'
sys.path.append(win32com_path)
path = path + r_home
os.putenv('PATH',path)
os.putenv('R_HOME',r_home)
#os.putenv('Rlib',os.path.split(r_home)[0] + '\\library')
print(('r_home:',r_home))
print(( os.getenv('R_HOME')))
import rpy2.robjects as RO
import rpy2.robjects.numpy2ri
r = RO.r
os.chdir(cwd)
except:
print( 'Could not find rpy2')
if admin == False:
admin = tkMessageBox.askyesno('Administrator','Are you an administrator?')
if admin:
install('r11')
install('rpy2')
try:
import rpy2.robjects as RO
import rpy2.robjects.numpy2ri
r = RO.r
lib_list = ['rgdal', 'raster', 'maptools', 'randomForest']
for lib in lib_list:
try:
r.library(lib)
except:
print(( 'Installing:', lib))
r_library_installer([lib])
except:
print( 'Installation of rpy2 was unsuccessful')
print( 'Please search for rpy2 and manually install')
input('Press enter to exit')
sys.exit()
else:
tkMessageBox.showinfo('Administrator','You must be administrator to install the software.')
#sys.exit()
################################################################
try:
print( 'Importing rscript')
from Rscript import *
## r_dir = program_files_dir + 'R/R-2.12.1/bin/'
## if os.path.exists(r_dir) == False:
## warning = showwarning('!!!MUST READ!!!!', 'In the "Select Additional Tasks" window, ensure that the "Save version number in registry" option is unchecked\n'\
## 'The script will not run properly if left checked')
## r1 = R()
## r1 = None
except:
print( 'Cannot use Rscript module')
################################################################
def check_zero_start(in_number,break_numbers = [100,10]):
ns = str(in_number)
for bn in break_numbers:
if float(in_number) < bn:
ns = '0' + ns
return ns
def possible_compositing_periods(period_length = 8):
ocps = []
just_starts = []
cps = list(range(1,365,period_length))
for cp in cps:
cps1 = check_zero_start(cp,[100,10])
cps2 = check_zero_start(cp + 15,[100,10])
ocps.append([cps1,cps2])
just_starts.append(cps1)
return ocps, just_starts
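# Illustration (derived from the defaults above, not from the original source): with
# period_length=8 the first pairs returned are ['001', '016'] and ['009', '024'],
# i.e. the zero-padded start day and start day + 15 for each 8-day compositing period.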
def now(Format = "%b %d %Y %H:%M:%S %a"):
import datetime
today = datetime.datetime.today()
s = today.strftime(Format)
d = datetime.datetime.strptime(s, Format)
return d.strftime(Format)
def date_modified(File):
import datetime
#return time.ctime(os.path.getmtime(File))
(mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime) = os.stat(File)
return mtime, datetime.datetime.strptime(time.ctime(ctime), '%a %b %d %H:%M:%S %Y')
def year_month_day_to_seconds(year_month_day):
import datetime, calendar
ymd = year_month_day
return calendar.timegm(datetime.datetime(ymd[0], ymd[1], ymd[2]).timetuple())
def milliseconds_to_str_date_time(milliseconds):
import datetime
return datetime.datetime.fromtimestamp(int(milliseconds)/1000.0).strftime('%Y-%m-%d_%H:%M:%S.%f')
#######################################
geog_crs = '+proj=longlat +datum=NAD83 +no_defs'
web_mercator_crs ='+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs'
geog_crss = [geog_crs,web_mercator_crs]
albers_crs = '+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=23 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs +towgs84=0,0,0'
################################################################
#Function to load r library using rpy2
#Calls on installer if the library does not exist
def r_library_loader(lib_list = '', guiable = True):
if lib_list == '':
lib_list = askstring('Message','Please enter an r library to load')
if type(lib_list) != list:
lib_list = [lib_list]
for lib in lib_list:
try:
print(( 'Loading:', lib))
r.library(lib)
except:
print(('Installing:', lib))
r_library_installer(lib)
print()
print(('Loading:', lib))
r.library(lib)
################################################################
#Status bar function that prints the percent of the total list that has been looked at
#last variable must be seeded and then passed in within the loop in order to prevent printing due to rounding
def status_bar(current_index, list_length, percent_interval = 5, last = 0):
divisor = 100/ percent_interval
interval = list_length / divisor
percent_list = list(range(0, 100, percent_interval))
current_percent = int(float(current_index)/ float(list_length) * 100)
if int(current_percent) in percent_list and current_percent != last:
last = current_percent
print((str(current_percent) +'%'))
return last
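# Hedged usage sketch (my_items is a hypothetical list): seed `last` outside the loop
# and pass it back in, as the comment above describes, so rounding cannot reprint the
# same percentage.
# >>> last = 0
# >>> for i, item in enumerate(my_items):
# ...     last = status_bar(i, len(my_items), percent_interval=10, last=last)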
############################################################################################
def sort(in_list, order = list(range(1000)), num_break = '_', num_place = 0, simple = True):
out_list = []
for num in order:
for entry in in_list:
if simple == False:
if str(num) == entry.split(num_break)[num_place]:
out_list.append(entry)
else:
if int(num) == int(entry):
out_list.append(int(entry))
return out_list
##############################################################################################
def sort_by_column(in_2d_list, order = list(range(1000)), column_no = 0):
out_list = []
for num in order:
for entry in in_2d_list:
if int(num) == int(entry[column_no]):
out_list.append(entry)
return out_list
############################################################################
def invert_list(in_list):
out_list = [0] * len(in_list)
for i in range(1, len(in_list)):
out_list[-i] = in_list[i]
out_list.pop(0)
out_list.append(in_list[0])
return out_list
############################################################################
def collapse(in_list):
out_list = []
for i in in_list:
if type(i) == list:
for i2 in i:
out_list.append(i2)
else:
out_list.append(i)
return out_list
############################################################################
def unique_count(in_list):
set_list = list(set(in_list))
out_list = []
for part in set_list:
counter = 0
for line in in_list:
if line == part:
counter += 1
out_list.append([part, counter])
out_list = invert_list(sort_by_column(out_list, column_no = 1))
return out_list
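# Illustration (a sketch, not from the original source): unique_count returns
# [value, count] pairs ordered from most to least frequent, e.g.
# >>> unique_count(['a', 'b', 'a'])
# [['a', 2], ['b', 1]]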
########################################################################################
#Function to convert a specified column from a specified dbf file into a list
#e.g. dbf_to_list(some_dbf_file, integer_column_number)
def dbf_to_list(dbf_file, field_name):
if os.path.splitext(dbf_file)[1] == '.shp':
dbf_file = os.path.splitext(dbf_file)[0] + '.dbf'
    #The next error case is handled within an if statement
    #This occurs if a non-.dbf file was entered
    #First it finds whether the extension is not a .dbf by splitting the extension out
if os.path.splitext(dbf_file)[1] != '.dbf':
#Then the user is prompted with what occured and prompted to exit as above
print('Must input a .dbf file')
print(('Cannot compile ' + dbf_file))
input('Press enter to continue')
sys.exit()
#Finally- the actual function code body
#First the dbf file is read in using the dbfpy Dbf function
db = dbf.Dbf(dbf_file)
#Db is now a dbf object within the dbfpy class
#Next find out how long the dbf is
rows = len(db)
#Set up a list variable to write the column numbers to
out_list = []
#Iterate through each row within the dbf
for row in range(rows):
#print row
#Add each number in the specified column number to the list
out_list.append(db[row][field_name])
db.close()
#Return the list
#This makes the entire function equal to the out_list
return out_list
################################################################
############################################################################
#Converts utm coordinates to geographic coordinates
#Code not written at RSAC
#Code source: http://stackoverflow.com/questions/343865/how-to-convert-from-utm-to-latlng-in-python-or-javascript
#Code foundation: http://www.ibm.com/developerworks/java/library/j-coordconvert/index.html
def utm_to_geog(zone = '', easting = '', northing = '', northernHemisphere=True, guiable = True, echo = False):
if zone == '':
zone = int(askstring('Zone Entry','Enter UTM zone number: '))
if easting == '':
easting = float(askstring('Easting Entry','Enter UTM easting: '))
if northing == '':
northing = float(askstring('Northing Entry','Enter UTM northing: '))
if not northernHemisphere:
northing = 10000000 - northing
northing = float(northing)
easting = float(easting)
zone = int(zone)
a = 6378137
e = 0.081819191
e1sq = 0.006739497
k0 = 0.9996
arc = northing / k0
mu = arc / (a * (1 - math.pow(e, 2) / 4.0 - 3 * math.pow(e, 4) / 64.0 - 5 * math.pow(e, 6) / 256.0))
ei = (1 - math.pow((1 - e * e), (1 / 2.0))) / (1 + math.pow((1 - e * e), (1 / 2.0)))
ca = 3 * ei / 2 - 27 * math.pow(ei, 3) / 32.0
cb = 21 * math.pow(ei, 2) / 16 - 55 * math.pow(ei, 4) / 32
cc = 151 * math.pow(ei, 3) / 96
cd = 1097 * math.pow(ei, 4) / 512
phi1 = mu + ca * math.sin(2 * mu) + cb * math.sin(4 * mu) + cc * math.sin(6 * mu) + cd * math.sin(8 * mu)
n0 = a / math.pow((1 - math.pow((e * math.sin(phi1)), 2)), (1 / 2.0))
r0 = a * (1 - e * e) / math.pow((1 - math.pow((e * math.sin(phi1)), 2)), (3 / 2.0))
fact1 = n0 * math.tan(phi1) / r0
_a1 = 500000 - easting
dd0 = _a1 / (n0 * k0)
fact2 = dd0 * dd0 / 2
t0 = math.pow(math.tan(phi1), 2)
Q0 = e1sq * math.pow(math.cos(phi1), 2)
fact3 = (5 + 3 * t0 + 10 * Q0 - 4 * Q0 * Q0 - 9 * e1sq) * math.pow(dd0, 4) / 24
fact4 = (61 + 90 * t0 + 298 * Q0 + 45 * t0 * t0 - 252 * e1sq - 3 * Q0 * Q0) * math.pow(dd0, 6) / 720
lof1 = _a1 / (n0 * k0)
lof2 = (1 + 2 * t0 + Q0) * math.pow(dd0, 3) / 6.0
lof3 = (5 - 2 * Q0 + 28 * t0 - 3 * math.pow(Q0, 2) + 8 * e1sq + 24 * math.pow(t0, 2)) * math.pow(dd0, 5) / 120
_a2 = (lof1 - lof2 + lof3) / math.cos(phi1)
_a3 = _a2 * 180 / math.pi
latitude = 180 * (phi1 - fact1 * (fact2 + fact3 + fact4)) / math.pi
if not northernHemisphere:
latitude = -latitude
longitude = ((zone > 0) and (6 * zone - 183.0) or 3.0) - _a3
if echo == True:
print(( 'Latitude:', latitude))
print(( 'Longitude:', longitude))
return latitude, longitude
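# Hedged usage sketch (coordinate values are made up for illustration):
# >>> lat, lon = utm_to_geog(zone=12, easting=450000.0, northing=4430000.0)
# returns decimal-degree latitude and longitude for a northern-hemisphere point.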
##############################################################################################
#Converts utm coord list (xmin, ymin, xmax, ymax) to geographic based on a specified zone
def utm_coords_to_geographic(coords, zone):
coords1 = utm_to_geog(zone, coords[0], coords[1])
coords2 = utm_to_geog(zone, coords[2], coords[3])
out = [coords1[1], coords1[0], coords2[1], coords2[0]]
return out
##############################################################################################
def batch_utm_to_geog(zone, coord_list):
out_list = []
for coords in coord_list:
lat, lon = utm_to_geog(zone, coords[0], coords[1])
out_list.append([lon, lat])
return out_list
##############################################################################################
#Converts between Numpy and GDAL data types
#Will automatically figure out which direction it must go (numpy to gdal or gdal to numpy)
def dt_converter(dt):
Dict = {'u1': 'Byte', 'uint8' : 'Byte', 'uint16': 'UInt16','u2': 'UInt16', 'u4': 'UInt32', 'i2' : 'Int16','i4':'Int32', 'int16':'Int16', 'Float32' : 'float32','float32' : 'Float32', 'Float64' : 'float64','float64' : 'Float64'}
try:
Type = Dict[dt]
except:
Dict = dict([[a[1], a[0]] for a in iter(Dict.items())])
Type = Dict[dt]
return Type
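#Illustrative examples of the mapping above (no GDAL objects needed):
##dt_converter('u1') #returns 'Byte' (numpy to gdal)
##dt_converter('Byte') #not a key, so the reversed dictionary is used and a numpy-style code is returned ('uint8' here, since the last duplicate key wins)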
##############################################################################################
#Finds the data type of an image
#Returns the gdal data type
def dt_finder(image):
rast = gdal.Open(image)
band1 = rast.GetRasterBand(1)
dt = band1.DataType
rast = None
band1 = None
Dict = {1: 'Byte', 2 : 'UInt16', 3: 'Int16', 4: 'UInt32', 5: 'Int32', 6 : 'Float32'}
dataType = Dict[dt]
return dataType
##############################################################################################
#Converts between common projection formats
#Returns a dictionary containing the wkt and proj4 formats
def projection_format_converter(projection, Format = 'Wkt'):
spatialRef = osr.SpatialReference()
eval('spatialRef.ImportFrom'+Format+'(projection)')
proj4 = spatialRef.ExportToProj4()
wkt = spatialRef.ExportToWkt()
# xml = spatialRef.ExportToXML()
# pretty_wkt = spatialRef.ExportToPrettyWkt()
# pci = spatialRef.ExportToPCI()
# usgs = spatialRef.ExportToUSGS()
#epsg = spatialRef.Export
return {'proj4' : proj4, 'wkt': wkt, 'spatialRef' : spatialRef}#,'xml':xml,'pretty_wkt':pretty_wkt,'pci':pci,'usgs':usgs}
def reverseDictionary(Dict):
return dict([[a[1], a[0]] for a in iter(Dict.items())])
##############################################################################################
#Buffers coordinates a specified distance
#Input must be projected, but can produce geographic coordinates with UTM input
def buffer_coords(coords, Buffer = 1000, geographic = False, zone = ''):
out_coords = [coords[0] - Buffer, coords[1] - Buffer, coords[2] + Buffer, coords[3] + Buffer]
if geographic == True:
out_coords = utm_coords_to_geographic(out_coords, zone)
return out_coords
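#Example (hypothetical projected coords [xmin, ymin, xmax, ymax] in meters):
##buffer_coords([500000, 4500000, 501000, 4501000], Buffer = 500)
##returns [499500, 4499500, 501500, 4501500]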
##############################################################################################
def coords_to_gdal(coords):
return str(coords[0]) + ', ' + str(coords[1]) + ', ' + str(coords[2]) + ', ' + str(coords[3])
##############################################################################################
def coords_to_box_coords(coords):
ul = [coords[0],coords[-1]]
ur = [coords[2],coords[-1]]
lr = [coords[2],coords[1]]
ll = [coords[0],coords[1]]
return [ul,ur,lr,ll]
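#Example: coords_to_box_coords([0, 0, 10, 20]) returns the corners
#[[0, 20], [10, 20], [10, 0], [0, 0]] in ul, ur, lr, ll order (illustrative units)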
##############################################################################################
#Gathers various information about a shapefile and returns it in a dictionary
def shape_info(shapefile, runr = False, small = False):
## r('library(maptools)')
## r('shp = readShapeSpatial("' + shapefile + '")')
## r('bbox = data.frame((summary(shp)[2]))')
## bbox = r('bbox')
## r('print(summary(shp))')
ext =os.path.splitext(shapefile)[1]
if ext != '.shp' and ext in list(formats_dict.keys()):
return raster_info(shapefile)
elif ext != '.shp':
shapefile = os.path.splitext(shapefile)[0] + '.shp'
proj4 = ''
if runr == True:
r('library(rgdal)')
r('shp = readOGR("' + shapefile + '", "' + os.path.splitext(shapefile)[0].split('/')[-1] + '")')
proj4string = r('as.character(proj4string(shp))')
proj4 = str(proj4string[0])
#print proj4
prj_filename = os.path.splitext(shapefile)[0] + '.prj'
shp = ogr.Open(shapefile)
lyr = shp.GetLayerByName(os.path.splitext(shapefile)[0].split('/')[-1])
extent = list(lyr.GetExtent())
numFeatures = lyr.GetFeatureCount()
projection = lyr.GetSpatialRef()
ESRI_projection = projection
xmin = extent[0]
xmax = extent[1]
ymin = extent[2]
ymax = extent[3]
coords = [xmin,ymin,xmax,ymax]
width = xmax - xmin
height = ymax - ymin
gdal_coords = str(xmin) + ' ' + str(ymin) + ' ' + str(xmax) + ' ' + str(ymax)
if os.path.exists(prj_filename):
prj_open = open(prj_filename)
prj = prj_open.readlines()
prj_open.close()
try:
if len(projection) > 1:
projection = prj[0].split('PROJCS["')[1].split('",')[0]
else:
projection = prj
except:
projection = prj
try:
zone = projection.split('Zone')[1].split(',')[0][1:3]
except:
try:
zone = projection.split('zone ')[1][:2]
except:
try:
zone = projection.split('zone=')[1][:2]
except:
if type(projection) == list:
try:
zone = projection[0].split('Zone_')[1][:2]
except:
zone = ''
else:
zone = ''
if projection[:3] == 'NAD':
datum = 'NAD83'
elif projection[:3] == 'WGS':
datum = 'WGS84'
else:
datum = ''
if proj4 == '':
crs = '+proj=utm +zone=' + zone + ' +ellps=' + datum + ' +datum='
proj4 = prj
else:
crs = proj4
if small == False:
projections = projection_format_converter(str(ESRI_projection), 'Wkt')
shp = None
lyr = None
info = {'esri' : projection, 'width': width, 'height': height,'gdal_coords': gdal_coords, 'coords' : coords, 'feature_count': numFeatures, 'zone':zone, 'datum': datum, 'crs':crs, 'projection': projection}
if small == False:
info['proj4'] = projections['proj4']
info['wkt'] = projections['wkt']
return info
##############################################################################################
def xy_list_to_kml(xy_list, kml, zone = '', utm_or_geog = 'utm', lonIndex = 0, latIndex = 1):
ID = os.path.basename(kml)
out_kml = '<?xml version="1.0" encoding="UTF-8"?>\n<kml xmlns="http://www.opengis.net/kml/2.2">\n<Document id="'+ID+'">\n<name>'+ID+'</name>\n'
out_kml += '<Snippet></Snippet>\n<Folder id="FeatureLayer0">\n<name>'+ID+'</name>\n<Snippet></Snippet>\n'
kml = os.path.splitext(kml)[0] + '.kml'
i = 1
for line in xy_list:
x = line[lonIndex]
y = line[latIndex]
if utm_or_geog == 'utm':
coords = utm_to_geog(zone, x, y)
else:
coords = [x, y]
out_kml += '<Placemark>\n<name>'+str(i)+'</name>\n<styleUrl>#IconStyle00</styleUrl>\n<Snippet></Snippet>\n<Point>\n<extrude>0</extrude>\t<altitudeMode>relativeToGround</altitudeMode>\n'
out_kml += '<coordinates> '+str(coords[0])+','+str(coords[1])+',0.000000</coordinates>\n</Point>\n</Placemark>\n'
i += 1
out_kml += '</Folder>\n<Style id="IconStyle00">\n<IconStyle>\n<Icon><href>http://www.google.com/intl/en_us/mapfiles/ms/icons/red-dot.png</href></Icon>\n<scale>1.000000</scale>\n</IconStyle>\n<LabelStyle>\n<color>00000000</color>\n<scale>0.000000</scale>\n</LabelStyle>\n</Style>\n</Document>\n</kml>'
out_open = open(kml, 'w')
out_open.writelines(out_kml)
out_open.close()
##############################################################################################
#Converts a CSV to kml
def csv_to_kml(csv, kml, zone = '', utm_or_geog = 'utm',header = True,id = False,iconURL = 'http://maps.google.com/mapfiles/kml/shapes/cross-hairs_highlight.png'):
open_csv = open(csv, 'r')
lines = open_csv.readlines()
open_csv.close()
print(lines)
ID = os.path.basename(csv)
out_kml = '<?xml version="1.0" encoding="UTF-8"?>\n<kml xmlns="http://www.opengis.net/kml/2.2">\n<Document id="'+ID+'">\n<name>'+ID+'</name>\n'
out_kml += '<Snippet></Snippet>\n<Folder id="FeatureLayer0">\n<name>'+ID+'</name>\n<Snippet></Snippet>\n'
out_kml += '<Style id="pushpin">\n<IconStyle id="mystyle">\n<Icon>\n<href>'+iconURL+'</href>\n'
out_kml +='<scale>1.0</scale>\n</Icon>\n</IconStyle>\n</Style>\n'
if header == True:
lines = lines[1:]
if id:
xIndex = 1
yIndex = 2
else:
xIndex = 0
yIndex = 1
i = 1
for line in lines:
x = float(line.split(',')[xIndex])
y = float(line.split(',')[yIndex][:-1])
print((x,y))
if id:
idNo =line.split(',')[0]
else:
idNo = i
if utm_or_geog == 'utm':
coords = utm_to_geog(zone, x, y)
else:
coords = [x, y]
out_kml += '<Placemark>\n<name>'+str(idNo)+'</name>\n<styleUrl>#pushpin</styleUrl>\n<Snippet></Snippet>\n<Point>\n<extrude>0</extrude>\t<altitudeMode>relativeToGround</altitudeMode>\n'
out_kml += '<coordinates> '+str(coords[1])+','+str(coords[0])+',0.000000</coordinates>\n</Point>\n</Placemark>\n'
i += 1
out_kml += '</Folder>\n<Style id="IconStyle00">\n<IconStyle>\n<Icon><href>000000.png</href></Icon>\n<scale>1.000000</scale>\n</IconStyle>\n<LabelStyle>\n<color>00000000</color>\n<scale>0.000000</scale>\n</LabelStyle>\n</Style>\n</Document>\n</kml>'
out_open = open(kml, 'w')
out_open.writelines(out_kml)
out_open.close()
##############################################################################################
def shape_to_kml(in_shp, out_kml, name_field = 'NAME',gdal_dir = program_files_dir + '/FWTools2.4.7/bin/'):
#print 'Converting', base(in_shp),'to',base(out_kml)
gdal_call = gdal_dir + 'ogr2ogr -f KML "' + out_kml + '" "' + in_shp + '" -dsco NameField=' + name_field
print (gdal_call)
call = subprocess.Popen(gdal_call)
call.wait()
##Dir = 'X:/201704_Mission/SpatialData/'
##shp = Dir + 'Forest_Reserves_clip.shp'
##kml = os.path.splitext(shp)[0] + '_k.kml'
##shape_to_kml(shp,kml)
##############################################################################################
#####################################################################
#Function to convert point plot shp to plot box shp and kml of specified radius
#Assumes input plot shp is projected and not geographic
def pt_plots_to_kml_box(plot_shp,plot_radius =15,plot_id_field = 'plotid'):
#Set up output names
plot_box_shp = os.path.splitext(plot_shp)[0] + '_'+str(int(plot_radius*2))+'m_box.shp'
plot_box_kml = os.path.splitext(plot_box_shp)[0] + '.kml'
#Extract the coordinates from shp
coords,polygon, ftl = get_coords(plot_shp)
#Create polygon box coords
poly_coords = [[[i[0]-plot_radius,i[1]+plot_radius],\
[i[0]+plot_radius,i[1]+plot_radius],\
[i[0]+plot_radius,i[1]-plot_radius],\
[i[0]-plot_radius,i[1]-plot_radius]] for i in coords]
#Write out shp
si = shape_info(plot_shp)
ids = dbf_to_list(plot_shp, plot_id_field)
if os.path.exists(plot_box_shp)== False:
list_to_polygon_shapefile(poly_coords, plot_box_shp, si['proj4'])
update_field(plot_box_shp, plot_id_field, ids,datatype = 'Integer',fieldWidth = 10)
#Convert to kml
shape_to_kml(plot_box_shp, plot_box_kml, name_field = plot_id_field)
#####################################################################
def range_to_dt(Min, Max):
    dt_ranges_int = [[[0, 256], 'Byte'], [[-32768, 32769], 'Int16'], [[0, 65536], 'UInt16'], [[0, 4294967296], 'UInt32']]
dt_ranges_float = [[[-3.4E38, 3.4E38], 'Float32'], [[-1.7E308, 1.7E308], 'Float64']]
print((type(Min)))
type_dict = {float: 'float', int: 'int'}
for Range in eval('dt_ranges_' + type_dict[type(Min)]):
dt_range = Range[0]
if Min >= dt_range[0] and Max <= dt_range[1]:
return Range[1]
break
##############################################################################################
def get_xo_yo_w_h(large_coords,small_coords,res):
large_height =int((large_coords[-1] - large_coords[1])/res)
large_width =int((large_coords[-2] - large_coords[0])/res)
xo = int(math.floor((small_coords[0] - large_coords[0])/res))
yo = int(math.ceil((large_coords[-1] - small_coords[-1])/res))
if xo < 0:
xo = 0
if yo < 0:
yo = 0
if xo == 0:
w = int(math.floor((small_coords[2] - small_coords[0])/res))
else:
w = int(math.floor((small_coords[2] - small_coords[0])/res))
if yo == 0:
h = int(math.floor((large_coords[-1] - small_coords[1])/res))
else:
h = int(math.floor((small_coords[-1] - small_coords[1])/res))
if h + yo > large_height:
h = large_height-yo
if w + xo > large_width:
w = large_width - xo
return xo,yo,w,h
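#Illustrative example (coords are [xmin, ymin, xmax, ymax], res in the same map units):
##get_xo_yo_w_h([0, 0, 100, 100], [20, 30, 60, 80], 10) #returns (2, 2, 4, 5)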
def simple_merge(image_list,output,no_data = ''):
union_info =union_extent_info(image_list, band_no = 1)
initialize_raster(output,dt = union_info['dt'], width = union_info['width'], height = union_info['height'], bands = 1, projection = union_info['projection'], transform = union_info['transform'], out_no_data = no_data,compress = True)
for image in image_list:
print(('Burning in:',base(image)))
burn_in_raster(image,output,no_data = no_data)
def burn_in_raster(small_raster,large_raster,no_data = '',Min = '',Max = ''):
sri = raster_info(small_raster)
lri = raster_info(large_raster)
s_res,s_coords = sri['res'],sri['coords']
l_res, l_coords = lri['res'],lri['coords']
xo,yo,w,h = get_xo_yo_w_h(l_coords,s_coords,s_res)
rs = brick(small_raster)
if Max != '':
print ('Applying max')
rs[rs > Max] = Max
if Min != '':
print ('Applying min')
rs[rs < Min] = Min
#yo = yo +1
update_raster(large_raster,rs,xo,yo,no_data)
def burn_in_array(rs,large_raster,s_coords,no_data = '',Min = '',Max = ''):
# sri = raster_info(small_raster)
lri = raster_info(large_raster)
# s_res,s_coords = sri['res'],sri['coords']
l_res, l_coords = lri['res'],lri['coords']
xo,yo,w,h = get_xo_yo_w_h(l_coords,s_coords,l_res)
# rs = brick(small_raster)
if Max != '':
print ('Applying max')
rs[rs > Max] = Max
if Min != '':
print ('Applying min')
rs[rs < Min] = Min
update_raster(large_raster,rs,xo,yo,no_data)
##############################################################################################
#Updates values of an existing raster with an input array
#If image is multi-band and array is single band, the single band will be applied across all input image bands
#No data values in the array will not be updated
def update_raster(image_to_update, array,xo,yo, no_data = ''):
rast = gdal.Open(image_to_update, gdal.GA_Update)
## if no_data != '' and no_data != None:
## print 'Masking no data value:', no_data
## array = numpy.ma.masked_equal(array, int(no_data))
ri= raster_info(image_to_update)
ri_dt = ri['dt']
if numpy_or_gdal(ri_dt) == 'gdal':
numpy_dt = dt_converter(ri_dt)
else:
numpy_dt = ri_dt
array = numpy.array(array).astype(numpy_dt)
no_image_bands = ri['bands']
if array.ndim == 2:
no_array_bands = 1
elif array.ndim == 3:
no_array_bands = len(array)
a_width = array.shape[-1]
a_height = array.shape[-2]
for i in range(no_image_bands):
print(('Updating band:', i + 1))
br = rast.GetRasterBand(i + 1)
if array.ndim == 3:
at = array[i]
if no_data != '' and no_data != None:
print(('Masking no data value:', no_data))
print(('Numpy dt:', numpy_dt))
from_array = br.ReadAsArray(xo, yo, a_width, a_height).astype(numpy_dt)
msk = numpy.equal(at,no_data)
print(('msk',msk))
print(('msk_shp',msk.shape))
print(('at_shp', at.shape))
print(('from_array_shp', from_array.shape))
numpy.putmask(at,msk,from_array)
msk = None
from_array = None
# print at
## from_array[at != no_data] = 0
## at[at == no_data ] = 0
## at = numpy.amax([from_array,at], axis = 0)
br.WriteArray(at,xo,yo)
at = None
else:
if no_data != '' and no_data != None:
print(('Masking no data value:', no_data))
print(('Numpy dt:', numpy_dt))
from_array = br.ReadAsArray(xo, yo, a_width, a_height).astype(numpy_dt)
# print 'msk',msk
numpy.putmask(array,numpy.equal(array,no_data),from_array)
from_array = None
#from_array[array != no_data] = 0
#array[array == no_data ] = 0
#array = numpy.amax([from_array,array], axis = 0)
br.WriteArray(array,xo,yo)
# brick_info(image_to_update, get_stats = True)
array = None
rast = None
br = None
##############################################################################################
def update_color_table_or_names(image,color_table = '',names = ''):
rast = gdal.Open(image, gdal.GA_Update)
b = rast.GetRasterBand(1)
if color_table != '' and color_table != None:
print(('Updating color table for:',image))
# b.SetRasterColorInterpretation(gdal.GCI_PaletteIndex)
b.SetRasterColorTable(color_table)
if names != '' and names != None:
print(('Updating names for:',image))
b.SetRasterCategoryNames(names)
rast = None
b = None
##############################################################################################
def just_raster_extent(raster):
rast = gdal.Open(raster)
width = rast.RasterXSize
height = rast.RasterYSize
transform = rast.GetGeoTransform()
rast = None
transform = list(transform)
xmax = transform[0] + (int(round(transform[1])) * width)
xmin = transform[0]
ymax = transform[3]
ymin = transform[3]- (int(round(transform[1])) * height)
return [xmin,ymin,xmax,ymax]
##############################################################################################
#Gathers various information about a raster and returns it in a dictionary
def raster_info(image = '', band_no = 1, get_stats = False, guiable = True):
if image == '':
guied = True
image = str(askopenfilename(title = 'Select Strata Raster',filetypes=[("IMAGINE","*.img"),("tif","*.tif")]))
else:
guied = False
#print image
rast = gdal.Open(image)
band1 = rast.GetRasterBand(band_no)
dt = band1.DataType
md = rast.GetMetadata('IMAGE_STRUCTURE')
# c = 'gdalinfo "' + image + '"'
# call = subprocess.Popen(c)
# call.wait()
# print(band1.GetMetadata())
# Use dict.get method in case the metadata dict does not have a 'COMPRESSION' key
compression = md.get('COMPRESSION', None)
ct = band1.GetRasterColorTable()
names = band1.GetRasterCategoryNames()
no_data = band1.GetNoDataValue()
if get_stats == True:
stats = band1.GetStatistics(False,1)
# print(stats)
Min = stats[0]
Max = stats[1]
mean = stats[2]
stdev = stats[3]
else:
Min, Max, mean, stdev = 0,0,0,0
band1 = None
Dict = {1: 'Byte', 2 : 'UInt16', 3: 'Int16', 4: 'UInt32', 5: 'Int32', 6 : 'Float32',7 : 'Float64'}
bit_depth_dict = {1: 8, 2 : 16, 3: 16, 4: 32, 5: 32, 6 : 32,7 : 64}
discrete_or_continuous_dict = {1: 'discrete', 2 : 'discrete', 3: 'discrete', 4: 'discrete', 5: 'discrete', 6 : 'continuous',7 : 'continuous'}
dataType = Dict[dt]
bit_depth = bit_depth_dict[dt]
discrete_or_continuous = discrete_or_continuous_dict[dt]
dt_ranges = {'Byte': [0,255], 'Int16': [-32768, 32768], 'UInt16': [0,65535],'UInt32': [0,4294967295], 'Float32':[-3.4E38, 3.4E38],'Float64':[-1.7E308, 1.7E308]}
try:
dt_range = dt_ranges[dataType]
except:
dt_range = [0,255]
width = rast.RasterXSize
height = rast.RasterYSize
bands = rast.RasterCount
projection = rast.GetProjection()
transform = rast.GetGeoTransform()
# ulx, xres, xskew, uly, yskew, yres = rast.GetGeoTransform()
# lrx = ulx + (rast.RasterXSize * xres)
# lry = uly + (rast.RasterYSize * yres)
transform = list(transform)
projections = projection_format_converter(projection, 'Wkt')
# print(projections['pretty_wkt'])
xmax = transform[0] + (int(round(transform[1])) * width)
xmin = transform[0]
ymax = transform[3]
ymin = transform[3]- (int(round(transform[1])) * height)
# print(xmin,ymin,xmax,ymax)
from_sr = osr.SpatialReference()
from_sr.ImportFromWkt(projections['wkt'])
geog_sr = osr.SpatialReference()
geog_sr.ImportFromEPSG(4326)
ul_point = ogr.Geometry(ogr.wkbPoint)
ul_point.AddPoint(xmin,ymax)
ul_point.AssignSpatialReference(from_sr)
ul_point.TransformTo(geog_sr)
lr_point = ogr.Geometry(ogr.wkbPoint)
lr_point.AddPoint(xmax,ymin)
lr_point.AssignSpatialReference(from_sr)
lr_point.TransformTo(geog_sr)
coords = [xmin,ymin,xmax,ymax]
coords_geog = [ul_point.GetX(),lr_point.GetY(),lr_point.GetX(),ul_point.GetY()]
gdal_coords = str(xmin) + ' ' + str(ymin) + ' ' + str(xmax) + ' ' + str(ymax)
try:
zone = projection.split('Zone')[1].split(',')[0][1:3]
except:
try:
zone = projection.split('zone ')[1][:2]
except:
zone = ''
datum_list = {'NAD':'NAD83', 'WGS':'WGS84'}
try:
datum = projection.split('GEOGCS["')[1].split('",')[0]
if datum not in datum_list:
for dat in datum_list:
if datum.find(dat) > -1:
datum = datum_list[dat]
except:
datum = ''
hemisphere = ''
if (projection.find('North') > -1 or projection.find('north') > -1) and (projection.find('Hemisphere') > -1 or projection.find('hemisphere') > -1):
hemisphere = 'North'
else:
hemisphere = 'South'
units = ''
if projection.find('meter') > -1 or projection.find('METER')> -1 or projection.find('Meter') > -1:
units = 'Meters'
res = transform[1]
info = {'image':image,'no_data' : no_data, 'proj4': projections['proj4'], 'wkt': projections['wkt'], 'units': units,
'hemisphere' : hemisphere,'min': Min, 'max': Max, 'mean': mean, 'std':stdev, 'stdev':stdev,
'gdal_coords': gdal_coords, 'coords' : coords, 'coords_geog':coords_geog,'projection':projection, 'transform': transform,
'width': width, 'height': height, 'bands': bands, 'band_count': bands, 'zone' : zone, 'datum': datum,
'res': res, 'resolution':res, 'dt_range': dt_range,'datatype': dataType, 'dt': dataType, 'DataType': dataType,'bit_depth':bit_depth,'color_table':ct,'names':names,'compression_method':compression,'discrete_or_continuous':discrete_or_continuous}
if guied == True:
for piece in info:
print(( piece, info[piece]))
rast = None
return info
##############################################################################################
#Applies raster_info across all bands and returns a list of raster_info dictionaries
def brick_info(image = '', get_stats = False):
info = []
list(map(lambda band : info.append(raster_info(image, band, get_stats)), list(range(1, raster_info(image)['bands'] + 1))))
return info
##############################################################################################
#Mr sid metadata extractor
def mr_sid_metadata(metadata_text_file):
open_m = open(metadata_text_file, 'r')
lines = open_m.readlines()
open_m.close()
find_list = ['West_Bounding_Coordinate', 'East_Bounding_Coordinate', 'North_Bounding_Coordinate', 'South_Bounding_Coordinate']
out_dict = {}
for Find in find_list:
for line in lines:
if line.find(Find) > -1:
coord = line.split(': ')[1].split('\n')[0]
out_dict[Find.split('_')[0]] = coord
coords = [float(out_dict['West']), float(out_dict['South']), float(out_dict['East']), float(out_dict['North'])]
out_dict['coords'] = coords
return out_dict
##############################################################################################
#Determines whether the data type is a numpy or gdal data type
def numpy_or_gdal(dt):
numpy_list = ['u1', 'uint8', 'uint16','u2', 'u4', 'i2', 'int16', 'Float32','float32', 'Float64','float64']
gdal_list = ['Byte', 'Byte', 'UInt16','UInt16','UInt32','Int16', 'Int16', 'float32','Float32','float64','Float64']
dt_list = []
if dt in numpy_list:
dt_list.append('numpy')
if dt in gdal_list:
dt_list.append('gdal')
if len(dt_list) == 2:
return 'both'
elif len(dt_list) == 1:
return dt_list[0]
else:
return 'neither'
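#Examples of the lookup above:
##numpy_or_gdal('u1') #returns 'numpy'
##numpy_or_gdal('Byte') #returns 'gdal'
##numpy_or_gdal('float32') #appears in both lists, so returns 'both'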
##############################################################################################
def is_leap_year(year):
year = int(year)
if year % 4 == 0:
if year%100 == 0 and year % 400 != 0:
return False
else:
return True
else:
return False
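#Quick checks of the leap year rule: is_leap_year(2000) and is_leap_year(2012) return True, is_leap_year(1900) returns False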
##############################################################################################
def julian_to_calendar(julian_date, year = time.localtime()[0]):
julian_date, year = int(julian_date), int(year)
is_leap = is_leap_year(year)
if is_leap:
leap, length = True, [31,29,31,30,31,30,31,31,30,31,30,31]
else:
leap, length = False, [31,28,31,30,31,30,31,31,30,31,30,31]
ranges = []
start = 1
for month in length:
stop = start + month
ranges.append(list(range(start, stop)))
start = start + month
month_no = 1
for Range in ranges:
if julian_date in Range:
mn = month_no
day_no = 1
for day in Range:
if day == julian_date:
dn = day_no
day_no += 1
month_no += 1
if len(str(mn)) == 1:
lmn = '0' + str(mn)
else:
lmn = str(mn)
if len(str(dn)) == 1:
ldn = '0' + str(dn)
else:
ldn = str(dn)
return {'monthdate': lmn + ldn,'month':mn, 'day':dn, 'date_list': [mn,dn], 'date': str(mn) + '/' + str(dn)}
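#Example (non-leap year): julian day 60 of 2021 is March 1st
##julian_to_calendar(60, 2021) #returns {'monthdate': '0301', 'month': 3, 'day': 1, 'date_list': [3, 1], 'date': '3/1'}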
##############################################################################################
def calendar_to_julian(day, month, year = time.localtime()[0]):
day, month, year = int(day),int(month),int(year)
is_leap = is_leap_year(year)
n, nl=[0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334], [0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335]
x = 1
while x <=12:
if month == x:
if not is_leap:
julian = n[x-1]+ day
else:
julian = nl[x-1]+ day
return julian
x = x+1
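#Example (inverse of the julian_to_calendar example above): March 1st 2021 is julian day 60
##calendar_to_julian(1, 3, 2021) #returns 60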
##############################################################################################
def base(in_path):
return os.path.basename(os.path.splitext(in_path)[0])
##############################################################################################
def check_dir(in_path):
if os.path.exists(in_path) == False:
print(('Making dir:', in_path))
os.makedirs(in_path)
def checkDir(in_path):
check_dir(in_path)
##############################################################################################
def check_end(in_path, add = '/'):
if in_path[-len(add):] != add:
out = in_path + add
else:
out = in_path
return out
##############################################################################################
def glob_dir(Dir):
dirs = [i for i in glob(Dir,'') if os.path.isdir(i)]
dirs = [check_end(i) for i in dirs]
return dirs
##############################################################################################
#Returns all files containing an extension or any of a list of extensions
#Can give a single extension or a list of extensions
def glob(Dir, extension):
Dir = check_end(Dir)
if type(extension) != list:
if extension.find('*') == -1:
return [Dir + i for i in [i for i in os.listdir(Dir) if os.path.splitext(i)[1] == extension]]
else:
return [Dir + i for i in os.listdir(Dir)]
else:
out_list = []
for ext in extension:
tl = [Dir + i for i in [i for i in os.listdir(Dir) if os.path.splitext(i)[1] == ext]]
for l in tl:
out_list.append(l)
return out_list
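#Example usage (hypothetical directory path):
##glob('C:/temp/', '.img') #all .img files in C:/temp/
##glob('C:/temp/', ['.img', '.tif']) #.img and .tif files
##glob('C:/temp/', '*') #every entry in the directory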
##############################################################################################
#Returns all files containing a specified string (Find)
def glob_find(Dir, Find):
Dir = check_end(Dir)
if type(Find) != list:
return [Dir + i for i in [i for i in os.listdir(Dir) if i.find(Find) > -1]]
else:
out_list = []
for F in Find:
t1 = [Dir + i for i in [i for i in os.listdir(Dir) if i.find(F) > -1]]
for t in t1:
out_list.append(t)
return out_list
##############################################################################################
#Returns all files ending with a specified string (end)
def glob_end(Dir, end):
Dir = check_end(Dir)
if type(end) != list:
return [Dir + i for i in [i for i in os.listdir(Dir) if i[-len(end):] == end]]
else:
out_list = []
for ed in end:
t1 = [Dir + i for i in [i for i in os.listdir(Dir) if i[-len(ed):] == ed]]
for t in t1:
out_list.append(t)
return out_list
##############################################################################################
##def glob_find_iter(Dir, find_list):
## out_list = []
## Find = find_list[0]
## tl1 = map(lambda i : Dir + i, filter(lambda i:i.find(Find) > -1, os.listdir(Dir)))
## for Find in find_list[1:]:
##
##
##############################################################################################
def set_no_data(image, no_data_value = -9999, update_stats = True):
rast = gdal.Open(image, gdal.GA_Update)
ri = raster_info(image)
nd = ri['no_data']
print(('Processing no_data for:',base(image)))
if nd != no_data_value:
print(('Changing no data from:',nd,'to',no_data_value))
for band in range(1, ri['bands']+1):
b = rast.GetRasterBand(band)
b.SetNoDataValue(no_data_value)
if update_stats:
print(('Updating stats for band:',band))
Min,Max,Mean,Std = b.ComputeStatistics(0)
b.SetStatistics(Min,Max,Mean,Std)
else:
Min,Max,Mean,Std = b.GetStatistics(0,0)
print(('Min:',Min))
print(('Max:',Max))
print(('Mean:',Mean))
print(('Std:', Std))
else:
print(('No data already = ', no_data_value))
print()
def set_stats(image,Min=None,Max=None,Mean=None,Std=None):
rast = gdal.Open(image, gdal.GA_Update)
ri = raster_info(image)
nd = ri['no_data']
for band in range(1, ri['bands']+1):
b = rast.GetRasterBand(band)
b.SetStatistics(Min,Max,Mean,Std)
#rast = None
def set_projection(image,crs):
rast = gdal.Open(image, gdal.GA_Update)
rast.SetProjection(crs)
rast = None
##
##proj = 'PROJCS["NAD83 / Conus Albers",\
## GEOGCS["NAD83",\
## DATUM["North American Datum 1983",\
## SPHEROID["GRS 1980", 6378137.0, 298.257222101, AUTHORITY["EPSG","7019"]],\
## TOWGS84[1.0, 1.0, -1.0, 0.0, 0.0, 0.0, 0.0],\
## AUTHORITY["EPSG","6269"]],\
## PRIMEM["Greenwich", 0.0, AUTHORITY["EPSG","8901"]],\
## UNIT["degree", 0.017453292519943295],\
## AXIS["Geodetic longitude", EAST],\
## AXIS["Geodetic latitude", NORTH],\
## AUTHORITY["EPSG","4269"]],\
## PROJECTION["Albers Equal Area", AUTHORITY["EPSG","9822"]],\
## PARAMETER["central_meridian", -96.0],\
## PARAMETER["latitude_of_origin", 23.0],\
## PARAMETER["standard_parallel_1", 29.5],\
## PARAMETER["false_easting", 0.0],\
## PARAMETER["false_northing", 0.0],\
## PARAMETER["standard_parallel_2", 45.5],\
## UNIT["m", 1.0],\
## AXIS["Easting", EAST],\
## AXIS["Northing", NORTH],\
## AUTHORITY["EPSG","5070"]]'
##Dir = 'D:/Downloads/rtfd_baselines/'
##tifs = glob(Dir,'.tif')
##for tif in tifs:
## set_projection(tif,proj)
## set_no_data(tif,-32768)
##############################################################################################
def quick_look(tar_list, out_dir, bands = [], leave_extensions = ['_MTLold.txt', '_MTL.txt'], df = 'ENVI', out_extension = ''):
if os.path.exists(out_dir) == False:
os.makedirs(out_dir)
out_dir_temp = out_dir + 'individual_bands/'
out_dir_not_now = out_dir_temp + 'old_bands/'
if os.path.exists(out_dir_temp) == False:
os.makedirs(out_dir_temp)
if os.path.exists(out_dir_not_now) == False:
os.makedirs(out_dir_not_now)
stack_out_list = []
for tar in tar_list:
if tar.find('.tar.gz') > -1:
stack_out = out_dir + os.path.basename(tar.split('.tar.gz')[0]) + out_extension
else:
stack_out = out_dir + os.path.splitext(os.path.basename(tar))[0] + out_extension
stack_out_list.append(stack_out)
if os.path.exists(stack_out) == False:
try:
untar(tar, out_dir_temp, bands = bands)
except:
print(('Could not untar all files in', os.path.basename(tar)))
t_files = glob(out_dir_temp, '*')
tto_stack = glob(out_dir_temp, '.TIF')
if len(tto_stack) == 0:
tto_stack = glob(out_dir_temp, '.tif')
to_stack = [tto_stack[0]]
res = raster_info(tto_stack[0])['res']
for to in tto_stack[1:]:
try:
rt = raster_info(to)['res']
if rt == res:
to_stack.append(to)
except:
print(('Could not include', os.path.basename(to), ' in stack'))
stack(to_stack, stack_out, to_stack[0], df = df)
for File in t_files:
not_now_filename = out_dir_not_now + os.path.basename(File)
if os.path.isdir(File) == False:
is_in = 0
for extension in leave_extensions:
if File.find(extension) > -1:
is_in = 1
if is_in == 0:
if os.path.exists(not_now_filename) == False:
try:
shutil.move(File, not_now_filename)
except:
print(('Could not move', File))
else:
try:
os.remove(File)
except:
print(('Could not remove', File))
else:
try:
shutil.move(File, out_dir + os.path.basename(File))
except:
print(('Could not move', File))
else:
print(( 'Already created:', stack_out))
##############################################################################################
#Untars Landsat TM (or any) tarball
def untar(tarball, output_dir = '', bands = []):
if output_dir == '':
output_dir = os.path.dirname(tarball) + '/'
out_list = []
out_folder = os.path.basename(tarball).split('.')[0].split('[')[0]
if os.path.exists(output_dir + out_folder) == False:
try:
tar = TarFile.open(tarball, 'r:gz')
except:
tar = TarFile.open(tarball, 'r')
#tar = gzip.open(tarball)
if bands == []:
print(('Unzipping:', os.path.basename(tarball)))
tar.extractall(path = output_dir)
else:
tar_names = tar.getnames()#[band]
for band in bands:
band = int(band)
tar_name = tar_names[band]
output_name = output_dir + tar_name
out_list.append(output_name)
if os.path.exists(output_name) == False:
print(( 'Unzipping:', output_dir + tar_name))
tar.extract(tar_name, path = output_dir)
tar.close()
else:
print(('Already unzipped:', os.path.basename(tarball)))
return out_list
#File = 'C:/Users/ihousman/Downloads/rsgislib-2.0.0.tar'
#out_dir = 'C:/Users/ihousman/Downloads/'
#untar(File,out_dir)
#gz = '//166.2.126.38/2013_Composites_Compressed/AQUA/113_128/zone14_path_113_128_AQUA_composite_surface_reflectance.img.gz'
#outfile = 'C:/Users/ihousman/Downloads/' + base(gz)
def ungz(gz, outfile = ''):
if outfile == '' or outfile == None:
outfile = os.path.splitext(gz)[0]
if os.path.exists(outfile) == False:
print(( 'Un gzing:', gz))
infile = gzip.open(gz, 'rb')
output = open(outfile, 'wb')
file_content = infile.read()
output.writelines(file_content)
output.close()
else:
print(('Already created:', outfile))
###################################################
#Taken from: https://stackoverflow.com/questions/2104080/how-can-i-check-file-size-in-python
def convert_bytes(num):
"""
this function will convert bytes to MB.... GB... etc
"""
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return "%3.1f %s" % (num, x)
num /= 1024.0
def file_size(file_path):
"""
this function will return the file size
"""
if os.path.isfile(file_path):
file_info = os.stat(file_path)
return convert_bytes(file_info.st_size)
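#Examples of the byte conversion (pure arithmetic, no file access needed):
##convert_bytes(512) #returns '512.0 bytes'
##convert_bytes(2048) #returns '2.0 KB'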
###################################################
##############################################################################################
#Will read a raster into a numpy array
#Returns a numpy array
#u1,u2,u4, i1,i2,i4, float32, float64
#Does not support data types smaller than unsigned 8-bit
def raster(Raster, dt = '', band_no = 1, xoffset = 0, yoffset = 0, width = '', height = '', na_value = ''):
#os.chdir('C:/Python26/ArcGIS10.0/Lib/site-packages/osgeo')
info = raster_info(Raster)
if dt == '':
dt = info['dt']
if numpy_or_gdal(dt) == 'gdal':
dt = dt_converter(dt)
if width == '':
width = info['width'] - xoffset
if height == '':
height = info['height']- yoffset
band_no = int(band_no)
print()
print(('Reading raster:', Raster.split('/')[-1]))
print(('Band number:', band_no))
rast = gdal.Open(Raster)
band1 = rast.GetRasterBand(band_no)
print(('Datatype:',dt))
print(( width, height))
band1_pixels = band1.ReadAsArray(xoffset, yoffset, width, height).astype(dt)
rast = None
band1 = None
print((band1_pixels.shape))
print(('As datatype:',str(type(band1_pixels[1][1]))))
print()
if na_value != '':
band1_pixels = numpy.ma.masked_equal(band1_pixels, int(na_value))
return band1_pixels
band1_pixels = None
def brick(Raster, dt = '', xoffset = 0, yoffset = 0, width = '', height = '', band_list = [], na_value = '', image_list = False):
if image_list == True:
band_list = list(range(1, len(Raster) + 1))
info = raster_info(Raster[0])
else:
info = raster_info(Raster)
if band_list != [] and band_list != '' and band_list != None:
bands = band_list
else:
bands = list(range(1, info['bands'] + 1))
if dt == '':
dt = info['dt']
if numpy_or_gdal(dt) == 'gdal':
dt = dt_converter(dt)
if width == '':
width = info['width'] - xoffset
if height == '':
height = info['height']- yoffset
print()
try:
print(('Reading raster:', Raster.split('/')[-1]))
except:
print ('Reading raster')
print(('Datatype:',dt))
array_list = numpy.zeros([len(bands), height, width], dtype = dt)
if image_list == False:
rast = gdal.Open(Raster)
array_no = 0
for band in bands:
print(('Reading band number:', band))
band1 = rast.GetRasterBand(band)
band1_pixels = band1.ReadAsArray(xoffset, yoffset, width, height).astype(dt)
array_list[array_no] = band1_pixels
array_no += 1
else:
array_no = 0
for raster in Raster:
rast = gdal.Open(raster)
print(('Reading:', os.path.basename(raster)))
band1 = rast.GetRasterBand(1)
band1_pixels = band1.ReadAsArray(xoffset, yoffset, width, height).astype(dt)
array_list[array_no] = band1_pixels
array_no += 1
rast = None
band1 = None
band1_pixels = None
print(('Returning', len(array_list),'band 3-d array'))
array_list = numpy.array(array_list)
if na_value != '' and na_value != None:
array_list = numpy.ma.masked_equal(array_list, int(na_value))
return array_list
array_list = None
######################################################################################
def tile_array(array, tiles):
out = numpy.hsplit(array, tiles)
return out
def untile_array(tiled_array):
out = numpy.hstack(tiled_array)
return out
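#Example: splitting and re-assembling an array along its columns
##tiles = tile_array(numpy.zeros([4, 8]), 2) #two 4x4 arrays
##whole = untile_array(tiles) #back to a single 4x8 array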
def image_tiler(image, tile_size_x = 1000, tile_size_y = 1000):
info = raster_info(image)
bands = info['bands']
width = info['width']
height = info['height']
print((width, height))
coords = info['coords']
res = info['res']
print (coords)
ulx = coords[0]
uly = coords[-1]
tilesx = math.ceil(float(width)/ float(tile_size_x))
    tilesy = math.ceil(float(height)/float(tile_size_y))
for tilex in range(tilesx):
xmin = ulx + (tilex * tile_size_x* res)
xmax = ulx + (tilex * tile_size_x * res) + (tile_size_x * res)
if xmax > coords[2]:
xmax = coords[2]
for tiley in range(tilesy):
ymax = uly - (tiley * tile_size_y * res)
ymin = uly - (tiley * tile_size_y * res) - (tile_size_y * res)
if ymin < coords[1]:
ymin = coords[1]
gdal_coords = str(xmin) + ' ' +str(ymin) + ' ' + str(xmax) + ' ' + str(ymax)
#print gdal_coords
output = os.path.splitext(image)[0] + '_Tile_' +str(tilex) + '_' + str(tiley) + '.img'
if os.path.exists(output) == False:
reproject(image, output, zone = info['zone'], datum = info['datum'], clip_extent = gdal_coords, resampling_method = 'nearest')
######################################################################################
######################################################################################
#Function intended to be used to divide a list into threads
#Returns a 2-d array of lists of parts of the list for each thread
def set_maker(in_list, threads):
out_sets = []
    tl = len(in_list) // threads
remainder = len(in_list) % threads
i = 0
for t in range(threads):
tt = []
while len(tt) < tl:
tt.append(in_list[i])
i += 1
out_sets.append(tt)
for r in range(remainder):
out_sets[r].append(in_list[i])
i += 1
#print 'The sets are', out_sets
return out_sets
######################################################################################
def new_set_maker(in_list,threads):
out_sets =[]
for t in range(threads):
out_sets.append([])
i =0
for il in in_list:
out_sets[i].append(il)
i += 1
if i >= threads:
i = 0
return out_sets
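#Example of the round-robin split: new_set_maker([1, 2, 3, 4, 5], 2) returns [[1, 3, 5], [2, 4]]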
######################################################################################
#Function intended to be used to call on a batch file externally
#Generally used as part of multi-threading
def bat_thread(bat_lines, bat_name, run = True):
blno = open(bat_name, 'w')
blno.writelines(bat_lines)
blno.close()
if run == True:
call = subprocess.Popen(bat_name)
call.wait()
###########################################################################################
#Function intended to be used to call on a Python script externally
#Generally used as part of multi-threading
def python_thread(script_lines, script_name, run = True):
sco = open(script_name, 'w')
sco.writelines(script_lines)
sco.close()
bl = []
bl.append('cd\\ \n')
bl.append(script_name[:2] + '\n')
#bl.append('cd ' + os.path.dirname(script_name) + '\n')
#bl.append(os.path.basename(script_name) + '\n')
bl.append(script_name + '\n')
bln = script_name + '.bat'
blno = open(bln, 'w')
blno.writelines(bl)
blno.close()
if run == True:
call = subprocess.Popen(bln)
call.wait()
###########################################################################################
def r_thread(script_lines, script_name, r_bin_dir, run = True):
sco = open(script_name, 'w')
sco.writelines(script_lines)
sco.close()
bl = []
bl.append('cd\\ \n')
#bl.append(script_name[:2] + '\n')
bl.append('cd ' + r_bin_dir + '\n')
bl.append('"' + r_bin_dir + 'rscript.exe" "' + script_name+ '"\n')
bln = script_name + '.bat'
blno = open(bln, 'w')
blno.writelines(bl)
blno.close()
if run == True:
call = subprocess.Popen(bln)
call.wait()
###########################################################################################
def reset_no_data(in_image, out_image, in_no_data = -9999, out_no_data = 0):
if os.path.exists(out_image) == False:
print(( 'Creating', out_image))
ti = tiled_image(out_image, in_image, outline_tiles = True, out_no_data = out_no_data)
ci = 1
for xo,yo,w,h in ti.chunk_list:
print(('Recoding chunk', ci, 'out of', len(ti.chunk_list)))
r = brick(in_image, '',xo,yo,w,h)
print(( 'Recoding', in_no_data,'to', out_no_data))
r[r == in_no_data] = out_no_data
ti.add_tile(r, xo,yo)
r = None
ci +=1
ti.rm()
ti = None
print(( 'Computing stats for', out_image))
raster_info(out_image, get_stats = True)
######################################################################################
def check_xy(x,y):
if x == '':
x = numpy.arange(1, len(y) + 1)
elif y == '':
y = numpy.arange(1, len(x) + 1)
x = numpy.array(x).astype('Float32')
y = numpy.array(y).astype('Float32')
return x,y
def rmse(actual,predicted):
error = actual - predicted
return numpy.sqrt(numpy.mean(error**2))
def mae(actual, predicted):
error = actual - predicted
    return numpy.mean(numpy.absolute(error))
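#Quick example with small arrays (illustrative values):
##rmse(numpy.array([1.0, 2.0]), numpy.array([1.0, 4.0])) #about 1.414
##mae(numpy.array([1.0, 2.0]), numpy.array([1.0, 4.0])) #1.0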
import numpy as np
def length(x, axis=-1, keepdims=True):
"""
Computes vector norm along a tensor axis(axes)
:param x: tensor
:param axis: axis(axes) along which to compute the norm
:param keepdims: indicates if the dimension(s) on axis should be kept
:return: The length or vector of lengths.
"""
lgth = np.sqrt(np.sum(x * x, axis=axis, keepdims=keepdims))
return lgth
def normalize(x, axis=-1, eps=1e-8):
"""
Normalizes a tensor over some axis (axes)
:param x: data tensor
:param axis: axis(axes) along which to compute the norm
:param eps: epsilon to prevent numerical instabilities
:return: The normalized tensor
"""
res = x / (length(x, axis=axis) + eps)
return res
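# Small illustrative example (assumes a (N, 3) array of vectors):
# length(np.array([[3.0, 4.0, 0.0]])) -> [[5.]]
# normalize(np.array([[3.0, 4.0, 0.0]])) -> approximately [[0.6, 0.8, 0.0]]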
def quat_normalize(x, eps=1e-8):
"""
Normalizes a quaternion tensor
:param x: data tensor
:param eps: epsilon to prevent numerical instabilities
:return: The normalized quaternions tensor
"""
res = normalize(x, eps=eps)
return res
def quat_getDif(x, y, eps=1e-8):
"""
    Computes the difference quaternion between two quaternion tensors
:param x: data tensor 1
:param y: data tensor 2
    :return: The difference quaternion between both quaternions
"""
return quat_normalize(quat_mul(quat_inv(x),y))
def angle_axis_to_quat(angle, axis):
"""
Converts from and angle-axis representation to a quaternion representation
:param angle: angles tensor
:param axis: axis tensor
:return: quaternion tensor
"""
c = np.cos(angle / 2.0)[..., np.newaxis]
    s = np.sin(angle / 2.0)[..., np.newaxis]
# -*- coding: utf-8 -*-
"""
...
"""
import LibraryTT.txt2array as conversion
import numpy as np
from numpy import sqrt
import pandas as pd
import matplotlib.pyplot as plt
import random
import math
from mpl_toolkits.mplot3d import Axes3D
# import open3d as o3d
# %matplotlib inline
D = conversion.txt2array()
DD = np.copy(D)
import numpy as np
import pytest, os
from numpy.testing import assert_array_equal
from ReconstructOrder.datastructures.physical_data import PhysicalData
def test_basic_constructor_nparray():
"""
test assignment using numpy arrays
"""
phys = PhysicalData()
phys.I_trans = np.ones((512, 512))
phys.polarization = 2 * np.ones((512, 512))
phys.retard = 3 * np.ones((512, 512))
phys.depolarization = 4 * np.ones((512, 512))
phys.azimuth = 5 * np.ones((512, 512))
phys.azimuth_degree = 6 * np.ones((512, 512))
phys.azimuth_vector = 7 * np.ones((512, 512))
assert_array_equal(phys.I_trans, np.ones((512, 512)))
assert_array_equal(phys.polarization, 2*np.ones((512, 512)))
assert_array_equal(phys.retard, 3*np.ones((512, 512)))
assert_array_equal(phys.depolarization, 4*np.ones((512, 512)))
assert_array_equal(phys.azimuth, 5*np.ones((512, 512)))
assert_array_equal(phys.azimuth_degree, 6*np.ones((512, 512)))
assert_array_equal(phys.azimuth_vector, 7*np.ones((512, 512)))
def test_basic_constructor_memap(setup_temp_data):
"""
test assignment using memory mapped files
"""
mm = setup_temp_data
phys = PhysicalData()
phys.I_trans = mm
phys.polarization = 2 * mm
phys.retard = 3 * mm
phys.depolarization = 4 * mm
phys.azimuth = 5 * mm
phys.azimuth_degree = 6 * mm
phys.azimuth_vector = 7 * mm
    assert_array_equal(phys.I_trans, mm)
import time
import numpy as np
from datasets import load_dataset
from memo import memfile, grid
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import f1_score
from sklearn.pipeline import make_pipeline, make_union
from sklearn.feature_extraction.text import HashingVectorizer
import datasets
datasets.logging.set_verbosity_error()
if __name__ == "__main__":
@memfile("sgd-benchmarks.jsonl", skip=True)
def train_test_sgd_model(dataset_name, n_features=10_000, n_hash=3, subword=True, early_stopping=False):
datasets = {
"ag_news": {"train": "train", "valid": "test", "text": "text", "label": "label"},
"banking77": {"train": "train", "valid": "test", "text": "text", "label": "label"},
"emotion": {"train": "train", "valid": "test", "text": "text", "label": "label"},
}
        d = datasets[dataset_name]
        dataset = load_dataset(dataset_name)
X_train = dataset[d['train']][d['text']]
y_train = dataset[d['train']][d['label']]
X_test = dataset[d['valid']][d['text']]
y_test = dataset[d['valid']][d['label']]
featurizers = [HashingVectorizer(n_features=n_features + i) for i in range(n_hash)]
if subword:
featurizers += [HashingVectorizer(ngram_range = (2, 4), n_features=n_features + i, analyzer="char") for i in range(n_hash)]
classifier = SGDClassifier()
if early_stopping:
classifier = SGDClassifier(early_stopping=True, n_iter_no_change=3, tol=0.0001, validation_fraction=0.2)
pipe = make_pipeline(
make_union(*featurizers),
classifier
)
t0 = time.time()
pipe.fit(X_train, y_train)
t1 = time.time()
pred_test = pipe.predict(X_test)
t2 = time.time()
pred_train = pipe.predict(X_train)
        return {
            "acc_valid": np.mean(pred_test == y_test),
            "acc_train": np.mean(pred_train == y_train),
        }
import numpy as np
from determined.pytorch import Reducer, _reduce_metrics
def test_reducer() -> None:
    metrics = np.array([0.25, 0.5, 0.75, 1, 25.5, 1.9])
import os
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report, roc_curve, roc_auc_score, confusion_matrix
from sklearn.feature_selection import SelectKBest, chi2
import numpy as np
"""Data is reformated to add a win/loss class and all columns are renamed to provide one column for each performance
characteristic. There is now a separate row for winners and losers. """
def get_all_data():
"""
:return: concated df of all csv's since 1985
:rtype: dataFrame
"""
path = 'tennis_atp_1985>'
all_data = pd.DataFrame()
for file in os.listdir('tennis_atp_1985>'):
file_path = os.path.join(path, file)
all_data = all_data.append(pd.read_csv(file_path))
return all_data
def data_clean(data):
"""
Filters all unnecessary features from data set containg matches since 1985
:param data: data set compiled in get_all_data
:type data: dataFrame
:return clean:
:rtype clean: dataFrame
"""
# select all features of winning participants
winners = data.filter(['winner_name', 'winner_hand', 'winner_ht', 'winner_age', 'w_ace', 'w_df',
'w_svpt', 'w_1stIn', 'w_1stWon', 'w_2ndWon', 'w_SvGms', 'w_bpSaved', 'w_bpFaced'])
winners['won'] = 1
# select all features of losing participants
losers = data.filter(['loser_name', 'loser_hand', 'loser_ht', 'loser_age', 'l_ace', 'l_df',
'l_svpt', 'l_1stIn', 'l_1stWon', 'l_2ndWon', 'l_SvGms', 'l_bpSaved', 'l_bpFaced'])
losers['won'] = 0
winners.rename(columns={'winner_name': 'name', 'winner_hand': 'hand', 'winner_ht': 'ht', 'winner_age': 'age',
'w_ace': 'ace', 'w_df': 'df', 'w_svpt': 'svpt', 'w_1stIn': '1stIn', 'w_1stWon':
'1stWon', 'w_2ndWon': '2ndWon', 'w_SvGms': 'svGms', 'w_bpSaved': 'bpSaved', 'w_bpFaced':
'bpFaced'}, inplace=True)
losers.rename(columns={'loser_name': 'name', 'loser_hand': 'hand', 'loser_ht': 'ht', 'loser_age': 'age', 'l_ace':
'ace', 'l_df': 'df', 'l_svpt': 'svpt', 'l_1stIn': '1stIn', 'l_1stWon': '1stWon',
'l_2ndWon': '2ndWon', 'l_SvGms': 'svGms', 'l_bpSaved': 'bpSaved', 'l_bpFaced': 'bpFaced'},
inplace=True)
clean = pd.concat([winners, losers], axis=0)
clean['serving_bp_won'] = clean['bpSaved'] / clean['bpFaced']
clean['serving_bp_lost'] = 1 - clean['serving_bp_won']
clean['returning_bp_won'] = clean['bpSaved'] / clean['bpFaced']
clean['returning_bp_lost'] = 1 - clean['returning_bp_won']
# Null values are safely dropped and this indicates matches where there was a 0 for any of these categores
clean.dropna(inplace=True)
print(clean.isnull().values.any())
# one-hot encoded dummy variable for hand of the participant
clean = pd.get_dummies(clean, prefix='hand', columns=['hand'])
return clean
"""Uses Select K best and Extra Trees to find best features for ml models"""
def select_features(clean):
"""
Uses SelectKBest and ChiSquared to determine most useful features
:param clean: filtered df from data_clean, only 2019 used as these features are most applicable for prediction
:type clean: dataFrame
:return features: five most useful features for predicting the outcome
:rtype: np array
"""
X = clean.loc[:, clean.columns != 'won']
X = X.select_dtypes(exclude=['object'])
y = np.array(clean['won'])
best_features = SelectKBest(score_func=chi2, k=10)
fit = best_features.fit(X, y)
df_scores = pd.DataFrame(fit.scores_)
df_columns = pd.DataFrame(X.columns)
feature_scores = pd.concat([df_columns, df_scores], axis=1)
feature_scores.columns = ['Specs', 'Score']
features = (feature_scores.nlargest(10, 'Score'))
features.drop(['Score'], axis=1, inplace=True)
features = features[:5]
features = np.array(features['Specs'])
print(features)
features = np.append(features, ['1stIn', 'svGms', 'hand_R'])
return features
def log_regression(clean, met_features):
"""
Performs Logistic Regression using SciKit Learn
Produces results using Classification Report Class from SciKit Learn
:param clean: df from data_clean
:type clean: dataFrame
:param met_features: array returned from select_features
:type met_features: np array
"""
X = clean[met_features]
y = np.array(clean['won'])
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=.8, random_state=42)
model = LogisticRegression()
model.fit(X_train, y_train)
y_predicted = model.predict(X_test)
y_pred_probs = model.predict_proba(X_test)
y_pred_probs = y_pred_probs[:, 1]
auc = roc_auc_score(y_test, y_pred_probs)
fpr, tpr, thresholds = roc_curve(y_test, y_pred_probs)
plot_roc(fpr, tpr, auc)
plot_cm(y_test, y_predicted)
print(classification_report(y_test, y_predicted))
def decision_tree(clean, met_features):
"""
Performs Decision Tree Classification using SciKit Learn
Produces results using Classification Report Class from SciKit Learn
:param clean: df from data_clean
:type clean: dataFrame
:param met_features: array returned from select_features
:type met_features: np array
"""
X = clean[met_features]
y = np.array(clean['won'])
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=.8, random_state=42)
model = DecisionTreeClassifier(max_depth=4)
model.fit(X_train, y_train)
y_predicted = model.predict(X_test)
y_pred_probs = model.predict_proba(X_test)
y_pred_probs = y_pred_probs[:, 1]
auc = roc_auc_score(y_test, y_pred_probs)
fpr, tpr, thresholds = roc_curve(y_test, y_pred_probs)
plot_roc(fpr, tpr, auc)
plot_cm(y_test, y_predicted)
print(classification_report(y_test, y_predicted))
def random_forest(clean, met_features):
"""
Performs Random Forest Classification using SciKit Learn
Produces results using Classification Report Class from SciKit Learn
:param clean: df from data_clean
:type clean: dataFrame
:param met_features: array returned from select_features
:type met_features: np array
"""
X = clean[met_features]
    y = np.array(clean['won'])
"""
@author: jens
@modifiers: hyatt, neergaard
Migrated from inf_hypnodensity on 12/6/2019
"""
import pickle
import numpy as np
import pywt # wavelet entropy
import itertools # for extracting feature combinations
import os # for opening os files for pickle.
from inf_tools import softmax
class HypnodensityFeatures(object): # <-- extract_features
num_features = 489
def __init__(self, app_config):
self.config = app_config
# Dictionaries, keyed by model names
self.meanV = {}
# Standard deviation of features.
self.stdV = {}
# range is calculated as difference between 15th and 85th percentile - this was previously the "scaleV".
self.rangeV = {}
self.medianV = {}
try:
self.selected = app_config.narco_prediction_selected_features
except:
self.selected = [] # [1, 11, 16, 22, 25, 41, 43, 49, 64, 65, 86, 87, 103, 119, 140, 147, 149, 166, 196, 201, 202, 220, 244, 245, 261, 276, 289, 296, 299, 390, 405, 450, 467, 468, 470, 474, 476, 477]
self.scale_path = app_config.hypnodensity_scale_path # 'scaling'
# self.select_features_path = appConfig.hypnodensity_select_features_path
# self.select_features_pickle_name = appConfig.hypnodensity_select_features_pickle_name # 'narcoFeatureSelect.p'
def extract(self, hyp):
eps = 1e-10
features = np.zeros([24 + 31 * 15])
hyp = hyp[~np.isnan(hyp[:, 0]), :] # or np.invert(np.isnan(hyp[:, 0])
# k = [i for i, v in enumerate(hyp[:, 0]) if np.isnan(v)]
# hyp[k[0] - 2:k[-1] + 2, :]
j = -1
for i in range(5):
for comb in itertools.combinations([0, 1, 2, 3, 4], i + 1): # 31 iterations and 15 features per iteration
j += 1
dat = np.prod(hyp[:, comb], axis=1) ** (1 / float(len(comb)))
features[j * 15] = np.log(np.mean(dat) + eps)
features[j * 15 + 1] = -np.log(1 - np.max(dat))
moving_av = np.convolve(dat, np.ones(10), mode='valid')
features[j * 15 + 2] = np.mean(np.abs(np.diff(moving_av))) # diff of raw data
# features[j * 15 + 2] = np.mean(np.abs(np.diff(dat))) # Alex's next version: moving average may smooth the transitions out too much - removing a hyper-parameter
features[j * 15 + 3] = self.wavelet_entropy(dat) # Shannon entropy - check if it is used as a feature - was not selected.
rate = np.cumsum(dat) / np.sum(dat)
# check at which point of the study the percentage of this combination of sleep stages is reached.
try:
I1 = (i for i, v in enumerate(rate) if v > 0.05).__next__()
except StopIteration:
I1 = len(hyp)
features[j * 15 + 4] = np.log(I1 * 2 + eps)
try:
I2 = (i for i, v in enumerate(rate) if v > 0.1).__next__()
except StopIteration:
I2 = len(hyp)
                features[j * 15 + 5] = np.log(I2 * 2 + eps)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# TODO:
# - scaling of freq distributions
import os
import sys
import math
import json
import struct
import numpy as np
import matplotlib.pyplot as plt
from common import DataPreset, write_preset_to_file, save_plot
# Available presets:
# * simple_freq
# * simple_phase
# * simple_cluster
# * simple_anti_phase
# * [custom]
def get_simple_preset(preset, g_dtype=np.float64):
if preset == 'simple_freq': # setup for freq sync
N = 8
k = np.zeros((N,N), dtype=g_dtype)
omega = np.ones(N, dtype=g_dtype)
phase = np.zeros(N, dtype=g_dtype)
i = 0
while i < (N-1):
if i%2:
a = 4.0
else:
a = 2.0
k[i+1,i] = k[i,i+1] = a
i += 1
#k[N-1,0] = k[0,N-1] = 4.0 # driving oscillator ???
k *= 1.0/np.sqrt(3.0)
omega[0] = omega[2] = omega[4] = omega[6] = 3.0 * np.sqrt(3.0)
omega[1] = omega[3] = omega[5] = omega[7] = 1.0 * np.sqrt(3.0)
elif preset == 'simple_phase': # setup for phase sync
N = 8
k = np.zeros((N,N), dtype=g_dtype)
omega = np.ones(N, dtype=g_dtype)
phase = np.zeros(N, dtype=g_dtype)
i = 0
while i < (N-1):
if i%2:
a = 2
else:
a = 1
k[i+1,i] = k[i,i+1] = a
i += 1
k[N-1,0] = k[0,N-1] = 2
# all omegas == 1
phase = np.array([0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4, 1.6], dtype=g_dtype)
elif preset == 'simple_cluster': # setup for cluster sync
N = 8
k = np.zeros((N,N), dtype=g_dtype)
omega = np.ones(N, dtype=g_dtype)
phase = np.zeros(N, dtype=g_dtype)
e1 = 1.0/32.0
e2 = 1.0/34.0
e3 = 1.0/35.0
e4 = np.sqrt(3.0)
k = np.array([[0., 3., 0., e1, e1, e1, e3, e3],
[3., 0., 2., e1, e1, e1, e3, e3],
[0., 2., 0., e1, e1, e1, e3, e3],
[e1, e1, e1, 0., e4, 0., e2, e2],
[e1, e1, e1, e4, 0., 1., e2, e2],
[e1, e1, e1, 0., 1., 0., e2, e2],
[e3, e3, e3, e2, e2, e2, 0., 1.],
[e3, e3, e3, e2, e2, e2, 1., 0.]], dtype=g_dtype)
omega[0] = omega[1] = omega[2] = 3.0/32.0 + (3.0*np.sqrt(3.0))/35.0
omega[3] = omega[4] = omega[5] = 1.0/34.0 + (2.0*np.sqrt(3.0))/35.0
omega[6] = omega[7] = np.sqrt(3.0)/70.0
phase = np.array([0.5, 1.0, 3.0, 4.0, 2.0, 1.5, 3.0, 3.5], dtype=g_dtype)
elif preset == 'simple_anti_phase': # setup for anti-phase sync
N = 7
        k = np.zeros((N,N), dtype=g_dtype)
# -----------------------------------------------------------
# demonstrates how to normalize data within each operating regime with multi-layer perceptron
#
# (C) 2020 <NAME>, Lisbon, Portugal
# Released under GNU Public License (GPL)
# email <EMAIL>
# -----------------------------------------------------------
import matplotlib.pylab as plt
import pandas as pd
import numpy as np
from keras import optimizers
from sklearn.cluster import KMeans
from keras.layers import Dense
from keras.models import Sequential
from keras import initializers
from keras import backend as K
###########################################
#
# Auxiliary reading functions
#
###########################################
np.random.seed(7)
import argparse
from datetime import datetime
import h5py
import numpy as np
import tensorflow as tf
import importlib
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = BASE_DIR
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'models'))
sys.path.append(os.path.join(ROOT_DIR, '../utils'))
import tf_util
import json
from commons import check_mkdir, force_mkdir
from geometry_utils import *
from progressbar import ProgressBar
from subprocess import call
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--model', type=str, default='model_ins', help='Model name [default: model]')
parser.add_argument('--category', type=str, nargs='+', help='Category name [default: Chair]')
parser.add_argument('--level_id', type=int, default=3, help='Level ID [default: 3]')
parser.add_argument('--num_ins', type=int, default=200, help='Max Number of Instance [default: 200]')
parser.add_argument('--log_dir', type=str, default='log', help='Log dir [default: log]')
parser.add_argument('--valid_dir', type=str, default='valid', help='Valid dir [default: valid]')
parser.add_argument('--num_point', type=int, default=10000, help='Point Number [default: 10000]')
parser.add_argument('--batch_size', type=int, default=1, help='Batch Size during training [default: 1]')
parser.add_argument('--margin_same', type=float, default=1.0, help='Double hinge loss margin: same semantic [default: 1]')
FLAGS = parser.parse_args()
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
GPU_INDEX = FLAGS.gpu
MODEL = importlib.import_module(FLAGS.model) # import network module
MODEL_FILE = os.path.join(ROOT_DIR, 'models', FLAGS.model+'.py')
LOG_DIR = FLAGS.log_dir
CKPT_DIR = os.path.join(LOG_DIR, 'trained_models')
if not os.path.exists(LOG_DIR):
print('ERROR: log_dir %s does not exist! Please Check!' % LOG_DIR)
exit(1)
LOG_DIR = os.path.join(LOG_DIR, FLAGS.valid_dir)
check_mkdir(LOG_DIR)
for category in FLAGS.category:
# load meta data files
stat_in_fn = '../../stats/after_merging_label_ids/%s-level-%d.txt' % (category, FLAGS.level_id)
print('Reading from ', stat_in_fn)
with open(stat_in_fn, 'r') as fin:
part_name_list = [item.rstrip().split()[1] for item in fin.readlines()]
print('Part Name List: ', part_name_list)
data_in_dir = '../../data/ins_seg_h5_for_sgpn/%s-%d/' % (category, FLAGS.level_id)
val_h5_fn_list = []
for item in os.listdir(data_in_dir):
if item.endswith('.h5') and item.startswith('val-'):
val_h5_fn_list.append(os.path.join(data_in_dir, item))
NUM_CLASSES = len(part_name_list)
print('Semantic Labels: ', NUM_CLASSES)
NUM_CLASSES = 1
print('force Semantic Labels: ', NUM_CLASSES)
NUM_INS = FLAGS.num_ins
print('Number of Instances: ', NUM_INS)
def load_data(fn):
out = h5py.File(fn, 'r')
pts = out['pts'][:, :NUM_POINT, :]
semseg_one_hot = out['semseg_one_hot'][:, :NUM_POINT, :]
semseg_mask = out['semseg_mask'][:, :NUM_POINT]
insseg_one_hot = out['insseg_one_hot'][:, :NUM_POINT, :]
insseg_mask = out['insseg_mask'][:, :NUM_POINT]
out.close()
semseg_one_hot = np.ones((semseg_one_hot.shape[0], semseg_one_hot.shape[1], NUM_CLASSES))
return pts, semseg_one_hot, semseg_mask, insseg_one_hot, insseg_mask
# Adapted from https://github.com/laughtervv/SGPN/blob/master/utils/test_utils.py#L11-L92
def Get_Ths(pts_corr, seg, ins, ths, ths_, cnt):
pts_in_ins = {}
for ip, pt in enumerate(pts_corr):
if ins[ip] in pts_in_ins.keys():
pts_in_curins_ind = pts_in_ins[ins[ip]]
pts_notin_curins_ind = (~(pts_in_ins[ins[ip]])) & (seg==seg[ip])
hist, bin = np.histogram(pt[pts_in_curins_ind], bins=20)
numpt_in_curins = np.sum(pts_in_curins_ind)
numpt_notin_curins = np.sum(pts_notin_curins_ind)
if numpt_notin_curins > 0:
tp_over_fp = 0
ib_opt = -2
for ib, b in enumerate(bin):
if b == 0:
break
tp = float(np.sum(pt[pts_in_curins_ind] < bin[ib])) / float(numpt_in_curins)
fp = float(np.sum(pt[pts_notin_curins_ind] < bin[ib])) / float(numpt_notin_curins)
if tp <= 0.5:
continue
if fp == 0. and tp > 0.5:
ib_opt = ib
break
if tp/fp > tp_over_fp:
tp_over_fp = tp / fp
ib_opt = ib
if tp_over_fp > 4.:
ths[seg[ip]] += bin[ib_opt]
ths_[seg[ip]] += bin[ib_opt]
cnt[seg[ip]] += 1
else:
pts_in_curins_ind = (ins == ins[ip])
pts_in_ins[ins[ip]] = pts_in_curins_ind
pts_notin_curins_ind = (~(pts_in_ins[ins[ip]])) & (seg==seg[ip])
hist, bin = np.histogram(pt[pts_in_curins_ind], bins=20)
numpt_in_curins = np.sum(pts_in_curins_ind)
numpt_notin_curins = np.sum(pts_notin_curins_ind)
if numpt_notin_curins > 0:
tp_over_fp = 0
ib_opt = -2
for ib, b in enumerate(bin):
if b == 0:
break
tp = float(np.sum(pt[pts_in_curins_ind]<bin[ib])) / float(numpt_in_curins)
fp = float(np.sum(pt[pts_notin_curins_ind]<bin[ib])) / float(numpt_notin_curins)
if tp <= 0.5:
continue
if fp == 0. and tp > 0.5:
ib_opt = ib
break
if tp / fp > tp_over_fp:
tp_over_fp = tp / fp
ib_opt = ib
if tp_over_fp > 4.:
ths[seg[ip]] += bin[ib_opt]
ths_[seg[ip]] += bin[ib_opt]
cnt[seg[ip]] += 1
return ths, ths_, cnt
def valid():
with tf.Graph().as_default():
with tf.device('/gpu:'+str(GPU_INDEX)):
pointclouds_ph, ptsseglabel_ph, ptsgroup_label_ph, _, _, _ = \
MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT, NUM_INS, NUM_CLASSES) # B x N x 3
is_training_ph = tf.placeholder(tf.bool, shape=())
group_mat_label = tf.matmul(ptsgroup_label_ph, tf.transpose(ptsgroup_label_ph, perm=[0, 2, 1]))
net_output = MODEL.get_model(pointclouds_ph, NUM_CLASSES, FLAGS.margin_same, is_training_ph)
loader = tf.train.Saver()
# Create a session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
config.log_device_placement = False
sess = tf.Session(config=config)
# Load pretrained model
ckptstate = tf.train.get_checkpoint_state(CKPT_DIR)
if ckptstate is not None:
LOAD_MODEL_FILE = os.path.join(CKPT_DIR, os.path.basename(ckptstate.model_checkpoint_path))
loader.restore(sess, LOAD_MODEL_FILE)
print("Model loaded in file: %s" % LOAD_MODEL_FILE)
else:
print("Fail to load modelfile: %s" % CKPT_DIR)
# Start to compute statistics on the validation set
ths = np.zeros(NUM_CLASSES)
#!/usr/bin/env python3
import numpy as np
import poppy
import time
from astropy import units as u
from astropy.constants import codata2014 as const
from astropy.modeling.blackbody import blackbody_lambda
from functools import reduce
from .inject_images import InjectCompanion
from .my_warnings import warnings
class SubtractImages(InjectCompanion):
'''
This class is meant to be inherited by a KlipRetrieve() class instance, not
used individually.
Creates KLIP bases, carries out PSF subtraction, saves the results, and
also calculates and saves information about contrast and separation in the
resulting subtracted images.
The key methods that handle calculation and new object creation are...
- self._generate_klip_proj(): uses the reference images as a library
from which to make KLIP projections of each slice of each target image.
Its output is self.klip_proj, an HDUList of these projections.
- self._generate_theo_klip_proj() achieves the same result when
self.align_style is 'theoretical', though the path is different
due to the different structure of reference images in that scenario.
- self._generate_contrasts(): uses the result of the previous method to
calculate radial profiles for different versions of the target images.
It outputs four HDULists -- self.pre_prof_hdu, self.post_prof_hdu,
self.photon_prof_hdu, and self.pre_avg_hdu -- with contrast versus
separation information for every slice of each target image, before and
after subtraction.
'''
def __init__(self):
super().__init__()
def _count_photons(self,
temp_star=6000*u.K, rad_star=1*u.solRad, dist=1.5*u.pc,
wv=4*u.micron, exp_time=2000*u.second, throughput=.3):
'''
***Something in here is incorrect?***
Returns the number of photons received by a detector based on the
stellar and instrument parameters specified as arguments in astropy
units.
Remember that photon counting has Poisson-type error, so photon noise is
the square root of this function's result. A fuller explanation of the
process is available in `subtract_psfs.ipynb` (coming soon?).
'''
# interpet unitless quantities (e.g. source_proj below) in radians
u.set_enabled_equivalencies(u.dimensionless_angles())
# calculate stellar attributes
#lum_star = const.sigma_sb * temp_star * np.pi * (rad_star)**2
#flux_bol = lum_star / (4 * np.pi * dist**2)
#source_proj = np.arctan(rad_star / dist)**2 # exact
source_proj = (rad_star / dist)**2 # approximated
# define JWST info
diam_jwst = 6.5 * u.m
area_scope = np.pi * (diam_jwst / 2)**2
#wv = np.mean([self.lo_wv, self.hi_wv]) * u.m
# resolve_pwr = (len(self.wvlnths) * np.mean([self.lo_wv, self.hi_wv])
# / (self.hi_wv - self.lo_wv))
#wv_resolution = wv / resolve_pwr
# (not all KlipCreate sessions use resolve_pwr,
# so need a safer way to calc resolution)
#wv = np.mean([self.lo_wv, self.hi_wv]) * u.m # not needed
wv_resolution = (self.hi_wv - self.lo_wv) * u.m / len(self.wvlnths)
# approximating that each wavelength slice is the same width
# calculate blackbody radiation & photon info based on target wavelength
with warnings.catch_warnings(record=True) as w:
# ignore astropy 4's blackbody-related deprecation warning, for now
warnings.simplefilter('ignore')
bb_rad = blackbody_lambda(wv, temp_star)
photon_nrg = const.h * const.c / wv
# get number of photons received by detector and resulting noise
num_photons = (throughput * area_scope * source_proj * wv_resolution
*photon_nrg**(-1) * exp_time * bb_rad).decompose().to('')
#photon_noise = np.sqrt(num_phot)
return num_photons#, photon_noise
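# Illustrative sketch (not from the original file): the per-slice photon-noise
# fraction used later in this class can be derived as
#   n = self._count_photons(wv=4e-6 * u.m, dist=40 * u.pc)
#   noise_frac = np.sqrt(n) / n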
def _get_klip_basis(self, ref, explain=None, modes=None, verbose=False):
'''
Use a a Karhunen-Loève transform to create a set of basis vectors from a
reference library to be used for KLIP projection later on.
Argument `ref` is a numpy array (not HDUList) of some number of
reference images.
Argument `explain` is the fraction of variance you want explained by
`ref`'s eigenvalues, throwing out those aren't needed after the KL
transform.
Argument `modes` is the explicit maximum number of eigenvalues to keep.
(You can use either `explain` or `modes`, but not both.)
Pass this function's output to self._project_onto_basis() to complete
the KLIP projection process.
'''
if (explain is not None) + (modes is not None) > 1:
raise ValueError('only one of `explain`/`modes` can have a value')
elif (explain is not None) + (modes is not None) < 1:
raise ValueError('either `explain` or `modes` must have a value')
my_pr = lambda *args, **kwargs: (print(*args, **kwargs)
if verbose else None)
# flatten psf arrays and find eigenv*s for the result
ref_flat = ref.reshape(ref.shape[0], -1)
e_vals, e_vecs = np.linalg.eig(np.dot(ref_flat, ref_flat.T))
my_pr('********', "eigenvalues are {e_vals}", sep='\n')
# sort eigenvalues ("singular values") in descending order
desc = np.argsort(e_vals)[::-1] # sort indices of e_vals in desc. order
sv = np.sqrt(e_vals[desc]).reshape(-1, 1)
# do the KL transform
Z = np.dot(1 / sv * e_vecs[:,desc].T, ref_flat)
my_pr(f"Z shape is {Z.shape}")
if explain:
test_vars = [np.sum(e_vals[0:i+1]) / np.sum(e_vals) > explain
for i, _ in enumerate(e_vals)]
modes = np.argwhere(np.array(test_vars) == True).flatten()[0] + 1
# limit Z to a certain number of bases
Z_trim = Z[:modes,:]
my_pr(f"trimmed Z shape is {Z_trim.shape}")
return Z_trim
def _project_onto_basis(self, target, Z_trim, verbose=False):
'''
Help estimate PSF intensity by projecting a target image onto a KL
basis made from the reference images.
Argument `target` is a 2D array representing a slice from some target
observation's data cube of images.
Argument `Z_trim` is the result of self._get_klip_basis() for the
target image's reference image library at the same wavelength.
Separating that method from this one helps with the speed of
self._generate_klip_proj() since target images with the same wavelength
share the same library of reference images.
'''
my_pr = lambda *args, **kwargs: (print(*args, **kwargs)
if verbose else None)
# flatten target arrays
targ_flat = target.flatten()
if verbose:
my_pr(f"target shape is {targ_flat.shape}", end='\n********')
# project onto KL basis to estimate PSF intensity
proj = np.dot(targ_flat, Z_trim.T)
klipped = np.dot(Z_trim.T, proj).reshape(target.shape)
return klipped
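# Illustrative sketch (not from the original file; names borrowed from the methods
# below): for one wavelength slice, a KLIP estimate of the stellar PSF in a target
# frame is obtained with
#   Z = self._get_klip_basis(refs_at_wvln, modes=len(self.positions))
#   psf_est = self._project_onto_basis(tgt_at_wvln, Z)
# and the subtracted frame is tgt_at_wvln - psf_est.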
def _generate_klip_proj(self, cube_list, verbose=True):
'''
Generates a HDUList of KLIP projections for every slice of each
post-padded target image data cube. The result is used in the
post-subtraction plotting methods.
Argument `cube_list` is an HDUList of *aligned*, NaN-less data cubes.
`self.stackable_cubes` is usually the only appropriate choice here.
Argument `verbose` is a boolean that, when True, allows the method to
print progress messages.
'''
print_ast = lambda text: print('\n********', text, '********', sep='\n')
my_pr = lambda txt, **kwargs: (print_ast(txt, **kwargs)
if verbose else None)
my_pr('generating KLIP projections of target images '
'in `self.klip_proj`...')
# collect all images in one 4D array.
# dimensions are: number of ref & tgt data cubes,
# number of wavelength slices, and the 2D shape of a post-padded image
cube_list = self._pklcopy(cube_list)
all_cubes = np.array([cube.data for cube in cube_list])
# separate the reference and target data cubes
refs_all = all_cubes[:len(self.positions)]
tgts_all = all_cubes[len(self.positions):]
# set up hdulist of klip projections for all slices of all target images
# (otherwise, has the same HDU structure as stackable_cubes)
klip_proj = self._pklcopy(cube_list[len(self.positions):])
# carry out klip projections for all slices of every target image
# and insert them into the HDUList generated above
for sl in range(tgts_all.shape[1]): # number of wavelength slices
refs_sliced = refs_all[:,sl]
tgts_sliced = tgts_all[:,sl]
ref_klip_basis = self._get_klip_basis(refs_sliced,
#explain=.99)
modes=len(self.positions))
for j, tg in enumerate(tgts_sliced):
ref_klip = self._project_onto_basis(tg, ref_klip_basis)
klip_proj[j].data[sl] = ref_klip
return klip_proj
def _generate_theo_klip_proj(self, cube_list, fine_ref_cubes, verbose=True):
'''
**Exclusively for theoretically-aligned HDULists.**
Produces the same output as `self._generate_klip_proj()` -- an HDUList
of KLIP projections for every slice of each post-padded target image
data cube. The result is used in the post-subtraction plotting methods.
Argument `cube_list` is an HDUList of *aligned*, NaN-less data cubes.
`self.stackable_cubes` is usually the only appropriate argument here;
its latter half of aligned target images is what will be used here.
Argument `fine_ref_cubes` is a list of 4D arrays. Each array is a set of
"fine-aligned" reference cubes that was made to match a certain target.
The references in index -1 of `fine_ref_cubes` match with the target
cube at index -1 of `cube_list`, and so on.
Argument `verbose` is a boolean that, when True, allows the method to
print progress messages.
'''
print_ast = lambda text: print('\n********', text, '********', sep='\n')
my_pr = lambda txt: print_ast(txt) if verbose else None
my_pr('generating KLIP projections of target images '
'in `self.klip_proj`...')
# set up hdulist of klip projections for all slices of all target images
# (otherwise, has the same HDU structure as cube_list)
cube_list = self._pklcopy(cube_list)
fine_tgt_cubes = [cb.data for cb in cube_list[len(self.positions):]]
klip_proj = self._pklcopy(cube_list[len(self.positions):])
# carry out klip projections for all slices of every target image
# and insert them into the HDUList generated above
for im in range(len(fine_tgt_cubes)): # number of target images
# change shape of data array to match current target
for sl in range(fine_tgt_cubes[im].shape[0]): # wvln slices per cube
# get all slices of target and assoc. refs at this wavelength
refs_at_wvln = fine_ref_cubes[im][:, sl]
tgt_at_wvln = fine_tgt_cubes[im][sl]
# project the target onto the basis formed by these references
ref_klip_basis = self._get_klip_basis(refs_at_wvln,
#explain=.99)
modes=len(self.positions))
ref_klip = self._project_onto_basis(tgt_at_wvln, ref_klip_basis)
# save the result as a slice of this projected target image
klip_proj[im].data[sl] = ref_klip
return klip_proj
def _generate_contrasts(self, cube_list, verbose=True):
'''
Generate and return HDULists containing contrast/separation curves for
every available wavelength over a few variations of the target images:
1. The radial profile of standard deviation in the original target
images ("pre-subtraction", from `self.pre_prof_hdu`)
2. The radial profile of standard deviation in the original target
images minus their corresponding KLIP projections ("post-subtraction",
from `self.post_prof_hdu`)
3. The photon noise (INCOMPLETE)
4. The radial profile of average pixel value in the original target
image (from `self.pre_avg_hdu`).
All of these are normalized by the brightest pixel in the original
target image. Each of these measurements is its own HDUList with length
equal to the number of target images in the directory. Each entry is a
stack of 2D separation/contrast arrays (in that order), the number in
the stack matches the number of wavelength slices available in
self.stackable_cubes.
Argument `cube_list` is an HDUList of *aligned*, NaN-less data cubes.
`self.stackable_cubes` is usually the only appropriate choice here.
Argument `verbose` is a boolean that, when True, allows the method to
print progress messages.
'''
print_ast = lambda text: print('********', text, '********', sep='\n')
my_pr = lambda txt: print_ast(txt) if verbose else None
my_pr('generating pre-/post-subtraction contrast curves...')
pix_len = .1
# collect all slices of all target images in one list (diff. shapes)
cube_list = self._pklcopy(cube_list)[len(self.positions):]
tgt_imgs = [cube.data for cube in cube_list]
# collect all slices of all KLIP projections in one list (diff. shapes)
prj_imgs = [cube.data for cube in self.klip_proj]
# create contrast/separation HDUlists to be filled
# (will have same headers as stackable_cubes but data will change)
pre_prof_hdu = self._pklcopy(cube_list)
post_prof_hdu = self._pklcopy(cube_list)
photon_prof_hdu = self._pklcopy(cube_list)
pre_avg_hdu = self._pklcopy(cube_list)
# get per-slice photon noise values
num_phot = np.array([self._count_photons(wv=wvl * u.m, dist=40*u.pc)
for wvl in self.wvlnths])
phot_noise_frac = np.sqrt(num_phot) / num_phot
# create HDULists of post-subtraction and photon noise scenes
# (cube_list is already an HDU of pre-subtraction target images)
subt_list = self._pklcopy(cube_list)
phot_list = self._pklcopy(cube_list)
for ext, cube in enumerate(cube_list):
subt_list[ext].data -= prj_imgs[ext]
phot_list[ext].data = np.sqrt(phot_list[ext].data)
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pickle
from combined_thresh import combined_thresh
from perspective_transform import perspective_transform
def line_fit(binary_warped, T):
"""
Find and fit lane lines
"""
# Assuming you have created a warped binary image called "binary_warped"
# Take a histogram of the bottom half of the image
# axis=0 sums along columns
img_roi_y = 700 # [1] y coordinate of the ROI's top-left corner
img_roi_x = 0
img_roi_height = binary_warped.shape[0] # [2] height of the ROI
img_roi_width = binary_warped.shape[1] # [3] width of the ROI
img_roi = binary_warped[img_roi_y:img_roi_height, img_roi_x:img_roi_width]
# cv2.imshow('img_roi', img_roi)
histogram = np.sum(img_roi[0 :, :], axis=0)
# histogram = np.sum(img_roi[int(np.floor(binary_warped.shape[0]*(1-T))):,:], axis=0)
# plt.show()
# Create an output image to draw on and visualize the result
out_img = (np.dstack((binary_warped, binary_warped, binary_warped))*255).astype('uint8')
cv2.rectangle(out_img, (img_roi_x, img_roi_y), (img_roi_width, img_roi_height), (255, 0, 0), 5)
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
midpoint = np.int(histogram.shape[0]/2)
leftx_base = np.argmax(histogram[100:midpoint]) + 100
rightx_base = np.argmax(histogram[midpoint:-100]) + midpoint
# PMH: if one side has no detected lane line (no histogram peak), copy a search start point based on the other lane line
if (leftx_base == 100):
leftx_base = np.argmax(histogram[midpoint:-100]) - midpoint
if (rightx_base == midpoint):
rightx_base = np.argmax(histogram[100:midpoint]) + midpoint
# Choose the number of sliding windows
nwindows = 9
# Set height of windows (about 128 px for this image size)
window_height = np.int(binary_warped.shape[0]/nwindows)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Current positions to be updated for each window
leftx_current = leftx_base
rightx_current = rightx_base
leftx_current_last = leftx_base
rightx_current_last = rightx_base
leftx_current_next = leftx_base
rightx_current_next = rightx_base
# Set the width of the windows +/- margin
margin = 150
# Set minimum number of pixels found to recenter window
minpix = 50
# Create empty lists to receive left and right lane pixel indices
left_lane_inds = []
right_lane_inds = []
# plt.figure(2)
# plt.subplot(2, 1, 1)
# plt.plot(histogram)
# Step through the windows one by one
for window in range(nwindows-2):
# Identify window boundaries in x and y (and right and left)
win_y_low = binary_warped.shape[0] - (window+1)*window_height
win_y_high = binary_warped.shape[0] - window*window_height
leftx_current = leftx_current_next
rightx_current = rightx_current_next
# Set the left and right boundaries of the sliding windows
win_xleft_low = leftx_current - margin
win_xleft_high = leftx_current + margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
# Draw the windows on the visualization image
cv2.rectangle(out_img, (win_xleft_low, win_y_low), (win_xleft_high, win_y_high), (0, 255, 0), 2)
cv2.rectangle(out_img, (win_xright_low, win_y_low), (win_xright_high, win_y_high), (0, 255, 0), 2)
# plt.subplot(2, 1, 2)
# plt.imshow(out_img, cmap='gray', vmin=0, vmax=1)
# Identify the nonzero pixels in x and y within the window
good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
# Append these indices to the lists
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
# If you found > minpix pixels, recenter next window on their mean position
if len(good_left_inds) > minpix:
leftx_current_next = np.int(np.mean(nonzerox[good_left_inds]))
else:
if window > 2:
leftx_current_next = leftx_current + (leftx_current - leftx_current_last)
# good_left_inds = int((win_y_low + win_y_high) / 2) * binary_warped.shape[0] + leftx_current
# left_lane_inds.append(np.int64(good_left_inds)) # 20180516 pmh: add the window's midpoint as a fitting point
else:
leftx_current_next = leftx_base
if len(good_right_inds) > minpix:
rightx_current_next = np.int(np.mean(nonzerox[good_right_inds]))
else:
if window > 2:
rightx_current_next = rightx_current + (rightx_current - rightx_current_last)
# right_lane_inds.append(good_right_inds)
else:
rightx_current_next = rightx_base
leftx_current_last = leftx_current
rightx_current_last = rightx_current
# plt.figure(2)
# plt.subplot(2, 1, 1)
# plt.plot(histogram)
# plt.subplot(2, 1, 2)
# plt.imshow(out_img, cmap='gray', vmin=0, vmax=1)
# cv2.imshow('out_img', out_img)
# plt.savefig('D:/CIDI/data/L/line_fit_histo/')
# plt.close()
# save_file = '%s%06d%s' % ('D:/data/PNG20180206dataAllRectJPG/result1/', num_i+100000, 'Lr.jpg')
# fig1 = plt.gcf()
# fig1.set_size_inches(18.5, 10.5)
# plt.savefig(save_file)
# Concatenate the arrays of indices
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
# Extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
ret = {}
# If lane pixels were found on both sides, fit a second-order polynomial to each
if (len(left_lane_inds) > 0) & (len(right_lane_inds) > 0):
# Fit a second order polynomial to each
left_fit = np.polyfit(lefty, leftx, 2)
right_fit = np.polyfit(righty, rightx, 2)
# Return a dict of relevant variables
ret['left_fit'] = left_fit
ret['right_fit'] = right_fit
ret['nonzerox'] = nonzerox
ret['nonzeroy'] = nonzeroy
ret['out_img'] = out_img
ret['left_lane_inds'] = left_lane_inds
ret['right_lane_inds'] = right_lane_inds
ret['histo'] = histogram
return ret
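# Usage sketch (assumed upstream pipeline from the imports above; T value is illustrative):
#   ret = line_fit(binary_warped, T=0.3)
#   left_c, right_c = calc_curve(ret['left_lane_inds'], ret['right_lane_inds'],
#                                ret['nonzerox'], ret['nonzeroy'])
# Note: the T argument is only used by the commented-out histogram variant above.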
def tune_fit(binary_warped, left_fit, right_fit):
"""
Given a previously fit line, quickly try to find the line based on previous lines
"""
# Assume you now have a new warped binary image
# from the next frame of video (also called "binary_warped")
# It's now much easier to find line pixels!
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
margin = 100
left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + left_fit[2] + margin)))
right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + right_fit[2] + margin)))
# Again, extract left and right line pixel positions
leftx = nonzerox[left_lane_inds] # boolean indexing keeps the entries where the mask is True
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
# If we don't find enough relevant points, return all None (this means error)
min_inds = 10
if lefty.shape[0] < min_inds or righty.shape[0] < min_inds:
return None
# Fit a second order polynomial to each
left_fit = np.polyfit(lefty, leftx, 2)
right_fit = np.polyfit(righty, rightx, 2)
# Generate x and y values for plotting
ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
# Return a dict of relevant variables
ret = {}
ret['left_fit'] = left_fit
ret['right_fit'] = right_fit
ret['nonzerox'] = nonzerox
ret['nonzeroy'] = nonzeroy
ret['left_lane_inds'] = left_lane_inds
ret['right_lane_inds'] = right_lane_inds
return ret
def viz1(binary_warped, ret, save_file=None):
"""
Visualize each sliding window location and predicted lane lines, on binary warped image
save_file is a string representing where to save the image (if None, then just display)
"""
# Grab variables from ret dictionary
left_fit = ret['left_fit']
right_fit = ret['right_fit']
nonzerox = ret['nonzerox']
nonzeroy = ret['nonzeroy']
out_img = ret['out_img']
left_lane_inds = ret['left_lane_inds']
right_lane_inds = ret['right_lane_inds']
# Generate x and y values for plotting
ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
plt.imshow(out_img)
plt.plot(left_fitx, ploty, color='yellow')
plt.plot(right_fitx, ploty, color='yellow')
plt.xlim(0, 1280)
plt.ylim(720, 0)
if save_file is None:
plt.show()
else:
plt.savefig(save_file)
plt.gcf().clear()
def viz2(binary_warped, ret, save_file=None):
"""
Visualize the predicted lane lines with margin, on binary warped image
save_file is a string representing where to save the image (if None, then just display)
"""
# Grab variables from ret dictionary
left_fit = ret['left_fit']
right_fit = ret['right_fit']
nonzerox = ret['nonzerox']
nonzeroy = ret['nonzeroy']
left_lane_inds = ret['left_lane_inds']
right_lane_inds = ret['right_lane_inds']
# Create an image to draw on and an image to show the selection window
out_img = (np.dstack((binary_warped, binary_warped, binary_warped))*255).astype('uint8')
window_img = np.zeros_like(out_img)
# Color in left and right line pixels
out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
# Generate x and y values for plotting
ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0])
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
# Generate a polygon to illustrate the search window area
# And recast the x and y points into usable format for cv2.fillPoly()
margin = 100 # NOTE: Keep this in sync with *_fit()
left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])
left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin, ploty])))])
left_line_pts = np.hstack((left_line_window1, left_line_window2))
right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))])
right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin, ploty])))])
right_line_pts = np.hstack((right_line_window1, right_line_window2))
# Draw the lane onto the warped blank image
cv2.fillPoly(window_img, np.int_([left_line_pts]), (0,255, 0))
cv2.fillPoly(window_img, np.int_([right_line_pts]), (0,255, 0))
result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
plt.imshow(result)
plt.plot(left_fitx, ploty, color='yellow')
plt.plot(right_fitx, ploty, color='yellow')
plt.xlim(0, 1280)
plt.ylim(720, 0)
if save_file is None:
plt.show()
else:
plt.savefig(save_file)
plt.gcf().clear()
def calc_curve(left_lane_inds, right_lane_inds, nonzerox, nonzeroy):
"""
Calculate radius of curvature in meters
"""
# y_eval = 1160 # 720p video/image, so last (lowest on screen) y index is 719
y_eval = 700 # image resolution is 1920x1080; evaluate curvature near the lower third of the image
# Define conversions in x and y from pixels space to meters (pixel pitch 5.86 um)
ym_per_pix = 5.86/1000000 # meters per pixel in y dimension (was 30/720)
xm_per_pix = 5.86/1000000 # meters per pixel in x dimension (was 3.7/700)
# Extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
# Fit new polynomials to x,y in world space
left_fit_cr = np.polyfit(lefty*ym_per_pix, leftx*xm_per_pix, 2)
right_fit_cr = np.polyfit(righty*ym_per_pix, rightx*xm_per_pix, 2)
# Calculate the new radii of curvature: radius = 1 / K, where curvature K = |2a| / (1 + (2ax + b)^2)^1.5
left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / (2*left_fit_cr[0])
right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / (2*right_fit_cr[0])
# Now our radius of curvature is in meters
return left_curverad, right_curverad
def calc_vehicle_offset(undist, left_fit, right_fit):
"""
Calculate vehicle offset from lane center, in meters
"""
# Calculate vehicle center offset in pixels
top_y = 1
bottom_y = undist.shape[0] - 1
top_x_left = left_fit[0]*(top_y**2) + left_fit[1]*top_y + left_fit[2]
top_x_right = right_fit[0]*(top_y**2) + right_fit[1]*top_y + right_fit[2]
bottom_x_left = left_fit[0]*(bottom_y**2) + left_fit[1]*bottom_y + left_fit[2]
bottom_x_right = right_fit[0]*(bottom_y**2) + right_fit[1]*bottom_y + right_fit[2]
vehicle_offset = undist.shape[1]/2 - (bottom_x_left + bottom_x_right)/2
# Convert pixel offset to meters
xm_per_pix = 5.86/1000000 # meters per pixel in x dimension
vehicle_offset *= xm_per_pix
return vehicle_offset, bottom_x_left, bottom_x_right, top_x_left, top_x_right
def final_viz(undist, left_fit, right_fit, m_inv, left_curve, right_curve, vehicle_offset, is_left_filtered, is_right_filtered):
"""
Final lane line prediction visualized and overlayed on top of original image
"""
# Generate x and y values for plotting
ploty = np.linspace(0, undist.shape[0]-1, undist.shape[0])
# fitted second-order polynomials
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
# Use the inverse perspective matrix to map four points from the warped view back to the original image
a1 = [left_fitx[0], ploty[0], 1]
a2 = [left_fitx[undist.shape[0]-150], ploty[undist.shape[0]-150], 1]
a3 = [right_fitx[0], ploty[0], 1]
a4 = [right_fitx[undist.shape[0]-150], ploty[undist.shape[0]-150], 1]
a = [a1, a2, a3, a4]
rr1 = np.dot(a, m_inv.T)
xx1 = np.ceil(rr1[:, 0] / rr1[:, 2]) # x coordinates after the inverse perspective transform; np.ceil rounds toward +inf; note the division by the scale factor
yy1 = np.ceil(rr1[:, 1] / rr1[:, 2]) # y coordinates
# Map the lane-line points back to the original coordinate system via the inverse perspective transform
left_points = []
right_points = []
for i in range(len(left_fitx)):
left_point = [left_fitx[i], ploty[i], 1]
right_point = [right_fitx[i], ploty[i], 1]
left_points.append(left_point)
right_points.append(right_point)
left_point_inv_trans = np.dot(left_points, m_inv.T)
right_point_inv_trans = np.dot(right_points, m_inv.T)
# Lane-line coordinates mapped back by the inverse perspective transform; note the division by the scale factor. np.ceil rounds toward +inf.
left_point_inv_xx = np.ceil(left_point_inv_trans[:, 0] / left_point_inv_trans[:, 2])
left_point_inv_yy = np.ceil(left_point_inv_trans[:, 1] / left_point_inv_trans[:, 2])
right_point_inv_xx = np.ceil(right_point_inv_trans[:, 0] / right_point_inv_trans[:, 2])
right_point_inv_yy = np.ceil(right_point_inv_trans[:, 1] / right_point_inv_trans[:, 2])
# Create an image to draw the lines on
# warp_zero = np.zeros_like(warped).astype(np.uint8)
# color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
# color_warp = np.zeros((720, 1280, 3), dtype='uint8') # NOTE: Hard-coded image dimensions
color_warp = np.zeros((undist.shape[0], undist.shape[1]+500, 3), dtype='uint8')
# Recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
pts = np.hstack((pts_left, pts_right))
# Convert the inverse-transformed lane-line points into the format cv2.fillPoly() expects
pts_inv_left = np.array([np.transpose(np.vstack([left_point_inv_xx, left_point_inv_yy]))])
import numpy as np
import torch
import torch.nn.functional as F
import torch.nn as nn
import sys
from torch.autograd import Variable
import math
from torch import Tensor
class Conv_0(nn.Module):
"""Convolutional block as comparision with sinc filters"""
def __init__(self, out_channels, kernel_size, stride=1, padding=2, dilation=1, bias=False, groups=1, is_mask=False):
super(Conv_0, self).__init__()
self.conv = nn.Conv1d(1, out_channels, kernel_size, stride, padding, dilation, groups)
self.channel_number = out_channels
self.is_mask = is_mask
def forward(self, x, is_training):
x = self.conv(x)
if is_training and self.is_mask:
v = self.channel_number
f = np.random.uniform(low=0.0, high=16)
f = int(f)
f0 = np.random.randint(0, v-f)
x[:, f0:f0+f, :] = 0
return x
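# Note: when is_training and is_mask are both set, Conv_0 zeroes a random
# contiguous block of up to 16 output channels, a frequency-masking style
# augmentation applied to the learned filterbank.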
class SincConv_fast(nn.Module):
"""Sinc-based convolution
Parameters
----------
in_channels : `int`
Number of input channels. Must be 1.
out_channels : `int`
Number of filters.
kernel_size : `int`
Filter length.
sample_rate : `int`, optional
Sample rate. Defaults to 16000.
Usage
-----
See `torch.nn.Conv1d`
Reference
---------
<NAME>, <NAME>,
"Speaker Recognition from raw waveform with SincNet".
https://arxiv.org/abs/1808.00158
"""
@staticmethod
def to_mel(hz):
return 2595 * np.log10(1 + hz / 700)
@staticmethod
def to_hz(mel):
return 700 * (10 ** (mel / 2595) - 1)
def __init__(self, out_channels, kernel_size, sample_rate=16000, in_channels=1,
stride=1, padding=2, dilation=1, bias=False, groups=1, min_low_hz=50, min_band_hz=50,
freq_scale='mel', is_trainable=False, is_mask=False):
super(SincConv_fast,self).__init__()
if in_channels != 1:
#msg = (f'SincConv only support one input channel '
# f'(here, in_channels = {in_channels:d}).')
msg = "SincConv only support one input channel (here, in_channels = {%i})" % (in_channels)
raise ValueError(msg)
self.out_channels = out_channels+4
self.kernel_size = kernel_size
self.is_mask = is_mask
# Forcing the filters to be odd (i.e, perfectly symmetrics)
if kernel_size%2==0:
self.kernel_size=self.kernel_size+1
self.stride = stride
self.padding = padding
self.dilation = dilation
if bias:
raise ValueError('SincConv does not support bias.')
if groups > 1:
raise ValueError('SincConv does not support groups.')
self.sample_rate = sample_rate
self.min_low_hz = min_low_hz
self.min_band_hz = min_band_hz
# initialize filterbanks such that they are equally spaced in Mel scale
# low_hz = 30
# high_hz = self.sample_rate / 2 - (self.min_low_hz + self.min_band_hz)
# self.min_low_hz = 300
# self.min_band_hz = 300
low_hz = 0
high_hz = self.sample_rate / 2 - (self.min_low_hz + self.min_band_hz)
if freq_scale == 'mel':
mel = np.linspace(self.to_mel(low_hz),
self.to_mel(high_hz),
self.out_channels + 1)
hz = self.to_hz(mel)
elif freq_scale == 'lem':
mel = np.linspace(self.to_mel(low_hz),
self.to_mel(high_hz),
self.out_channels + 1)
hz = self.to_hz(mel)
hz=np.abs(np.flip(hz)-1)
elif freq_scale == 'linear':
hz = np.linspace(low_hz,
high_hz,
self.out_channels + 1)
# filter lower frequency (out_channels, 1)
self.low_hz_ = nn.Parameter(torch.Tensor(hz[:-1]).view(-1, 1), requires_grad=is_trainable)
# filter frequency band (out_channels, 1)
self.band_hz_ = nn.Parameter(torch.Tensor(np.diff(hz)).view(-1, 1), requires_grad=is_trainable)
# Hamming window
#self.window_ = torch.hamming_window(self.kernel_size)
n_lin=torch.linspace(0, (self.kernel_size/2)-1, steps=int((self.kernel_size/2))) # computing only half of the window
self.window_=0.54-0.46*torch.cos(2*math.pi*n_lin/self.kernel_size);
# (1, kernel_size/2)
n = (self.kernel_size - 1) / 2.0
self.n_ = 2*math.pi*torch.arange(-n, 0).view(1, -1) / self.sample_rate # Due to symmetry, I only need half of the time axes
def forward(self, waveforms, is_training=False):
"""
Parameters
----------
waveforms : `torch.Tensor` (batch_size, 1, n_samples)
Batch of waveforms.
Returns
-------
features : `torch.Tensor` (batch_size, out_channels, n_samples_out)
Batch of sinc filters activations.
"""
self.n_ = self.n_.to(waveforms.device)
self.window_ = self.window_.to(waveforms.device)
low = self.min_low_hz + torch.abs(self.low_hz_)
high = torch.clamp(low + self.min_band_hz + torch.abs(self.band_hz_),self.min_low_hz,self.sample_rate/2)
band=(high-low)[:,0]
f_times_t_low = torch.matmul(low, self.n_)
f_times_t_high = torch.matmul(high, self.n_)
band_pass_left=((torch.sin(f_times_t_high)-torch.sin(f_times_t_low))/(self.n_/2))*self.window_ # Equivalent of Eq.4 of the reference paper (SPEAKER RECOGNITION FROM RAW WAVEFORM WITH SINCNET). I just have expanded the sinc and simplified the terms. This way I avoid several useless computations.
band_pass_center = 2*band.view(-1,1)
band_pass_right= torch.flip(band_pass_left,dims=[1])
band_pass=torch.cat([band_pass_left,band_pass_center,band_pass_right],dim=1)
band_pass = band_pass / (2*band[:,None])
self.filters = (band_pass).view(
self.out_channels, 1, self.kernel_size)
self.filters = self.filters[:self.out_channels-4, :, :]
if is_training and self.is_mask:
v = self.filters.shape[0]
f = np.random.uniform(low=0.0, high=16)
f = int(f)
f0 = np.random.randint(0, v-f)
self.filters[f0:f0+f, :, :] = 0
output = F.conv1d(waveforms, self.filters, stride=self.stride,
padding=self.padding, dilation=self.dilation,
bias=None, groups=1)
return output
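# Usage sketch (shapes and values assumed, not from the original file):
#   layer = SincConv_fast(out_channels=70, kernel_size=129, sample_rate=16000)
#   feats = layer(torch.randn(4, 1, 64600), is_training=False)
# feats has shape (batch, out_channels, time) after the 1-D convolution.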
class SincConv(nn.Module):
@staticmethod
def to_mel(hz):
return 2595 * np.log10(1 + hz / 700)
@staticmethod
def to_hz(mel):
return 700 * (10 ** (mel / 2595) - 1)
def __init__(self, out_channels, kernel_size, in_channels=1, sample_rate=16000,
stride=1, padding=0, dilation=1, bias=False, groups=1, freq_scale='mel', is_mask=False):
super(SincConv,self).__init__()
if in_channels != 1:
msg = "SincConv only support one input channel (here, in_channels = {%i})" % (in_channels)
raise ValueError(msg)
self.out_channels = out_channels+1
self.kernel_size = kernel_size
self.sample_rate = sample_rate
self.is_mask = is_mask
# Forcing the filters to be odd (i.e, perfectly symmetrics)
if kernel_size%2 == 0:
self.kernel_size = self.kernel_size + 1
self.stride = stride
self.padding = padding
self.dilation = dilation
if bias:
raise ValueError('SincConv does not support bias.')
if groups > 1:
raise ValueError('SincConv does not support groups.')
# initialize filterbanks using Mel scale
NFFT = 512
f = int(self.sample_rate/2) * np.linspace(0,1,int(NFFT/2)+1)
# using mel scale
if freq_scale == 'mel':
print('***Initialising Mel scale Sinc Layer...***')
f_mel = self.to_mel(f) # Hz to mel conversion
f_mel_max = np.max(f_mel)
f_mel_min = np.min(f_mel)
filband_widths_mel = np.linspace(f_mel_min, f_mel_max, self.out_channels+2)
filbandwidthsf = self.to_hz(filband_widths_mel) # Mel to Hz conversion
self.freq = filbandwidthsf[:self.out_channels]
# using Inverse-mel scale
elif freq_scale == 'lem':
print('***Initialising Inverse-Mel scale Sinc Layer...***')
f_mel = self.to_mel(f) # Hz to mel conversion
f_mel_max = np.max(f_mel)
from __future__ import print_function, division
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
n = 10
s = 1.0
x = np.linspace(0, n - 1, n + (n - 1) * 20)
def rho(r, k):
if k == 0:
y = np.exp(-(r/s)**2)
else:
e = np.exp(1)
y = (e/k**2)**(k**2) * (r/s)**(2*k**2) * np.exp(-(r/s)**2)
return y
plt.figure(figsize=(6, 3))
colors = cm.rainbow(np.linspace(1, 0, 7))
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.animation as animation
from mpl_toolkits.axes_grid1 import make_axes_locatable
import h5py
import numexpr as ne
"""File that generates density and spin expectation video plots for a given dataset specifically for the EPP phase."""
def makeSpinMagExpectationVideo(filename):
global cont
# ----------------- Initialising contours and cvals for spin plots ---------------------------
fig_spin, axes_spin = plt.subplots(1, 3, sharey=True, figsize=(10, 6))
for axis in axes_spin:
axis.set_aspect('equal')
dens_max = np.max([abs(psi_plus[:, :, -1]) ** 2, abs(psi_0[:, :, -1]) ** 2, abs(psi_minus[:, :, -1]) ** 2])
cvals_spin = np.linspace(0, 1, num=25, endpoint=True) # Spin contour values
cvals_dens = np.linspace(0, dens_max, num=25, endpoint=True) # Contour values
# Initialising contours
cont_splus = axes_spin[0].contourf(X, Y, abs(psi_plus[:, :, 0]) ** 2, cvals_dens, cmap='gnuplot')
cont_spin = axes_spin[1].contourf(X, Y, spin_expec_mag[:, :, 0], cvals_spin, cmap='PuRd')
cont_sminus = axes_spin[2].contourf(X, Y, abs(psi_minus[:, :, 0]) ** 2, cvals_dens, cmap='gnuplot')
cont = [cont_splus, cont_spin, cont_sminus]
# Density colorbar
divider = make_axes_locatable(axes_spin[2])
cax = divider.append_axes('right', size='5%', pad=0.05)
cbar_dens = fig_spin.colorbar(cont_sminus, cax=cax, orientation='vertical')
cbar_dens.formatter.set_powerlimits((0, 0))
cbar_dens.update_ticks()
# Spin expec mag colorbar
divider = make_axes_locatable(axes_spin[1])
cax = divider.append_axes('right', size='5%', pad=0.05)
cbar_spin = fig_spin.colorbar(cont_spin, cax=cax, orientation='vertical', ticks=[0, 1])
cbar_spin.ax.set_yticklabels(['0', '1'])
fig_spin.tight_layout(rect=[0.02, 0.03, 1, 0.75])
# -----------------------------------------------------------------------------------------------
# Sets axes limits and titles
for ax in axes_spin:
ax.set_xlim(np.min(x[:]), np.max(x[:]))
ax.set_ylim(np.min(y[:]), np.max(y[:]))
ax.set_xlabel(r'$x / \xi$')
if ax == axes_spin[0]:
ax.set_title(r'$|\psi_+|^2$')
ax.set_ylabel(r'$y / \xi$')
if ax == axes_spin[2]:
ax.set_title(r'$|\psi_-|^2$')
if ax == axes_spin[1]:
ax.set_aspect('equal')
ax.set_title(r'$|<F>|$')
# Animation function
def animate_spin(i):
global cont
for contour in cont:
for c in contour.collections:
c.remove()
axes_spin[0].contourf(X, Y, abs(psi_plus[:, :, i]) ** 2, cvals_dens, cmap='gnuplot')
axes_spin[1].contourf(X, Y, spin_expec_mag[:, :, i], cvals_spin, cmap='PuRd')
axes_spin[2].contourf(X, Y, abs(psi_minus[:, :, i]) ** 2, cvals_dens, cmap='gnuplot')
cont = [axes_spin[0], axes_spin[1], axes_spin[2]]
print('On spin iteration %i' % (i + 1))
plt.suptitle(r'$\tau$ = %2f' % (Nframe * dt * i), y=0.8)
return cont
# Calls the animation function and saves the result
anim = animation.FuncAnimation(fig_spin, animate_spin, frames=num_of_frames, repeat=False)
anim.save('../../images/unsorted/{}'.format(filename), dpi=200,
writer=animation.FFMpegWriter(fps=60, codec="libx264", extra_args=['-pix_fmt', 'yuv420p']))
print('Spin video saved successfully.')
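# Usage sketch (output name is hypothetical): after the dataset below is loaded,
#   makeSpinMagExpectationVideo('{}_spin.mp4'.format(filename))
# writes the animation to ../../images/unsorted/.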
# ---------------------------------------------------------------------------------------------------------------------
# Loading data
# ---------------------------------------------------------------------------------------------------------------------
filename = input('Enter filename: ')
data_path = '../../data/{}.hdf5'.format(filename)
data_file = h5py.File(data_path, 'r')
# Loading grid array data:
x, y = data_file['grid/x'], data_file['grid/y']
X, Y = np.meshgrid(x[:], y[:])
Nx, Ny = x[:].size, y[:].size
dx, dy = x[1] - x[0], y[1] - y[0]
dkx, dky = 2 * np.pi / (Nx * dx), 2 * np.pi / (Ny * dy)
kxx = np.arange(-Nx // 2, Nx // 2) * dkx
kyy = np.arange(-Nx // 2, Nx // 2) * dky
Kx, Ky = np.meshgrid(kxx, kyy)
# Loading time variables:
Nt, dt, Nframe = np.array(data_file['time/Nt']), np.array(data_file['time/dt']), np.array(data_file['time/Nframe'])
# Three component wavefunction
psi_plus = data_file['wavefunction/psi_plus']
psi_0 = data_file['wavefunction/psi_0']
psi_minus = data_file['wavefunction/psi_minus']
num_of_frames = psi_plus.shape[-1]
# Magnitude of spin expectation
print('Calculating spin vectors...')
spin_expec_mag = np.empty((Nx, Ny, num_of_frames), dtype='float32')
import numpy as np
import pandas as pd
import datetime as dt
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler as SScale
from sklearn.neighbors import DistanceMetric
from final_epl_eda import LeagueDFEDA
from scipy import stats
from scipy.spatial.distance import euclidean, jaccard, cosine
import time
import requests
import csv
import re
import pickle
def unpickler(file):
with open(file, 'rb') as f:
return pickle.load(f)
def pickler(input, output):
with open(output, 'wb') as f:
pickle.dump(input,f,pickle.HIGHEST_PROTOCOL)
class SimilarityDF(pd.DataFrame): # create a class for standardizing and vectorizing team stats
@property
def _constructor(self):
return SimilarityDF
def vectorizer(self,team_lst,df_type): # standardizes and vectorizes the input for all epl teams and a single nfl team
if df_type == 'EPL':
temp_df = self.set_index(['squad','season'])
elif df_type == 'NFL':
temp_df = self.set_index(['team','year'])
if len(team_lst) > 1:
stack_start = temp_df.loc[team_lst[0]]
stack_start_std = SScale().fit_transform(stack_start).ravel()
stack_start_std = stack_start_std.reshape(1,-1)
for team in team_lst[1:]:
team_df = temp_df.loc[team]
team_df_std = SScale().fit_transform(team_df).ravel()
team_df_std = team_df_std.reshape(1,-1)
stack_start_std = np.concatenate((stack_start_std,team_df_std),axis=0)
else:
stack_start = temp_df.loc[team_lst]
stack_start_std = SScale().fit_transform(stack_start).ravel()
stack_start_std = stack_start_std.reshape(1,-1)
return stack_start_std
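# Usage sketch (team labels are placeholders; assumes the 'squad'/'season' columns above):
#   epl_vecs = SimilarityDF(epl_df).vectorizer(['Arsenal', 'Chelsea'], 'EPL')
# Each row is one team's standardized season-by-stat block, flattened to 1-D.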
class Distances(): # create a class to calculate the distances between vectors for recommendations
def __init__(self,team_vector,league_matrix,weights=None):
self.team_vector = team_vector
self.league_matrix = league_matrix
self.weights = weights
def euclidean_dist_calc(self,weights): # calculates the euclidean distance
weights = self.weights
mat_shape = self.league_matrix.shape
if not weights:
weights = np.ones((1,mat_shape[1]))
if self.league_matrix.shape[0] > 1:
euc_dist = euclidean(self.team_vector,np.matrix(self.league_matrix[0]),weights)
for u in np.matrix(self.league_matrix[1:]):
euc = euclidean(self.team_vector,u,weights)
euc_dist = np.hstack((euc_dist,euc))
else:
euc_dist = euclidean(self.team_vector,self.league_matrix,weights)
return euc_dist
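# Note: the `weights` parameter above is immediately overridden by self.weights,
# so per-feature weights must be supplied when the Distances object is built.
# The result is one distance per row of league_matrix, stacked with np.hstack.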
def cosine_sim_calc(self): # calculates the cosine similarity (not used)
mat_shape = self.league_matrix.shape
if self.league_matrix.shape[0] > 1:
cos_start = np.dot(self.team_vector,np.matrix(self.league_matrix[0]).T)/(np.linalg.norm(self.team_vector) *
np.linalg.norm(np.matrix(self.league_matrix[0])))
cos_sim = 0.5 + 0.5 * cos_start
for u in np.matrix(self.league_matrix[1:]):
cos_cont = np.dot(self.team_vector,u.T)/(np.linalg.norm(self.team_vector) * np.linalg.norm(u))
cos_append = 0.5 + 0.5 * cos_cont
cos_sim = np.hstack((cos_sim,cos_append))
else:
costheta = np.dot(self.team_vector,self.league_matrix.T)/(np.linalg.norm(self.team_vector) *
np.linalg.norm(self.league_matrix.T))
cos_sim = 0.5 + 0.5 * costheta
return cos_sim
def cosine_dist_calc(self,weights): # calculates the cosine distance
weights = self.weights
mat_shape = self.league_matrix.shape
if weights is None:
weights = np.ones((self.league_matrix.shape[1]))
if mat_shape[0] > 1:
cos_dist = cosine(self.team_vector, np.matrix(self.league_matrix[0]), weights)
import os, subprocess, time, signal,random
import gym
from gym import error, spaces
from gym import utils
from gym.utils import seeding
import numpy as np
import cv2
import torch
# try:
# import hfo_py
# except ImportError as e:
# raise error.DependencyNotInstalled("{}. (HINT: you can install HFO dependencies with 'pip install gym[soccer].)'".format(e))
import logging
logger = logging.getLogger(__name__)
class ActionEnv(gym.Env, utils.EzPickle):
def __init__(self):
self.action_space = spaces.Discrete(11)
self.observation_space = spaces.Tuple((
spaces.Discrete(112),
spaces.Discrete(112),
spaces.Discrete(3)))
self.seed()
def init_data(self,videos_infos, opts, transform, do_action,overlap_ratio):
self.videos_infos=videos_infos
self.opts = opts
self.transform = transform
# self.args = args
self.do_action = do_action
self.overlap_ratio = overlap_ratio
self.videos = [] # list of clips dict
self.RL_steps = self.opts['train']['RL_steps'] # clip length
vid_idxs = np.random.permutation(len(self.videos_infos))
# print("num videos: %d "%len(vid_idxs))
for vid_idx in vid_idxs:
# dict consist of set of clips in ONE video
clips = {
'img_path': [],
'frame_start': [],
'frame_end': [],
'init_bbox': [],
'end_bbox': [],
'vid_idx': [],
}
vid_info = self.videos_infos[vid_idx]
if self.RL_steps is None:
self.RL_steps = len(vid_info['gt']) - 1
vid_clip_starts = [0]
vid_clip_ends = [len(vid_info['gt']) - 1]
else:
vid_clip_starts = np.array(range(len(vid_info['gt']) - self.RL_steps))
vid_clip_starts = np.random.permutation(vid_clip_starts)
vid_clip_ends = vid_clip_starts + self.RL_steps
# number of clips in one video
num_train_clips = min(self.opts['train']['rl_num_batches'], len(vid_clip_starts))
# print("num_train_clips of vid " + str(vid_idx) + ": ", str(num_train_clips))
n_clip=0
# for clipIdx in range(num_train_clips):
for clipIdx in range(len(vid_clip_starts)):
if n_clip>=num_train_clips:
break
frameStart = vid_clip_starts[clipIdx]
frameEnd = vid_clip_ends[clipIdx]
n_obj1=len(vid_info['trackid'][frameStart])
n_obj2 = len(vid_info['trackid'][frameEnd])
if n_obj1==0 or n_obj2==0:
continue
# print("debug")
n_clip+=1
n_obj=min(n_obj1,n_obj2)
choose_obj=random.randint(0,n_obj-1)
clips['img_path'].append(vid_info['img_files'][frameStart:frameEnd])
clips['frame_start'].append(frameStart)
clips['frame_end'].append(frameEnd)
clips['init_bbox'].append(vid_info['gt'][frameStart][choose_obj])
clips['end_bbox'].append(vid_info['gt'][frameEnd][choose_obj])
clips['vid_idx'].append(vid_idx)
# if num_train_clips > 0: # small hack
if len(clips['vid_idx']) > 0: # small hack
self.videos.append(clips)
self.clip_idx = -1 # hack for reset function
self.vid_idx = 0
self.state = None # current bbox
self.gt = None # end bbox
self.current_img = None # current image frame
self.current_img_cuda = None
self.current_patch = None # current patch (transformed)
self.current_patch_cuda = None
self.current_img_idx = 0
self.finish_epoch=False
# self.box_history_clip=[]
def reset_env(self):
self.clip_idx = -1 # hack for reset function
self.vid_idx = 0
self.state = None # current bbox
self.gt = None # end bbox
self.current_img = None # current image frame
self.current_img_cuda = None
self.current_patch = None # current patch (transformed)
self.current_patch_cuda = None
self.current_img_idx = 0
self.finish_epoch = False
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self,action):
info = {
'finish_epoch': False
}
if action == self.opts['stop_action']:
reward, done,_ = self.go_to_next_frame()
info['finish_epoch'] = self.finish_epoch
else: # just go to the next patch (still same frame/current_img)
reward = 0
done = False
iou_bef=self.overlap_ratio(self.gt, self.state)
# do action
self.state = self.do_action(self.state, self.opts, action, self.current_img.shape)
iou_aft=self.overlap_ratio(self.gt, self.state)
iou_change=iou_aft-iou_bef
iou_ratio=20
if iou_change>0.01:
reward=iou_change*iou_ratio-0.1
elif iou_change<0.01:
reward=iou_change*iou_ratio-0.1
self.current_patch, _, _, _ = self.transform(self.current_img, self.state)
return self.current_patch,self.state, reward, done, info
def reset(self):
while True:
self.clip_idx += 1
# if the clips in a video are finished... go to the next video
if self.clip_idx >= len(self.videos[self.vid_idx]['frame_start']):
self.vid_idx += 1
self.clip_idx = 0
if self.vid_idx >= len(self.videos):
self.vid_idx = 0
# one epoch finish... need to reinitialize the class to use this again randomly
self.finish_epoch=True
return None
# initialize state, gt, current_img_idx, current_img, and current_patch with new clip
self.state = self.videos[self.vid_idx]['init_bbox'][self.clip_idx]
self.gt = self.videos[self.vid_idx]['end_bbox'][self.clip_idx]
# if self.state==[0,0,0,0]:
# print("debug")
# frameStart = self.videos[self.vid_idx]['frame_start'][self.clip_idx]
#self.current_img_idx = 1 # self.current_img_idx = frameStart + 1
self.current_img_idx = 1 #the frameStart(the 0th img,idx:0) is for initial, the current_img(idx:1) is for training.
self.current_img = cv2.imread(self.videos[self.vid_idx]['img_path'][self.clip_idx][self.current_img_idx])
# imgcuda = self.current_img.copy()
imgcuda = self.current_img.astype(np.float32)
self.current_img=torch.from_numpy(imgcuda).cuda()
self.current_patch, _, _, _ = self.transform(self.current_img, np.array(self.state))
#Modified by zb --- 2019-11-16 22:11:16 --- to check : at this step ,the data of patch seems have some problem\
#because some data results are under zero
if self.gt != '': # small hack
break
return self.current_patch
def get_current_patch(self):
return self.current_patch
def get_current_train_vid_idx(self):
return self.videos[self.vid_idx]['vid_idx'][0]
def get_state(self):
return self.state
def get_current_img(self):
return self.current_img
def go_to_next_frame(self):
# self.box_history_clip = []
self.current_img_idx += 1
# finish_epoch = False
# if already in the end of a clip...
#aaa=self.current_img_idx
#bbb=len(self.videos[self.vid_idx]['img_path'][self.clip_idx])
if self.current_img_idx >= len(self.videos[self.vid_idx]['img_path'][self.clip_idx]):
# calculate reward before reset
reward = self.reward_original(np.array(self.gt), np.array(self.state))
# print("reward=" + str(reward))
# reset (reset state, gt, current_img_idx, current_img and current_img_patch)
# self.finish_epoch,_ = self.reset() # go to the next clip (or video)
self.reset()
done = True # done means one clip is finished
# just go to the next frame (means new patch and new image)
else:
# reward = 0
reward = self.reward_original(np.array(self.gt), np.array(self.state))
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from skimage.transform import resize
from random import random
import _pickle
from scipy import misc
from scipy import io
from skimage import color
import glob
import gzip
import math
import numpy as np
import os
from scipy.io import loadmat as loadmat
from six.moves import urllib
from six.moves import xrange
import sys
import tarfile
import tensorflow as tf
FLAGS = tf.flags.FLAGS
def create_dir_if_needed(dest_directory):
"""
Create directory if doesn't exist
:param dest_directory:
:return: True if everything went well
"""
if not tf.gfile.IsDirectory(dest_directory):
tf.gfile.MakeDirs(dest_directory)
return True
def maybe_download(file_urls, directory):
"""
Download a set of files in temporary local folder
:param directory: the directory where to download
:return: a tuple of filepaths corresponding to the files given as input
"""
# Create directory if doesn't exist
assert create_dir_if_needed(directory)
# This list will include all URLS of the local copy of downloaded files
result = []
# For each file of the dataset
for file_url in file_urls:
# Extract filename
filename = file_url.split('/')[-1]
# If downloading from GitHub, remove suffix ?raw=True from local filename
if filename.endswith("?raw=true"):
filename = filename[:-9]
# Deduce local file url
#filepath = os.path.join(directory, filename)
filepath = directory + '/' + filename
# Add to result list
result.append(filepath)
# Test if file already exists
if not tf.gfile.Exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(file_url, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
return result
def image_whitening(data):
"""
Subtracts mean of image and divides by adjusted standard variance (for
stability). Operations are per image but performed for the entire array.
:param image: 4D array (ID, Height, Weight, Channel)
:return: 4D array (ID, Height, Weight, Channel)
"""
assert len(np.shape(data)) == 4
# Compute number of pixels in image
nb_pixels = np.shape(data)[1] * np.shape(data)[2] * np.shape(data)[3]
# Subtract mean
mean = np.mean(data, axis=(1,2,3))
ones = np.ones(np.shape(data)[1:4], dtype=np.float32)
for i in xrange(len(data)):
data[i, :, :, :] -= mean[i] * ones
# Compute adjusted standard variance
adj_std_var = np.maximum(np.ones(len(data), dtype=np.float32) / math.sqrt(nb_pixels), np.std(data, axis=(1,2,3))) #NOLINT(long-line)
# Divide image
for i in xrange(len(data)):
data[i, :, :, :] = data[i, :, :, :] / adj_std_var[i]
print(np.shape(data))
return data
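
# Illustrative sketch (not used by the loaders below, function name is ours):
# the same per-image standardization as image_whitening, written without the
# explicit Python loop. Assumes a float32 array shaped (N, H, W, C).
def whiten_batch_vectorized(data):
  """Per-image standardization: (x - mean) / max(std, 1/sqrt(num_pixels))."""
  data = np.asarray(data, dtype=np.float32)
  nb_pixels = int(np.prod(data.shape[1:]))
  mean = data.mean(axis=(1, 2, 3), keepdims=True)
  adj_std_var = np.maximum(data.std(axis=(1, 2, 3), keepdims=True),
                           1.0 / math.sqrt(nb_pixels))
  return (data - mean) / adj_std_var
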
def extract_svhn(local_url):
"""
Extract a MATLAB matrix into two numpy arrays with data and labels
:param local_url:
:return:
"""
with tf.gfile.Open(local_url, mode='rb') as file_obj:
# Load MATLAB matrix using scipy IO
dict = loadmat(file_obj)
# Extract each dictionary (one for data, one for labels)
data, labels = dict["X"], dict["y"]
# Set np type
data = np.asarray(data, dtype=np.float32)
labels = np.asarray(labels, dtype=np.int32)
# Transpose data to match TF model input format
data = data.transpose(3, 0, 1, 2)
# Fix the SVHN labels which label 0s as 10s
labels[labels == 10] = 0
# Fix label dimensions
labels = labels.reshape(len(labels))
return data, labels
def unpickle_cifar_dic(file):
"""
Helper function: unpickles a dictionary (used for loading CIFAR)
:param file: filename of the pickle
:return: tuple of (images, labels)
"""
fo = open(file, 'rb')
dict = _pickle.load(fo, encoding='latin1')
fo.close()
return dict['data'], dict['labels']
def extract_cifar10(local_url, data_dir):
"""
Extracts the CIFAR-10 dataset and return numpy arrays with the different sets
:param local_url: where the tar.gz archive is located locally
:param data_dir: where to extract the archive's file
:return: a tuple (train data, train labels, test data, test labels)
"""
# These numpy dumps can be reloaded to avoid performing the pre-processing
# if they exist in the working directory.
# Changing the order of this list will ruin the indices below.
preprocessed_files = ['/cifar10_train.npy',
'/cifar10_train_labels.npy',
'/cifar10_test.npy',
'/cifar10_test_labels.npy']
all_preprocessed = True
for file in preprocessed_files:
if not tf.gfile.Exists(data_dir + file):
all_preprocessed = False
break
if all_preprocessed:
# Reload pre-processed training data from numpy dumps
    with tf.gfile.Open(data_dir + preprocessed_files[0], mode='rb') as file_obj:
      train_data = np.load(file_obj)
    with tf.gfile.Open(data_dir + preprocessed_files[1], mode='rb') as file_obj:
      train_labels = np.load(file_obj)

    # Reload pre-processed testing data from numpy dumps
    with tf.gfile.Open(data_dir + preprocessed_files[2], mode='rb') as file_obj:
      test_data = np.load(file_obj)
    with tf.gfile.Open(data_dir + preprocessed_files[3], mode='rb') as file_obj:
      test_labels = np.load(file_obj)
else:
# Do everything from scratch
# Define lists of all files we should extract
train_files = ["data_batch_" + str(i) for i in xrange(1,6)]
test_file = ["test_batch"]
cifar10_files = train_files + test_file
# Check if all files have already been extracted
need_to_unpack = False
for file in cifar10_files:
if not tf.gfile.Exists(file):
need_to_unpack = True
break
# We have to unpack the archive
if need_to_unpack:
tarfile.open(local_url, 'r:gz').extractall(data_dir)
# Load training images and labels
images = []
labels = []
for file in train_files:
# Construct filename
filename = file
# Unpickle dictionary and extract images and labels
images_tmp, labels_tmp = unpickle_cifar_dic(filename)
# Append to lists
images.append(images_tmp)
labels.append(labels_tmp)
# Convert to numpy arrays and reshape in the expected format
train_data = np.asarray(images, dtype=np.float32).reshape((50000,3,32,32))
train_data = np.swapaxes(train_data, 1, 3)
train_labels = np.asarray(labels, dtype=np.int32).reshape(50000)
# Save so we don't have to do this again
np.save(data_dir + preprocessed_files[0], train_data)
np.save(data_dir + preprocessed_files[1], train_labels)
# Construct filename for test file
filename = data_dir + "/cifar-10-batches-py/" + test_file[0]
# Load test images and labels
    test_data, test_labels = unpickle_cifar_dic(filename)
# Convert to numpy arrays and reshape in the expected format
test_data = np.asarray(test_data,dtype=np.float32).reshape((10000,3,32,32))
test_data = np.swapaxes(test_data, 1, 3)
    test_labels = np.asarray(test_labels, dtype=np.int32).reshape(10000)
# Save so we don't have to do this again
np.save(data_dir + preprocessed_files[2], test_data)
np.save(data_dir + preprocessed_files[3], test_labels)
return train_data, train_labels, test_data, test_labels
# Data from http://cswww.essex.ac.uk/mv/allfaces/faces95.html
def extract_faces(data_dir):
preprocessed_files = ['/faces_train.npy',
'/faces_train_labels.npy',
'/faces_test.npy',
'/faces_test_labels.npy']
all_preprocessed = True
for file in preprocessed_files:
if not tf.gfile.Exists(data_dir + file):
all_preprocessed = False
break
# if False:
if all_preprocessed:
# Reload pre-processed training data from numpy dumps
train_data = np.load(data_dir + preprocessed_files[0])
train_labels = np.load(data_dir + preprocessed_files[1])
# Reload pre-processed testing data from numpy dumps
test_data = np.load(data_dir + preprocessed_files[2])
test_labels = np.load(data_dir + preprocessed_files[3])
else:
# Do everything from scratch
# Define lists of all files we should extract
train_files = glob.iglob("/home/Cooper/faces95/*")
# Load training images and labels
images = []
labels = []
test_images = []
test_labels = []
for label,person in enumerate(train_files):
pictures = list(glob.iglob(person+"/*"))
for picture in pictures[0:15]:
images.append(resize(misc.imread(picture),(30,30)))
labels.append(label)
for picture in pictures[15:20]:
test_images.append(resize(misc.imread(picture),(30,30)))
test_labels.append(label)
    # Shuffle images and labels with the same random permutation
    p = np.random.permutation(len(images))
    images = [images[i] for i in p]
    labels = [labels[i] for i in p]
# Convert to numpy arrays and reshape in the expected format
train_data = np.asarray(images, dtype=np.float32).reshape((72*15,3,30,30))
train_data = np.swapaxes(train_data, 1, 3)
train_labels = np.asarray(labels, dtype=np.int32).reshape(72*15)
# Save so we don't have to do this again
np.save(data_dir + preprocessed_files[0], train_data)
np.save(data_dir + preprocessed_files[1], train_labels)
# Convert to numpy arrays and reshape in the expected format
test_data = np.asarray(test_images,dtype=np.float32).reshape((72*5,3,30,30))
test_data = np.swapaxes(test_data, 1, 3)
test_labels = np.asarray(test_labels, dtype=np.int32).reshape(72*5)
# Save so we don't have to do this again
np.save(data_dir + preprocessed_files[2], test_data)
np.save(data_dir + preprocessed_files[3], test_labels)
return train_data, train_labels, test_data, test_labels
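
# Illustrative helper (ours, not called elsewhere): shuffle images and labels
# with one shared random permutation, mirroring what extract_faces does above.
def shuffle_in_unison(images, labels):
  perm = np.random.permutation(len(images))
  return [images[i] for i in perm], [labels[i] for i in perm]
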
def extract_mnist_data(filename, num_images, image_size, pixel_depth):
"""
Extract the images into a 4D tensor [image index, y, x, channels].
Values are rescaled from [0, 255] down to [-0.5, 0.5].
"""
# if not os.path.exists(file):
if not tf.gfile.Exists(filename+".npy"):
with gzip.open(filename) as bytestream:
bytestream.read(16)
buf = bytestream.read(image_size * image_size * num_images)
data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)
data = (data - (pixel_depth / 2.0)) / pixel_depth
data = data.reshape(num_images, image_size, image_size, 1)
#np.save(filename, data)
return data
else:
    with tf.gfile.Open(filename+".npy", mode='rb') as file_obj:
      return np.load(file_obj)
def extract_mnist_labels(filename, num_images):
"""
Extract the labels into a vector of int64 label IDs.
"""
# if not os.path.exists(file):
if not tf.gfile.Exists(filename+".npy"):
with gzip.open(filename) as bytestream:
bytestream.read(8)
buf = bytestream.read(1 * num_images)
labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int32)
#np.save(filename, labels)
return labels
else:
    with tf.gfile.Open(filename+".npy", mode='rb') as file_obj:
      return np.load(file_obj)
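
# Minimal usage sketch (ours): load the raw MNIST archives that ld_mnist()
# downloads and check their shapes. Assumes the standard archive file names
# are already present in data_dir.
def mnist_sanity_check(data_dir):
  images = extract_mnist_data(data_dir + '/train-images-idx3-ubyte.gz', 60000, 28, 1)
  labels = extract_mnist_labels(data_dir + '/train-labels-idx1-ubyte.gz', 60000)
  print(images.shape, labels.shape)  # expected: (60000, 28, 28, 1) (60000,)
  return images, labels
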
def extract_netflix(data_dir):
preprocessed_files = ['/netflix_train.npy',
'/netflix_train_labels.npy',
'/netflix_valid.npy',
'/netflix_valid_labels.npy',
'/netflix_test.npy',
'/netflix_test_labels.npy']
all_preprocessed = True
for file in preprocessed_files:
if not tf.gfile.Exists(data_dir + file):
all_preprocessed = False
break
if all_preprocessed:
#Reload pre-processed training data from numpy dumps
train_data = np.load(data_dir + preprocessed_files[0])
train_labels = np.load(data_dir + preprocessed_files[1])
#Reload pre-processed validation data from numpy dumps
valid_data = np.load(data_dir + preprocessed_files[2])
    valid_labels = np.load(data_dir + preprocessed_files[3])
#Reload pre-processed testing data from numpy dumps
test_data = np.load(data_dir + preprocessed_files[4])
    test_labels = np.load(data_dir + preprocessed_files[5])
else:
#Do all pre-processing from scratch
train_file_path = "/home/logan_ford16/models/research/differential_privacy/multiple_teachers/DeepRecommender/Netflix/N3M_TRAIN/n3m.train.txt"
valid_file_path = "/home/logan_ford16/models/research/differential_privacy/multiple_teachers/DeepRecommender/Netflix/N3M_VALID/n3m.valid.txt"
test_file_path = "/home/logan_ford16/models/research/differential_privacy/multiple_teachers/DeepRecommender/Netflix/N3M_TEST/n3m.test.txt"
    with open(train_file_path, 'r') as src:
      for line in src.readlines():
        parts = line.strip().split('\t')
        if len(parts) < 3:
          raise ValueError('Encountered badly formatted line in {}'.format(train_file_path))
    # NOTE: conversion of the parsed Netflix lines into numpy arrays (and the
    # corresponding np.save calls) is not implemented here.
    return None
  return train_data, train_labels, valid_data, valid_labels, test_data, test_labels
def ld_svhn(extended=False, test_only=False):
"""
Load the original SVHN data
:param extended: include extended training data in the returned array
:param test_only: disables loading of both train and extra -> large speed up
:return: tuple of arrays which depend on the parameters
"""
# Define files to be downloaded
# WARNING: changing the order of this list will break indices (cf. below)
file_urls = ['http://ufldl.stanford.edu/housenumbers/train_32x32.mat',
'http://ufldl.stanford.edu/housenumbers/test_32x32.mat',
'http://ufldl.stanford.edu/housenumbers/extra_32x32.mat']
# Maybe download data and retrieve local storage urls
local_urls = maybe_download(file_urls, FLAGS.data_dir)
# Extra Train, Test, and Extended Train data
if not test_only:
# Load and applying whitening to train data
train_data, train_labels = extract_svhn(local_urls[0])
train_data = image_whitening(train_data)
# Load and applying whitening to extended train data
ext_data, ext_labels = extract_svhn(local_urls[2])
ext_data = image_whitening(ext_data)
# Load and applying whitening to test data
test_data, test_labels = extract_svhn(local_urls[1])
test_data = image_whitening(test_data)
if test_only:
return test_data, test_labels
else:
if extended:
# Stack train data with the extended training data
train_data = np.vstack((train_data, ext_data))
train_labels = np.hstack((train_labels, ext_labels))
return train_data, train_labels, test_data, test_labels
else:
# Return training and extended training data separately
return train_data,train_labels, test_data,test_labels, ext_data,ext_labels
def ld_cifar10(test_only=False):
"""
Load the original CIFAR10 data
:param extended: include extended training data in the returned array
:param test_only: disables loading of both train and extra -> large speed up
:return: tuple of arrays which depend on the parameters
"""
# Define files to be downloaded
file_urls = ['https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz']
# Maybe download data and retrieve local storage urls
local_urls = maybe_download(file_urls, FLAGS.data_dir)
# Extract archives and return different sets
dataset = extract_cifar10(local_urls[0], FLAGS.data_dir)
# Unpack tuple
train_data, train_labels, test_data, test_labels = dataset
# Apply whitening to input data
train_data = image_whitening(train_data)
test_data = image_whitening(test_data)
if test_only:
return test_data, test_labels
else:
return train_data, train_labels, test_data, test_labels
def ld_mnist(test_only=False):
"""
Load the MNIST dataset
:param extended: include extended training data in the returned array
:param test_only: disables loading of both train and extra -> large speed up
:return: tuple of arrays which depend on the parameters
"""
# Define files to be downloaded
# WARNING: changing the order of this list will break indices (cf. below)
file_urls = ['http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz',
'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz',
'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz',
'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz',
]
# Maybe download data and retrieve local storage urls
local_urls = maybe_download(file_urls, FLAGS.data_dir)
# Extract it into np arrays.
train_data = extract_mnist_data(local_urls[0], 60000, 28, 1)
train_labels = extract_mnist_labels(local_urls[1], 60000)
test_data = extract_mnist_data(local_urls[2], 10000, 28, 1)
test_labels = extract_mnist_labels(local_urls[3], 10000)
if test_only:
return test_data, test_labels
else:
return train_data, train_labels, test_data, test_labels
def extract_wiki(data_dir):
preprocessed_files = ['/wiki_train.npy',
'/wiki_train_labels.npy',
'/wiki_test.npy',
'/wiki_test_labels.npy']
all_preprocessed = True
for file in preprocessed_files:
if not tf.gfile.Exists(data_dir + file):
all_preprocessed = False
break
# if False:
if all_preprocessed:
# Reload pre-processed training data from numpy dumps
train_data = np.load(data_dir + preprocessed_files[0])
train_labels = np.load(data_dir + preprocessed_files[1])
# Reload pre-processed testing data from numpy dumps
test_data = np.load(data_dir + preprocessed_files[2])
test_labels = np.load(data_dir + preprocessed_files[3])
else:
# Do everything from scratch
# Define lists of all files we should extract
# Load training images and labels
images = []
labels = []
test_images = []
test_labels = []
m = io.loadmat("/home/Cooper/wiki_crop/wiki.mat")
n = len(m["wiki"]["full_path"][0][0][0])
for i,file in enumerate(m["wiki"]["full_path"][0][0][0]):
# if i > 10:
# break
print(i,n)
picture = "/home/Cooper/wiki_crop/"+file[0]
proportion = 0.8
r = random()
im = resize(color.rgb2gray(misc.imread(picture)),(256,256))
label = m["wiki"]["gender"][0][0][0][i]
if not math.isnan(label):
if r < proportion:
images.append(im)
labels.append(int(label))
else:
test_images.append(im)
test_labels.append(int(label))
# p = np.random.permutation(len(images))
# images = np.put(np.zeros(len(images)),p,images)
# labels = np.put(np.zeros(len(images)),p,labels)
# Convert to numpy arrays and reshape in the expected format
train_data = np.asarray(images, dtype=np.float32).reshape((len(labels),256,256,1))
# train_data = np.swapaxes(train_data, 1, 3)
train_labels = np.asarray(labels, dtype=np.int32).reshape(len(labels))
# Save so we don't have to do this again
np.save(data_dir + preprocessed_files[0], train_data)
np.save(data_dir + preprocessed_files[1], train_labels)
# Convert to numpy arrays and reshape in the expected format
test_data = np.asarray(test_images,dtype=np.float32).reshape((len(test_labels),256,256,1))
# test_data = np.swapaxes(test_data, 1, 3)
test_labels = np.asarray(test_labels, dtype=np.int32).reshape(len(test_labels))
# Save so we don't have to do this again
np.save(data_dir + preprocessed_files[2], test_data)
np.save(data_dir + preprocessed_files[3], test_labels)
return train_data, train_labels, test_data, test_labels
def extract_imdb(data_dir):
preprocessed_files = ['/imdb_train.npy',
'/imdb_train_labels.npy',
'/imdb_test.npy',
'/imdb_test_labels.npy']
all_preprocessed = True
for file in preprocessed_files:
if not tf.gfile.Exists(data_dir + file):
all_preprocessed = False
break
if all_preprocessed:
# Reload pre-processed training data from numpy dumps
train_data = np.load(data_dir + preprocessed_files[0])
train_labels = np.load(data_dir + preprocessed_files[1])
# Reload pre-processed testing data from numpy dumps
test_data = np.load(data_dir + preprocessed_files[2])
test_labels = np.load(data_dir + preprocessed_files[3])
else:
images = []
labels = []
test_images = []
test_labels = []
m = io.loadmat("/data/imdb_crop/imdb.mat")
n = len(m["imdb"]["full_path"][0][0][0])
for i,file in enumerate(m["imdb"]["full_path"][0][0][0]):
if i%100 == 0:
print(i,n)
picture = "/data/imdb_crop/"+file[0]
proportion = 0.8
r = random()
im = resize(color.rgb2gray(misc.imread(picture)),(100,100))
label = m["imdb"]["gender"][0][0][0][i]
if not math.isnan(label):
if r < proportion:
images.append(im)
labels.append(int(label))
else:
test_images.append(im)
test_labels.append(int(label))
# p = np.random.permutation(len(images))
# images = np.put(np.zeros(len(images)),p,images)
# labels = np.put(np.zeros(len(images)),p,labels)
# Convert to numpy arrays and reshape in the expected format
train_data = np.asarray(images, dtype=np.float32).reshape((len(labels),100,100,1))
# train_data = np.swapaxes(train_data, 1, 3)
train_labels = np.asarray(labels, dtype=np.int32).reshape(len(labels))
# Save so we don't have to do this again
    np.save(data_dir + preprocessed_files[0], train_data)
    np.save(data_dir + preprocessed_files[1], train_labels)
import sys
import numpy as np
import cv2
from imutils import face_utils
import datetime
import imutils
import time
import dlib
gray2 = None
shape2 = None
points2 = None
hullIndex = None
dt = None
# Apply affine transform calculated using srcTri and dstTri to src and
# output an image of size.
def niceTime(now):
return(round((time.time() - now) * 1000))
def applyAffineTransform(src, srcTri, dstTri, size) :
# Given a pair of triangles, find the affine transform.
warpMat = cv2.getAffineTransform( np.float32(srcTri), np.float32(dstTri) )
# Apply the Affine Transform just found to the src image
dst = cv2.warpAffine( src, warpMat, (size[0], size[1]), None, flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101 )
return dst
# Check if a point is inside a rectangle
def rectContains(rect, point) :
if point[0] < rect[0] :
return False
elif point[1] < rect[1] :
return False
elif point[0] > rect[0] + rect[2] :
return False
elif point[1] > rect[1] + rect[3] :
return False
return True
#calculate delanauy triangle
def calculateDelaunayTriangles(rect, points):
#create subdiv
subdiv = cv2.Subdiv2D(rect);
# Insert points into subdiv
for p in points:
subdiv.insert(p)
triangleList = subdiv.getTriangleList();
delaunayTri = []
pt = []
count= 0
for t in triangleList:
pt.append((t[0], t[1]))
pt.append((t[2], t[3]))
pt.append((t[4], t[5]))
pt1 = (t[0], t[1])
pt2 = (t[2], t[3])
pt3 = (t[4], t[5])
if rectContains(rect, pt1) and rectContains(rect, pt2) and rectContains(rect, pt3):
count = count + 1
ind = []
for j in range(0, 3):
for k in range(0, len(points)):
if(abs(pt[j][0] - points[k][0]) < 1.0 and abs(pt[j][1] - points[k][1]) < 1.0):
ind.append(k)
if len(ind) == 3:
delaunayTri.append((ind[0], ind[1], ind[2]))
pt = []
return delaunayTri
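
# Minimal usage sketch (ours): Delaunay triangulation over four toy points
# inside a 100x100 rectangle. Each returned triple indexes the input list.
def delaunay_demo():
    points = [(10, 10), (90, 10), (90, 90), (10, 90)]
    triangles = calculateDelaunayTriangles((0, 0, 100, 100), points)
    print(triangles)  # e.g. [(0, 1, 2), (0, 2, 3)], depending on OpenCV's split
    return triangles
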
# Warps and alpha blends triangular regions from img1 and img2 to img
def warpTriangle(img1, img2, t1, t2) :
# Find bounding rectangle for each triangle
r1 = cv2.boundingRect(np.float32([t1]))
r2 = cv2.boundingRect(np.float32([t2]))
# Offset points by left top corner of the respective rectangles
t1Rect = []
t2Rect = []
t2RectInt = []
for i in range(0, 3):
t1Rect.append(((t1[i][0] - r1[0]),(t1[i][1] - r1[1])))
t2Rect.append(((t2[i][0] - r2[0]),(t2[i][1] - r2[1])))
t2RectInt.append(((t2[i][0] - r2[0]),(t2[i][1] - r2[1])))
# Get mask by filling triangle
mask = np.zeros((r2[3], r2[2], 3), dtype = np.float32)
cv2.fillConvexPoly(mask, np.int32(t2RectInt), (1.0, 1.0, 1.0), 16, 0);
# Apply warpImage to small rectangular patches
img1Rect = img1[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]]
#img2Rect = np.zeros((r2[3], r2[2]), dtype = img1Rect.dtype)
size = (r2[2], r2[3])
img2Rect = applyAffineTransform(img1Rect, t1Rect, t2Rect, size)
img2Rect = img2Rect * mask
# Copy triangular region of the rectangular patch to the output image
img2[r2[1]:r2[1]+r2[3], r2[0]:r2[0]+r2[2]] = img2[r2[1]:r2[1]+r2[3], r2[0]:r2[0]+r2[2]] * ( (1.0, 1.0, 1.0) - mask )
img2[r2[1]:r2[1]+r2[3], r2[0]:r2[0]+r2[2]] = img2[r2[1]:r2[1]+r2[3], r2[0]:r2[0]+r2[2]] + img2Rect
def face_swap3(img_ref, detector, predictor):
gray1 = cv2.cvtColor(img_ref, cv2.COLOR_BGR2GRAY)
# detect faces in the grayscale frame
rects1 = detector(gray1, 0)
if (len(rects1) < 2): #at least 2 faces in image need to be found
return None
if is_out_of_image(rects1, gray1.shape[1], gray1.shape[0]):
return None
img1Warped = np.copy(img_ref);
shape1 = predictor(gray1, rects1[0])
points1 = face_utils.shape_to_np(shape1) #type is a array of arrays (list of lists)
if is_out_of_image_points(points1, gray1.shape[1], gray1.shape[0]): #check if points are inside the image
return None
#need to convert to a list of tuples
points1 = list(map(tuple, points1))
shape2 = predictor(gray1, rects1[1])
points2 = face_utils.shape_to_np(shape2)
if is_out_of_image_points(points2, gray1.shape[1], gray1.shape[0]): #check if points are inside the image
return None
points2 = list(map(tuple, points2))
# Find convex hull
hull1 = []
hull2 = []
hullIndex = cv2.convexHull(np.array(points2), returnPoints = False)
for i in range(0, len(hullIndex)):
hull1.append(points1[ int(hullIndex[i]) ])
hull2.append(points2[ int(hullIndex[i]) ])
# Find delanauy traingulation for convex hull points
sizeImg2 = img_ref.shape
rect = (0, 0, sizeImg2[1], sizeImg2[0])
dt = calculateDelaunayTriangles(rect, hull2)
if len(dt) == 0:
return None
# Apply affine transformation to Delaunay triangles
for i in range(0, len(dt)):
t1 = []
t2 = []
#get points for img1, img2 corresponding to the triangles
for j in range(0, 3):
t1.append(hull1[dt[i][j]])
t2.append(hull2[dt[i][j]])
warpTriangle(img_ref, img1Warped, t1, t2)
# Calculate Mask
hull8U = []
for i in range(0, len(hull2)):
hull8U.append((hull2[i][0], hull2[i][1]))
mask = np.zeros(img_ref.shape, dtype = img_ref.dtype)
cv2.fillConvexPoly(mask, np.int32(hull8U), (255, 255, 255))
r = cv2.boundingRect(np.float32([hull2]))
center = ((r[0]+int(r[2]/2), r[1]+int(r[3]/2)))
# Clone seamlessly.
output = cv2.seamlessClone(np.uint8(img1Warped), img_ref, mask, center, cv2.NORMAL_CLONE)
img1Warped = np.copy(img_ref);
dt = calculateDelaunayTriangles(rect, hull1)
if len(dt) == 0:
return None
# Apply affine transformation to Delaunay triangles
for i in range(0, len(dt)):
t1 = []
t2 = []
#get points for img1, img2 corresponding to the triangles
for j in range(0, 3):
t1.append(hull2[dt[i][j]])
t2.append(hull1[dt[i][j]])
warpTriangle(img_ref, img1Warped, t1, t2)
# Calculate Mask
hull8U = []
for i in range(0, len(hull2)):
hull8U.append((hull1[i][0], hull1[i][1]))
mask = np.zeros(img_ref.shape, dtype = img_ref.dtype)
cv2.fillConvexPoly(mask, np.int32(hull8U), (255, 255, 255))
r = cv2.boundingRect(np.float32([hull1]))
center = ((r[0]+int(r[2]/2), r[1]+int(r[3]/2)))
# Clone seamlessly.
output = cv2.seamlessClone(np.uint8(img1Warped), output, mask, center, cv2.NORMAL_CLONE)
return output
#put face in img_ref into face of img_mount_face
def face_swap(img_ref, img_mount_face, detector, predictor):
gray2 = cv2.cvtColor(img_mount_face, cv2.COLOR_BGR2GRAY)
# detect faces in the grayscale frame
rects2 = detector(gray2, 0)
gray1 = cv2.cvtColor(img_ref, cv2.COLOR_BGR2GRAY)
# detect faces in the grayscale frame
rects1 = detector(gray1, 0)
print(len(rects2))
if (len(rects2) == 0 or len(rects1) == 0): #if not found faces in images return error
return None
img1Warped = np.copy(img_mount_face);
shape1 = predictor(gray1, rects1[0])
points1 = face_utils.shape_to_np(shape1) #type is a array of arrays (list of lists)
#need to convert to a list of tuples
points1 = list(map(tuple, points1))
shape2 = predictor(gray2, rects2[0])
points2 = face_utils.shape_to_np(shape2)
points2 = list(map(tuple, points2))
# Find convex hull
hull1 = []
hull2 = []
hullIndex = cv2.convexHull(np.array(points2), returnPoints = False)
for i in range(0, len(hullIndex)):
hull1.append(points1[ int(hullIndex[i]) ])
hull2.append(points2[ int(hullIndex[i]) ])
# Find delanauy traingulation for convex hull points
sizeImg2 = img_mount_face.shape
rect = (0, 0, sizeImg2[1], sizeImg2[0])
dt = calculateDelaunayTriangles(rect, hull2)
if len(dt) == 0:
return None
# Apply affine transformation to Delaunay triangles
for i in range(0, len(dt)):
t1 = []
t2 = []
#get points for img1, img2 corresponding to the triangles
for j in range(0, 3):
t1.append(hull1[dt[i][j]])
t2.append(hull2[dt[i][j]])
warpTriangle(img_ref, img1Warped, t1, t2)
# Calculate Mask
hull8U = []
for i in range(0, len(hull2)):
hull8U.append((hull2[i][0], hull2[i][1]))
mask = np.zeros(img_mount_face.shape, dtype = img_mount_face.dtype)
cv2.fillConvexPoly(mask, np.int32(hull8U), (255, 255, 255))
r = cv2.boundingRect(np.float32([hull2]))
center = ((r[0]+int(r[2]/2), r[1]+int(r[3]/2)))
# Clone seamlessly.
output = cv2.seamlessClone(np.uint8(img1Warped), img_mount_face, mask, center, cv2.NORMAL_CLONE)
return output
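
# Minimal usage sketch (ours) for face_swap. The image paths and the predictor
# path are placeholders; dlib's 68-landmark model must be downloaded separately.
def face_swap_demo(src_path, dst_path,
                   predictor_path="shape_predictor_68_face_landmarks.dat"):
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(predictor_path)
    img_ref = cv2.imread(src_path)          # face to copy from
    img_mount_face = cv2.imread(dst_path)   # image whose face gets replaced
    output = face_swap(img_ref, img_mount_face, detector, predictor)
    if output is not None:
        cv2.imwrite("swapped.jpg", output)
    return output
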
#swaps faces in img_ref and img_mount_face (two separate files)
def face_swap2(img_ref, img_mount_face, detector, predictor, noseShift, beanMode = False, modeChange = True, rects1 = None, gray1 = None, benchmark = False, verbose = False):
global gray2, rects2, shape2, hullIndex, dt, points2
now = time.time()
if modeChange:
gray2 = cv2.cvtColor(img_mount_face, cv2.COLOR_BGR2GRAY)
# detect faces in the grayscale frame
rects2 = detector(gray2, 0)
if gray1 is None:
gray1 = cv2.cvtColor(img_ref, cv2.COLOR_BGR2GRAY)
if rects1 is None:
# detect faces in the grayscale frame
rects1 = detector(gray1, 0)
if benchmark : print("\t\t", "face detect", niceTime(now)); now = time.time()
# print(rects2)
# print(type(rects2))
# print(type(rects2).__name__)
# print(len(rects2))
if (len(rects2) == 0 or len(rects1) == 0): #if not found faces in images return error
height, width = gray2.shape
#print("Shape:")
#print(height, width)
defaultRect = dlib.rectangle(left=10, top=10, right=width - 10, bottom=height - 10)
#print("Rect:")
#print(defaultRect)
rects2.append(defaultRect)
#return None, None
#print("Len:")
#print(len(rects2))
img1Warped = np.copy(img_mount_face);
    img2Warped = np.copy(img_ref)
import time
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
from tqdm import tqdm
from course_lib.Base.BaseRecommender import BaseRecommender
from src.data_management.data_preprocessing_fm import sample_negative_interactions_uniformly
from src.utils.general_utility_functions import get_total_number_of_users, get_total_number_of_items
from sklearn.preprocessing import MinMaxScaler
def preprocess_dataframe_after_reading(df: pd.DataFrame):
df = df.copy()
df = df.sort_values(by="user_id", ascending=True)
df = df.reset_index()
df = df.drop(columns=["index"], inplace=False)
return df
def get_valid_dataframe_second_version(user_id_array, cutoff, main_recommender, path, mapper, recommender_list,
URM_train, user_factors=None, item_factors=None):
data_frame = get_boosting_base_dataframe(user_id_array=user_id_array, top_recommender=main_recommender,
exclude_seen=True, cutoff=cutoff)
for rec in recommender_list:
data_frame = add_recommender_predictions(data_frame=data_frame, recommender=rec,
column_name=rec.RECOMMENDER_NAME)
data_frame = advanced_subclass_handling(data_frame=data_frame, URM_train=URM_train, path=path)
data_frame = add_ICM_information(data_frame=data_frame, path=path, one_hot_encoding_subclass=False,
use_subclass=True)
data_frame = add_UCM_information(data_frame=data_frame, path=path, user_mapper=mapper)
data_frame = add_user_len_information(data_frame=data_frame, URM_train=URM_train)
data_frame = add_item_popularity(data_frame=data_frame, URM_train=URM_train)
if user_factors is not None:
data_frame = add_user_factors(data_frame=data_frame, user_factors=user_factors)
if item_factors is not None:
data_frame = add_item_factors(data_frame=data_frame, item_factors=item_factors)
data_frame = data_frame.sort_values(by="user_id", ascending=True)
data_frame = data_frame.reset_index()
data_frame = data_frame.drop(columns=["index"], inplace=False)
return data_frame
def get_train_dataframe_proportion(user_id_array, cutoff, main_recommender, path, mapper, recommender_list,
URM_train, proportion, user_factors=None, item_factors=None,
negative_label_value=0, threshold=0.7):
data_frame = get_boosting_base_dataframe(user_id_array=user_id_array, top_recommender=main_recommender,
exclude_seen=False, cutoff=cutoff)
labels, non_zero_count, _ = get_label_array(data_frame, URM_train)
data_frame['label'] = labels
data_frame = add_random_negative_ratings(data_frame=data_frame, URM_train=URM_train, proportion=proportion,
negative_label_value=negative_label_value)
data_frame = data_frame.sort_values(by="user_id", ascending=True)
data_frame = data_frame.reset_index()
data_frame = data_frame.drop(columns=["index"], inplace=False)
for rec in recommender_list:
data_frame = add_recommender_predictions(data_frame=data_frame, recommender=rec,
column_name=rec.RECOMMENDER_NAME)
# Add labels value in order to differentiate more the elements
mask = (data_frame[rec.RECOMMENDER_NAME] > threshold) & (data_frame['label'] > 0)
print("\t Score greater than threshold: {}/{}".format(np.sum(mask), non_zero_count))
data_frame.loc[mask, 'label'] += 1
print("Labels greater than 1: {}".format(np.sum(data_frame['label'] > 1)))
data_frame = advanced_subclass_handling(data_frame=data_frame, URM_train=URM_train, path=path, add_subclass=False)
data_frame = add_ICM_information(data_frame=data_frame, path=path, one_hot_encoding_subclass=False,
use_subclass=True)
data_frame = add_UCM_information(data_frame=data_frame, path=path, user_mapper=mapper)
data_frame = add_user_len_information(data_frame=data_frame, URM_train=URM_train)
data_frame = add_item_popularity(data_frame=data_frame, URM_train=URM_train)
if user_factors is not None:
data_frame = add_user_factors(data_frame=data_frame, user_factors=user_factors)
if item_factors is not None:
data_frame = add_item_factors(data_frame=data_frame, item_factors=item_factors)
data_frame = data_frame.sort_values(by="user_id", ascending=True)
data_frame = data_frame.reset_index()
data_frame = data_frame.drop(columns=["index"], inplace=False)
return data_frame
def get_dataframe_all_data(user_id_array, path, mapper, recommender_list,
URM_train, proportion, user_factors=None, item_factors=None):
negative_URM = sample_negative_interactions_uniformly(negative_sample_size=len(URM_train.data) * proportion,
URM=URM_train)
data_frame = get_dataframe_URM(user_id_array=user_id_array, URM_train=URM_train + negative_URM)
labels, _, _ = get_label_array(data_frame, URM_train)
data_frame['label'] = labels
data_frame = data_frame.sort_values(by="user_id", ascending=True)
data_frame = data_frame.reset_index()
data_frame = data_frame.drop(columns=["index"], inplace=False)
for rec in recommender_list:
data_frame = add_recommender_predictions(data_frame=data_frame, recommender=rec,
column_name=rec.RECOMMENDER_NAME)
data_frame = advanced_subclass_handling(data_frame=data_frame, URM_train=URM_train, path=path, add_subclass=False)
data_frame = add_ICM_information(data_frame=data_frame, path=path, one_hot_encoding_subclass=False,
use_subclass=True)
data_frame = add_UCM_information(data_frame=data_frame, path=path, user_mapper=mapper)
data_frame = add_user_len_information(data_frame=data_frame, URM_train=URM_train)
data_frame = add_item_popularity(data_frame=data_frame, URM_train=URM_train)
if user_factors is not None:
data_frame = add_user_factors(data_frame=data_frame, user_factors=user_factors)
if item_factors is not None:
data_frame = add_item_factors(data_frame=data_frame, item_factors=item_factors)
data_frame = data_frame.sort_values(by="user_id", ascending=True)
data_frame = data_frame.reset_index()
data_frame = data_frame.drop(columns=["index"], inplace=False)
return data_frame
def get_dataframe_first_version(user_id_array, remove_seen_flag, cutoff, main_recommender, path, mapper,
recommender_list,
URM_train):
# Get dataframe for these users
data_frame = get_boosting_base_dataframe(user_id_array=user_id_array, exclude_seen=remove_seen_flag,
cutoff=cutoff, top_recommender=main_recommender)
for rec in recommender_list:
data_frame = add_recommender_predictions(data_frame=data_frame, recommender=rec,
column_name=rec.RECOMMENDER_NAME)
data_frame = add_ICM_information(data_frame=data_frame, path=path)
data_frame = add_UCM_information(data_frame=data_frame, path=path, user_mapper=mapper)
data_frame = add_user_len_information(data_frame=data_frame, URM_train=URM_train)
data_frame = data_frame.sort_values(by="user_id", ascending=True)
data_frame = data_frame.reset_index()
data_frame.drop(columns=["index"], inplace=False)
return data_frame
def add_user_factors(data_frame: pd.DataFrame, user_factors: np.ndarray):
"""
Add user factors to the dataframe
:param data_frame:
:param user_factors:
:return:
"""
print("Adding user factors...")
data_frame = data_frame.copy()
user_factors_df = pd.DataFrame(data=user_factors,
index=np.arange(0, user_factors.shape[0]),
columns=["user_factor_{}".format(i + 1) for i in range(user_factors.shape[1])])
data_frame = pd.merge(data_frame, user_factors_df, left_on="user_id", right_index=True)
return data_frame
def add_item_factors(data_frame: pd.DataFrame, item_factors: np.ndarray):
"""
Add item factors to the dataframe
:param data_frame:
:param item_factors:
:return:
"""
print("Adding item factors...")
data_frame = data_frame.copy()
item_factors_df = pd.DataFrame(data=item_factors,
index=np.arange(0, item_factors.shape[0]),
columns=["item_factor_{}".format(i + 1) for i in range(item_factors.shape[1])])
data_frame = pd.merge(data_frame, item_factors_df, left_on="item_id", right_index=True)
return data_frame
def add_item_popularity(data_frame: pd.DataFrame, URM_train: csr_matrix):
"""
Add the item popularity to the dataframe
:param data_frame: data frame containing information for boosting
:param URM_train: URM train matrix
:return: dataframe containing boosting information + item popularity
"""
print("Adding item popularity...")
data_frame = data_frame.copy()
pop_items = (URM_train > 0).sum(axis=0)
pop_items = np.array(pop_items).squeeze()
item_ids = np.arange(URM_train.shape[1])
data = np.array([item_ids, pop_items])
data = np.transpose(data)
new_df = pd.DataFrame(data=data, columns=["row", "item_pop"])
data_frame = pd.merge(data_frame, new_df, left_on="item_id", right_on="row")
data_frame = data_frame.drop(columns=["row"], inplace=False)
return data_frame
def get_label_array(data_frame: pd.DataFrame, URM_train: csr_matrix):
"""
Create a dataframe with a single column with the correct predictions
:param data_frame: data frame containing information for boosting
:param URM_train: URM train matrix
:return: numpy array containing y information
"""
print("Retrieving training labels...")
user_ids = data_frame['user_id'].values
item_ids = data_frame['item_id'].values
y = np.zeros(user_ids.size, dtype=np.int)
labels = np.array(URM_train[user_ids, item_ids].tolist()).flatten()
y[labels > 0] = 1
non_zero_count = np.count_nonzero(y)
print("\t- There are {} non-zero ratings in {}".format(non_zero_count, y.size))
return y, non_zero_count, y.size
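
# Toy usage sketch (ours) for get_label_array: a 2x3 URM with two positive
# interactions; the expected label vector for the listed pairs is [1, 0, 1].
def _label_array_demo():
    urm = csr_matrix(np.array([[1, 0, 0], [0, 0, 1]]))
    df = pd.DataFrame({"user_id": [0, 0, 1], "item_id": [0, 1, 2]})
    y, non_zero_count, size = get_label_array(df, urm)
    return y
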
def add_user_len_information(data_frame: pd.DataFrame, URM_train: csr_matrix):
"""
Add information concerning the user profile length to the row of the dataframe
:param data_frame: data frame that is being pre-processed from boosting
:param URM_train: URM train from which to take profile length information
:return: data frame with new content inserted
"""
print("Adding user profile length...")
data_frame = data_frame.copy()
user_act = (URM_train > 0).sum(axis=1)
user_act = np.array(user_act).squeeze()
user_ids = np.arange(URM_train.shape[0])
data = np.array([user_ids, user_act])
data = np.transpose(data)
new_df = pd.DataFrame(data=data, columns=["row", "user_act"])
data_frame = pd.merge(data_frame, new_df, left_on="user_id", right_on="row")
data_frame = data_frame.drop(columns=["row"], inplace=False)
return data_frame
def remap_data_frame(df: pd.DataFrame, mapper):
"""
Change user_id columns of the df given in input, according to the mapper.
Users that are not present will be removed, and the others will be mapped to the correct number.
:param df: dataframe that will be modified
:param mapper: mapper according to which the dataframe will be modified
:return: dataframe with "user_id" column modified properly
"""
df = df.copy()
# Remove users that are not present in the mapper
original_users = df['row'].values
new_users_key = list(mapper.keys())
new_users_key = list(map(int, new_users_key))
new_users_key = np.array(new_users_key)
mask = np.in1d(original_users, new_users_key, invert=True)
remove = original_users[mask]
df = df.set_index("row")
mask = np.in1d(df.index, remove)
df = df.drop(df.index[mask])
# Map the index to the new one
df = df.reset_index()
df['row'] = df['row'].map(lambda x: mapper[str(x)])
return df
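
# Toy usage sketch (ours) for remap_data_frame: users 5 and 7 are kept and
# remapped to 0 and 1, user 9 is dropped because it is missing from the mapper.
def _remap_data_frame_demo():
    df = pd.DataFrame({"row": [5, 7, 9], "col": [1, 2, 3]})
    mapper = {"5": 0, "7": 1}
    return remap_data_frame(df, mapper)
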
def add_UCM_information(data_frame: pd.DataFrame, user_mapper, path="../../data/", use_region=True, use_age=True,
use_age_onehot=False):
"""
Add UCM information to the data frame for XGboost
:param data_frame: data frame containing information being pre-processed for boosting
:param user_mapper: mapper original users to train users
:param path: where to read UCM csv files
:param use_region: True is region information should be used, false otherwise
:param use_age: True if age information should be used, false otherwise
:param use_age_onehot: True if age information added is one hot, false otherwise
:return: pd.DataFrame containing the original data frame+ UCM information
"""
print("Adding UCM information...")
t_users = get_total_number_of_users() # Total number of users (-1 since indexing from 0)
data_frame = data_frame.copy()
df_region: pd.DataFrame = pd.read_csv(path + "data_UCM_region.csv")
df_age: pd.DataFrame = pd.read_csv(path + "data_UCM_age.csv")
# Re-map UCM data frame in order to have the correct user information
if use_region:
df_region = df_region[['row', 'col']]
df_dummies = pd.get_dummies(df_region['col'], prefix='region')
df_dummies = df_dummies.join(df_region['row'])
df_dummies = df_dummies.groupby(['row'], as_index=False).sum()
# Fill missing values
user_present = df_dummies['row'].values
total_users = np.arange(t_users)
mask = np.in1d(total_users, user_present, invert=True)
missing_users = total_users[mask]
num_col = df_dummies.columns.size
imputed_users = np.zeros(shape=(num_col, missing_users.size))
imputed_users[0] = missing_users
missing_df = pd.DataFrame(data=np.transpose(imputed_users), dtype=np.int32, columns=df_dummies.columns)
df_region_onehot = df_dummies.append(missing_df, sort=False)
if user_mapper is not None:
df_region_onehot = remap_data_frame(df=df_region_onehot, mapper=user_mapper)
data_frame = pd.merge(data_frame, df_region_onehot, right_on="row", left_on="user_id")
data_frame = data_frame.drop(columns=["row"], inplace=False)
if use_age:
df_age = df_age[['row', 'col']]
# Handle missing values: fill with mode + 1
users_present = df_age['row'].values
total_users = np.arange(t_users)
mask = np.in1d(total_users, users_present, invert=True)
missing_users = total_users[mask].astype(np.int32)
missing_val_filled = np.ones(missing_users.size) * (int(df_age['col'].mode()) + 1)
missing = np.array([missing_users, missing_val_filled], dtype=np.int32)
missing_df = pd.DataFrame(data=np.transpose(missing), columns=["row", "col"])
df_age_imputed = df_age.copy().append(missing_df, sort=False)
df_age_imputed = df_age_imputed.reset_index()
df_age_imputed = df_age_imputed[['row', 'col']]
if user_mapper is not None:
df_age_imputed = remap_data_frame(df=df_age_imputed, mapper=user_mapper)
df_age_imputed = df_age_imputed.rename(columns={"col": "age"})
if use_age_onehot:
row = df_age_imputed['row']
df_age_imputed = pd.get_dummies(df_age_imputed['age'], prefix='age')
df_age_imputed = df_age_imputed.join(row)
data_frame = pd.merge(data_frame, df_age_imputed, right_on="row", left_on="user_id")
data_frame = data_frame.drop(columns=["row"], inplace=False)
# Add dummy variables indicating that the region has been imputed
df_age_dummy_imputation = df_age.copy()
df_age_dummy_imputation['col'] = 0
imputed_df = pd.DataFrame(
data={"row": missing_users, "col": np.ones(shape=missing_users.size, dtype=np.int)})
df_age_dummy_imputation = df_age_dummy_imputation.append(imputed_df, sort=False)
df_age_dummy_imputation = df_age_dummy_imputation.rename(columns={"col": "age_imputed_flag"})
if user_mapper is not None:
df_age_dummy_imputation = remap_data_frame(df=df_age_dummy_imputation, mapper=user_mapper)
data_frame = pd.merge(data_frame, df_age_dummy_imputation, right_on="row", left_on="user_id")
data_frame = data_frame.drop(columns=["row"], inplace=False)
return data_frame
def advanced_subclass_handling(data_frame: pd.DataFrame, URM_train: csr_matrix, path="../../data/",
add_subclass=False):
"""
Here we want to include in the training set sub class information in the following way:
- A column encoding the mean of 'label' for a certain couple (user, subclass): i.e. how many
items of that subclass the user liked
- Including information about the popularity of the subclass (how many items for that subclass
- Including ratings of that subclass
:param URM_train: mean response will be retrieved from here
:param data_frame: dataframe being pre-processed for boosting
:param path: path to the folder containing subclass dataframe
:return: dataframe with augmented information
"""
print("Adding subclass and feature engineering subclass...")
data_frame = data_frame.copy()
df_subclass: pd.DataFrame = pd.read_csv(path + "data_ICM_sub_class.csv")
df_subclass = df_subclass[['row', 'col']]
df_subclass = df_subclass.rename(columns={"col": "subclass"})
# Merging sub class information
data_frame = pd.merge(data_frame, df_subclass, right_on="row", left_on="item_id")
data_frame = data_frame.drop(columns=["row"], inplace=False)
print("\t- Add items present for each subclass")
# Add subclass item-popularity: how many items are present of that subclass
subclass_item_count = df_subclass.groupby("subclass").count()
data_frame = pd.merge(data_frame, subclass_item_count, right_index=True, left_on="subclass")
data_frame = data_frame.rename(columns={"row": "item_per_subclass"})
print("\t- Add ratings popularity for each subclass")
# Add subclass ratings-popularity: how many interactions we have for each subclass
URM_train_csc = URM_train.tocsc()
n_ratings_sub = []
sorted_sub_indices = np.argsort(df_subclass['subclass'].values)
sorted_sub = df_subclass['subclass'][sorted_sub_indices].values
sorted_item_subclass = df_subclass['row'][sorted_sub_indices].values
unique_sorted_sub, sub_indptr = np.unique(sorted_sub, return_index=True)
sub_indptr = np.concatenate([sub_indptr, [sorted_sub.size]])
for i, sub in tqdm(enumerate(unique_sorted_sub), total=unique_sorted_sub.size, desc="\t\tProcessing"):
item_sub = sorted_item_subclass[sub_indptr[i]: sub_indptr[i + 1]]
n_ratings_sub.append(URM_train_csc[:, item_sub].data.size)
ratings_sub = np.array([unique_sorted_sub, n_ratings_sub])
ratings_per_sub_df = pd.DataFrame(data=np.transpose(ratings_sub),
columns=["subclass", "global_ratings_per_subclass"])
data_frame = pd.merge(data_frame, ratings_per_sub_df, left_on="subclass", right_on="subclass")
# Add subclass ratings-popularity for each user using rating percentage
print("\t- Add ratings popularity for pairs (user, subclass)")
users = data_frame['user_id'].values
sub = data_frame['subclass'].values
perc_array = np.zeros(users.size)
rat_array = np.zeros(users.size)
for i, user in tqdm(enumerate(users), total=users.size, desc="\t\tProcessing"):
curr_sub = sub[i]
curr_sub_index = np.searchsorted(unique_sorted_sub, curr_sub)
# Find items of this subclass
item_sub = sorted_item_subclass[sub_indptr[curr_sub_index]: sub_indptr[curr_sub_index + 1]]
user_item = URM_train.indices[URM_train.indptr[user]: URM_train.indptr[user + 1]]
total_user_likes = user_item.size
mask = np.in1d(item_sub, user_item)
likes_per_sub = item_sub[mask].size
user_p = likes_per_sub / total_user_likes
perc_array[i] = user_p
rat_array[i] = likes_per_sub
data_frame["subclass_user_like_perc"] = perc_array
data_frame["subclass_user_like_quantity"] = rat_array
if not add_subclass:
data_frame = data_frame.drop(columns=["subclass"], inplace=False)
return data_frame
def add_ICM_information(data_frame: pd.DataFrame, path="../../data/", use_price=True, use_asset=True,
use_subclass=True, one_hot_encoding_subclass=False):
"""
Add information form the ICM files to the data frame
:param one_hot_encoding_subclass: if one hot encoding should be applied to subclass or not
:param data_frame: data frame that is being pre-processed for boosting
:param path: path to the folder containing the csv files
:param use_price: True if you wish to append price information, false otherwise
:param use_asset: True if you wish to append asset information, false otherwise
:param use_subclass: True if you wish to append subclass information, false otherwise
:return: pd.DataFrame containing the information
"""
print("Adding ICM information...")
data_frame = data_frame.copy()
df_price: pd.DataFrame = pd.read_csv(path + "data_ICM_price.csv")
df_asset: pd.DataFrame = pd.read_csv(path + "data_ICM_asset.csv")
df_subclass: pd.DataFrame = pd.read_csv(path + "data_ICM_sub_class.csv")
total_items = get_total_number_of_items()
total_items = np.arange(total_items)
if use_price:
# Handle missing values
item_present = df_price['row'].values
mask = np.in1d(total_items, item_present, invert=True)
missing_items = total_items[mask].astype(np.int32)
missing_val_filled = np.ones(missing_items.size) * df_price['data'].median()
missing = np.array([missing_items, missing_val_filled])
missing_df = pd.DataFrame(data=np.transpose(missing), columns=['row', 'data'])
df_price = df_price.append(missing_df, sort=False)
df_price = df_price.reset_index()
df_price = df_price[['row', 'data']]
# TODO remove outliers and add dummy variable
df_price = df_price.rename(columns={"data": "price"})
data_frame = pd.merge(data_frame, df_price, right_on="row", left_on="item_id")
data_frame = data_frame.drop(columns=['row'], inplace=False)
if use_asset:
# Handle missing values
item_present = df_asset['row'].values
mask = np.in1d(total_items, item_present, invert=True)
missing_items = total_items[mask].astype(np.int32)
missing_val_filled = np.ones(missing_items.size) * df_asset['data'].median()
missing = np.array([missing_items, missing_val_filled])
missing_df = pd.DataFrame(data=np.transpose(missing), columns=['row', 'data'])
df_asset = df_asset.append(missing_df, sort=False)
df_asset = df_asset.reset_index()
df_asset = df_asset[['row', 'data']]
# TODO remove outliers and add dummy variable
df_asset = df_asset.rename(columns={"data": "asset"})
data_frame = pd.merge(data_frame, df_asset, right_on="row", left_on="item_id")
data_frame = data_frame.drop(columns=["row"], inplace=False)
if use_subclass:
df_subclass = df_subclass[['row', 'col']]
df_subclass = df_subclass.rename(columns={"col": "subclass"})
if not one_hot_encoding_subclass:
data_frame = pd.merge(data_frame, df_subclass, right_on="row", left_on="item_id")
else:
dummies = pd.get_dummies(df_subclass['subclass'])
dummies = dummies.join(df_subclass['row'])
data_frame = pd.merge(data_frame, dummies, right_on="row", left_on="item_id")
data_frame = data_frame.drop(columns=["row"], inplace=False)
return data_frame
def add_recommender_predictions(data_frame: pd.DataFrame, recommender: BaseRecommender,
column_name: str, min_max_scaling=True):
"""
Add predictions of a recommender to the dataframe and return the new dataframe
Note: Assumes that the data_frame is ordered by user_id (increasingly)
:param data_frame: dataframe on which predictions will be added
:param recommender: recommender of which the predictions will be added
:param column_name: name of the new column
:param min_max_scaling: whether to apply min-max scaling or not
:return: new dataframe containing recommender predictions
"""
print("Adding recommender predictions - COLUMN NAME: {}".format(column_name))
new_df = data_frame.copy()
items = new_df['item_id'].values.astype(int)
users = new_df['user_id'].values.astype(int)
# Check if dataframe is sorted by user_id
    if not np.all(users[:-1] <= users[1:]):
raise ValueError("The dataframe is not sorted by user_id")
prediction_values = np.zeros(items.size, dtype=np.float32)
# Use indptr to avoid using query of dataframe
unique_users, user_indptr = np.unique(users, return_index=True)
user_indptr = np.concatenate([user_indptr, [users.size]])
all_scores = recommender._compute_item_score(unique_users)
if min_max_scaling:
scaler = MinMaxScaler()
scaler.fit(all_scores.reshape(-1, 1))
all_scores = np.reshape(scaler.transform(all_scores.reshape(-1, 1)), newshape=all_scores.shape)
for i, user_id in tqdm(enumerate(unique_users), total=unique_users.size,
desc="\tAdd users predictions".format(column_name)):
items_for_user_id = items[user_indptr[i]: user_indptr[i + 1]]
scores = all_scores[i, items_for_user_id].copy()
prediction_values[user_indptr[i]: user_indptr[i + 1]] = scores
new_df[column_name] = prediction_values
del all_scores # Remove this variable in order to let the garbage collector collect it
return new_df
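
# Toy sketch (ours) of the min-max scaling step used above: all scores are
# flattened, scaled jointly to [0, 1], then reshaped back,
# e.g. [[1, 3], [2, 5]] -> [[0, 0.5], [0.25, 1]].
def _min_max_scaling_demo():
    all_scores = np.array([[1.0, 3.0], [2.0, 5.0]])
    scaler = MinMaxScaler()
    scaler.fit(all_scores.reshape(-1, 1))
    return np.reshape(scaler.transform(all_scores.reshape(-1, 1)), newshape=all_scores.shape)
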
def user_uniform_sampling(user: int, URM_train: csr_matrix, items_to_exclude: np.array, sample_size: int,
batch_size=1000):
"""
Sample negative interactions at random for a given users from URM_train
:param items_to_exclude: exclude these items from the sampling
:param user: sample negative interactions for this user
:param URM_train: URM from which samples will be taken
:param sample_size: how many samples to take
:param batch_size: batch size dimension for the number of random sampling to do at each iteration
:return: np.array containing the collected samples
"""
sampled = 0
invalid_items = URM_train.indices[URM_train.indptr[user]: URM_train.indptr[user + 1]]
collected_samples = []
    while sampled < sample_size:
        items_sampled = np.random.randint(low=0, high=URM_train.shape[1], size=batch_size)
        items_sampled = np.unique(items_sampled)

        # Remove items already seen and items to exclude
        valid_items = np.setdiff1d(items_sampled, invalid_items, assume_unique=True)
        valid_items = np.setdiff1d(valid_items, items_to_exclude, assume_unique=True)

        # Keep only as many of the sampled items as are still needed
        valid_items = valid_items[:sample_size - sampled]
        collected_samples.append(valid_items)
        sampled += valid_items.size

    return np.concatenate(collected_samples)
#!/usr/bin/python
# encoding: utf-8
import random
import torch
from torch.utils.data import Dataset
from torch.utils.data import sampler
import torchvision.transforms as transforms
import lmdb
import six
import sys
import bisect
import warnings
from PIL import Image
import numpy as np
import string
import cv2
import os
import re
import pickle
sys.path.append('../')
from utils import str_filt
from utils.labelmaps import get_vocabulary, labels2strs
from IPython import embed
from pyfasttext import FastText
random.seed(0)
from utils import utils_deblur
from utils import utils_sisr as sr
from utils import utils_image as util
import imgaug.augmenters as iaa
from scipy import io as sio
scale = 0.90
kernel = utils_deblur.fspecial('gaussian', 15, 1.)
noise_level_img = 0.
def rand_crop(im):
w, h = im.size
p1 = (random.uniform(0, w*(1-scale)), random.uniform(0, h*(1-scale)))
p2 = (p1[0] + scale*w, p1[1] + scale*h)
return im.crop(p1 + p2)
def central_crop(im):
w, h = im.size
p1 = (((1-scale)*w/2), (1-scale)*h/2)
p2 = ((1+scale)*w/2, (1+scale)*h/2)
return im.crop(p1 + p2)
def buf2PIL(txn, key, type='RGB'):
imgbuf = txn.get(key)
buf = six.BytesIO()
buf.write(imgbuf)
buf.seek(0)
im = Image.open(buf).convert(type)
return im
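
# Minimal usage sketch (ours): read one record with buf2PIL from an lmdb folder
# that stores images under b'image-%09d' and labels under b'label-%09d', as the
# datasets below expect. The lmdb path is a placeholder.
def _read_lmdb_sample(lmdb_root, index=1):
    env = lmdb.open(lmdb_root, max_readers=1, readonly=True, lock=False,
                    readahead=False, meminit=False)
    with env.begin(write=False) as txn:
        img = buf2PIL(txn, b'image-%09d' % index, 'RGB')
        label = txn.get(b'label-%09d' % index).decode()
    return img, label
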
class lmdbDataset_realBadSet(Dataset):
def __init__(self, root=None, voc_type='upper', max_len=100, test=False, rotate=False):
super(lmdbDataset_realBadSet, self).__init__()
# root should be detailed by upper folder of images
# anno_dir = os.path.join(root, "ANNOTATION")
self.imlist = os.listdir(root)
self.image_dir = root
# self.impath_list = []
# self.anno_list = []
print("collect images from:", root)
# mode = "train" if root.split("/")[-2] == "TRAIN" else "test"
self.nSamples = len(self.imlist)
print("Done, we have ", self.nSamples, "samples...")
self.voc_type = voc_type
self.max_len = max_len
self.test = test
def __len__(self):
return self.nSamples
def __getitem__(self, index):
idx = index % self.nSamples
imfile = self.imlist[index]
image_path = os.path.join(self.image_dir, imfile)
print("imfile:", imfile)
word = imfile.split("_")[1] if len(imfile.split("_")) > 1 else ""
if not os.path.isfile(image_path):
print("File not found for", image_path)
return self[index+1]
try:
img_HR = Image.open(image_path)
img_lr = img_HR.copy()
img_lr_np = np.array(img_lr).astype(np.uint8)
img_lry = cv2.cvtColor(img_lr_np, cv2.COLOR_RGB2YUV)[..., 0]
img_lry = Image.fromarray(img_lry)
img_HR_np = np.array(img_HR).astype(np.uint8)
img_HRy = cv2.cvtColor(img_HR_np, cv2.COLOR_RGB2YUV)[..., 0]
img_HRy = Image.fromarray(img_HRy)
if img_HR.size[0] < 2 or img_HR.size[1] < 2:
print("img_HR:", img_HR.size)
return self[(index + 1) % self.nSamples]
except ValueError:
print("File not found for", image_path)
return self[(index + 1) % self.nSamples]
# print("annos:", img_HR_np.shape, img_lr_np.shape)
# label_str = str_filt(word, self.voc_type)
return img_HR, img_lr, img_HRy, img_lry, imfile
class lmdbDataset(Dataset):
def __init__(self, root=None, voc_type='upper', max_len=31, test=True):
super(lmdbDataset, self).__init__()
self.env = lmdb.open(
root,
max_readers=1,
readonly=True,
lock=False,
readahead=False,
meminit=False)
if not self.env:
print('cannot creat lmdb from %s' % (root))
sys.exit(0)
with self.env.begin(write=False) as txn:
nSamples = int(txn.get(b'num-samples'))
self.nSamples = nSamples
self.max_len = max_len
self.voc_type = voc_type
def __len__(self):
return self.nSamples
def __getitem__(self, index):
assert index <= len(self), 'index range error'
index += 1
txn = self.env.begin(write=False)
label_key = b'label-%09d' % index
word = str(txn.get(label_key).decode())
try:
img = buf2PIL(txn, b'image_hr-%09d' % index, 'RGB')
except TypeError:
img = buf2PIL(txn, b'image-%09d' % index, 'RGB')
        except IOError:
            return self[index + 1]
        if len(word) > self.max_len:
            return self[index + 1]
label_str = str_filt(word, self.voc_type)
return img, label_str
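
# Minimal usage sketch (ours) for lmdbDataset; the lmdb path is a placeholder.
def _lmdb_dataset_demo(lmdb_root):
    dataset = lmdbDataset(root=lmdb_root, voc_type='upper', max_len=31)
    img, label = dataset[0]
    print(len(dataset), img.size, label)
    return img, label
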
def get_Syn_800K_with_words(mode, dataset_dir, lang_seq=False):
# if mode == 'train':
# image_dir = os.path.join(dataset_dir, 'image_9000/')
# gt_dir = os.path.join(dataset_dir, 'txt_9000/')
# ./ICPR_dataset/update_ICPR_text_train_part1_20180316/train_1000/
# else:
# image_dir = os.path.join(dataset_dir, 'image_1000/')
# gt_dir = os.path.join(dataset_dir, 'txt_1000/')
word2vec_mat = '../selected_smaller_dic.mat'
#mat_data = sio.loadmat(word2vec_mat)
#all_words = mat_data['selected_vocab']
#all_vecs = mat_data['selected_dict']
#w2v_dict = {}
#print('Building w2v dictionary...')
#for i in range(len(all_words)):
# w2v_dict[all_words[i][0][0]] = all_vecs[i]
#print('done')
mat_file = os.path.join(dataset_dir, 'gt.mat')
# print('mat_file:', mat_file)
mat_f = sio.loadmat(mat_file)
wordBBs = mat_f['wordBB'][0]
txt_annos = mat_f['txt'][0]
im_names = mat_f['imnames'][0]
sam_size = len(txt_annos)
# image_list = os.listdir(image_dir)
# image_list.sort()
im_infos = []
if mode == 'train':
cache_pkl = './data_cache/Syn_800K_training'
else:
cache_pkl = './data_cache/Syn_800K_testing'
if lang_seq:
cache_pkl += "_lang_seq"
cache_pkl += "_E2E.pkl"
if os.path.isfile(cache_pkl):
return pickle.load(open(cache_pkl, 'rb'))
pro_cnt = 0
im_range = (0, 200000) if mode == "train" else (200000, 205000)
for i in range(im_range[0], im_range[1]):
txts = txt_annos[i]
im_path = os.path.join(dataset_dir, im_names[i][0])
word_boxes = wordBBs[i]
pro_cnt += 1
if pro_cnt % 2000 == 0:
print('processed image:', str(pro_cnt) + '/' + str(im_range[1] - im_range[0]))
cnt = 0
# print('word_boxes:', word_boxes.shape)
im = cv2.imread(im_path)
if len(word_boxes.shape) < 3:
word_boxes = np.expand_dims(word_boxes, -1)
words = []
boxes = []
word_vecs = []
for txt in txts:
txtsp = txt.split('\n')
for line in txtsp:
line = line.replace('\n', '').replace('\n', '').replace('\r', '').replace('\t', '').split(' ')
# print('line:', line)
for w in line:
# w = w
if len(w) > 0:
gt_ind = np.transpose(np.array(word_boxes[:, :, cnt], dtype=np.int32), (1, 0)).reshape(8)
# print(imname, gt_ind, w)
cnt += 1
'''
cv2.line(im, (box[0], box[1]), (box[2], box[3]), (0, 0, 255), 3)
cv2.line(im, (box[2], box[3]), (box[4], box[5]), (0, 0, 255), 3)
cv2.line(im, (box[4], box[5]), (box[6], box[7]), (0, 0, 255), 3)
cv2.line(im, (box[6], box[7]), (box[0], box[1]), (0, 0, 255), 3)
cv2.putText(im, w, (box[0], box[1]), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 122), 2)
'''
pt1 = (int(gt_ind[0]), int(gt_ind[1]))
pt2 = (int(gt_ind[2]), int(gt_ind[3]))
pt3 = (int(gt_ind[4]), int(gt_ind[5]))
pt4 = (int(gt_ind[6]), int(gt_ind[7]))
edge1 = np.sqrt((pt1[0] - pt2[0]) * (pt1[0] - pt2[0]) + (pt1[1] - pt2[1]) * (pt1[1] - pt2[1]))
edge2 = np.sqrt((pt2[0] - pt3[0]) * (pt2[0] - pt3[0]) + (pt2[1] - pt3[1]) * (pt2[1] - pt3[1]))
angle = 0
if edge1 > edge2:
width = edge1
height = edge2
if pt1[0] - pt2[0] != 0:
angle = -np.arctan(float(pt1[1] - pt2[1]) / float(pt1[0] - pt2[0])) / 3.1415926 * 180
else:
angle = 90.0
elif edge2 >= edge1:
width = edge2
height = edge1
# print pt2[0], pt3[0]
if pt2[0] - pt3[0] != 0:
angle = -np.arctan(float(pt2[1] - pt3[1]) / float(pt2[0] - pt3[0])) / 3.1415926 * 180
else:
angle = 90.0
if angle < -45.0:
angle = angle + 180
x_ctr = float(pt1[0] + pt3[0]) / 2 # pt1[0] + np.abs(float(pt1[0] - pt3[0])) / 2
y_ctr = float(pt1[1] + pt3[1]) / 2 # pt1[1] + np.abs(float(pt1[1] - pt3[1])) / 2
if height * width * (800 / float(im.shape[0])) < 16 * 32 and mode == "train":
continue
if x_ctr >= im.shape[1] or x_ctr < 0 or y_ctr >= im.shape[0] or y_ctr < 0:
continue
#com_num = re.compile('[0-9]+')
#com_prices = re.compile('[$¥€£]+')
#match_num = re.findall(com_num, w)
#match_prices = re.findall(com_prices, w)
# choices: original, prices, others
# 2 for English
if lang_seq:
w = ["1" for i in range(len(w))]
w = "".join(w)
words.append(w)
'''
w = w.lower()
if w in w2v_dict:
word_vecs.append(w2v_dict[w.lower()])
elif match_prices and match_num:
word_vecs.append(w2v_dict['price'])
elif match_num and not match_prices:
word_vecs.append(w2v_dict['ten'])
else:
print(im_path, w)
word_vecs.append(np.zeros(100, dtype=np.float32) + 1e-10)
'''
gt_ptx = gt_ind.reshape(-1, 2)
xmax = np.max(gt_ptx[:, 0])
xmin = np.min(gt_ptx[:, 0])
ymax = np.max(gt_ptx[:, 1])
ymin = np.min(gt_ptx[:, 1])
# return to width, height
boxes.append([xmin, ymin, xmax - xmin, ymax - ymin]) #x_ctr, y_ctr, width, height, angle, w
cls_num = 2
len_of_bboxes = len(boxes)
gt_boxes = np.zeros((len_of_bboxes, 4), dtype=np.int16)
gt_classes = np.zeros((len_of_bboxes), dtype=np.int32)
overlaps = np.zeros((len_of_bboxes, cls_num), dtype=np.float32) # text or non-text
seg_areas = np.zeros((len_of_bboxes), dtype=np.float32)
for idx in range(len(boxes)):
gt_classes[idx] = 1 # cls_text
overlaps[idx, 1] = 1.0 # prob
seg_areas[idx] = (boxes[idx][2]) * (boxes[idx][3])
gt_boxes[idx, :] = [boxes[idx][0], boxes[idx][1], boxes[idx][2], boxes[idx][3]] #, boxes[idx][4]
# print ("boxes_size:", gt_boxes.shape[0])
if gt_boxes.shape[0] > 0:
max_overlaps = overlaps.max(axis=1)
# gt class that had the max overlap
max_classes = overlaps.argmax(axis=1)
else:
continue
im_info = {
'gt_classes': gt_classes,
'max_classes': max_classes,
'image': im_path,
'boxes': gt_boxes,
'flipped': False,
'gt_overlaps': overlaps,
'seg_areas': seg_areas,
'height': im.shape[0],
'width': im.shape[1],
'gt_words': words,
# 'gt_wordvec': np.array(word_vecs),
'max_overlaps': max_overlaps,
'rotated': True
}
im_infos.append(im_info)
f_save_pkl = open(cache_pkl, 'wb')
pickle.dump(im_infos, f_save_pkl)
f_save_pkl.close()
print("Save pickle done.")
return im_infos
class lmdbDataset_GlobalSR(Dataset):
def __init__(self, root=None, voc_type='upper', max_len=31, test=False, rotate=False):
super(lmdbDataset_GlobalSR, self).__init__()
if test:
mode = "test"
else:
mode = "train"
self.image_dataset = get_Syn_800K_with_words(mode, dataset_dir=root, lang_seq=False)
self.nSamples = len(self.image_dataset)
def __len__(self):
return self.nSamples
def __getitem__(self, index):
assert index <= len(self), 'index range error'
# index += 1
'''
txn = self.env.begin(write=False)
label_key = b'label-%09d' % index
word = str(txn.get(label_key).decode())
try:
img = buf2PIL(txn, b'image_hr-%09d' % index, 'RGB')
except TypeError:
img = buf2PIL(txn, b'image-%09d' % index, 'RGB')
except IOError or len(label) > self.max_len:
return self[index + 1]
label_str = str_filt(word, self.voc_type)
'''
image_info = self.image_dataset[index]
impath = image_info['image']
image_pil = Image.open(impath)
boxes = image_info['boxes']
gt_words = image_info['gt_words']
return image_pil, boxes, gt_words
def gauss_unsharp_mask(rgb, shp_kernel, shp_sigma, shp_gain):
LF = cv2.GaussianBlur(rgb, (shp_kernel, shp_kernel), shp_sigma)
HF = rgb - LF
RGB_peak = rgb + HF * shp_gain
RGB_noise_NR_shp = np.clip(RGB_peak, 0.0, 255.0)
return RGB_noise_NR_shp, LF
def add_shot_gauss_noise(rgb, shot_noise_mean, read_noise):
noise_var_map = shot_noise_mean * rgb + read_noise
noise_dev_map = np.sqrt(noise_var_map)
noise = np.random.normal(loc=0.0, scale = noise_dev_map, size=None)
if (rgb.mean() > 252.0):
noise_rgb = rgb
else:
noise_rgb = rgb + noise
noise_rgb = np.clip(noise_rgb, 0.0, 255.0)
return noise_rgb
def degradation(src_img):
# RGB Image input
GT_RGB = np.array(src_img)
GT_RGB = GT_RGB.astype(np.float32)
pre_blur_kernel_set = [3, 5]
sharp_kernel_set = [3, 5]
blur_kernel_set = [5, 7, 9, 11]
NR_kernel_set = [3, 5]
# Pre Blur
kernel = pre_blur_kernel_set[random.randint(0, (len(pre_blur_kernel_set) - 1))]
blur_sigma = random.uniform(5., 6.)
RGB_pre_blur = cv2.GaussianBlur(GT_RGB, (kernel, kernel), blur_sigma)
rand_p = random.random()
if rand_p > 0.2:
# Noise
shot_noise = random.uniform(0, 0.005)
read_noise = random.uniform(0, 0.015)
GT_RGB_noise = add_shot_gauss_noise(RGB_pre_blur, shot_noise, read_noise)
else:
GT_RGB_noise = RGB_pre_blur
# Noise Reduction
choice = random.uniform(0, 1.0)
GT_RGB_noise = np.round(GT_RGB_noise)
GT_RGB_noise = GT_RGB_noise.astype(np.uint8)
# if (shot_noise < 0.06):
if (choice < 0.7):
NR_kernel = NR_kernel_set[random.randint(0, (len(NR_kernel_set) - 1))] ###3,5,7,9
NR_sigma = random.uniform(2., 3.)
GT_RGB_noise_NR = cv2.GaussianBlur(GT_RGB_noise, (NR_kernel, NR_kernel), NR_sigma)
else:
value_sigma = random.uniform(70, 80)
space_sigma = random.uniform(70, 80)
GT_RGB_noise_NR = cv2.bilateralFilter(GT_RGB_noise, 7, value_sigma, space_sigma)
# Sharpening
GT_RGB_noise_NR = GT_RGB_noise_NR.astype(np.float32)
shp_kernel = sharp_kernel_set[random.randint(0, (len(sharp_kernel_set) - 1))] ###5,7,9
shp_sigma = random.uniform(2., 3.)
shp_gain = random.uniform(3., 4.)
RGB_noise_NR_shp, LF = gauss_unsharp_mask(GT_RGB_noise_NR, shp_kernel, shp_sigma, shp_gain)
# print("RGB_noise_NR_shp:", RGB_noise_NR_shp.shape)
return Image.fromarray(RGB_noise_NR_shp.astype(np.uint8))
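# Minimal usage sketch (assumption: `path` points to any RGB text crop on disk).
# degradation() takes a PIL RGB image and returns a PIL image run through the
# pre-blur -> shot/read noise -> denoise -> unsharp-mask chain above, which is how
# the dataset classes below synthesise low-quality inputs when manmade_degrade=True.
def _example_manmade_lr(path):
    hr = Image.open(path).convert('RGB')
    lr = degradation(hr)
    return hr, lr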
def noisy(noise_typ,image):
if noise_typ == "gauss":
row,col,ch= image.shape
mean = 0
var = 50
sigma = var**0.5
gauss = np.random.normal(mean,sigma,(row,col,ch))
gauss = gauss.reshape(row,col,ch)
# print("gauss:", np.unique(gauss))
noisy = image + gauss
return noisy
elif noise_typ == "s&p":
row,col,ch = image.shape
s_vs_p = 0.5
amount = 0.004
out = np.copy(image)
# Salt mode
num_salt = np.ceil(amount * image.size * s_vs_p)
coords = [np.random.randint(0, i - 1, int(num_salt))
for i in image.shape]
out[coords] = 1
# Pepper mode
num_pepper = np.ceil(amount* image.size * (1. - s_vs_p))
coords = [np.random.randint(0, i - 1, int(num_pepper))
for i in image.shape]
out[coords] = 0
return out
elif noise_typ == "poisson":
vals = len(np.unique(image))
vals = 2 ** np.ceil(np.log2(vals))
noisy = np.random.poisson(image * vals) / float(vals)
return noisy
elif noise_typ == "speckle":
row, col, ch = image.shape
gauss = np.random.randn(row, col, ch)
gauss = gauss.reshape(row, col, ch)
noisy = image + image * gauss
return noisy
def apply_brightness_contrast(input_img, brightness=0, contrast=0):
if brightness != 0:
if brightness > 0:
shadow = brightness
highlight = 255
else:
shadow = 0
highlight = 255 + brightness
alpha_b = (highlight - shadow) / 255
gamma_b = shadow
buf = cv2.addWeighted(input_img, alpha_b, input_img, 0, gamma_b)
else:
buf = input_img.copy()
if contrast != 0:
f = 131 * (contrast + 127) / (127 * (131 - contrast))
alpha_c = f
gamma_c = 127 * (1 - f)
buf = cv2.addWeighted(buf, alpha_c, buf, 0, gamma_c)
return buf
def JPEG_compress(image):
encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 40]
result, encimg = cv2.imencode('.jpg', image, encode_param)
ret_img = cv2.imdecode(encimg, 1)
return ret_img
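# Sketch chaining the photometric helpers above (parameter values are illustrative
# assumptions, not tuned settings): add Gaussian noise, shift brightness/contrast,
# then re-encode as a low-quality JPEG, all on a uint8 HxWxC numpy image.
def _example_photometric_degrade(image_np):
    out = np.clip(noisy("gauss", image_np), 0, 255).astype(np.uint8)
    out = apply_brightness_contrast(out, brightness=20, contrast=20).astype(np.uint8)
    out = JPEG_compress(out)
    return out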
class lmdbDataset_real(Dataset):
def __init__(
self, root=None,
voc_type='upper',
max_len=100,
test=False,
cutblur=False,
manmade_degrade=False,
rotate=None
):
super(lmdbDataset_real, self).__init__()
self.env = lmdb.open(
root,
max_readers=1,
readonly=True,
lock=False,
readahead=False,
meminit=False)
self.cb_flag = cutblur
self.rotate = rotate
if not self.env:
print('cannot create lmdb from %s' % (root))
sys.exit(0)
with self.env.begin(write=False) as txn:
nSamples = int(txn.get(b'num-samples'))
self.nSamples = nSamples
print("nSamples:", nSamples)
self.voc_type = voc_type
self.max_len = max_len
self.test = test
self.manmade_degrade = manmade_degrade
def __len__(self):
return self.nSamples
def rotate_img(self, image, angle):
# convert to cv2 image
if not angle == 0.0:
image = np.array(image)
(h, w) = image.shape[:2]
scale = 1.0
# set the rotation center
center = (w / 2, h / 2)
# anti-clockwise angle in the function
M = cv2.getRotationMatrix2D(center, angle, scale)
image = cv2.warpAffine(image, M, (w, h))
# back to PIL image
image = Image.fromarray(image)
return image
def cutblur(self, img_hr, img_lr):
p = random.random()
img_hr_np = np.array(img_hr)
img_lr_np = np.array(img_lr)
randx = int(img_hr_np.shape[1] * (0.2 + 0.8 * random.random()))
if p > 0.7:
left_mix = random.random()
if left_mix <= 0.5:
img_lr_np[:, randx:] = img_hr_np[:, randx:]
else:
img_lr_np[:, :randx] = img_hr_np[:, :randx]
return Image.fromarray(img_lr_np)
def __getitem__(self, index):
assert index <= len(self), 'index range error'
index += 1
txn = self.env.begin(write=False)
label_key = b'label-%09d' % index
word = ""#str(txn.get(label_key).decode())
# print("in dataset....")
img_HR_key = b'image_hr-%09d' % index # 128*32
img_lr_key = b'image_lr-%09d' % index # 64*16
try:
img_HR = buf2PIL(txn, img_HR_key, 'RGB')
if self.manmade_degrade:
img_lr = degradation(img_HR)
else:
img_lr = buf2PIL(txn, img_lr_key, 'RGB')
# print("GOGOOGO..............", img_HR.size)
if self.cb_flag and not self.test:
img_lr = self.cutblur(img_HR, img_lr)
if not self.rotate is None:
if not self.test:
angle = random.random() * self.rotate * 2 - self.rotate
else:
angle = 0 #self.rotate
# img_HR = self.rotate_img(img_HR, angle)
# img_lr = self.rotate_img(img_lr, angle)
img_lr_np = np.array(img_lr).astype(np.uint8)
img_lry = cv2.cvtColor(img_lr_np, cv2.COLOR_RGB2YUV)
img_lry = Image.fromarray(img_lry)
img_HR_np = np.array(img_HR).astype(np.uint8)
img_HRy = cv2.cvtColor(img_HR_np, cv2.COLOR_RGB2YUV)
img_HRy = Image.fromarray(img_HRy)
word = txn.get(label_key)
if word is None:
print("None word:", label_key)
word = " "
else:
word = str(word.decode())
# print("img_HR:", img_HR.size, img_lr.size())
except IOError:
return self[index + 1]
label_str = str_filt(word, self.voc_type)
return img_HR, img_lr, img_HRy, img_lry, label_str
class lmdbDataset_realDistorted(Dataset):
def __init__(
self, root=None,
voc_type='upper',
max_len=100,
test=False,
cutblur=False,
manmade_degrade=False,
rotate=None
):
super(lmdbDataset_realDistorted, self).__init__()
self.env = lmdb.open(
root,
max_readers=1,
readonly=True,
lock=False,
readahead=False,
meminit=False)
self.cb_flag = cutblur
self.rotate = rotate
self.split = root.split("/")[-1]
self.picked_index = open(os.path.join('./datasets/', self.split + "_distorted.txt"), "r").readlines()
self.picked_index = [int(index) for index in self.picked_index if len(index) > 0]
if not self.env:
print('cannot create lmdb from %s' % (root))
sys.exit(0)
with self.env.begin(write=False) as txn:
nSamples = int(txn.get(b'num-samples'))
self.nSamples = nSamples
self.nSamples = len(self.picked_index)
print("nSamples:", self.nSamples)
self.voc_type = voc_type
self.max_len = max_len
self.test = test
self.manmade_degrade = manmade_degrade
def __len__(self):
return self.nSamples
def rotate_img(self, image, angle):
# convert to cv2 image
if not angle == 0.0:
image = np.array(image)
(h, w) = image.shape[:2]
scale = 1.0
# set the rotation center
center = (w / 2, h / 2)
# anti-clockwise angle in the function
M = cv2.getRotationMatrix2D(center, angle, scale)
image = cv2.warpAffine(image, M, (w, h))
# back to PIL image
image = Image.fromarray(image)
return image
def cutblur(self, img_hr, img_lr):
p = random.random()
img_hr_np = np.array(img_hr)
img_lr_np = np.array(img_lr)
randx = int(img_hr_np.shape[1] * (0.2 + 0.8 * random.random()))
if p > 0.7:
left_mix = random.random()
if left_mix <= 0.5:
img_lr_np[:, randx:] = img_hr_np[:, randx:]
else:
img_lr_np[:, :randx] = img_hr_np[:, :randx]
return Image.fromarray(img_lr_np)
def __getitem__(self, index_):
assert index_ <= len(self), 'index range error'
# index += 1
#####################################
index = self.picked_index[index_]
#####################################
txn = self.env.begin(write=False)
label_key = b'label-%09d' % index
word = "" # str(txn.get(label_key).decode())
# print("in dataset....")
img_HR_key = b'image_hr-%09d' % index # 128*32
img_lr_key = b'image_lr-%09d' % index # 64*16
try:
img_HR = buf2PIL(txn, img_HR_key, 'RGB')
if self.manmade_degrade:
img_lr = degradation(img_HR)
else:
img_lr = buf2PIL(txn, img_lr_key, 'RGB')
# print("GOGOOGO..............", img_HR.size)
if self.cb_flag and not self.test:
img_lr = self.cutblur(img_HR, img_lr)
if not self.rotate is None:
if not self.test:
angle = random.random() * self.rotate * 2 - self.rotate
else:
angle = 0 # self.rotate
# img_HR = self.rotate_img(img_HR, angle)
# img_lr = self.rotate_img(img_lr, angle)
img_lr_np = np.array(img_lr).astype(np.uint8)
img_lry = cv2.cvtColor(img_lr_np, cv2.COLOR_RGB2YUV)
img_lry = Image.fromarray(img_lry)
img_HR_np = np.array(img_HR).astype(np.uint8)
img_HRy = cv2.cvtColor(img_HR_np, cv2.COLOR_RGB2YUV)
img_HRy = Image.fromarray(img_HRy)
word = txn.get(label_key)
if word is None:
print("None word:", label_key)
word = " "
else:
word = str(word.decode())
# print("img_HR:", img_HR.size, img_lr.size())
except IOError:
return self[index + 1]
label_str = str_filt(word, self.voc_type)
return img_HR, img_lr, img_HRy, img_lry, label_str
import pickle
class lmdbDataset_realCHNSyn(Dataset):
def __init__(self, root=None, voc_type='upper', max_len=100, test=False):
super(lmdbDataset_realCHNSyn, self).__init__()
flist = os.listdir(root)
self.root_dir = root
self.database_dict = {}
print("Loading pkl files from", root, "...")
for f in flist:
if f.endswith(".pkl"):
print("f:", f)
with open(os.path.join(root, f), "rb") as pkl_f:
self.database_dict.update(pickle.load(pkl_f))
self.nSamples = len(self.database_dict.keys())
self.keys = list(self.database_dict.keys())
print("done")
print("All data:", self.nSamples)
self.voc_type = voc_type
def __len__(self):
return self.nSamples
def __getitem__(self, index):
assert index <= len(self), 'index range error'
index += 1
imkey = self.keys[index % self.nSamples]
impath = os.path.join(self.root_dir, imkey + ".jpg")
word = self.database_dict[imkey]
try:
img_HR = Image.open(impath)
img_lr = img_HR.copy()
img_lr_np = np.array(img_lr).astype(np.uint8)
img_lr_np = cv2.GaussianBlur(img_lr_np, (5, 5), 1)
img_lr = Image.fromarray(img_lr_np)
img_lry = cv2.cvtColor(img_lr_np, cv2.COLOR_RGB2YUV)[..., 0]
img_lry = Image.fromarray(img_lry)
img_HR_np = np.array(img_HR).astype(np.uint8)
img_HRy = cv2.cvtColor(img_HR_np, cv2.COLOR_RGB2YUV)[..., 0]
img_HRy = Image.fromarray(img_HRy)
# print("img_HR:", img_HR.size, img_lr.size())
except IOError:
return self[index + 1]
label_str = str_filt(word, self.voc_type)
return img_HR, img_lr, img_HRy, img_lry, label_str #
class lmdbDataset_realIC15TextSR(Dataset):
def __init__(self, root=None, voc_type='upper', max_len=100, test=False):
super(lmdbDataset_realIC15TextSR, self).__init__()
# root should be detailed by upper folder of images
hr_image_dir = os.path.join(root, "HR")
lr_image_dir = os.path.join(root, "LR")
anno_dir = os.path.join(root, "ANNOTATION")
hr_image_list = os.listdir(hr_image_dir)
self.hr_impath_list = []
self.lr_impath_list = []
self.anno_list = []
print("collect images from:", root)
mode = "train" if root.split("/")[-2] == "TRAIN" else "test"
for i in range(len(hr_image_list)):
hr_impath = os.path.join(hr_image_dir, mode + '-hr-' + str(i+1).rjust(4, '0') + ".pgm")
lr_impath = os.path.join(lr_image_dir, mode + '-lr-' + str(i+1).rjust(4, '0') + ".pgm")
anno_path = os.path.join(anno_dir, mode + '-annot-' + str(i+1).rjust(4, '0') + ".txt")
self.hr_impath_list.append(hr_impath)
self.lr_impath_list.append(lr_impath)
self.anno_list.append(anno_path)
self.nSamples = len(self.anno_list)
print("Done, we have ", self.nSamples, "samples...")
self.voc_type = voc_type
self.max_len = max_len
self.test = test
def read_pgm(self, filename, byteorder='>'):
"""Return image data from a raw PGM file as numpy array.
Format specification: http://netpbm.sourceforge.net/doc/pgm.html
"""
with open(filename, 'rb') as f:
buffer = f.read()
try:
header, width, height, maxval = re.search(
b"(^P5\s(?:\s*#.*[\r\n])*"
b"(\d+)\s(?:\s*#.*[\r\n])*"
b"(\d+)\s(?:\s*#.*[\r\n])*"
b"(\d+)\s(?:\s*#.*[\r\n]\s)*)", buffer).groups()
return np.frombuffer(buffer,
dtype='u1' if int(maxval) < 256 else byteorder + 'u2',
count=int(width) * int(height),
offset=len(header)
).reshape((int(height), int(width)))
except AttributeError:
raise ValueError("Not a raw PGM file: '%s'" % filename)
def __len__(self):
return self.nSamples
def __getitem__(self, index):
idx = index % self.nSamples
# assert index <= len(self), 'index range error'
if not os.path.isfile(self.hr_impath_list[idx]):
print("File not found for", self.hr_impath_list[idx])
return self[index+1]
try:
img_HR_np = self.read_pgm(self.hr_impath_list[idx], byteorder='<')
img_lr_np = self.read_pgm(self.lr_impath_list[idx], byteorder='<')
label_str = open(self.anno_list[idx], "r").readlines()[0].replace("\n", "").strip()
label_str = str_filt(label_str, self.voc_type)
except ValueError:
print("File not found for", self.hr_impath_list[idx])
return self[index + 1]
# print("annos:", img_HR_np.shape, img_lr_np.shape)
img_HR = Image.fromarray(cv2.cvtColor(img_HR_np, cv2.COLOR_GRAY2RGB))
img_lr = Image.fromarray(cv2.cvtColor(img_lr_np, cv2.COLOR_GRAY2RGB))
return img_HR, img_lr, label_str
class lmdbDataset_realSVT(Dataset):
def __init__(self, root=None, voc_type='upper', max_len=100, test=False):
super(lmdbDataset_realSVT, self).__init__()
# root should be detailed by upper folder of images
# anno_dir = os.path.join(root, "ANNOTATION")
split = ("svt_" + "train") if not test else ("svt_" + "test")
dataset_dir = os.path.join(root, split)
self.image_dir = os.path.join(dataset_dir, "IMG")
self.anno_dir = os.path.join(dataset_dir, "label")
# self.impath_list = os.listdir(image_dir)
self.anno_list = os.listdir(self.anno_dir)
# self.impath_list = []
# self.anno_list = []
print("collect images from:", root)
# mode = "train" if root.split("/")[-2] == "TRAIN" else "test"
self.nSamples = len(self.anno_list)
print("Done, we have ", self.nSamples, "samples...")
self.voc_type = voc_type
self.max_len = max_len
self.test = test
def __len__(self):
return self.nSamples
def __getitem__(self, index):
idx = index % self.nSamples
anno = self.anno_list[index]
image_path = os.path.join(self.image_dir, anno.split(".")[0] + ".jpg")
anno_path = os.path.join(self.anno_dir, anno)
if not os.path.isfile(image_path):
print("File not found for", image_path)
return self[index+1]
try:
word = open(anno_path, "r").readlines()[0].replace("\n", "")
img_HR = Image.open(image_path)
img_lr = img_HR
except ValueError:
print("File not found for", image_path)
return self[index + 1]
# print("annos:", img_HR_np.shape, img_lr_np.shape)
label_str = str_filt(word, self.voc_type)
return img_HR, img_lr, label_str
class lmdbDataset_realIC15(Dataset):
def __init__(self, root=None, voc_type='upper', max_len=100, test=False, rotate=None):
super(lmdbDataset_realIC15, self).__init__()
self.env = lmdb.open(
root,
max_readers=1,
readonly=True,
lock=False,
readahead=False,
meminit=False)
self.degrade = True
if not self.env:
print('cannot create lmdb from %s' % (root))
sys.exit(0)
with self.env.begin(write=False) as txn:
nSamples = int(txn.get(b'num-samples'))
self.nSamples = nSamples
self.voc_type = voc_type
self.max_len = max_len
self.test = test
'''
if not self.degrade:
valid_cnt = 0
for index in range(1, self.nSamples + 1):
txn = self.env.begin(write=False)
label_key = b'label-%09d' % index
word = str(txn.get(label_key).decode())
img_key = b'image-%09d' % index # 128*32
# img_lr_key = b'image_lr-%09d' % index # 64*16
# try:
img_HR = buf2PIL(txn, img_key, 'RGB')
img_lr_np = np.array(img_HR).astype(np.uint8)
H, W = img_lr_np.shape[:2]
if H * W < 1024:
valid_cnt += 1
self.nSamples = valid_cnt
'''
print("We have", self.nSamples, "samples from", root)
def __len__(self):
return self.nSamples
def __getitem__(self, index):
assert index <= len(self), 'index range error'
index += 1
index = index % (self.nSamples+1)
# print(self.nSamples, index)
txn = self.env.begin(write=False)
label_key = b'label-%09d' % index
word = str(txn.get(label_key).decode())
img_key = b'image-%09d' % index # 128*32
# img_lr_key = b'image_lr-%09d' % index # 64*16
try:
img_HR = buf2PIL(txn, img_key, 'RGB')
img_lr = img_HR
img_lr_np = np.array(img_lr).astype(np.uint8)
# print("img_lr_np:", img_lr_np.shape)
if self.degrade:
# img_lr_np = cv2.GaussianBlur(img_lr_np, (5, 5), 1)
# shot_noise = random.uniform(0, 0.005)
# read_noise = random.uniform(0, 0.015)
# img_lr_np = add_shot_gauss_noise(img_lr_np, shot_noise, read_noise).astype(np.uint8)
pass
# print("img_lr_np:", img_lr_np.shape)
else:
if img_lr_np.shape[0] * img_lr_np.shape[1] > 1024:
return self[(index + 1) % self.nSamples]
img_lr = Image.fromarray(img_lr_np)
if img_lr.size[0] < 4 or img_lr.size[1] < 4:
return self[index + 1]
# print("img:", img_HR.size, word)
# img_lr = buf2PIL(txn, img_lr_key, 'RGB')
except IOError:
return self[index + 1]
# if img_HR.size[0] < 4 or img_HR.size[1] < 4:
# return self[index + 1]
label_str = str_filt(word, self.voc_type)
return img_HR, img_lr, img_HR, img_lr, label_str
class lmdbDataset_CSVTR(Dataset):
def __init__(self, root=None, voc_type='chinese', max_len=100, test=False):
super(lmdbDataset_CSVTR, self).__init__()
self.image_path_list = []
self.imdir = os.path.join(root, "filter_dir")
self.gt_file = os.path.join(root, "filter_train_test.list")
self.gt_pairs = []
gt_lines = open(self.gt_file, "r").readlines()
for line in gt_lines:
items = line.replace("\n", "").split("\t")
self.gt_pairs.append([os.path.join(self.imdir, items[2]), items[3]])
self.nSamples = len(self.gt_pairs)
print("nSamples test:", self.nSamples)
self.voc_type = voc_type
self.max_len = max_len
self.test = test
def __len__(self):
return self.nSamples
def __getitem__(self, index):
word = self.gt_pairs[index][1]
# print("word:", word)
try:
img_HR = Image.open(self.gt_pairs[index][0]) # for color image
img_lr = Image.open(self.gt_pairs[index][0])
except IOError:
return self[index+1]
#label_str = str_filt(word, self.voc_type)
return img_HR, img_lr, img_HR, img_lr, word
class lmdbDataset_realCOCOText(Dataset):
def __init__(self, root=None, voc_type='upper', max_len=100, test=False):
super(lmdbDataset_realCOCOText, self).__init__()
if test:
gt_file = "val_words_gt.txt"
im_dir = "val_words"
else:
gt_file = "train_words_gt.txt"
im_dir = "train_words"
self.image_dir = os.path.join(root, im_dir)
self.gt_file = os.path.join(root, gt_file)
self.gtlist = open(self.gt_file, "r").readlines()
if test:
self.gtlist = self.gtlist[:3000]
self.nSamples = len(self.gtlist)
self.voc_type = voc_type
self.max_len = max_len
self.test = test
def __len__(self):
return self.nSamples
def __getitem__(self, index):
assert index <= len(self), 'index range error'
# index += 1
gt_anno = self.gtlist[index].replace("\n", "")
if len(gt_anno.split(",")) < 2:
return self[index + 1]
img_id, label_str = gt_anno.split(",")[:2]
impath = os.path.join(self.image_dir, img_id + ".jpg")
try:
img_HR = Image.open(impath)
img_lr = img_HR
# print("img:", img_HR.size, word)
# img_lr = buf2PIL(txn, img_lr_key, 'RGB')
except IOError:
return self[index + 1]
label_str = str_filt(label_str, self.voc_type)
return img_HR, img_lr, label_str
class lmdbDatasetWithW2V_real(Dataset):
def __init__(
self,
root=None,
voc_type='upper',
max_len=100,
test=False,
w2v_lexicons="cc.en.300.bin"
):
super(lmdbDatasetWithW2V_real, self).__init__()
self.env = lmdb.open(
root,
max_readers=1,
readonly=True,
lock=False,
readahead=False,
meminit=False)
if not self.env:
print('cannot create lmdb from %s' % (root))
sys.exit(0)
with self.env.begin(write=False) as txn:
nSamples = int(txn.get(b'num-samples'))
self.nSamples = nSamples
self.voc_type = voc_type
self.max_len = max_len
self.test = test
# self.w2v_lexicon = FastText(w2v_lexicons)
def __len__(self):
return self.nSamples
def __getitem__(self, index):
assert index <= len(self), 'index range error'
index += 1
txn = self.env.begin(write=False)
label_key = b'label-%09d' % index
word = str(txn.get(label_key).decode())
img_HR_key = b'image_hr-%09d' % index # 128*32
img_lr_key = b'image_lr-%09d' % index # 64*16
try:
img_HR = buf2PIL(txn, img_HR_key, 'RGB')
img_lr = buf2PIL(txn, img_lr_key, 'RGB')
except IOError:
return self[index + 1]
label_str = str_filt(word, self.voc_type)
# print("HR, LR:", img_HR.size, img_lr.size)
w2v = None# self.w2v_lexicon.get_numpy_vector(label_str.lower())
return img_HR, img_lr, label_str, w2v
class resizeNormalize(object):
def __init__(self, size, mask=False, interpolation=Image.BICUBIC, aug=None, blur=False):
self.size = size
self.interpolation = interpolation
self.toTensor = transforms.ToTensor()
self.mask = mask
self.aug = aug
self.blur = blur
def __call__(self, img, ratio_keep=False):
size = self.size
if ratio_keep:
ori_width, ori_height = img.size
ratio = float(ori_width) / ori_height
if ratio < 3:
width = 100# if self.size[0] == 32 else 50
else:
width = int(ratio * self.size[1])
size = (width, self.size[1])
# print("size:", size)
img = img.resize(size, self.interpolation)
if self.blur:
# img_np = np.array(img)
# img_np = cv2.GaussianBlur(img_np, (5, 5), 1)
#print("in degrade:", np.unique(img_np))
# img_np = noisy("gauss", img_np).astype(np.uint8)
# img_np = apply_brightness_contrast(img_np, 40, 40).astype(np.uint8)
# img_np = JPEG_compress(img_np)
# img = Image.fromarray(img_np)
pass
if not self.aug is None:
img_np = np.array(img)
# print("imgaug_np:", imgaug_np.shape)
imgaug_np = self.aug(images=img_np[None, ...])
img = Image.fromarray(imgaug_np[0, ...])
img_tensor = self.toTensor(img)
if self.mask:
mask = img.convert('L')
thres = np.array(mask).mean()
mask = mask.point(lambda x: 0 if x > thres else 255)
mask = self.toTensor(mask)
img_tensor = torch.cat((img_tensor, mask), 0)
return img_tensor
class NormalizeOnly(object):
def __init__(self, size, mask=False, interpolation=Image.BICUBIC, aug=None, blur=False):
self.size = size
self.interpolation = interpolation
self.toTensor = transforms.ToTensor()
self.mask = mask
self.aug = aug
self.blur = blur
def __call__(self, img, ratio_keep=False):
size = self.size
if ratio_keep:
ori_width, ori_height = img.size
ratio = float(ori_width) / ori_height
if ratio < 3:
width = 100# if self.size[0] == 32 else 50
else:
width = int(ratio * self.size[1])
size = (width, self.size[1])
# print("size:", size)
# img = img.resize(size, self.interpolation)
if self.blur:
img_np = np.array(img)
# img_np = cv2.GaussianBlur(img_np, (5, 5), 1)
#print("in degrade:", np.unique(img_np))
# img_np = noisy("gauss", img_np).astype(np.uint8)
# img_np = apply_brightness_contrast(img_np, 40, 40).astype(np.uint8)
# img_np = JPEG_compress(img_np)
img = Image.fromarray(img_np)
if not self.aug is None:
img_np = np.array(img)
# print("imgaug_np:", imgaug_np.shape)
imgaug_np = self.aug(images=img_np[None, ...])
img = Image.fromarray(imgaug_np[0, ...])
img_tensor = self.toTensor(img)
if self.mask:
mask = img.convert('L')
thres = np.array(mask).mean()
mask = mask.point(lambda x: 0 if x > thres else 255)
mask = self.toTensor(mask)
img_tensor = torch.cat((img_tensor, mask), 0)
return img_tensor
class resizeNormalizeRandomCrop(object):
def __init__(self, size, mask=False, interpolation=Image.BICUBIC):
self.size = size
self.interpolation = interpolation
self.toTensor = transforms.ToTensor()
self.mask = mask
def __call__(self, img, interval=None):
w, h = img.size
if w < 32 or not interval is None:
img = img.resize(self.size, self.interpolation)
img_tensor = self.toTensor(img)
else:
np_img = np.array(img)
h, w = np_img.shape[:2]
np_img_crop = np_img[:, int(w * interval[0]):int(w * interval[1])]
# print("size:", self.size, np_img_crop.shape, np_img.shape, interval)
img = Image.fromarray(np_img_crop)
img = img.resize(self.size, self.interpolation)
img_tensor = self.toTensor(img)
if self.mask:
mask = img.convert('L')
thres = np.array(mask).mean()
mask = mask.point(lambda x: 0 if x > thres else 255)
mask = self.toTensor(mask)
img_tensor = torch.cat((img_tensor, mask), 0)
return img_tensor
class resizeNormalizeKeepRatio(object):
def __init__(self, size, mask=False, interpolation=Image.BICUBIC):
self.size = size
self.interpolation = interpolation
self.toTensor = transforms.ToTensor()
self.mask = mask
def __call__(self, img, label_str):
o_w, o_h = img.size
ratio = o_w / float(o_h)
re_h = self.size[1]
re_w = int(re_h * ratio)
if re_w > self.size[0]:
img = img.resize(self.size, self.interpolation)
img_tensor = self.toTensor(img).float()
else:
img = img.resize((re_w, re_h), self.interpolation)
img_np = np.array(img)
# if len(label_str) > 4:
# print("img_np:", img_np.shape)
shift_w = int((self.size[0] - img_np.shape[1]) / 2)
re_img = np.zeros((self.size[1], self.size[0], img_np.shape[-1]))
re_img[:, shift_w:img_np.shape[1]+shift_w] = img_np
re_img = Image.fromarray(re_img.astype(np.uint8))
img_tensor = self.toTensor(re_img).float()
if o_h / o_w < 0.5 and len(label_str) > 4:
# cv2.imwrite("mask_h_" + label_str + ".jpg", re_mask.astype(np.uint8))
# cv2.imwrite("img_h_" + label_str + ".jpg", np.array(re_img))
# print("img_np_h:", o_h, o_w, img_np.shape, label_str)
pass
if self.mask:
mask = img.convert('L')
thres = np.array(mask).mean()
mask = mask.point(lambda x: 0 if x > thres else 255)
if re_w > self.size[0]:
# img = img.resize(self.size, self.interpolation)
re_mask_cpy = np.ones((mask.size[1], mask.size[0]))
mask = self.toTensor(mask)
img_tensor = torch.cat((img_tensor, mask), 0).float()
else:
mask = np.array(mask)
mask = cv2.resize(mask, (re_w, re_h), interpolation=cv2.INTER_NEAREST)
shift_w = int((self.size[0] - mask.shape[1]) / 2)
# print("resize mask:", mask.shape)
re_mask = np.zeros((self.size[1], self.size[0]))
re_mask_cpy = re_mask.copy()
re_mask_cpy[:, shift_w:mask.shape[1] + shift_w] = np.ones(mask.shape)
re_mask[:, shift_w:mask.shape[1] + shift_w] = mask
'''
if o_h / o_w > 2 and len(label_str) > 4:
cv2.imwrite("mask_" + label_str + ".jpg", re_mask.astype(np.uint8))
cv2.imwrite("img_" + label_str + ".jpg", re_img.astype(np.uint8))
print("img_np:", o_h, o_w, img_np.shape, label_str)
if o_h / o_w < 0.5 and len(label_str) > 4:
cv2.imwrite("mask_h_" + label_str + ".jpg", re_mask.astype(np.uint8))
cv2.imwrite("img_h_" + label_str + ".jpg", re_img.astype(np.uint8))
print("img_np_h:", o_h, o_w, img_np.shape, label_str)
'''
re_mask = self.toTensor(re_mask).float()
img_tensor = torch.cat((img_tensor, re_mask), 0)
return img_tensor, torch.tensor(cv2.resize(re_mask_cpy, (self.size[0] * 2, self.size[1] * 2), interpolation=cv2.INTER_NEAREST)).float()
class lmdbDataset_mix(Dataset):
def __init__(self, root=None, voc_type='upper', max_len=100, test=False):
super(lmdbDataset_mix, self).__init__()
self.env = lmdb.open(
root,
max_readers=1,
readonly=True,
lock=False,
readahead=False,
meminit=False)
if not self.env:
print('cannot create lmdb from %s' % (root))
sys.exit(0)
with self.env.begin(write=False) as txn:
nSamples = int(txn.get(b'num-samples'))
self.nSamples = nSamples
self.voc_type = voc_type
self.max_len = max_len
self.test = test
def __len__(self):
return self.nSamples
def __getitem__(self, index):
assert index <= len(self), 'index range error'
index += 1
txn = self.env.begin(write=False)
label_key = b'label-%09d' % index
word = str(txn.get(label_key).decode())
if self.test:
try:
img_HR = buf2PIL(txn, b'image_hr-%09d' % index, 'RGB')
img_lr = buf2PIL(txn, b'image_lr-%09d' % index, 'RGB')
except:
img_HR = buf2PIL(txn, b'image-%09d' % index, 'RGB')
img_lr = img_HR
else:
img_HR = buf2PIL(txn, b'image_hr-%09d' % index, 'RGB')
if random.uniform(0, 1) < 0.5:
img_lr = buf2PIL(txn, b'image_lr-%09d' % index, 'RGB')
else:
img_lr = img_HR
label_str = str_filt(word, self.voc_type)
return img_HR, img_lr, label_str
class lmdbDatasetWithMask_real(Dataset):
def __init__(self, root=None, voc_type='upper', max_len=100, test=False):
super(lmdbDatasetWithMask_real, self).__init__()
self.env = lmdb.open(
root,
max_readers=1,
readonly=True,
lock=False,
readahead=False,
meminit=False)
if not self.env:
print('cannot create lmdb from %s' % (root))
sys.exit(0)
with self.env.begin(write=False) as txn:
nSamples = int(txn.get(b'num-samples'))
self.nSamples = nSamples
self.voc_type = voc_type
self.max_len = max_len
self.test = test
def __len__(self):
return self.nSamples
def get_mask(self, image):
img_hr = np.array(image)
img_hr_gray = cv2.cvtColor(img_hr, cv2.COLOR_BGR2GRAY)
kernel = np.ones((5, 5), np.uint8)
hr_canny = cv2.Canny(img_hr_gray, 20, 150)
hr_canny = cv2.dilate(hr_canny, kernel, iterations=1)
hr_canny = cv2.GaussianBlur(hr_canny, (5, 5), 1)
weighted_mask = 0.4 + (hr_canny / 255.0) * 0.5
return weighted_mask
def __getitem__(self, index):
assert index <= len(self), 'index range error'
index += 1
txn = self.env.begin(write=False)
label_key = b'label-%09d' % index
word = str(txn.get(label_key).decode())
img_HR_key = b'image_hr-%09d' % index # 128*32
img_lr_key = b'image_lr-%09d' % index # 64*16
try:
img_HR = buf2PIL(txn, img_HR_key, 'RGB')
img_lr = buf2PIL(txn, img_lr_key, 'RGB')
except IOError:
return self[index + 1]
label_str = str_filt(word, self.voc_type)
weighted_mask = self.get_mask(img_HR)
return img_HR, img_lr, label_str, weighted_mask
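# Hypothetical peek at the edge-weighted mask produced by
# lmdbDatasetWithMask_real.get_mask above: Canny edges are dilated, blurred and
# mapped into [0.4, 0.9], so pixels near character strokes receive a larger weight.
def _example_edge_weight(dataset, img_pil):
    mask = dataset.get_mask(img_pil)  # dataset: an lmdbDatasetWithMask_real instance
    return float(mask.min()), float(mask.max()), mask.shape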
class randomSequentialSampler(sampler.Sampler):
def __init__(self, data_source, batch_size):
self.num_samples = len(data_source)
self.batch_size = batch_size
def __iter__(self):
n_batch = len(self) // self.batch_size
tail = len(self) % self.batch_size
index = torch.LongTensor(len(self)).fill_(0)
for i in range(n_batch):
random_start = random.randint(0, len(self) - self.batch_size)
batch_index = random_start + torch.arange(0, self.batch_size)
index[i * self.batch_size:(i + 1) * self.batch_size] = batch_index
# deal with tail
if tail:
random_start = random.randint(0, len(self) - self.batch_size)
tail_index = random_start + torch.arange(0, tail)
index[(i + 1) * self.batch_size:] = tail_index
return iter(index)
def __len__(self):
return self.num_samples
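# Hypothetical wiring sketch: randomSequentialSampler yields randomly placed but
# contiguous index blocks, so consecutive samples in a batch come from neighbouring
# records. The DataLoader arguments are illustrative assumptions only.
def _example_loader(dataset, batch_size=32):
    from torch.utils.data import DataLoader
    smp = randomSequentialSampler(dataset, batch_size)
    return DataLoader(dataset, batch_size=batch_size, sampler=smp, num_workers=0)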
class alignCollate_syn(object):
def __init__(self, imgH=64,
imgW=256,
down_sample_scale=4,
keep_ratio=False,
min_ratio=1,
mask=False,
alphabet=53,
train=True,
y_domain=False
):
sometimes = lambda aug: iaa.Sometimes(0.2, aug)
aug = [
iaa.GaussianBlur(sigma=(0.0, 3.0)),
iaa.AverageBlur(k=(1, 5)),
iaa.MedianBlur(k=(3, 7)),
iaa.BilateralBlur(
d=(3, 9), sigma_color=(10, 250), sigma_space=(10, 250)),
iaa.MotionBlur(k=3),
iaa.MeanShiftBlur(),
iaa.Superpixels(p_replace=(0.1, 0.5), n_segments=(1, 7))
]
self.aug = iaa.Sequential([sometimes(a) for a in aug], random_order=True)
# self.y_domain = y_domain
self.imgH = imgH
self.imgW = imgW
self.keep_ratio = keep_ratio
self.min_ratio = min_ratio
self.down_sample_scale = down_sample_scale
self.mask = mask
# self.alphabet = "0123456789abcdefghijklmnopqrstuvwxyz"
self.alphabet = open("al_chinese.txt", "r").readlines()[0].replace("\n", "")
self.d2a = "-" + self.alphabet
self.alsize = len(self.d2a)
self.a2d = {}
cnt = 0
for ch in self.d2a:
self.a2d[ch] = cnt
cnt += 1
imgH = self.imgH
imgW = self.imgW
self.transform = resizeNormalize((imgW, imgH), self.mask)
self.transform2 = resizeNormalize((imgW // self.down_sample_scale, imgH // self.down_sample_scale), self.mask, blur=True)
self.transform_pseudoLR = resizeNormalize((imgW // self.down_sample_scale, imgH // self.down_sample_scale), self.mask, aug=self.aug)
self.train = train
def degradation(self, img_L):
# degradation process, blur + bicubic downsampling + Gaussian noise
# if need_degradation:
# img_L = util.modcrop(img_L, sf)
img_L = np.array(img_L)
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from functools import partial
import numpy as np
import pytest
from utils import opr_test
import megengine.functional as F
from megengine import jit, tensor
def common_test_reduce(opr, ref_opr):
data1_shape = (5, 6, 7)
data2_shape = (2, 9, 12)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [
{"input": data1},
{"input": data2},
{"input": np.array([[[1, 2, np.nan, 4], [8, 6, 5, 2], [2, 3, 4, 5]]])},
]
if opr not in (F.argmin, F.argmax):
# test default axis
opr_test(cases, opr, ref_fn=ref_opr)
# test all axises in range of input shape
for axis in range(-3, 3):
# test keepdims False
opr_test(cases, opr, ref_fn=lambda x: ref_opr(x, axis=axis), axis=axis)
# test keepdims True
opr_test(
cases,
opr,
ref_fn=lambda x: ref_opr(x, axis=axis, keepdims=True),
axis=axis,
keepdims=True,
)
else:
# test default axis
opr_test(cases, opr, ref_fn=lambda x: ref_opr(x).astype(np.int32))
# test all axises in range of input shape
for axis in range(0, 3):
opr_test(
cases,
opr,
ref_fn=lambda x: ref_opr(x, axis=axis).astype(np.int32),
axis=axis,
)
# test negative axis
axis = axis - len(data1_shape)
opr_test(
cases,
opr,
ref_fn=lambda x: ref_opr(x, axis=axis).astype(np.int32),
axis=axis,
)
def test_sum():
common_test_reduce(opr=F.sum, ref_opr=np.sum)
def test_prod():
common_test_reduce(opr=F.prod, ref_opr=np.prod)
def test_mean():
common_test_reduce(opr=F.mean, ref_opr=np.mean)
def test_var():
common_test_reduce(opr=F.var, ref_opr=np.var)
def test_std():
common_test_reduce(opr=F.std, ref_opr=np.std)
def test_min():
common_test_reduce(opr=F.min, ref_opr=np.min)
def test_max():
common_test_reduce(opr=F.max, ref_opr=np.max)
def test_argmin():
common_test_reduce(opr=F.argmin, ref_opr=np.argmin)
def test_argmax():
common_test_reduce(opr=F.argmax, ref_opr=np.argmax)
def test_sqrt():
d1_shape = (15,)
d2_shape = (25,)
d1 = np.random.random(d1_shape).astype(np.float32)
d2 = np.random.random(d2_shape).astype(np.float32)
cases = [{"input": d1}, {"input": d2}]
opr_test(cases, F.sqrt, ref_fn=np.sqrt)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output1 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output2 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output1},
{"input": data2, "output": output2},
]
opr_test(cases, F.sort)
@pytest.mark.parametrize("is_symbolic", [None, False, True])
def test_sort_empty(is_symbolic):
data_shapes = [
(0,),
(10, 0),
]
def fn(x):
return F.sort(x)
for shape in data_shapes:
if is_symbolic is not None:
fn_ = jit.trace(symbolic=is_symbolic)(fn)
else:
fn_ = fn
data = np.random.random(shape).astype(np.float32)
for _ in range(3):
outs = fn_(tensor(data))
ref_outs = (np.sort(data), np.argsort(data))
assert len(ref_outs) == len(outs)
for i in range(len(outs)):
np.testing.assert_equal(outs[i].numpy(), ref_outs[i])
if is_symbolic is None:
break
def test_normalize():
cases = [
{"input": np.random.random((2, 3, 12, 12)).astype(np.float32)} for i in range(2)
]
def np_normalize(x, p=2, axis=None, eps=1e-12):
if axis is None:
norm = np.sum(x ** p) ** (1.0 / p)
else:
norm = np.sum(x ** p, axis=axis, keepdims=True) ** (1.0 / p)
return x / np.clip(norm, a_min=eps, a_max=np.inf)
# # Test L-2 norm along all dimensions
# opr_test(cases, F.normalize, ref_fn=np_normalize)
# # Test L-1 norm along all dimensions
# opr_test(cases, partial(F.normalize, p=1), ref_fn=partial(np_normalize, p=1))
# Test L-2 norm along the second dimension
opr_test(cases, partial(F.normalize, axis=1), ref_fn=partial(np_normalize, axis=1))
# Test some norm == 0
cases[0]["input"][0, 0, 0, :] = 0
cases[1]["input"][0, 0, 0, :] = 0
opr_test(cases, partial(F.normalize, axis=3), ref_fn=partial(np_normalize, axis=3))
def test_sum_neg_axis():
shape = (2, 3)
data = np.random.random(shape).astype(np.float32)
for axis in (-1, -2, (-2, 1), (-1, 0)):
get = F.sum(tensor(data), axis=axis)
ref = np.sum(data, axis=axis)
np.testing.assert_allclose(get.numpy(), ref, rtol=1e-6)
with pytest.raises(AssertionError):
F.sum(tensor(data), axis=(-1, 1))
def test_non_finite():
shape = (32, 3, 32, 32)
data1 = np.random.random(shape).astype(np.float32)
data2 = np.random.random(shape).astype(np.float32)
rst = F.math._check_non_finite([tensor(data1), tensor(data2)])
np.testing.assert_equal(rst.numpy(), [0])
data2[0][0][0][0] = float("inf")
rst = F.math._check_non_finite([tensor(data1), tensor(data2)])
np.testing.assert_equal(rst.numpy(), [1])
data2[0][0][0][0] = float("nan")
rst = F.math._check_non_finite([tensor(data1), tensor(data2)])
np.testing.assert_equal(rst.numpy(), [1])
@pytest.mark.parametrize("descending", [True, False])
@pytest.mark.parametrize("sorted", [True, False])
@pytest.mark.parametrize("inp1d", [True, False])
@pytest.mark.parametrize("kth_only", [True, False])
def test_topk(descending, sorted, inp1d, kth_only):
k = 3
if inp1d:
data = np.random.permutation(7)
else:
data = np.random.permutation(5 * 7).reshape(5, 7)
data = data.astype(np.int32)
def np_sort(x):
if descending:
return np.sort(x)
#! /usr/bin/python
### Make Origami vertices using the Miura-Ori design
import numpy as np
from scipy.interpolate import griddata
import math
def make_ori_MO(m_in,n_in,scale_in):
# Origami parameters
ori_size = 100.0
m = m_in*2
n = n_in*2
Tx = 2.0*ori_size/(m)
Ty = 2.0*ori_size/(n)
hx = Tx*scale_in
hy = Ty*scale_in
Vx = np.zeros([m,n])
Vy = np.zeros([m,n])
Vz = np.zeros([m,n])
for i in range(1,m+1):
for j in range(1,n+1):
Vx[i-1,j-1] = (i-1)*Tx/2
Vy[i-1,j-1] = ((j-1)*Ty/2) + ((1 + (-1)**i)/2)*(hx/hy)*np.sqrt((Ty/2)**2 + hy**2)
Vz[i-1,j-1] = (1 + (-1)**j)*hy/2
# Make creases
Vtx = np.zeros([m,n])
Vty = np.zeros([m,n])
Vtx[0,0] = 0.0
Vty[0,0] = 0.0
for i in range(1,m):
vec_i_1 = np.array([ Vx[i-1,0], Vy[i-1,0], Vz[i-1,0] ])
vec_i_2 = np.array([ Vx[i-1,1], Vy[i-1,1], Vz[i-1,1] ])
vec_ip1_1 = np.array([ Vx[i,0], Vy[i,0], Vz[i,0] ])
vec_ip1_2 = np.array([ Vx[i,1], Vy[i,1], Vz[i,1] ])
cos_phi = ((np.linalg.norm(vec_i_1-vec_i_2)**2) + (np.linalg.norm(vec_i_1-vec_ip1_1)**2) - (np.linalg.norm(vec_i_2-vec_ip1_1)**2))/(2.0*np.linalg.norm(vec_i_1-vec_i_2)*np.linalg.norm(vec_i_1-vec_ip1_1))
cos_chi = ((np.linalg.norm(vec_ip1_1-vec_ip1_2)**2) + (np.linalg.norm(vec_i_1-vec_ip1_1)**2) - (np.linalg.norm(vec_ip1_2-vec_i_1)**2))/(2.0*np.linalg.norm(vec_ip1_1-vec_ip1_2)*np.linalg.norm(vec_i_1-vec_ip1_1))
if ((cos_phi <= 1.0) and (cos_phi >= 0.0)):
phi = np.arccos(cos_phi)
Vtx[i,0] = Vtx[i-1,0] + (np.sin(phi)*np.linalg.norm(vec_i_1-vec_ip1_1))
Vty[i,0] = Vty[i-1,0] + (np.cos(phi)*np.linalg.norm(vec_i_1-vec_ip1_1))
else:
chi = np.arccos(cos_chi)
Vtx[i,0] = Vtx[i-1,0] + (np.sin(chi)*np.linalg.norm(vec_i_1-vec_ip1_1))
Vty[i,0] = Vty[i-1,0] - (np.cos(chi)*np.linalg.norm(vec_i_1-vec_ip1_1))
for j in range(1,n):
for i in range(0,m):
vec_i_j = np.array([ Vx[i,j], Vy[i,j], Vz[i,j] ])
vec_i_jm1 = np.array([ Vx[i,j-1], Vy[i,j-1], Vz[i,j-1] ])
Vtx[i,j] = Vtx[i,j-1]
Vty[i,j] = Vty[i,j-1] + np.linalg.norm(vec_i_jm1-vec_i_j)
# Convert to tuples
x_extent = (max(Vtx[:,0]) - min(Vtx[:,0]))*(m/(m-1.0))
y_extent = (max(Vty[0,:]) - min(Vty[0,:]))*(n/(n-1.0))
crease_x = Vtx.reshape(m*n,1)
crease_y = Vty.reshape(m*n,1)
initpoints_i_j = np.array([])
initpoints_i_j = np.append(crease_x,crease_y,1) # i,j
initpoints_ip1_j = np.append(crease_x+x_extent,crease_y,1) # i+1,j
initpoints_im1_j = np.append(crease_x-x_extent,crease_y,1) # i-1,j
initpoints_i_jp1 = np.append(crease_x,crease_y+y_extent,1) # i,j+1
initpoints_i_jm1 = np.append(crease_x,crease_y-y_extent,1) # i,j-1
initpoints_ip1_jp1 = np.append(crease_x+x_extent,crease_y+y_extent,1) # i+1,j+1
initpoints_ip1_jm1 = np.append(crease_x+x_extent,crease_y-y_extent,1) # i+1,j-1
initpoints_im1_jp1 = np.append(crease_x-x_extent,crease_y+y_extent,1) # i-1,j+1
initpoints_im1_jm1 = np.append(crease_x-x_extent,crease_y-y_extent,1) # i-1,j-1
initpoints = initpoints_i_j
initpoints = np.append(initpoints,initpoints_ip1_j,0)
initpoints = np.append(initpoints,initpoints_im1_j,0)
initpoints = np.append(initpoints,initpoints_i_jp1,0)
initpoints = np.append(initpoints,initpoints_i_jm1,0)
initpoints = np.append(initpoints,initpoints_ip1_jp1,0)
initpoints = np.append(initpoints,initpoints_ip1_jm1,0)
initpoints = np.append(initpoints,initpoints_im1_jp1,0)
initpoints = np.append(initpoints,initpoints_im1_jm1,0)
## Normalize to [0,1]
initpoints[:,0] = initpoints[:,0]/x_extent
initpoints[:,1] = initpoints[:,1]/y_extent
# Convert Origami vertices to tuples
values_ij = Vx.reshape(m*n,1)
x_extent = (max(Vx[:,0]) - min(Vx[:,0]))*(m/(m-1.0))
values = values_ij
values = np.append(values,values_ij+x_extent,0)
values = np.append(values,values_ij-x_extent,0)
values = np.append(values,values_ij,0)
values = np.append(values,values_ij,0)
values = np.append(values,values_ij+x_extent,0)
values = np.append(values,values_ij+x_extent,0)
values = np.append(values,values_ij-x_extent,0)
values = np.append(values,values_ij-x_extent,0)
# sample_submission.py
import numpy as np
from scipy.special import expit
import sys
class xor_net(object):
"""
This code will train and test the Neural Network for XOR data.
Args:
data: Is a tuple, ``(x,y)``
``x`` is a two or one dimensional ndarray ordered such that axis 0 is independent
data and data is spread along axis 1. If the array had only one dimension, it implies
that data is 1D.
``y`` is a 1D ndarray it will be of the same length as axis 0 or x.
"""
def __init__(self, data, labels):
self.x = data
self.y = labels
maxiteration = 300000
if self.x.shape[0] <= 100:
learningrate = .001
maxiteration = 1000000
elif self.x.shape[0] <= 500:
learningrate = .0001
maxiteration = 500000
else:
learningrate = .00001
R = .01
xdimension = self.x.shape[1]
neuorons = 3
self.w = np.random.rand(xdimension + 1, neuorons)
tempX = np.insert(self.x, 0, 1, axis=1)
tempX = np.array(tempX, dtype=np.float64)
validsize = int(.2 * len(self.x))
validsetX = tempX[0:validsize, :]
trainX = tempX[validsize:, :]
validsetY = self.y[0:validsize]
trainY = self.y[validsize:]
previouserror = sys.maxint
count = 0
self.wprime = np.random.rand(neuorons + 1, 1)
finalW = self.w
finalWprime = self.wprime
iteration = 0
momentum = .9
prevloss = np.random.rand(self.w.shape[0], self.w.shape[1])
prevlossprime = np.random.rand(self.wprime.shape[0], self.wprime.shape[1])
while True:
u = np.dot(self.w.T, trainX.T)
h = expit(u)
temph = h
h = np.insert(h, 0, 1, axis=0)
h = np.array(h, dtype=np.float64)
uprime = np.dot(self.wprime.T, h)
yprime = expit(uprime)
uvalid = np.dot(self.w.T, validsetX.T)
hvalid = expit(uvalid)
hvalid = np.insert(hvalid, 0, 1, axis=0)
uvalidprime = np.dot(self.wprime.T, hvalid)
yvalidprime = expit(uvalidprime)
currenterror = (np.mean((validsetY - yvalidprime) ** 2)) / 2
if iteration >= maxiteration:
finalW = self.w
finalWprime = self.wprime
break
if currenterror > previouserror:
if count == 0:
finalW = self.w
finalWprime = self.wprime
count = count + 1
if count >= 10 and iteration > 100000:
break
else:
count = 0
previouserror = currenterror
regwprime = np.multiply(learningrate, np.multiply(2, np.multiply(R, self.wprime)))
l2delta = np.multiply(np.subtract(yprime, trainY.T), np.multiply(yprime, np.subtract(1, yprime)))
lossprime = np.multiply(learningrate, np.dot(l2delta, h.T))
self.wprime = np.subtract(self.wprime, lossprime.T)
self.wprime = np.subtract(self.wprime, regwprime)
self.wprime = np.subtract(self.wprime, np.multiply(momentum, prevlossprime))
prevlossprime = lossprime.T
tempWprime = self.wprime[1:]
regw = np.multiply(learningrate, np.multiply(2, np.multiply(R, self.w)))
l1delta = (l2delta.T.dot(tempWprime.T)).T * (temph * (1 - temph))
loss = learningrate * (trainX.T.dot(l1delta.T))
self.w = np.subtract(self.w, loss)
self.w = np.subtract(self.w, regw)
self.w = np.subtract(self.w, np.multiply(momentum, prevloss))
prevloss = loss
iteration = iteration + 1
self.w = finalW
self.wprime = finalWprime
self.params = [(self.w[0, :], self.w[1:, :]), (self.wprime[0], self.wprime[1:])] # [(w,b),(w,b)]
def get_params(self):
"""
This code will return Weights and Bias of the trained network.
Returns:
tuple of numpy.ndarray: (w, b).
"""
return self.params
def get_predictions(self, x):
"""
This method will return prediction for unseen data.
Args:
x: array similar to ``x`` in ``data``. Might be of different size.
Returns:
numpy.ndarray: ``y`` which is a 1D array of predictions of the same length as axis 0 of
``x``
"""
testX = np.insert(x, 0, 1, axis=1)
utest = np.dot(self.w.T, testX.T)
htest = expit(utest)
htest = np.insert(htest, 0, 1, axis=0)
utestprime = np.dot(self.wprime.T, htest)
ytestprime = expit(utestprime)
predY = ytestprime > .5
predY = predY.astype(int)
predY = predY.flatten()
return predY
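# Minimal smoke-test sketch for xor_net on the four-row XOR truth table (purely an
# interface illustration; training runs inside __init__ and, with the iteration
# counts above, can take a while even on toy data).
def _example_xor_usage():
    x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.float64)
    y = np.array([0, 1, 1, 0])
    net = xor_net(x, y)
    (b1, w1), (b2, w2) = net.get_params()
    return net.get_predictions(x), w1.shape, w2.shape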
class mlnn(object):
"""
This code will train and test the Neural Network for image data.
Args:
data: Is a tuple, ``(x,y)``
``x`` is a two or one dimensional ndarray ordered such that axis 0 is independent
data and data is spread along axis 1. If the array had only one dimension, it implies
that data is 1D.
``y`` is a 1D ndarray it will be of the same length as axis 0 or x.
"""
def __init__(self, data, labels):
self.x = data / 255.0
self.y = labels
maxiteration=40000
if self.x.shape[0]<=100:
learningrate = .0001
elif self.x.shape[0]<=500:
learningrate=.0001
else:
learningrate = .00001
if self.x.shape[0]>500:
maxiteration=15000
R = 0.01
neuorons = 100
self.w = 0.01 * np.random.rand(self.x.shape[1] + 1, neuorons)
tempX = np.insert(self.x, 0, 1, axis=1)
tempX = np.array(tempX, dtype=np.float64)
validsize = int(.2 * len(self.x))
validsetX = tempX[0:validsize, :]
validsetX -= np.mean(validsetX, axis=0)
trainX = tempX[validsize:, :]
trainX -= np.mean(trainX, axis=0)
validsetY = self.y[0:validsize]
trainY = self.y[validsize:]
previouserror = sys.maxint
count = 0
self.wprime = 0.01 * np.random.rand(neuorons + 1, 1)
finalW = self.w
finalWprime = self.wprime
iteration = 0
while True:
randomTrainX = trainX
randomTrainY = trainY
h = 1.0 / (1.0 + np.exp(-1.0 * np.dot(self.w.T, randomTrainX.T)))
temph = h
h = np.insert(h, 0, 1, axis=0)
uprime = np.dot(self.wprime.T, h)
yprime = expit(uprime)
uvalid = np.dot(self.w.T, validsetX.T)
hvalid = expit(uvalid)
hvalid = np.insert(hvalid, 0, 1, axis=0)
uvalidprime = np.dot(self.wprime.T, hvalid)
yvalidprime = expit(uvalidprime)
currenterror = (np.mean((validsetY - yvalidprime) ** 2)) / 2
if iteration >= maxiteration:
finalW = self.w
finalWprime = self.wprime
break
if currenterror > previouserror:
if count == 0:
finalW = self.w
finalWprime = self.wprime
count = count + 1
if count >= 10 and iteration>=10000:
break
else:
count = 0
previouserror = currenterror
regwprime = np.multiply(learningrate, np.multiply(2, np.multiply(R, self.wprime)))
l2delta = np.multiply(np.subtract(yprime, randomTrainY.T), np.multiply(yprime, np.subtract(1, yprime)))
import numpy as np
from multiagent.core import World, Agent, Landmark
from multiagent.scenario import BaseScenario
import random
class Teams:
def __init__(self):
self.teams = []
def add_agent_to_new_team(self, agent):
self.teams.append(set([agent]))
def combine_teams(self, agent1, agent2):
#combine the two teams of these agents, without permission from teammates
team1 = self.__find_agent_team(agent1)
team2 = self.__find_agent_team(agent2)
team2.update(team1)
team1.clear()
def make_new_team(self, agents):
#put all these agents on a new team, without permission from teammates
#note: you can call this with one agent + call combine_teams to make only
#a single agent defect
team_new = set()
for agent in agents:
team = self.__find_agent_team(agent)
team.discard(agent)
team_new.add(agent)
self.teams.append(team_new)
def __clear_empty_teams(self):
self.teams = [team for team in self.teams if len(team) > 0]
def __find_agent_team(self,agent):
for team in self.teams:
if agent in team:
return team
raise(ValueError('agent isnt on any team'))
def are_adversaries(self, agent1,agent2):
team1 = self.__find_agent_team(agent1)
team2 = self.__find_agent_team(agent2)
return team1 != team2
class Territories:
def __init__(self, landmarks):
self.landmarks = {ld:None for ld in landmarks}
def takeover(self,agent,ld):
self.landmarks[ld] = agent
def is_owner(self,agent,ld):
return self.landmarks[ld] == agent
def get_owner(self,ld):
return self.landmarks[ld]
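# Interface sketch for the two bookkeeping classes above (the agent/landmark
# arguments are placeholders; in the scenario they are multiagent.core Agent and
# Landmark objects). Teams tracks alliances, Territories tracks the last agent to
# capture each landmark.
def _example_bookkeeping(agent_a, agent_b, landmark):
    teams = Teams()
    teams.add_agent_to_new_team(agent_a)
    teams.add_agent_to_new_team(agent_b)
    territories = Territories([landmark])
    territories.takeover(agent_a, landmark)
    return teams.are_adversaries(agent_a, agent_b), territories.is_owner(agent_a, landmark)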
class Scenario(BaseScenario):
COLOURS = [[0.85,0.35,0.35],
[0.35,0.35,0.85],
[0.35,0.85,0.35],
[0.15,0.65,0.15],
[0.15,0.15,0.65],
[0.65,0.15,0.15],
[0.15,0.15,0.15]]
def setup_new_agent(self, i, agent, world):
agent.name = 'agent %d' %i
agent.collide = True
agent.silent = True
#agent.adversary = True
agent.size = 0.15
agent.original_size = 0.15
agent.n_landmarks = 0
agent.territory = set()
agent.collisions = set()
agent.size_zero_flag = False
#size, accel, max_speed = defaults
world.teams.add_agent_to_new_team(agent)
def setup_landmarks(self, world, num_landmarks):
world.landmarks = [Landmark() for i in range(num_landmarks)]
for i, landmark in enumerate(world.landmarks):
landmark.name = 'landmark %d' %i
landmark.collide = True
landmark.movable = False
landmark.original_size = landmark.size
#landmark.size = 0
landmark.boundary = False
world.territories = Territories(world.landmarks)
def make_world(self):
world = World()
world.teams = Teams()
#set any world properties
world.dim_c = 0 #no communication channel
world.dim_p = 2 #position dimenstionality
world.dim_color = 2 #number of teams
#world.collaborative = False #????? since they don't share rewards
#agents
num_agents = 4#7
num_landmarks = 6#34
#add the agents
world.agents = [Agent() for i in range(num_agents)]
for i, agent in enumerate(world.agents):
self.setup_new_agent(i,agent, world)
#add the landmarks
self.setup_landmarks(world, num_landmarks)
#make intial conditions
self.reset_world(world)
return world
def reset_world(self, world):
# set agent teams
for i, agent in enumerate(world.agents):
agent.color = np.array(Scenario.COLOURS[i]) #make them all different
agent.size = 0.15
#landmarks
#for i, landmark in enumerate(world.landmarks):
#initial states
for agent in world.agents:
agent.state.p_pos = np.random.uniform(-1,+1,world.dim_p)
agent.state.p_vel = np.zeros(world.dim_p)
agent.state.c = np.zeros(world.dim_c)
for i, landmark in enumerate(world.landmarks):
landmark.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
landmark.state.p_vel = np.zeros(world.dim_p)
landmark.color = np.array(Scenario.COLOURS[-1])
def benchmark_data(self,agent, world):
pass
def teams(self,world):
        return world.teams
def is_collision(self, agent1, agent2):
delta_pos = agent1.state.p_pos - agent2.state.p_pos
dist = np.sqrt(np.sum(np.square(delta_pos)))
dist_min = agent1.size + agent2.size
return True if dist < dist_min else False
def reward(self,agent,world):
rew = 0
#agents are rewarded for gaining new territory and for losing territory
        #add shape clause to affect reward based on distance from homebase
#agent.collisions.clear()
if agent.collide:
for ag in world.agents:
if self.is_collision(agent,ag):
if world.teams.are_adversaries(agent,ag):
#combat - randomly make one of them smaller
#
#if already calculated
if(not ag in agent.collisions):
agent.collisions.add(ag)
#agent.collisions.add(ag)
neg_rew_agent, neg_rew_ag = prob_shrink(agent, ag, shrink_size = 0.02)
agent.size_zero_flag |= neg_rew_agent
ag.size_zero_flag |= neg_rew_ag
else:
agent.collisions.remove(ag)
#if(agent in ag.collisions):
# #don't collide3
# ag.collisions.remove(agent)
# agent.collisions.remove(ag)
if(ag in agent.collisions):
#undo it
agent.collisions.remove(ag)
# if p < 0.5:
# agent.size -= 0.01
# else:
# ag.size -= 0.01
#
if agent.size_zero_flag:
#agent.size_zero_flag
agent.size_zero_flag = False
rew -= 50
had_landmark_collision = False
for ld in world.landmarks:
if self.is_collision(agent,ld):
old_owner = world.territories.get_owner(ld)
world.territories.takeover(agent,ld)
ld.color = agent.color
if(ld not in agent.territory):
agent.territory.add(ld)
rew += 5
had_landmark_collision = True
if(not had_landmark_collision):
rew -= 0.5
#for negative reward, see if anybody collided with our territory
for ag in world.agents:
if ag == agent:
continue
if ag.collide:
for ld in world.landmarks:
if(self.is_collision(ag,ld)):
if(ld in agent.territory):
agent.territory.remove(ld)
rew -=5
# agents are penalized for exiting the screen, so that they can be caught by the adversaries
def bound(x):
if x < 0.9:
return 0
if x < 1.0:
return (x - 0.9) * 10
            return min(np.exp(2 * x - 2), 10)
from __future__ import print_function
import argparse
import os
import random
import sys
sys.path.append(os.getcwd())
import pdb
import time
import numpy as np
import json
import progressbar
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
from misc.utils import repackage_hidden, clip_gradient, adjust_learning_rate, \
decode_txt, sample_batch_neg, l2_norm
import misc.dataLoader as dl
import misc.model as model
from misc.encoder_QIH import _netE
from misc.netG import _netG
import datetime
parser = argparse.ArgumentParser()
parser.add_argument('--input_img_h5', default='data/vdl_img_vgg.h5', help='path to dataset, now hdf5 file')
parser.add_argument('--input_ques_h5', default='data/visdial_data.h5', help='path to dataset, now hdf5 file')
parser.add_argument('--input_json', default='data/visdial_params.json', help='path to dataset, now hdf5 file')
parser.add_argument('--outf', default='./save', help='folder to output images and model checkpoints')
parser.add_argument('--encoder', default='G_QIH_VGG', help='what encoder to use.')
parser.add_argument('--model_path', default='', help='folder to output images and model checkpoints')
parser.add_argument('--num_val', default=0, help='number of image split out as validation set.')
parser.add_argument('--niter', type=int, default=50, help='number of epochs to train for')
parser.add_argument('--start_epoch', type=int, default=0, help='start of epochs to train for')
parser.add_argument('--negative_sample', type=int, default=20, help='folder to output images and model checkpoints')
parser.add_argument('--neg_batch_sample', type=int, default=30, help='folder to output images and model checkpoints')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=6)
parser.add_argument('--batchSize', type=int, default=128, help='input batch size')
parser.add_argument('--save_iter', type=int, default=1, help='number of epochs to train for')
parser.add_argument('--adam', action='store_true', help='Whether to use adam (default is rmsprop)')
parser.add_argument('--lr', type=float, default=0.0004, help='learning rate for, default=0.00005')
parser.add_argument('--beta1', type=float, default=0.8, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')
parser.add_argument('--verbose' , action='store_true', help='show the sampled caption')
parser.add_argument('--conv_feat_size', type=int, default=512, help='input batch size')
parser.add_argument('--model', type=str, default='LSTM', help='type of recurrent net (RNN_TANH, RNN_RELU, LSTM, GRU)')
parser.add_argument('--ninp', type=int, default=300, help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=512, help='humber of hidden units per layer')
parser.add_argument('--nlayers', type=int, default=1, help='number of layers')
parser.add_argument('--dropout', type=int, default=0.5, help='number of layers')
parser.add_argument('--clip', type=float, default=5, help='gradient clipping')
parser.add_argument('--margin', type=float, default=2, help='number of epochs to train for')
parser.add_argument('--log_interval', type=int, default=50, help='how many iterations show the log info')
opt = parser.parse_args()
print(opt)
opt.manualSeed = random.randint(1, 10000) # fix seed
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
cudnn.benchmark = True
if torch.cuda.is_available() and not opt.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
if opt.model_path != '':
print("=> loading checkpoint '{}'".format(opt.model_path))
checkpoint = torch.load(opt.model_path)
model_path = opt.model_path
opt = checkpoint['opt']
opt.start_epoch = checkpoint['epoch']
opt.model_path = model_path
opt.batchSize = 128
opt.niter = 100
else:
t = datetime.datetime.now()
cur_time = '%s-%s-%s' %(t.day, t.month, t.hour)
save_path = os.path.join(opt.outf, opt.encoder + '.' + cur_time)
try:
os.makedirs(save_path)
except OSError:
pass
####################################################################################
# Data Loader
####################################################################################
dataset = dl.train(input_img_h5=opt.input_img_h5, input_ques_h5=opt.input_ques_h5,
input_json=opt.input_json, negative_sample = opt.negative_sample,
num_val = opt.num_val, data_split = 'train')
dataset_val = dl.validate(input_img_h5=opt.input_img_h5, input_ques_h5=opt.input_ques_h5,
input_json=opt.input_json, negative_sample = opt.negative_sample,
num_val = opt.num_val, data_split = 'test')
dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,
shuffle=True, num_workers=int(opt.workers))
dataloader_val = torch.utils.data.DataLoader(dataset_val, batch_size=5,
shuffle=False, num_workers=int(opt.workers))
####################################################################################
# Build the Model
####################################################################################
vocab_size = dataset.vocab_size
ques_length = dataset.ques_length
ans_length = dataset.ans_length + 1
his_length = dataset.ques_length + dataset.ans_length
itow = dataset.itow
img_feat_size = opt.conv_feat_size
netE = _netE(opt.model, opt.ninp, opt.nhid, opt.nlayers, opt.dropout, img_feat_size)
netW = model._netW(vocab_size, opt.ninp, opt.dropout)
netG = _netG(opt.model, vocab_size, opt.ninp, opt.nhid, opt.nlayers, opt.dropout)
critG = model.LMCriterion()
sampler = model.gumbel_sampler()
if opt.cuda:
netW.cuda()
netE.cuda()
netG.cuda()
critG.cuda()
sampler.cuda()
if opt.model_path != '':
netW.load_state_dict(checkpoint['netW'])
netE.load_state_dict(checkpoint['netE'])
netG.load_state_dict(checkpoint['netG'])
# training function
def train(epoch):
netW.train()
netE.train()
netG.train()
lr = adjust_learning_rate(optimizer, epoch, opt.lr)
data_iter = iter(dataloader)
ques_hidden = netE.init_hidden(opt.batchSize)
hist_hidden = netE.init_hidden(opt.batchSize)
average_loss = 0
count = 0
i = 0
total_loss = 0
while i < len(dataloader):
data = data_iter.next()
image, history, question, answer, answerT, answerLen, answerIdx, \
questionL, negAnswer, negAnswerLen, negAnswerIdx = data
batch_size = question.size(0)
image = image.view(-1, img_feat_size)
img_input.data.resize_(image.size()).copy_(image)
for rnd in range(10):
ques = question[:,rnd,:].t()
his = history[:,:rnd+1,:].clone().view(-1, his_length).t()
ans, tans = answer[:,rnd,:].t(), answerT[:,rnd,:].t()
his_input.data.resize_(his.size()).copy_(his)
ques_input.data.resize_(ques.size()).copy_(ques)
ans_input.data.resize_(ans.size()).copy_(ans)
ans_target.data.resize_(tans.size()).copy_(tans)
ques_emb = netW(ques_input, format = 'index')
his_emb = netW(his_input, format = 'index')
ques_hidden = repackage_hidden(ques_hidden, batch_size)
hist_hidden = repackage_hidden(hist_hidden, his_input.size(1))
encoder_feat, ques_hidden = netE(ques_emb, his_emb, img_input, \
ques_hidden, hist_hidden, rnd+1)
_, ques_hidden = netG(encoder_feat.view(1,-1,opt.ninp), ques_hidden)
ans_emb = netW(ans_input)
logprob, ques_hidden = netG(ans_emb, ques_hidden)
loss = critG(logprob, ans_target.view(-1, 1))
loss = loss / torch.sum(ans_target.data.gt(0))
average_loss += loss.data[0]
total_loss += loss.data[0]
# do backward.
netW.zero_grad()
netE.zero_grad()
netG.zero_grad()
loss.backward()
optimizer.step()
count += 1
i += 1
if i % opt.log_interval == 0:
average_loss /= count
print("step {} / {} (epoch {}), g_loss {:.3f}, lr = {:.6f}"\
.format(i, len(dataloader), epoch, average_loss, lr))
average_loss = 0
count = 0
return total_loss / (10 * i), lr
def val():
netE.eval()
netW.eval()
netG.eval()
data_iter_val = iter(dataloader_val)
ques_hidden = netE.init_hidden(opt.batchSize)
hist_hidden = netE.init_hidden(opt.batchSize)
i = 0
average_loss = 0
rank_all_tmp = []
while i < len(dataloader_val):
data = data_iter_val.next()
image, history, question, answer, answerT, questionL, opt_answer, \
opt_answerT, answer_ids, answerLen, opt_answerLen, img_id = data
batch_size = question.size(0)
image = image.view(-1, img_feat_size)
img_input.data.resize_(image.size()).copy_(image)
for rnd in range(10):
# get the corresponding round QA and history.
ques, tans = question[:,rnd,:].t(), opt_answerT[:,rnd,:].clone().view(-1, ans_length).t()
his = history[:,:rnd+1,:].clone().view(-1, his_length).t()
ans = opt_answer[:,rnd,:,:].clone().view(-1, ans_length).t()
gt_id = answer_ids[:,rnd]
his_input.data.resize_(his.size()).copy_(his)
ques_input.data.resize_(ques.size()).copy_(ques)
ans_input.data.resize_(ans.size()).copy_(ans)
ans_target.data.resize_(tans.size()).copy_(tans)
gt_index.data.resize_(gt_id.size()).copy_(gt_id)
ques_emb = netW(ques_input, format = 'index')
his_emb = netW(his_input, format = 'index')
ques_hidden = repackage_hidden(ques_hidden, batch_size)
hist_hidden = repackage_hidden(hist_hidden, his_input.size(1))
encoder_feat, ques_hidden = netE(ques_emb, his_emb, img_input, \
ques_hidden, hist_hidden, rnd+1)
_, ques_hidden = netG(encoder_feat.view(1,-1,opt.ninp), ques_hidden)
hidden_replicated = []
for hid in ques_hidden:
hidden_replicated.append(hid.view(opt.nlayers, batch_size, 1, \
opt.nhid).expand(opt.nlayers, batch_size, 100, opt.nhid).clone().view(opt.nlayers, -1, opt.nhid))
hidden_replicated = tuple(hidden_replicated)
ans_emb = netW(ans_input, format = 'index')
output, _ = netG(ans_emb, hidden_replicated)
logprob = - output
logprob_select = torch.gather(logprob, 1, ans_target.view(-1,1))
mask = ans_target.data.eq(0) # generate the mask
if isinstance(logprob, Variable):
mask = Variable(mask, volatile=logprob.volatile)
logprob_select.masked_fill_(mask.view_as(logprob_select), 0)
prob = logprob_select.view(ans_length, -1, 100).sum(0).view(-1,100)
for b in range(batch_size):
gt_index.data[b] = gt_index.data[b] + b*100
gt_score = prob.view(-1).index_select(0, gt_index)
sort_score, sort_idx = torch.sort(prob, 1)
count = sort_score.lt(gt_score.view(-1,1).expand_as(sort_score))
rank = count.sum(1) + 1
rank_all_tmp += list(rank.view(-1).data.cpu().numpy())
i += 1
return rank_all_tmp, average_loss
####################################################################################
# Main
####################################################################################
img_input = torch.FloatTensor(opt.batchSize, 49, 512)
ques_input = torch.LongTensor(ques_length, opt.batchSize)
his_input = torch.LongTensor(his_length, opt.batchSize)
ans_input = torch.LongTensor(ans_length, opt.batchSize)
ans_target = torch.LongTensor(ans_length, opt.batchSize)
ans_sample = torch.LongTensor(1, opt.batchSize)
noise_input = torch.FloatTensor(opt.batchSize)
gt_index = torch.LongTensor(opt.batchSize)
if opt.cuda:
img_input, his_input = img_input.cuda(), his_input.cuda()
ques_input, ans_input = ques_input.cuda(), ans_input.cuda()
ans_target, ans_sample = ans_target.cuda(), ans_sample.cuda()
noise_input = noise_input.cuda()
gt_index = gt_index.cuda()
ques_input = Variable(ques_input)
ans_input = Variable(ans_input)
ans_target = Variable(ans_target)
ans_sample = Variable(ans_sample)
noise_input = Variable(noise_input)
img_input = Variable(img_input)
his_input = Variable(his_input)
gt_index = Variable(gt_index)
optimizer = optim.Adam([{'params': netW.parameters()},
{'params': netG.parameters()},
{'params': netE.parameters()}], lr=opt.lr, betas=(opt.beta1, 0.999))
history = []
for epoch in range(opt.start_epoch+1, opt.niter):
t = time.time()
train_loss, lr = train(epoch)
print ('Epoch: %d learningRate %4f train loss %4f Time: %3f' % (epoch, lr, train_loss, time.time()-t))
print('Evaluating ... ')
rank_all, val_loss = val()
R1 = np.sum(np.array(rank_all)==1) / float(len(rank_all))
R5 = np.sum(np.array(rank_all)<=5) / float(len(rank_all))
    R10 = np.sum(np.array(rank_all)<=10) / float(len(rank_all))
"""
Implementation of DOGRE / WOGRE approach.
Notice projection==embedding.
"""
import numpy as np
from sklearn.linear_model import Ridge
import time
from state_of_the_art.state_of_the_art_embedding import final
from math import pow
from utils import get_initial_proj_nodes_by_k_core, get_initial_proj_nodes_by_degrees
import heapq
import networkx as nx
def user_print(item, user_wish):
"""
    A function to show the user the current state of the embedding process. If you want a live update of the current
    state of the code and some details, set user_wish to True; otherwise set it to False.
"""
if user_wish is True:
print(item, sep=' ', end='', flush=True)
time.sleep(3)
print(" ", end='\r')
def create_sub_G(proj_nodes, G):
"""
Creating a new graph from the nodes in the initial embedding so we can do the initial embedding on it
:param proj_nodes: The nodes in the initial embedding
:param G: Our graph
:return: A sub graph of G that its nodes are the nodes in the initial embedding.
"""
sub_G = G.subgraph(list(proj_nodes))
return sub_G
def create_dict_neighbors(G):
"""
Create a dictionary of neighbors.
:param G: Our graph
:return: neighbors_dict where key==node and value==set_of_neighbors (both incoming and outgoing)
"""
G_nodes = list(G.nodes())
neighbors_dict = {}
for i in range(len(G_nodes)):
node = G_nodes[i]
neighbors_dict.update({node: set(G[node])})
return neighbors_dict
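# Hypothetical usage sketch (not part of the original module): illustrates the
# structure returned by create_dict_neighbors on a tiny directed graph; the demo
# function and graph are assumptions for illustration only.
def _demo_create_dict_neighbors():
    G_demo = nx.DiGraph([(1, 2), (1, 3), (2, 3)])
    # For a networkx DiGraph, G[node] iterates over successors, so this returns
    # {1: {2, 3}, 2: {3}, 3: set()}.
    return create_dict_neighbors(G_demo)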
def create_dicts_same_nodes(my_set, neighbors_dict, node, dict_out, dict_in):
"""
A function to create useful dictionaries to represent connections between nodes that have the same type, i.e between
nodes that are in the embedding and between nodes that aren't in the embedding. It depends on the input.
:param my_set: Set of the nodes that aren't currently in the embedding OR Set of the nodes that are currently in
the embedding
:param neighbors_dict: Dictionary of all nodes and neighbors (both incoming and outgoing)
:param node: Current node
:param dict_out: explained below
:param dict_in: explained below
:return: There are 4 possibilities (2 versions, 2 to every version):
A) 1. dict_node_node_out: key == nodes not in embedding, value == set of outgoing nodes not in embedding
(i.e there is a directed edge (i,j) when i is the key node and j isn't in the embedding)
2. dict_node_node_in: key == nodes not in embedding , value == set of incoming nodes not in embedding
(i.e there is a directed edge (j,i) when i is the key node and j isn't in the embedding)
B) 1. dict_enode_enode_out: key == nodes in embedding , value == set of outgoing nodes in embedding
(i.e there is a directed edge (i,j) when i is the key node and j is in the embedding)
2. dict_enode_enode_in: key == nodes in embedding , value == set of incoming nodes in embedding
(i.e there is a directed edge (j,i) when i is the key node and j is in the embedding)
"""
set1 = neighbors_dict[node].intersection(my_set)
count_1 = 0
count_2 = 0
if (len(set1)) > 0:
count_1 += 1
dict_out.update({node: set1})
neigh = list(set1)
for j in range(len(neigh)):
if dict_in.get(neigh[j]) is None:
dict_in.update({neigh[j]: set([node])})
else:
dict_in[neigh[j]].update(set([node]))
else:
count_2 += 1
return dict_out, dict_in
def create_dict_node_enode(set_proj_nodes, neighbors_dict, H, node, dict_node_enode, dict_enode_node):
"""
A function to create useful dictionaries to represent connections between nodes that are in the embedding and
nodes that are not in the embedding.
:param set_proj_nodes: Set of the nodes that are in the embedding
:param neighbors_dict: Dictionary of all nodes and neighbors (both incoming and outgoing)
:param H: H is the undirected version of our graph
:param node: Current node
:param dict_node_enode: explained below
:param dict_enode_node: explained below
:return: 1. dict_node_enode: key == nodes not in embedding, value == set of outdoing nodes in embedding (i.e
there is a directed edge (i,j) when i is the key node and j is in the embedding)
2. dict_enode_node: key == nodes not in embedding, value == set of incoming nodes in embedding (i.e
there is a directed edge (j,i) when i is the key node and j is in the embedding)
"""
set2 = neighbors_dict[node].intersection(set_proj_nodes)
set_all = set(H[node]).intersection(set_proj_nodes)
set_in = set_all - set2
if len(set2) > 0:
dict_node_enode.update({node: set2})
if len(set_in) > 0:
dict_enode_node.update({node: set_in})
return dict_node_enode, dict_enode_node
def create_dicts_of_connections(set_proj_nodes, set_no_proj_nodes, neighbors_dict, G):
"""
A function that creates 6 dictionaries of connections between different types of nodes.
:param set_proj_nodes: Set of the nodes that are in the projection
:param set_no_proj_nodes: Set of the nodes that aren't in the projection
:param neighbors_dict: Dictionary of neighbours
:return: 6 dictionaries, explained above (in the two former functions)
"""
dict_node_node_out = {}
dict_node_node_in = {}
dict_node_enode = {}
dict_enode_node = {}
dict_enode_enode_out = {}
dict_enode_enode_in = {}
list_no_proj = list(set_no_proj_nodes)
list_proj = list(set_proj_nodes)
H = G.to_undirected()
for i in range(len(list_no_proj)):
node = list_no_proj[i]
dict_node_node_out, dict_node_node_in = create_dicts_same_nodes(set_no_proj_nodes, neighbors_dict, node,
dict_node_node_out, dict_node_node_in)
dict_node_enode, dict_enode_node = create_dict_node_enode(set_proj_nodes, neighbors_dict, H, node,
dict_node_enode, dict_enode_node)
for i in range(len(list_proj)):
node = list_proj[i]
dict_enode_enode_out, dict_enode_enode_in = create_dicts_same_nodes(set_proj_nodes, neighbors_dict, node,
dict_enode_enode_out, dict_enode_enode_in)
return dict_node_node_out, dict_node_node_in, dict_node_enode, dict_enode_node, dict_enode_enode_out, dict_enode_enode_in
def calculate_average_projection_second_order(dict_proj, node, dict_enode_enode, average_two_order_proj, dim, G):
"""
A function to calculate the average embeddings of the second order neighbors, both outgoing and incoming,
depends on the input.
:param dict_proj: Dict of embeddings (key==node, value==its embedding)
:param node: Current node we're dealing with
:param dict_enode_enode: key==node in embedding , value==set of neighbors that are in the embedding. Direction
(i.e outgoing or incoming depends on the input)
:param average_two_order_proj: explained below
:return: Average embedding of second order neighbours, outgoing or incoming.
"""
two_order_neighs = dict_enode_enode.get(node)
k2 = 0
# if the neighbors in the projection also have neighbors in the projection calculate the average projection
if two_order_neighs is not None:
two_order_neighs_in = list(two_order_neighs)
k2 += len(two_order_neighs_in)
two_order_projs = []
for i in range(len(two_order_neighs_in)):
if G[node].get(two_order_neighs_in[i]) is not None:
two_order_proj = G[node][two_order_neighs_in[i]]["weight"]*dict_proj[two_order_neighs_in[i]]
else:
two_order_proj = G[two_order_neighs_in[i]][node]["weight"]*dict_proj[two_order_neighs_in[i]]
two_order_projs.append(two_order_proj)
two_order_projs = np.array(two_order_projs)
two_order_projs = np.mean(two_order_projs, axis=0)
# else, the average projection is 0
else:
two_order_projs = np.zeros(dim)
average_two_order_proj.append(two_order_projs)
return average_two_order_proj, k2
def calculate_projection_of_neighbors(current_node, proj_nodes, dict_proj, dict_enode_enode_in, dict_enode_enode_out, dim, G):
"""
A function to calculate average degree of first order neighbors and second order neighbors, direction
(outgoing or incoming) depends on the input.
:param proj_nodes: Neighbors that are in the embedding, direction depends on the input.
:param dict_proj: Dict of embeddings (key==node, value==embedding)
:return: Average degree of first order neighbors and second order neighbors
"""
proj = []
# average projections of the two order neighbors, both incoming and outgoing
average_two_order_proj_in = []
average_two_order_proj_out = []
list_proj_nodes = list(proj_nodes)
# the number of first order neighbors
k1 = len(proj_nodes)
# to calculate the number of the second order neighbors
k2_in = 0
k2_out = 0
# to calculate the average projection of the second order neighbors
for k in range(len(list_proj_nodes)):
node = list_proj_nodes[k]
average_two_order_proj_in, a = calculate_average_projection_second_order(dict_proj, node, dict_enode_enode_in,
average_two_order_proj_in, dim, G)
k2_in += a
average_two_order_proj_out, b = calculate_average_projection_second_order(dict_proj, node, dict_enode_enode_out,
average_two_order_proj_out, dim, G)
k2_out += b
proj.append(dict_proj[node])
if G[current_node].get(node) is not None:
proj.append(G[current_node][node]["weight"] * dict_proj[node])
else:
proj.append(G[node][current_node]["weight"] * dict_proj[node])
# for every neighbor we have the average projection of its neighbors, so now do average on all of them
average_two_order_proj_in = np.array(average_two_order_proj_in)
average_two_order_proj_in = np.mean(average_two_order_proj_in, axis=0)
average_two_order_proj_out = np.array(average_two_order_proj_out)
average_two_order_proj_out = np.mean(average_two_order_proj_out, axis=0)
proj = np.array(proj)
# find the average proj
proj = np.mean(proj, axis=0)
return proj, average_two_order_proj_in, average_two_order_proj_out, k1, k2_in, k2_out
def calculate_projection(current_node, G, proj_nodes_in, proj_nodes_out, dict_proj, dict_enode_enode_in, dict_enode_enode_out, dim,
alpha1, alpha2, beta_11, beta_12, beta_21, beta_22):
"""
A function to calculate the final embedding of the node by D-VERSE method as explained in the pdf file in github.
:param proj_nodes_in: embeddings of first order incoming neighbors.
:param proj_nodes_out: embeddings of first order outgoing neighbors.
:param dict_proj: Dict of embeddings (key==node, value==projection)
:param dim: Dimension of the embedding
:param alpha1, alpha2, beta_11, beta_12, beta_21, beta_22: Parameters to calculate the final projection.
:return: The final projection of our node.
"""
if len(proj_nodes_in) > 0:
x_1, z_11, z_12, k1, k2_in_in, k2_in_out = calculate_projection_of_neighbors(current_node,
proj_nodes_in, dict_proj, dict_enode_enode_in, dict_enode_enode_out, dim, G)
else:
x_1, z_11, z_12 = np.zeros(dim), np.zeros(dim), np.zeros(dim)
if len(proj_nodes_out) > 0:
x_2, z_21, z_22, k1, k2_in_out, k2_out_out = calculate_projection_of_neighbors(current_node,
proj_nodes_out, dict_proj, dict_enode_enode_in, dict_enode_enode_out, dim, G)
else:
x_2, z_21, z_22 = np.zeros(dim), np.zeros(dim), np.zeros(dim)
# the final projection of the node
final_proj = alpha1*x_1+alpha2*x_2 - beta_11*z_11 - beta_12*z_12 - \
beta_21*z_21 - beta_22*z_22
return final_proj
def calculate_projection_weighted(current_node, G, proj_nodes_in, proj_nodes_out, dict_proj, dict_enode_enode_in, dict_enode_enode_out,
dim, params):
"""
A function to calculate the final embedding of the node by We-VERSE method as explained in the pdf file in github.
:param proj_nodes_in: embeddings of first order incoming neighbors.
:param proj_nodes_out: embeddings of first order outgoing neighbors.
:param dict_proj: Dict of embeddings (key==node, value==projection)
:param dict_enode_enode_in: key == nodes in embedding , value == set of incoming nodes in embedding
(i.e there is a directed edge (j,i) when i is the key node and j is in the embedding)
:param dict_enode_enode_out: key == nodes in embedding , value == set of outgoing nodes in embedding
(i.e there is a directed edge (i,j) when i is the key node and j is in the embedding)
:param dim: Dimension of the embedding space
:param params: Optimal parameters to calculate the embedding
:return: The final projection of our node.
"""
if len(proj_nodes_in) > 0:
x_1, z_11, z_12, k1_in, k2_in_in, k2_in_out = calculate_projection_of_neighbors(current_node,
proj_nodes_in, dict_proj, dict_enode_enode_in, dict_enode_enode_out, dim, G)
else:
x_1, z_11, z_12 = np.zeros(dim), np.zeros(dim), np.zeros(dim)
k1_in = 0
k2_in_in = 0
k2_in_out = 0
if len(proj_nodes_out) > 0:
x_2, z_21, z_22, k1_out, k2_out_in, k2_out_out = calculate_projection_of_neighbors(current_node,
proj_nodes_out, dict_proj, dict_enode_enode_in, dict_enode_enode_out, dim, G)
else:
        x_2, z_21, z_22 = np.zeros(dim), np.zeros(dim), np.zeros(dim)
#!/usr/bin/env python
import numpy as np
from scipy.spatial import Delaunay
from . import pg_utilities
from . import imports_and_exports
"""
.. module:: generate_shapes
:synopsis: Contains code to generate placental shapes for generic placental models.
:synopsis:Contains code to generate placental shapes for generic placental models \n
(i.e. from literature measures without specific data from an individual
"""
def equispaced_data_in_ellipsoid(n, volume, thickness, ellipticity):
"""
:Function name: **equispaced_data_in_ellipsoid**
Generates equally spaced data points in an ellipsoid.
:inputs:
- n: Number of data points which we aim to generate
- volume: Volume of ellipsoid
- thickness: Placental thickness (z-dimension)
- ellipticity: Ratio of y to x axis dimensions
return:
- Edata: A nx3 array of datapoints, with each point being defined by its x-,y-, and z- coordinates
A way you might want to use me is:
>>> n = 100
>>> volume = 10
>>> thickness = 3
>>> ellipticity = 1.1
>>> equispaced_data_in_ellipsoid(n, volume, thickness, ellipticity)
    This will return 100 data points in an ellipsoid with z-axis thickness 3, volume 10, and with the y-axis dimension 1.1 times the x-axis dimension.
"""
data_spacing = (volume / n) ** (1.0 / 3.0)
print('Generating data ' + str(data_spacing) + ' apart')
radii = pg_utilities.calculate_ellipse_radii(volume, thickness, ellipticity)
z_radius = radii['z_radius']
x_radius = radii['x_radius']
y_radius = radii['y_radius']
    # Aiming to generate seed points that fill a cuboid encompassing the placental volume, then remove seed points that
    # are external to the ellipsoid
num_data = 0 # zero the total number of data points
# Calculate the number of points that should lie in each dimension in a cube
nd_x = np.floor(2.0 * (x_radius + data_spacing) / data_spacing)
nd_y = np.floor(2.0 * (y_radius + data_spacing) / data_spacing)
nd_z = np.floor(2.0 * (z_radius + data_spacing) / data_spacing)
nd_x = int(nd_x)
nd_y = int(nd_y)
nd_z = int(nd_z)
# Set up edge node coordinates
x_coord = np.linspace(-x_radius - data_spacing / 2.0, x_radius + data_spacing / 2.0, nd_x)
y_coord = np.linspace(-y_radius - data_spacing / 2.0, y_radius + data_spacing / 2.0, nd_y)
z_coord = np.linspace(-z_radius - data_spacing / 2.0, z_radius + data_spacing / 2.0, nd_z)
    # Use these vectors to form a uniformly spaced grid
data_coords = np.vstack(np.meshgrid(x_coord, y_coord, z_coord)).reshape(3, -1).T
# Store nodes that lie within ellipsoid
datapoints = np.zeros((nd_x * nd_y * nd_z, 3))
for i in range(len(data_coords)): # Loop through grid
coord_check = pg_utilities.check_in_ellipsoid(data_coords[i][0], data_coords[i][1], data_coords[i][2], x_radius,
y_radius, z_radius)
if coord_check is True: # Has to be strictly in the ellipsoid
datapoints[num_data, :] = data_coords[i, :] # add to data array
num_data = num_data + 1
datapoints.resize(num_data, 3,refcheck=False) # resize data array to correct size
print('Data points within ellipsoid allocated. Total = ' + str(len(datapoints)))
return datapoints
def uniform_data_on_ellipsoid(n, volume, thickness, ellipticity, random_seed):
"""
:Function name: **uniform_data_on_ellipsoid**
Generates equally spaced data points on the positive z-surface of an ellipsoid
:inputs:
- n: number of data points which we aim to generate
- volume: volume of ellipsoid
- thickness: placental thickness (z-dimension)
- ellipticity: ratio of y to x axis dimensions
:return:
- chorion_data: A nx3 array of datapoints, with each point being defined by its x-,y-, and z- coordinates
A way you might want to use me is:
>>> n = 100
>>> volume = 10
>>> thickness = 3
>>> ellipticity = 1.1
    >>> uniform_data_on_ellipsoid(n, volume, thickness, ellipticity, random_seed=1)
    This will return 100 data points on the positive z-surface of an ellipsoid with z-axis thickness 3, volume 10,
    and with the y-axis dimension 1.1 times the x-axis dimension.
"""
radii = pg_utilities.calculate_ellipse_radii(volume, thickness, ellipticity)
z_radius = radii['z_radius']
x_radius = radii['x_radius']
y_radius = radii['y_radius']
area_estimate = np.pi * x_radius * y_radius
data_spacing = 0.85 * np.sqrt(area_estimate / n)
chorion_data = np.zeros((n, 3))
np.random.seed(random_seed)
generated_seed = 0
acceptable_attempts = n * 1000 # try not to have too many failures
attempts = 0
while generated_seed < n and attempts < acceptable_attempts:
# generate random x-y coordinates between negative and positive radii
new_x = np.random.uniform(-x_radius, x_radius)
new_y = np.random.uniform(-y_radius, y_radius)
# check if new coordinate is on the ellipse
if ((new_x / x_radius) ** 2 + (new_y / y_radius) ** 2) < 1: # on the surface
if generated_seed == 0:
generated_seed = generated_seed + 1
new_z = pg_utilities.z_from_xy(new_x, new_y, x_radius, y_radius, z_radius)
chorion_data[generated_seed - 1][:] = [new_x, new_y, new_z]
else:
reject = False
for j in range(0, generated_seed + 1):
distance = (chorion_data[j - 1][0] - new_x) ** 2 + (chorion_data[j - 1][1] - new_y) ** 2
distance = np.sqrt(distance)
if distance <= data_spacing:
reject = True
break
if reject is False:
generated_seed = generated_seed + 1
new_z = pg_utilities.z_from_xy(new_x, new_y, x_radius, y_radius, z_radius)
chorion_data[generated_seed - 1][:] = [new_x, new_y, new_z]
attempts = attempts + 1
chorion_data.resize(generated_seed, 3) # resize data array to correct size
print('Data points on ellipsoid allocated. Total = ' + str(len(chorion_data)) )
return chorion_data
def gen_rect_cover_ellipsoid(volume, thickness, ellipticity, x_spacing, y_spacing, z_spacing):
# Generates equally spaced data nodes and elements and constructs a rectangular 'mesh' that covers the space that is
# made up of an ellipsoidal placenta
# volume=volume of ellipsoid
# thickness = placental thickness (z-dimension)
# ellipticity = ratio of y to x axis dimensions
# X,Y,Z spacing is the number of elements required in each of the x, y z directions
# Calculate the dimensions of the ellipsoid
radii = pg_utilities.calculate_ellipse_radii(volume, thickness, ellipticity)
z_radius = radii['z_radius']
x_radius = radii['x_radius']
y_radius = radii['y_radius']
# z height of ellipsoid is 2* zradius
    # We want the number of nodes to cover the height and have the prescribed spacing
nnod_x = int(np.ceil(x_radius * 2.0 / x_spacing)) + 1
x_width = x_spacing * (nnod_x - 1)
nnod_y = int(np.ceil(y_radius * 2.0 / y_spacing)) + 1
y_width = y_spacing * (nnod_y - 1)
nnod_z = int(np.ceil(z_radius * 2.0 / z_spacing)) + 1
z_width = z_spacing * (nnod_z - 1)
node_loc = gen_rectangular_node(x_width, y_width, z_width, nnod_x, nnod_y, nnod_z)
# Generating the element connectivity of each cube element, 8 nodes for each 3D cube element
elems = cube_mesh_connectivity(nnod_x, nnod_y, nnod_z)
return {'nodes': node_loc, 'elems': elems, 'total_nodes': nnod_x * nnod_y * nnod_z,
'total_elems': (nnod_x - 1) * (nnod_y - 1) * (nnod_z - 1)}
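# Hypothetical usage sketch (illustrative only; the argument values are made up):
# the returned node and element arrays always match the reported totals.
def _demo_gen_rect_cover_ellipsoid():
    mesh = gen_rect_cover_ellipsoid(volume=10.0, thickness=3.0, ellipticity=1.1,
                                    x_spacing=1.0, y_spacing=1.0, z_spacing=1.0)
    assert len(mesh['nodes']) == mesh['total_nodes']
    assert len(mesh['elems']) == mesh['total_elems']
    return mesh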
def gen_ellip_mesh_tet(volume, thickness, ellipticity, n):
""" Generates ellipsoid tetrahedral mesh for 3D problems
Inputs:
- volume: volume of placental ellipsoid
- thickness: placental thickness (z-dimension)
- ellipticity: ratio of y to x axis dimensions
- n: number of datapoints generated to create the mesh
Returns:
- nodes: nodes location of mesh
- elems: element connectivity of mesh (tetrahedral element)
- node_array: array of nodes
- element_array: array of elements
"""
radii = pg_utilities.calculate_ellipse_radii(volume, thickness, ellipticity)
z_radius = radii['z_radius']
x_radius = radii['x_radius']
y_radius = radii['y_radius']
nodeSpacing = (n / (2 * x_radius * 2 * y_radius * 2 * z_radius)) ** (1. / 3)
nnod_x = 2 * x_radius * nodeSpacing
nnod_y = 2 * y_radius * nodeSpacing
nnod_z = 2 * z_radius * nodeSpacing
nodes = gen_rectangular_node(x_radius * 2, y_radius * 2, z_radius * 2, nnod_x, nnod_y, nnod_z)
# nodes inside the ellipsoid
ellipsoid_node = np.zeros((len(nodes), 3))
count = 0
for nnode in range(0, len(nodes)):
coord_point = nodes[nnode][0:3]
inside = pg_utilities.check_in_on_ellipsoid(coord_point[0], coord_point[1], coord_point[2], x_radius, y_radius,
z_radius)
if inside:
ellipsoid_node[count, :] = coord_point[:]
count = count + 1
ellipsoid_node.resize(count, 3,refcheck=False)
xyList = ellipsoid_node[:, [0, 1]]
xyListUnique = np.vstack({tuple(row) for row in xyList})
# looking for z_coordinate of surface nodes
for xyColumn in xyListUnique:
xyNodes = np.where(np.all(xyList == xyColumn, axis=1))[0]
if len(xyNodes) > 1:
x_coord = ellipsoid_node[xyNodes[0], 0]
y_coord = ellipsoid_node[xyNodes[0], 1]
ellipsoid_node[xyNodes[len(xyNodes) - 1], 2] = pg_utilities.z_from_xy(x_coord, y_coord, x_radius, y_radius,
z_radius)
ellipsoid_node[xyNodes[0], 2] = -1 * (
pg_utilities.z_from_xy(x_coord, y_coord, x_radius, y_radius, z_radius))
# generate tetrahedral mesh
pyMesh = Delaunay(ellipsoid_node)
# Build arrays to pass into openCMISS conversion:
node_loc = pyMesh.points
temp_elems = pyMesh.simplices
# CHECK ELEMENTS FOR 0 VOLUME:
min_vol = 0.00001
index = 0
indexArr = []
for element in temp_elems:
x_coor = []
y_coor = []
z_coor = []
for node in element:
x_coor.append(node_loc[node][0])
y_coor.append(node_loc[node][1])
z_coor.append(node_loc[node][2])
vmat = np.vstack((x_coor, y_coor, z_coor, [1.0, 1.0, 1.0, 1.0])) # matrix of coor of element
elem_volume = (1 / 6.0) * abs(np.linalg.det(vmat)) # volume of each tetrahedral element
# if volume is not zero
if elem_volume > min_vol:
indexArr.append(index)
index = index + 1
# update arrays without 0 volume elements, to pass into openCMISS
elems = temp_elems[indexArr, :]
for i in range(len(elems)):
elems[i] = [x + 1 for x in elems[i]]
element_array = range(1, len(elems) + 1)
node_array = range(1, len(node_loc) + 1)
return {'nodes': node_loc, 'elems': elems, 'element_array': element_array, 'node_array': node_array,
'nodeSpacing': nodeSpacing}
def gen_rectangular_node(x_width, y_width, z_width, nnod_x, nnod_y, nnod_z):
# Create linspaces for x y and z coordinates
x = np.linspace(-x_width / 2.0, x_width / 2.0, int(nnod_x)) # linspace for x axis
y = np.linspace(-y_width / 2.0, y_width / 2.0, int(nnod_y)) # linspace for y axis
z = np.linspace(-z_width / 2.0, z_width / 2.0, int(nnod_z)) # linspace for z axis
node_loc_temp = np.vstack(np.meshgrid(y, z, x)).reshape(3, -1).T # generate nodes for rectangular mesh
node_loc = np.zeros((len(node_loc_temp), 3))
for i in range(0, len(node_loc)):
node_loc[i][0] = node_loc_temp[i][2]
node_loc[i][1] = node_loc_temp[i][0]
node_loc[i][2] = node_loc_temp[i][1]
return node_loc
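# Hypothetical usage sketch (illustrative only): a 2 x 2 x 2 node grid over a unit
# cube returns the 8 corner coordinates, centred on the origin.
def _demo_gen_rectangular_node():
    nodes = gen_rectangular_node(1.0, 1.0, 1.0, 2, 2, 2)
    assert nodes.shape == (8, 3)
    assert np.allclose(np.abs(nodes), 0.5)  # every coordinate is +/- 0.5
    return nodes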
def gen_rectangular_mesh2(nel_x, nel_y, nel_z, xdim, ydim, zdim, element_type):
    # generates a rectangular mesh of defined dimensions using either linear or quadratic elements
if element_type == 1: # linear element
nnod_x = int(nel_x + 1)
nnod_y = int(nel_y + 1)
nnod_z = int(nel_z + 1)
elif element_type == 2: # quadratic element
nnod_x = int((nel_x * 2) + 1)
nnod_y = int((nel_y * 2) + 1)
nnod_z = int((nel_z * 2) + 1)
node = gen_rectangular_node(xdim, ydim, zdim, nnod_x, nnod_y, nnod_z) # getting nodes
if element_type == 1: # linear element
elems = cube_mesh_connectivity(nnod_x, nnod_y, nnod_z) # getting elem connectivity
elif element_type == 2: # quadratic element
elems = cube_mesh_connectivity_quadratic(nel_x, nel_y, nel_z, nnod_x, nnod_y,
nnod_z) # getting element connectivity
element_array = range(1, len(elems) + 1)
node_array = range(1, len(node) + 1)
if element_type == 2:
surfacenodes = identify_surface_node_quad(nel_x, nel_y, nel_z)
else:
print("This element type has no implemented surface node definition")
surfacenodes = 0
return {'nodes': node, 'elems': elems, 'element_array': element_array,
'node_array': node_array, 'surface_nodes': surfacenodes}
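# Hypothetical usage sketch (illustrative only): a single quadratic element gives
# a 3 x 3 x 3 node grid, i.e. 27 nodes, 1 element and 26 surface nodes.
def _demo_gen_rectangular_mesh2():
    mesh = gen_rectangular_mesh2(1, 1, 1, 1.0, 1.0, 1.0, element_type=2)
    assert len(mesh['nodes']) == 27 and len(mesh['elems']) == 1
    assert len(mesh['surface_nodes']) == 26
    return mesh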
def gen_3d_ellipsoid(nel_x, nel_y, nel_z, volume, thickness, ellipticity, element_type):
""" Generates ellipsoid placental mesh to solve 3D problems (note this is not a quality structured mesh)
Inputs:
    - nel: number of elements in the x, y, z axes; the more elements, the rounder the mesh
- volume: volume of placental ellipsoid
- thickness: placental thickness (z-dimension)
- ellipticity: ratio of y to x axis dimensions
Returns:
- placental_node_coor: nodes location of mesh
- placental_el_con: element connectivity of mesh (tetrahedral element)
- node_array: array of nodes
- element_array: array of elements
"""
# creating cube between -1 and 1 with n number of element
# cubelength=2
if element_type == 1: # linear element
nnod_x = int(nel_x + 1)
nnod_y = int(nel_y + 1)
nnod_z = int(nel_z + 1)
elif element_type == 2: # quadratic element
nnod_x = int((nel_x * 2) + 1)
nnod_y = int((nel_y * 2) + 1)
nnod_z = int((nel_z * 2) + 1)
radii = pg_utilities.calculate_ellipse_radii(volume, thickness, ellipticity)
z_radius = radii['z_radius']
x_radius = radii['x_radius']
y_radius = radii['y_radius']
cube_node = gen_rectangular_node(2 * x_radius, 2 * y_radius, 2 * z_radius, nnod_x, nnod_y, nnod_z)
if element_type == 1: # linear element
cube_elems = cube_mesh_connectivity(nnod_x, nnod_y, nnod_z) # getting elem connectivity
elif element_type == 2: # quadratic element
cube_elems = cube_mesh_connectivity_quadratic(nel_x, nel_y, nel_z, nnod_x, nnod_y,
nnod_z) # getting element connectivity
ellipsoid_coor = np.zeros((len(cube_node), 3))
for ii in range(0, len(cube_node)):
ellipsoid_coor[ii, 0] = cube_node[ii, 0] * np.sqrt(1.0 - cube_node[ii, 1] ** 2 / (2.0 * y_radius ** 2) -
cube_node[ii, 2] ** 2 / (2.0 * z_radius ** 2) + cube_node[
ii, 1] ** 2 *
cube_node[ii, 2] ** 2 / (
3.0 * y_radius ** 2 * z_radius ** 2)) # for x_coor
ellipsoid_coor[ii, 1] = cube_node[ii, 1] * np.sqrt(1.0 - cube_node[ii, 0] ** 2 / (2.0 * x_radius ** 2) -
cube_node[ii, 2] ** 2 / (2.0 * z_radius ** 2) + cube_node[
ii, 0] ** 2 * cube_node[ii, 2] ** 2
/ (3.0 * x_radius ** 2 * z_radius ** 2)) # for y_coor
ellipsoid_coor[ii, 2] = cube_node[ii, 2] * np.sqrt(1.0 - cube_node[ii, 1] ** 2 / (2.0 * y_radius ** 2) -
cube_node[ii, 0] ** 2 / (2.0 * x_radius ** 2) + cube_node[
ii, 1] ** 2 * cube_node[ii, 0] ** 2
/ (3.0 * y_radius ** 2 * x_radius ** 2)) # for z_coor
element_array = range(1, len(cube_elems) + 1)
node_array = range(1, len(ellipsoid_coor) + 1)
if element_type == 2:
surfacenodes = identify_surface_node_quad(nel_x, nel_y, nel_z)
else:
print("This element type has no implemented surface node definition")
surfacenodes = 0
return {'placental_node_coor': ellipsoid_coor, 'placental_el_con': cube_elems, 'element_array': element_array,
'node_array': node_array, 'surface_nodes': surfacenodes}
def cube_mesh_connectivity(nnod_x, nnod_y, nnod_z):
"""Generates element connectivity in cube mesh
Inputs:
- nnod_x:number of node in x axis
- nnod_y:number of node in y axis
- nnod_z:number of node in z axis
Outputs:
- elems: array of element connectivity
"""
num_elems = (nnod_x - 1) * (nnod_y - 1) * (nnod_z - 1)
elems = np.zeros((num_elems, 9),
dtype=int) # this stores first element number and then the nodes of each mesh element
element_number = 0
ne = 0
# loop through elements
for k in range(1, nnod_z):
for j in range(1, nnod_y):
for i in range(1, nnod_x):
elems[ne][0] = ne # store element number
elems[ne][1] = (i - 1) + (nnod_x) * (j - 1) + nnod_x * nnod_y * (k - 1) # lowest coordinates
elems[ne][2] = elems[ne][1] + 1 # add one in x
elems[ne][3] = elems[ne][1] + nnod_x # go through x and find first in y
elems[ne][4] = elems[ne][3] + 1 # add one in y
elems[ne][5] = elems[ne][1] + nnod_x * nnod_y # same as 1 -4 but at higher z -coord
elems[ne][6] = elems[ne][2] + nnod_x * nnod_y
elems[ne][7] = elems[ne][3] + nnod_x * nnod_y
elems[ne][8] = elems[ne][4] + nnod_x * nnod_y
ne = ne + 1
return elems
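# Hypothetical usage sketch (illustrative only): with a 2 x 2 x 2 node grid there
# is a single hexahedral element; each row stores the element number followed by
# its 8 node indices.
def _demo_cube_mesh_connectivity():
    elems = cube_mesh_connectivity(2, 2, 2)
    assert elems.shape == (1, 9)
    assert list(elems[0]) == [0, 0, 1, 2, 3, 4, 5, 6, 7]
    return elems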
def cube_mesh_connectivity_quadratic(nel_x, nel_y, nel_z, nnod_x, nnod_y, nnod_z):
"""Generates element connectivity in quadratic cube mesh
Inputs:
- nnod_x:number of node in x axis
- nnod_y:number of node in y axis
- nnod_z:number of node in z axis
Outputs:
- elems: array of element connectivity in quadratic
"""
num_elems = nel_x * nel_y * nel_z
elems = np.zeros((num_elems, 28), dtype=int)
element_number = 0
ne = 0
# Got the element
for k in range(1, nnod_z, 2):
for j in range(1, nnod_y, 2):
for i in range(1, nnod_x, 2):
# 1st layer
elems[ne][0] = ne
elems[ne][1] = (i - 1) + (nnod_x) * (j - 1) + nnod_x * nnod_y * (k - 1) # 1st node
elems[ne][2] = (i - 1) + (nnod_x) * (j - 1) + nnod_x * nnod_y * (k - 1) + 1 # right subsequent node
elems[ne][3] = (i - 1) + (nnod_x) * (j - 1) + nnod_x * nnod_y * (k - 1) + 2 # right subsequent node
elems[ne][4] = elems[ne][1] + nnod_x # 1st node in another y layer
elems[ne][5] = elems[ne][1] + nnod_x + 1 # right subsequent node
elems[ne][6] = elems[ne][1] + nnod_x + 2 # right subsequent node
elems[ne][7] = elems[ne][1] + 2 * (nnod_x) # 1st node in another y layer
elems[ne][8] = elems[ne][1] + 2 * (nnod_x) + 1 # right subsequent node
elems[ne][9] = elems[ne][1] + 2 * (nnod_x) + 2 # right subsequent node
# 2nd layer
elems[ne][10] = elems[ne][1] + nnod_x * nnod_y # same in one z layer
elems[ne][11] = elems[ne][2] + nnod_x * nnod_y
elems[ne][12] = elems[ne][3] + nnod_x * nnod_y
elems[ne][13] = elems[ne][4] + nnod_x * nnod_y
elems[ne][14] = elems[ne][5] + nnod_x * nnod_y
elems[ne][15] = elems[ne][6] + nnod_x * nnod_y
elems[ne][16] = elems[ne][7] + nnod_x * nnod_y
elems[ne][17] = elems[ne][8] + nnod_x * nnod_y
elems[ne][18] = elems[ne][9] + nnod_x * nnod_y
                # third layer
elems[ne][19] = elems[ne][1] + nnod_x * nnod_y * 2 # same in another z layer
elems[ne][20] = elems[ne][2] + nnod_x * nnod_y * 2
elems[ne][21] = elems[ne][3] + nnod_x * nnod_y * 2
elems[ne][22] = elems[ne][4] + nnod_x * nnod_y * 2
elems[ne][23] = elems[ne][5] + nnod_x * nnod_y * 2
elems[ne][24] = elems[ne][6] + nnod_x * nnod_y * 2
elems[ne][25] = elems[ne][7] + nnod_x * nnod_y * 2
elems[ne][26] = elems[ne][8] + nnod_x * nnod_y * 2
elems[ne][27] = elems[ne][9] + nnod_x * nnod_y * 2
ne = ne + 1
return elems
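# Hypothetical usage sketch (illustrative only): one quadratic element spans a
# 3 x 3 x 3 node grid, so its single row lists the element number followed by
# all 27 node indices.
def _demo_cube_mesh_connectivity_quadratic():
    elems = cube_mesh_connectivity_quadratic(1, 1, 1, 3, 3, 3)
    assert elems.shape == (1, 28)
    assert list(elems[0]) == [0] + list(range(27))
    return elems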
def identify_surface_node_quad(nel_x, nel_y, nel_z):
"""Generates collection of nodes that are on the surface of in quadratic placental mesh
Inputs:
- nel_x:number of elem in x axis
- nel_y:number of elem in y axis
- nel_z:number of elem in z axis
Outputs:
- surfacenode: collection of nodes on the surface of placental mesh
"""
nnod_x = int((nel_x * 2) + 1) # number of nodes in x axis
nnod_y = int((nel_y * 2) + 1) # number of nodes in y axis
nnod_z = int((nel_z * 2) + 1) # number of nodes in z axis
# For left and right surface
    sIEN = np.zeros((9, nel_y * nel_z), dtype=int)  # to store surface individual element nodes (sIEN)
e = 0
for k in range(1, nnod_x * nnod_y * (nnod_z - 1), (nnod_x * nnod_y) * 2): # go up
for j in range(1, nnod_x * (nnod_y - 1), 2 * nnod_x): # go left
sIEN[0, e] = j + (k - 1) # 1st node
sIEN[1, e] = sIEN[0, e] + (nnod_x) * (nnod_y) # 2nd node
sIEN[2, e] = sIEN[1, e] + (nnod_x) * (nnod_y) # 3rd node
sIEN[3, e] = sIEN[0, e] + nnod_x # 4th node
sIEN[4, e] = sIEN[1, e] + nnod_x # 5th node
sIEN[5, e] = sIEN[2, e] + nnod_x # 6th node
sIEN[6, e] = sIEN[3, e] + nnod_x # 7th node
sIEN[7, e] = sIEN[4, e] + nnod_x # 8th node
sIEN[8, e] = sIEN[5, e] + nnod_x # 9th node
e = e + 1
left = np.unique(sIEN) # collection of nodes of left surface
right = np.unique(sIEN.T + (nnod_x - 1)) # collection of nodes on right surface
# For front and back surface
sIEN = np.zeros((9, nel_x * nel_z), dtype=int)
e = 0
for k in range(1, nnod_x * nnod_y * (nnod_z - 2), (nnod_x * nnod_y) * 2): # go up
for i in range(1, nnod_x - 1, 2): # go right
sIEN[0, e] = i + (k - 1)
sIEN[1, e] = sIEN[0, e] + 1
sIEN[2, e] = sIEN[0, e] + 2
sIEN[3, e] = sIEN[0, e] + (nnod_x * nnod_y)
sIEN[4, e] = sIEN[3, e] + 1
sIEN[5, e] = sIEN[3, e] + 2
sIEN[6, e] = sIEN[3, e] + (nnod_x * nnod_y)
sIEN[7, e] = sIEN[6, e] + 1
sIEN[8, e] = sIEN[6, e] + 2
e = e + 1
front = np.unique(sIEN) # collection of nodes on front surface
back = np.unique(sIEN.T + (nnod_x * (nnod_y - 1))) # collection of nodes on back surface
# For top and bottom surface
sIEN = np.zeros((9, nel_x * nel_y), dtype=int)
e = 0
for j in range(1, nnod_x * (nnod_y - 1), nnod_x * 2): # go up
for i in range(1, nnod_x - 1, 2): # go back
sIEN[0, e] = i + (j - 1)
sIEN[1, e] = sIEN[0, e] + 1
sIEN[2, e] = sIEN[0, e] + 2
sIEN[3, e] = sIEN[0, e] + nnod_x
sIEN[4, e] = sIEN[3, e] + 1
sIEN[5, e] = sIEN[3, e] + 2
sIEN[6, e] = sIEN[3, e] + nnod_x
sIEN[7, e] = sIEN[6, e] + 1
sIEN[8, e] = sIEN[6, e] + 2
e = e + 1
bottom = np.unique(sIEN) # collection of nodes on bottom surface
top = np.unique(sIEN.T + (nnod_x * nnod_y) * (nnod_z - 1)) # collection of nodes on top surface
surfacenode = np.hstack((front, back, left, right, bottom, top))
surfacenode = np.unique(surfacenode) # collection of surface nodes from all surface
return surfacenode
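# Hypothetical usage sketch (illustrative only): for a single quadratic element
# (a 3 x 3 x 3 node grid) every node except the centre one lies on the surface.
def _demo_identify_surface_node_quad():
    surf = identify_surface_node_quad(1, 1, 1)
    assert len(surf) == 26 and 14 not in surf  # node 14 (1-based) is the centre
    return surf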
def identify_node_from_coord(nodes, filename):
# reading in the node location
xyz = open(filename, 'r')
xyz_coor = xyz.readlines() # readlines
startLines = range(0, len(xyz_coor))
for i in range(len(xyz_coor)):
xyz_coor[i] = xyz_coor[i].split()
xyzList = []
for i in startLines:
targetpoint = []
targetpoint.append(float(xyz_coor[i][0])) # x coor
        targetpoint.append(float(xyz_coor[i][1]))  # y coor
        targetpoint.append(float(xyz_coor[i][2]))  # z coor
xyzList.append(targetpoint)
xyz.close()
node_list = np.zeros(len(xyzList))
mindist = 100000
for i in range(0, len(xyzList)):
for j in range(0, len(nodes)):
print(xyzList[i][0], nodes[j][0])
return i
def identify_vessel_node(ellipsoid_coor, surfacenode, stem_file, sa_radius, dv_radius, volume,thickness, ellipticity):
"""Generates array of spiral artery nodes and decidual vein nodes. Spiral artery nodes are mapped with stem villi.
Inputs:
- ellipsoid_coor:coordinate of nodes of placental mesh
- surfacenode:array of surface nodes
- stem_file:txt file that described stem villi locations
Outputs:
- spiral_array: array of spiral artery nodes
- decidual_array: array of decidual artery nodes
- vesselnode: array of both spiral and decidual nodes
- surfnode_ex_vessel: array of surface node excluding vessel nodes
"""
radii = pg_utilities.calculate_ellipse_radii(volume, thickness, ellipticity)
z_radius = radii['z_radius']
x_radius = radii['x_radius']
y_radius = radii['y_radius']
xyList = np.zeros((len(surfacenode), 4))
count = 0
for i in range(0, len(surfacenode)): # taking only x and y coordinates
        if ellipsoid_coor[surfacenode[i] - 1, 3] < 0:  # take only upper surface nodes, as this is where vessels reside
# location from upper surface nodes only
xyList[count, 0] = ellipsoid_coor[surfacenode[i] - 1, 0] #node number
xyList[count, 1] = ellipsoid_coor[surfacenode[i] - 1, 1] #x-coordinate
xyList[count, 2] = ellipsoid_coor[surfacenode[i] - 1, 2] #y-coordinate
xyList[count, 3] = ellipsoid_coor[surfacenode[i] - 1, 3] #z-coordinate
count = count + 1
xyList = xyList[0:count, :]
surfnode_ex_vessel = np.copy(surfacenode)
vesselnode_temp = np.vstack({tuple(row) for row in xyList}) #nodes that might be vessels
# reading in the stem vessel to map the spiral artery location
stem_xy = open(stem_file, 'r')
stem_coor = stem_xy.readlines() # readlines
stem_xyList = imports_and_exports.import_stemxy(stem_file)['stem_xy']
print('Total stem read = '+ str(len(stem_xyList)))
vessel_mapped_stem = stem_xyList # this is the x,y location where we want to put spiral artery
    spiral_array = np.zeros((len(xyList)), dtype=int)  # store the node number of spiral artery nodes
decidual_array = np.zeros((len(xyList)), dtype=int) # store the node number of decidual vein
check = ellipsoid_coor[:, 0:2]
np.random.seed(0)
sa_nodes = 0
dv_nodes = 0
for i in range(0, len(vessel_mapped_stem)): # for each blood vessel,Cycle through to find closest nodes
closest_node = 0
for nodeX in vesselnode_temp:
distance=np.sqrt((vessel_mapped_stem[i][0] - nodeX[1]) ** 2 + (
vessel_mapped_stem[i][1] - nodeX[2]) ** 2 ) # distance from the nodes
if(distance < sa_radius):
#print('SA Node', int(nodeX[0]),nodeX[1],nodeX[2],vessel_mapped_stem[i][0],vessel_mapped_stem[i][1])
arterynode = nodeX[0]
A = np.where(vesselnode_temp == arterynode)
                vesselnode_temp = np.delete(vesselnode_temp, A[0], axis=0)
# -*- coding: utf-8 -*-
# Copyright 2020 The PsiZ Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Module for preprocessing observations.
Functions:
identify_catch_trials: Identify catch trials.
grade_catch_trials: Identify and grade catch trials.
quality_control: Remove observations belonging to agents that do
not meet quality control standards.
remove_catch_trials: Remove catch trials.
"""
from pathlib import Path
import numpy as np
import pandas as pd
def identify_catch_trials(obs):
"""Identify catch trials.
Catch trials are assumed to be any trial where at least one of the
references is the same stimulus as the query.
Arguments:
obs: A psiz.trials.RankObservations object.
Returns:
is_catch: Boolean array indicating catch trial locations.
shape = (n_trial,)
"""
n_trial = obs.n_trial
is_catch = np.zeros([n_trial], dtype=bool)
for i_trial in range(n_trial):
# Determine which references are identical to the query.
is_identical = np.equal(
obs.stimulus_set[i_trial, 0], obs.stimulus_set[i_trial, 1:]
)
if np.sum(is_identical) > 0:
is_catch[i_trial] = True
return is_catch
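# Hypothetical usage sketch (not the psiz API): identify_catch_trials only reads
# .n_trial and .stimulus_set, so a SimpleNamespace stands in for a
# RankObservations object in this illustration.
def _demo_identify_catch_trials():
    from types import SimpleNamespace
    obs_demo = SimpleNamespace(
        n_trial=2,
        stimulus_set=np.array([
            [0, 1, 2],   # all references differ from the query -> not a catch trial
            [3, 3, 4],   # a reference equals the query -> catch trial
        ]),
    )
    return identify_catch_trials(obs_demo)  # array([False,  True])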
def grade_catch_trials(obs, grade_mode='lenient'):
"""Grade catch trials.
Catch trials are assumed to be any trial where at least one of the
references is the same stimulus as the query. A catch trial is
graded as correct depending on the grade_mode.
Arguments:
obs: A psiz.trials.RankObservations object.
grade_mode (optional): Determines the manner in which responses
are graded. Can be either 'strict', 'partial', or
'lenient'. The options 'strict' and 'partial' are only
relevant for trials where participants provide ranked
responses, otherwise they are equivalent to 'lenient'. If
'lenient', then one of the selected references must include
the copy of the query. If 'strict', then the first choice
must be the copy of the query. If 'partial', then full
credit is given if the first choice is the copy of the
query and half credit is given if a choice other than the
first choice includes a copy of the query.
Returns:
avg_grade: Scalar indicating average grade on all catch trials.
Is np.nan if there are no catch trials.
grade: Array indicating grade of catch trial. The value can be
between 0 and 1, where 1 is a perfect score.
shape = (n_catch_trial,)
is_catch: Boolean array indicating catch trial locations.
shape = (n_trial,)
"""
n_trial = obs.n_trial
is_catch = identify_catch_trials(obs)
grade = np.zeros([n_trial])
for i_trial in range(n_trial):
if is_catch[i_trial]:
# Determine which references are identical to the query.
is_identical = np.equal(
obs.stimulus_set[i_trial, 0], obs.stimulus_set[i_trial, 1:]
)
# Grade response.
if grade_mode == 'lenient':
is_identical_selected = is_identical[0:obs.n_select[i_trial]]
if np.sum(is_identical_selected) > 0:
grade[i_trial] = 1
elif grade_mode == 'strict':
if obs.is_ranked[i_trial]:
is_identical_selected = is_identical[0]
else:
is_identical_selected = is_identical[
0:obs.n_select[i_trial]
]
if np.sum(is_identical_selected) > 0:
grade[i_trial] = 1
elif grade_mode == 'partial':
if obs.is_ranked[i_trial]:
is_identical_selected = is_identical[0]
if is_identical_selected:
grade[i_trial] = 1
else:
is_identical_selected = is_identical[
0:obs.n_select[i_trial]
]
if np.sum(is_identical_selected) > 0:
grade[i_trial] = .5
else:
is_identical_selected = is_identical[
0:obs.n_select[i_trial]
]
                if np.sum(is_identical_selected) > 0:
                    grade[i_trial] = 1
import time
import json
import sys
import os
import abc
import numpy as np
import pandas as pd
from functools import partial
import keras
import keras.backend as k
from keras.models import Model, load_model, model_from_yaml
from keras.layers import Input, Concatenate, Conv2D, Lambda, Dense, Add, Average, Multiply
from keras.engine.training_utils import is_sequence, iter_sequence_infinite, should_run_validation
from keras.utils.data_utils import Sequence, OrderedEnqueuer, GeneratorEnqueuer
from keras.utils.generic_utils import Progbar, to_list, unpack_singleton
from keras.utils import multi_gpu_model
from keras.utils import to_categorical
import keras.callbacks as cbks
import tensorflow as tf
from . import loss_and_metric
from .tensorboard_utils import *
from .ops import *
from ._build_base_network import *
#####################################################################################################################
# ProductSpaceOAE_GAN Network with HSIC
#####################################################################################################################
class ProductSpaceOAEHSIC_GAN(WAE_GAN):
def __init__(self, log, path_info, network_info, n_label, is_profiling=False):
super(ProductSpaceOAEHSIC_GAN, self).__init__(log, path_info, network_info, n_label, is_profiling=is_profiling)
self.metrics_names = ['main_loss', 'reconstruction', 'penalty_e', 'penalty_b', 'penalty_hsic',
'discriminator_loss',
]
self.TB = ProductSpaceOAETensorBoardWrapper_GAN
self.b_sd = float(network_info['model_info']['b_sd'])
self.lambda_b = float(network_info['model_info']['lambda_b'])
self.lambda_hsic = float(network_info['model_info']['lambda_hsic'])
try: self.e_weight = float(network_info['model_info']['e_weight'])
except: self.e_weight = 1.
try: self.e_train = not ('false' == network_info['model_info']['e_train'].strip().lower())
except: self.e_train = True
try: self.b_train = not ('false' == network_info['model_info']['b_train'].strip().lower())
except: self.b_train = True
try: self.feature_b = 'true' == network_info['model_info']['feature_b'].strip().lower()
except: self.feature_b = False
try: self.reset_e = 'true' == network_info['model_info']['reset_e'].strip().lower()
except: self.reset_e = False
def build_model(self, model_yaml_dir=None, verbose=0):
"""
verbose
0: Do not show any model
1: Show the AE and Discriminator models
2: Show all models
"""
# Load Models : encoder, decoder, discriminator
if model_yaml_dir == None: model_yaml_dir = os.path.join(self.model_save_dir, self.path_info['model_info']['model_architecture'])
self._encoder_base_model = self._load_model(model_yaml_dir+self.path_info['model_info']['model_encoder_base'], verbose=verbose==2)
self._encoder_b_model = self._load_model(model_yaml_dir+self.path_info['model_info']['model_encoder_b'], verbose=verbose==2)
self._encoder_e_model = self._load_model(model_yaml_dir+self.path_info['model_info']['model_encoder_e'], verbose=verbose==2)
self.decoder_model = self._load_model(model_yaml_dir+self.path_info['model_info']['model_decoder'], verbose=verbose==2)
self._discriminator_e_model = self._load_model(model_yaml_dir+self.path_info['model_info']['model_discriminator'], verbose=verbose==2)
self.save_models = {"encoder_base":self._encoder_base_model,
"encoder_b":self._encoder_b_model,
"encoder_e":self._encoder_e_model,
"decoder":self.decoder_model,
"discriminator":self._discriminator_e_model
}
self._discriminator_e_model.name = 'discriminator_e'
# build blocks
self.image_shape = self._encoder_base_model.input_shape[1:]
if self.feature_b: self.feature_shape = self._encoder_b_model.input_shape[1:]
self.b_z_dim = self._encoder_b_model.output_shape[-1]
self.e_z_dim = self._encoder_e_model.output_shape[-1]
real_image = Input(shape=self.image_shape, name='real_image_input', dtype='float32')
cls_info = Input(shape=(1,), name='class_info_input', dtype='int32')
prior_e_noise = Input(shape=(self.e_z_dim,), name='prior_e_input', dtype='float32')
prior_b_noise = Input(shape=(self.b_z_dim,), name='prior_b_input', dtype='float32')
if self.feature_b: feature_for_b = Input(shape=self.feature_shape, name='feature_b_input', dtype='float32')
# Encoder base
last_h = self._encoder_base_model([real_image])
# B_i ~ Q_B|X=x^i
if self.feature_b: b_j_given_x_j = self._encoder_b_model([feature_for_b])
else: b_j_given_x_j = self._encoder_b_model([last_h])
sample_b, b_given_x = Lambda(get_b, name='get_b_given_x')([b_j_given_x_j, cls_info])
if self.feature_b: self.encoder_b_model = Model([feature_for_b, cls_info], [sample_b, b_given_x, b_j_given_x_j], name='encoder_b_model')
else: self.encoder_b_model = Model([real_image, cls_info], [sample_b, b_given_x, b_j_given_x_j], name='encoder_b_model')
# E^i_j ~Q_E_0|X_0,B=X^i_j,B_i
e_given_x_b = self._encoder_e_model([last_h, sample_b])
if self.feature_b: self.encoder_e_model = Model([real_image, feature_for_b, cls_info], [e_given_x_b], name='encoder_e_model')
else: self.encoder_e_model = Model([real_image, cls_info], [e_given_x_b], name='encoder_e_model')
# Z^i_j = (B_i, E^i_j)
b_input = Input(shape=(self.b_z_dim,), name='estimated_b_input', dtype='float32')
noise_input = Input(shape=(self.e_z_dim,), name='noise_input', dtype='float32')
if self.e_weight != 1.: noise_weighted = Lambda(lambda x : self.e_weight*x, name='noise_weighted')(noise_input)
else: noise_weighted = noise_input
latent = Concatenate(axis=1, name='concat_latent')([b_input, noise_weighted])
self.z_encoder_model = Model([b_input, noise_input], [latent], name='encoder_z_model')
# Build connections
if self.feature_b:
sample_b, b_given_x, b_j_given_x_j = self.encoder_b_model([feature_for_b, cls_info])
e_given_x_b = self.encoder_e_model([real_image, feature_for_b, cls_info])
else:
sample_b, b_given_x, b_j_given_x_j = self.encoder_b_model([real_image, cls_info])
e_given_x_b = self.encoder_e_model([real_image, cls_info])
fake_latent = self.z_encoder_model([sample_b, e_given_x_b])
recon_image = self.decoder_model(fake_latent)
if self.feature_b: self.ae_model = Model(inputs=[real_image, feature_for_b, cls_info], outputs=[recon_image], name='ae_model')
else: self.ae_model = Model(inputs=[real_image, cls_info], outputs=[recon_image], name='ae_model')
if verbose==2:
self.log.info('Auto-Encoder model')
self.ae_model.summary(line_length=200, print_fn=self.log.info)
sys.stdout.flush()
# GAN model
p_e = self._discriminator_e_model(prior_e_noise)
q_e = self._discriminator_e_model(e_given_x_b)
output = Concatenate(name='mlp_concat')([p_e, q_e]) ## TODO : fix..
if self.feature_b: self.gan_model = Model(inputs=[real_image, feature_for_b, cls_info, prior_e_noise], outputs=[output], name='GAN_model')
else: self.gan_model = Model(inputs=[real_image, cls_info, prior_e_noise], outputs=[output], name='GAN_model')
# WAE model
recon_error = Lambda(mean_reconstruction_l2sq_loss, name='mean_recon_error')([real_image, recon_image])
# recon_error = Lambda(mean_reconstruction_l2sq_loss_e, name='mean_recon_error')([real_image, recon_image, cls_info])
penalty_e = Lambda(get_qz_trick_loss, name='penalty_e')(q_e)
penalty_b = Lambda(get_b_penalty_loss, name='penalty_b',
arguments={'sigma':self.b_sd, 'zdim':self.b_z_dim, 'kernel':'RBF', 'p_z':'normal'})([prior_b_noise, b_given_x])
penalty_hsic = Lambda(get_hsic, name="penalty_hsic")([e_given_x_b, sample_b])
if self.feature_b: self.main_model = Model(inputs=[real_image, feature_for_b, cls_info, prior_b_noise],
outputs=[recon_error, penalty_e, penalty_b, penalty_hsic], name='main_model')
else: self.main_model = Model(inputs=[real_image, cls_info, prior_b_noise],
outputs=[recon_error, penalty_e, penalty_b, penalty_hsic], name='main_model')
# Blur information
prior_latent = Input(shape=(self.b_z_dim + self.e_z_dim,), name='prior_z_input', dtype='float32')
self.blurr_model = get_compute_blurriness_model(self.image_shape)
gen_image = self.decoder_model(prior_latent)
gen_sharpness = self.blurr_model(gen_image)
self.gen_blurr_model = Model(inputs=[prior_latent], outputs=[gen_sharpness], name='gen_blurr_model')
if verbose==2:
self.log.info('Generative sample blurr model')
self.gen_blurr_model.summary(line_length=200, print_fn=self.log.info)
sys.stdout.flush()
# cluster information
ssw, ssb, n_points_mean, n_l = Lambda(self._get_cluster_information_by_class_index,
name='get_cluster_information_by_class_index')([b_j_given_x_j, cls_info])
if self.feature_b:
self.cluster_info_model = Model(inputs=[feature_for_b, cls_info],
outputs=[ssw, ssb, n_points_mean, n_l], name='get_cluster_info')
else:
self.cluster_info_model = Model(inputs=[real_image, cls_info], outputs=[ssw, ssb, n_points_mean, n_l], name='get_cluster_info')
if verbose==2:
self.log.info('Cluster information model')
self.cluster_info_model.summary(line_length=200, print_fn=self.log.info)
sys.stdout.flush()
try:
self.parallel_main_model = multi_gpu_model(self.main_model, gpus=self.number_of_gpu)
self.parallel_gan_model = multi_gpu_model(self.gan_model, gpus=self.number_of_gpu)
self.log.info("Training using multiple GPUs")
except ValueError:
self.parallel_main_model = self.main_model
self.parallel_gan_model = self.gan_model
self.log.info("Training using single GPU or CPU")
self.train_models = {'discriminator':self.gan_model, 'main':self.main_model}
self.parallel_train_models = {'discriminator':self.parallel_gan_model, 'main':self.parallel_main_model}
self.train_models_lr = {'discriminator':{'lr':float(self.network_info['model_info']['lr_e_adv']),
'decay':float(self.network_info['model_info']['lr_e_adv_decay'])},
'main':{'lr':float(self.network_info['model_info']['lr_e']),
'decay':float(self.network_info['model_info']['lr_e_decay'])}}
if verbose:
self.log.info('Main model')
self.main_model.summary(line_length=200, print_fn=self.log.info)
sys.stdout.flush()
self.log.info('Discriminator model')
self.gan_model.summary(line_length=200, print_fn=self.log.info)
sys.stdout.flush()
def model_compile(self, verbose=0):
self.log.info('Start models compile.')
if self.network_info['model_info']['optimizer'] =='adam':
optimizer_e = getattr(keras.optimizers,
self.network_info['model_info']['optimizer'])(lr=self.train_models_lr['main']['lr'],
beta_1=float(self.network_info['model_info']['lr_e_beta1']))
optimizer_e_adv = getattr(keras.optimizers,
self.network_info['model_info']['optimizer'])(lr=self.train_models_lr['discriminator']['lr'],
beta_1=float(self.network_info['model_info']['lr_e_adv_beta1']))
else:
optimizer_e = getattr(keras.optimizers,
self.network_info['model_info']['optimizer'])(lr=self.train_models_lr['main']['lr'])
optimizer_e_adv = getattr(keras.optimizers,
self.network_info['model_info']['optimizer'])(lr=self.train_models_lr['discriminator']['lr'])
if self.reset_e:
self.reset_weights(self._encoder_base_model)
self.reset_weights(self._encoder_e_model)
self.reset_weights(self._discriminator_e_model)
# GAN model compile
self._encoder_b_model.trainable = False
self._encoder_e_model.trainable = False
self._encoder_base_model.trainable = False
self.decoder_model.trainable = False
self._discriminator_e_model.trainable = self.e_train
self.parallel_gan_model.compile(loss=getattr(loss_and_metric, self.network_info['model_info']['discriminator_loss']),
optimizer=optimizer_e_adv, options=self.run_options, run_metadata=self.run_metadata)
# WAE model compile
self.decoder_model.trainable = True
self._encoder_b_model.trainable = self.b_train
self._encoder_e_model.trainable = self.e_train
self._encoder_base_model.trainable = self.e_train
self._discriminator_e_model.trainable = False
self.parallel_main_model.compile(loss={'mean_recon_error':getattr(loss_and_metric, self.network_info['model_info']['main_loss']),
'penalty_e':getattr(loss_and_metric, self.network_info['model_info']['penalty_e']),
'penalty_b':getattr(loss_and_metric, self.network_info['model_info']['penalty_b']),
'penalty_hsic':getattr(loss_and_metric, self.network_info['model_info']['penalty_b']),
},
loss_weights=[1., self.lambda_e, self.lambda_b, self.lambda_hsic],
optimizer=optimizer_e, options=self.run_options, run_metadata=self.run_metadata)
if verbose:
for name, model in self.parallel_train_models.items():
self.log.info('%s model' % name)
model.summary(line_length=200, print_fn=self.log.info)
sys.stdout.flush()
self.log.info('Model compile done.')
def save(self, filepath, is_compile=True, overwrite=True, include_optimizer=True):
model_path = self.path_info['model_info']['weight']
for name, model in self.save_models.items():
model.save("%s/%s_%s" % (filepath, name, model_path), overwrite=overwrite, include_optimizer=include_optimizer)
self.log.debug('Save model at %s' % filepath)
def load(self, filepath, verbose=0):
model_path = self.path_info['model_info']['weight']
loss_list = [self.network_info['model_info']['main_loss'],
self.network_info['model_info']['penalty_e'],
self.network_info['model_info']['discriminator_loss']]
load_dict = dict([(loss_name, getattr(loss_and_metric, loss_name)) for loss_name in loss_list])
load_dict['SelfAttention2D'] = SelfAttention2D
load_dict['get_qz_trick_loss'] = get_qz_trick_loss
load_dict['get_qz_trick_with_weight_loss'] = get_qz_trick_with_weight_loss
load_dict['get_hsic'] = get_hsic
load_dict['mmd_penalty'] = mmd_penalty
load_dict['get_b'] = get_b
load_dict['get_b_estimation_var'] = get_b_estimation_var
load_dict['get_b_penalty_loss'] = get_b_penalty_loss
load_dict['mean_reconstruction_l2sq_loss'] = mean_reconstruction_l2sq_loss
load_dict['get_class_mean_by_class_index'] = get_class_mean_by_class_index
load_dict['concat_with_uniform_sample'] = concat_with_uniform_sample
load_dict['get_batch_covariance'] = get_batch_covariance
load_dict['get_mutual_information_from_gaussian_sample'] = get_mutual_information_from_gaussian_sample
# TODO : fix save & load
tmp_model = load_model("%s/%s_%s" % (filepath, "encoder_base", model_path), custom_objects=load_dict)
tmp_model.save_weights("%s/tmp_%s_%s" % (filepath, "encoder_base", model_path), overwrite=False)
self._encoder_base_model.load_weights("%s/tmp_%s_%s" % (filepath, "encoder_base", model_path), by_name=True)
os.remove("%s/tmp_%s_%s" % (filepath, "encoder_base", model_path))
tmp_model = load_model("%s/%s_%s" % (filepath, "encoder_b", model_path), custom_objects=load_dict)
tmp_model.save_weights("%s/tmp_%s_%s" % (filepath, "encoder_b", model_path), overwrite=False)
self._encoder_b_model.load_weights("%s/tmp_%s_%s" % (filepath, "encoder_b", model_path), by_name=True)
os.remove("%s/tmp_%s_%s" % (filepath, "encoder_b", model_path))
tmp_model = load_model("%s/%s_%s" % (filepath, "encoder_e", model_path), custom_objects=load_dict)
tmp_model.save_weights("%s/tmp_%s_%s" % (filepath, "encoder_e", model_path), overwrite=False)
self._encoder_e_model.load_weights("%s/tmp_%s_%s" % (filepath, "encoder_e", model_path), by_name=True)
os.remove("%s/tmp_%s_%s" % (filepath, "encoder_e", model_path))
tmp_model = load_model("%s/%s_%s" % (filepath, "decoder", model_path), custom_objects=load_dict)
tmp_model.save_weights("%s/tmp_%s_%s" % (filepath, "decoder", model_path), overwrite=False)
self.decoder_model.load_weights("%s/tmp_%s_%s" % (filepath, "decoder", model_path), by_name=True)
os.remove("%s/tmp_%s_%s" % (filepath, "decoder", model_path))
tmp_model = load_model("%s/%s_%s" % (filepath, "discriminator", model_path), custom_objects=load_dict)
tmp_model.save_weights("%s/tmp_%s_%s" % (filepath, "discriminator", model_path), overwrite=False)
self._discriminator_e_model.load_weights("%s/tmp_%s_%s" % (filepath, "discriminator", model_path), by_name=True)
os.remove("%s/tmp_%s_%s" % (filepath, "discriminator", model_path))
self._discriminator_e_model.name = 'discriminator_e'
# build blocks
self.image_shape = self._encoder_base_model.input_shape[1:]
if self.feature_b: self.feature_shape = self._encoder_b_model.input_shape[1:]
self.b_z_dim = self._encoder_b_model.output_shape[-1]
self.e_z_dim = self._encoder_e_model.output_shape[-1]
real_image = Input(shape=self.image_shape, name='real_image_input', dtype='float32')
cls_info = Input(shape=(1,), name='class_info_input', dtype='int32')
prior_e_noise = Input(shape=(self.e_z_dim,), name='prior_e_input', dtype='float32')
prior_b_noise = Input(shape=(self.b_z_dim,), name='prior_b_input', dtype='float32')
if self.feature_b: feature_for_b = Input(shape=self.feature_shape, name='feature_b_input', dtype='float32')
# Encoder base
last_h = self._encoder_base_model([real_image])
# B_i ~ Q_B|X=x^i
if self.feature_b: b_j_given_x_j = self._encoder_b_model([feature_for_b])
else: b_j_given_x_j = self._encoder_b_model([last_h])
sample_b, b_given_x = Lambda(get_b, name='get_b_given_x')([b_j_given_x_j, cls_info])
if self.feature_b: self.encoder_b_model = Model([feature_for_b, cls_info], [sample_b, b_given_x, b_j_given_x_j], name='encoder_b_model')
else: self.encoder_b_model = Model([real_image, cls_info], [sample_b, b_given_x, b_j_given_x_j], name='encoder_b_model')
# E^i_j ~Q_E_0|X_0,B=X^i_j,B_i
e_given_x_b = self._encoder_e_model([last_h, sample_b])
if self.feature_b: self.encoder_e_model = Model([real_image, feature_for_b, cls_info], [e_given_x_b], name='encoder_e_model')
else: self.encoder_e_model = Model([real_image, cls_info], [e_given_x_b], name='encoder_e_model')
# Z^i_j = (B_i, E^i_j)
b_input = Input(shape=(self.b_z_dim,), name='estimated_b_input', dtype='float32')
noise_input = Input(shape=(self.e_z_dim,), name='noise_input', dtype='float32')
if self.e_weight != 1.: noise_weighted = Lambda(lambda x : self.e_weight*x, name='noise_weighted')(noise_input)
else: noise_weighted = noise_input
latent = Concatenate(axis=1, name='concat_latent')([b_input, noise_weighted])
self.z_encoder_model = Model([b_input, noise_input], [latent], name='encoder_z_model')
# Build connections
if self.feature_b:
sample_b, b_given_x, b_j_given_x_j = self.encoder_b_model([feature_for_b, cls_info])
e_given_x_b = self.encoder_e_model([real_image, feature_for_b, cls_info])
else:
sample_b, b_given_x, b_j_given_x_j = self.encoder_b_model([real_image, cls_info])
e_given_x_b = self.encoder_e_model([real_image, cls_info])
fake_latent = self.z_encoder_model([sample_b, e_given_x_b])
recon_image = self.decoder_model(fake_latent)
if self.feature_b: self.ae_model = Model(inputs=[real_image, feature_for_b, cls_info], outputs=[recon_image], name='ae_model')
else: self.ae_model = Model(inputs=[real_image, cls_info], outputs=[recon_image], name='ae_model')
if verbose==2:
self.log.info('Auto-Encoder model')
self.ae_model.summary(line_length=200, print_fn=self.log.info)
sys.stdout.flush()
# GAN model
p_e = self._discriminator_e_model(prior_e_noise)
q_e = self._discriminator_e_model(e_given_x_b)
output = Concatenate(name='mlp_concat')([p_e, q_e]) ## TODO : fix..
if self.feature_b: self.gan_model = Model(inputs=[real_image, feature_for_b, cls_info, prior_e_noise], outputs=[output], name='GAN_model')
else: self.gan_model = Model(inputs=[real_image, cls_info, prior_e_noise], outputs=[output], name='GAN_model')
# WAE model
recon_error = Lambda(mean_reconstruction_l2sq_loss, name='mean_recon_error')([real_image, recon_image])
penalty_e = Lambda(get_qz_trick_loss, name='penalty_e')(q_e)
penalty_b = Lambda(get_b_penalty_loss, name='penalty_b',
arguments={'sigma':self.b_sd, 'zdim':self.b_z_dim, 'kernel':'RBF', 'p_z':'normal'})([prior_b_noise, b_given_x])
penalty_hsic = Lambda(get_hsic, name="penalty_hsic")([e_given_x_b, sample_b])
if self.feature_b: self.main_model = Model(inputs=[real_image, feature_for_b, cls_info, prior_b_noise],
outputs=[recon_error, penalty_e, penalty_b, penalty_hsic], name='main_model')
else: self.main_model = Model(inputs=[real_image, cls_info, prior_b_noise],
outputs=[recon_error, penalty_e, penalty_b, penalty_hsic], name='main_model')
# Blur information
prior_latent = Input(shape=(self.b_z_dim + self.e_z_dim,), name='prior_z_input', dtype='float32')
self.blurr_model = get_compute_blurriness_model(self.image_shape)
gen_image = self.decoder_model(prior_latent)
gen_sharpness = self.blurr_model(gen_image)
self.gen_blurr_model = Model(inputs=[prior_latent], outputs=[gen_sharpness], name='gen_blurr_model')
# cluster information
ssw, ssb, n_points_mean, n_l = Lambda(self._get_cluster_information_by_class_index,
name='get_cluster_information_by_class_index')([b_j_given_x_j, cls_info])
if self.feature_b:
self.cluster_info_model = Model(inputs=[feature_for_b, cls_info],
outputs=[ssw, ssb, n_points_mean, n_l], name='get_cluster_info')
else:
self.cluster_info_model = Model(inputs=[real_image, cls_info], outputs=[ssw, ssb, n_points_mean, n_l], name='get_cluster_info')
self.model_compile()
self.log.info('Loaded WAE model')
self.main_model.summary(line_length=200, print_fn=self.log.info)
sys.stdout.flush()
self.log.info('Loaded Discriminator model: GAN')
self.gan_model.summary(line_length=200, print_fn=self.log.info)
sys.stdout.flush()
def discriminator_sampler(self, x, y):
e_noise = self.noise_sampler(y.shape[0], self.e_z_dim, self.e_sd)
if self.feature_b:
return [x[0], x[1], y[:, np.newaxis], e_noise], [np.zeros([x[0].shape[0],2], dtype='float32')]
else:
return [x, y[:, np.newaxis], e_noise], [np.zeros([x.shape[0],2], dtype='float32')]
def main_sampler(self, x, y):
b_noise = self.noise_sampler(y.shape[0], self.b_z_dim, self.b_sd) #, dist='spherical_uniform')
if self.feature_b:
return [x[0], x[1], y[:,np.newaxis], b_noise], [np.zeros(x[0].shape[0], dtype='float32')]*4
else:
return [x, y[:,np.newaxis], b_noise], [np.zeros(x.shape[0], dtype='float32')]*4
def train_on_batch(self, x, y, sample_weight=None, class_weight=None, reset_metrics=True):
wx, wy = self.main_sampler(x, y)
main_outs = self.parallel_main_model.train_on_batch(wx, wy,
sample_weight=sample_weight, class_weight=class_weight, reset_metrics=reset_metrics)
dx, dy = self.discriminator_sampler(x, y)
if self.e_train: d_outs = self.parallel_gan_model.train_on_batch(dx, dy,
sample_weight=sample_weight, class_weight=class_weight, reset_metrics=reset_metrics)
else: d_outs = 0
return (main_outs +
[d_outs]
)
def test_on_batch(self, x, y, sample_weight=None, reset_metrics=True):
wx, wy = self.main_sampler(x, y)
main_outs = self.parallel_main_model.test_on_batch(wx, wy,
sample_weight=sample_weight, reset_metrics = reset_metrics)
dx, dy = self.discriminator_sampler(x, y)
if self.e_train: d_outs = self.parallel_gan_model.test_on_batch(dx, dy,
sample_weight=sample_weight, reset_metrics = reset_metrics)
else: d_outs = 0
return (main_outs +
[d_outs]
)
def get_reference_images(self, generator):
batches = [generator[i] for i in range(4)]
self.fixed_classes = np.concatenate([batch[1] for batch in batches])
if self.feature_b:
self.fixed_feature = np.concatenate([batch[0][1] for batch in batches])
return np.concatenate([batch[0][0] for batch in batches])
else:
return np.concatenate([batch[0] for batch in batches])
def on_train_begin(self, x):
self.fixed_images = x[self.fixed_classes == self.fixed_classes[0]]
if self.feature_b: self.fixed_feature = self.fixed_feature[self.fixed_classes == self.fixed_classes[0]]
self.fixed_classes = self.fixed_classes[self.fixed_classes == self.fixed_classes[0]]
real_image_blurriness = self.blurr_model.predict_on_batch(x)
self.fixed_noise = self.noise_sampler(x.shape[0], self.e_z_dim, self.e_sd)
self.log.info("Real image's sharpness = %.5f" % np.min(real_image_blurriness))
def on_epoch_end(self, epoch):
for name in self.train_models_lr.keys():
if self.train_models_lr[name]['decay'] > 0.:
self.train_models_lr[name]['lr'] = self._update_lr(epoch, lr=self.train_models_lr[name]['lr'],
decay=self.train_models_lr[name]['decay'])
k.set_value(self.parallel_train_models[name].optimizer.lr, self.train_models_lr[name]['lr'])
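# Illustrative sketch (added for clarity, not part of the original code): the configuration
# keys that ProductSpaceOAEHSIC_GAN reads from network_info['model_info'], collected from
# __init__ and model_compile above. All values below are placeholders, and the parent
# WAE_GAN class reads further keys (e.g. lambda_e, e_sd) that are not listed here.
_example_model_info_keys = {
    'b_sd': '1.0', 'lambda_b': '10.0', 'lambda_hsic': '1.0', 'e_weight': '1.0',
    'e_train': 'true', 'b_train': 'true', 'feature_b': 'false', 'reset_e': 'false',
    'optimizer': 'adam',
    'lr_e': '1e-3', 'lr_e_decay': '0.0', 'lr_e_beta1': '0.5',
    'lr_e_adv': '1e-3', 'lr_e_adv_decay': '0.0', 'lr_e_adv_beta1': '0.5',
    'main_loss': '<function name in loss_and_metric>',
    'penalty_e': '<function name in loss_and_metric>',
    'penalty_b': '<function name in loss_and_metric>',
    'discriminator_loss': '<function name in loss_and_metric>',
}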
#####################################################################################################################
# ProductSpaceOAE using fixed b and HSIC GAN Network
#####################################################################################################################
class ProductSpaceOAEFixedBHSIC_GAN(WAE_GAN):
def __init__(self, log, path_info, network_info, n_label, is_profiling=False):
super(ProductSpaceOAEFixedBHSIC_GAN, self).__init__(log, path_info, network_info, n_label, is_profiling=is_profiling)
self.metrics_names = ['main_loss', 'reconstruction', 'penalty_e', 'penalty_hsic',
'discriminator_loss',
]
self.TB = ProductSpaceOAEFixedBTensorBoardWrapper_GAN
self.b_sd = float(network_info['model_info']['b_sd'])
self.lambda_hsic = float(network_info['model_info']['lambda_hsic'])
try: self.e_weight = float(network_info['model_info']['e_weight'])
except: self.e_weight = 1.
try: self.e_train = not ('false' == network_info['model_info']['e_train'].strip().lower())
except: self.e_train = True
try: self.reset_e = 'true' == network_info['model_info']['reset_e'].strip().lower()
except: self.reset_e = False
try: self.feature_b = 'true' == network_info['model_info']['feature_b'].strip().lower()
except: self.feature_b = False
try: self.fixed_b_path = network_info['training_info']['fixed_b_path'].strip()
except: raise ValueError("Need to set fixed_b_path")
def build_model(self, model_yaml_dir=None, verbose=0):
"""
verbose
0: Do not show any model
1: Show the AE and Discriminator models
2: Show all models
"""
# Load Models : encoder, decoder, discriminator
if model_yaml_dir == None: model_yaml_dir = os.path.join(self.model_save_dir, self.path_info['model_info']['model_architecture'])
self._encoder_base_model = self._load_model(model_yaml_dir+self.path_info['model_info']['model_encoder_base'], verbose=verbose==2)
self._encoder_b_model = self._load_model(model_yaml_dir+self.path_info['model_info']['model_encoder_b'], verbose=verbose==2)
self._encoder_e_model = self._load_model(model_yaml_dir+self.path_info['model_info']['model_encoder_e'], verbose=verbose==2)
self.decoder_model = self._load_model(model_yaml_dir+self.path_info['model_info']['model_decoder'], verbose=verbose==2)
self._discriminator_e_model = self._load_model(model_yaml_dir+self.path_info['model_info']['model_discriminator'], verbose=verbose==2)
self.save_models = {"encoder_base":self._encoder_base_model,
"encoder_b":self._encoder_b_model,
"encoder_e":self._encoder_e_model,
"decoder":self.decoder_model,
"discriminator":self._discriminator_e_model
}
self._discriminator_e_model.name = 'discriminator_e'
# build blocks
self.image_shape = self._encoder_base_model.input_shape[1:]
if self.feature_b: self.feature_shape = self._encoder_b_model.input_shape[1:]
self.b_z_dim = self._encoder_b_model.output_shape[-1]
self.e_z_dim = self._encoder_e_model.output_shape[-1]
real_image = Input(shape=self.image_shape, name='real_image_input', dtype='float32')
cls_info = Input(shape=(1,), name='class_info_input', dtype='int32')
prior_e_noise = Input(shape=(self.e_z_dim,), name='prior_e_input', dtype='float32')
prior_b_noise = Input(shape=(self.b_z_dim,), name='prior_b_input', dtype='float32')
b_input = Input(shape=(self.b_z_dim,), name='b_input', dtype='float32')
if self.feature_b: feature_for_b = Input(shape=self.feature_shape, name='feature_b_input', dtype='float32')
# Encoder base
last_h = self._encoder_base_model([real_image])
# B_i ~ Q_B|X=x^i
if self.feature_b: b_j_given_x_j = self._encoder_b_model([feature_for_b])
else: b_j_given_x_j = self._encoder_b_model([last_h])
sample_b, b_given_x = Lambda(get_b, name='get_b_given_x')([b_j_given_x_j, cls_info])
if self.feature_b: self.encoder_b_model = Model([feature_for_b, cls_info], [sample_b, b_given_x, b_j_given_x_j], name='encoder_b_model')
else: self.encoder_b_model = Model([real_image, cls_info], [sample_b, b_given_x, b_j_given_x_j], name='encoder_b_model')
# E^i_j ~Q_E_0|X_0,B=X^i_j,B_i
e_given_x_b = self._encoder_e_model([last_h, b_input])
self.encoder_e_model = Model([real_image, b_input], [e_given_x_b], name='encoder_e_model')
# Z^i_j = (B_i, E^i_j)
noise_input = Input(shape=(self.e_z_dim,), name='noise_input', dtype='float32')
if self.e_weight != 1.: noise_weighted = Lambda(lambda x : self.e_weight*x, name='noise_weighted')(noise_input)
else: noise_weighted = noise_input
latent = Concatenate(axis=1, name='concat_latent')([b_input, noise_weighted])
self.z_encoder_model = Model([b_input, noise_input], [latent], name='encoder_z_model')
# Build connections
e_given_x_b = self.encoder_e_model([real_image, b_input])
fake_latent = self.z_encoder_model([b_input, e_given_x_b])
recon_image = self.decoder_model(fake_latent)
self.ae_model = Model(inputs=[real_image, b_input], outputs=[recon_image], name='ae_model')
if verbose==2:
self.log.info('Auto-Encoder model')
self.ae_model.summary(line_length=200, print_fn=self.log.info)
sys.stdout.flush()
# GAN model
p_e = self._discriminator_e_model(prior_e_noise)
q_e = self._discriminator_e_model(e_given_x_b)
output = Concatenate(name='mlp_concat')([p_e, q_e]) ## TODO : fix..
self.gan_model = Model(inputs=[real_image, b_input, prior_e_noise], outputs=[output], name='GAN_model')
# WAE model
recon_error = Lambda(mean_reconstruction_l2sq_loss, name='mean_recon_error')([real_image, recon_image])
penalty_e = Lambda(get_qz_trick_loss, name='penalty_e')(q_e)
penalty_hsic = Lambda(get_hsic, name="penalty_hsic")([e_given_x_b, b_input])
self.main_model = Model(inputs=[real_image, b_input, cls_info],
outputs=[recon_error, penalty_e, penalty_hsic], name='main_model')
# Blur information
prior_latent = Input(shape=(self.b_z_dim + self.e_z_dim,), name='prior_z_input', dtype='float32')
self.blurr_model = get_compute_blurriness_model(self.image_shape)
gen_image = self.decoder_model(prior_latent)
gen_sharpness = self.blurr_model(gen_image)
self.gen_blurr_model = Model(inputs=[prior_latent], outputs=[gen_sharpness], name='gen_blurr_model')
if verbose==2:
self.log.info('Generative sample blurr model')
self.gen_blurr_model.summary(line_length=200, print_fn=self.log.info)
sys.stdout.flush()
try:
self.parallel_main_model = multi_gpu_model(self.main_model, gpus=self.number_of_gpu)
self.parallel_gan_model = multi_gpu_model(self.gan_model, gpus=self.number_of_gpu)
self.log.info("Training using multiple GPUs")
except ValueError:
self.parallel_main_model = self.main_model
self.parallel_gan_model = self.gan_model
self.log.info("Training using single GPU or CPU")
self.train_models = {'discriminator':self.gan_model, 'main':self.main_model}
self.parallel_train_models = {'discriminator':self.parallel_gan_model, 'main':self.parallel_main_model}
self.train_models_lr = {'discriminator':{'lr':float(self.network_info['model_info']['lr_e_adv']),
'decay':float(self.network_info['model_info']['lr_e_adv_decay'])},
'main':{'lr':float(self.network_info['model_info']['lr_e']),
'decay':float(self.network_info['model_info']['lr_e_decay'])}}
if verbose:
self.log.info('Main model')
self.main_model.summary(line_length=200, print_fn=self.log.info)
sys.stdout.flush()
self.log.info('Discriminator model')
self.gan_model.summary(line_length=200, print_fn=self.log.info)
sys.stdout.flush()
def model_compile(self, verbose=0):
self.log.info('Start models compile.')
if self.network_info['model_info']['optimizer'] =='adam':
optimizer_e = getattr(keras.optimizers,
self.network_info['model_info']['optimizer'])(lr=self.train_models_lr['main']['lr'],
beta_1=float(self.network_info['model_info']['lr_e_beta1']))
optimizer_e_adv = getattr(keras.optimizers,
self.network_info['model_info']['optimizer'])(lr=self.train_models_lr['discriminator']['lr'],
beta_1=float(self.network_info['model_info']['lr_e_adv_beta1']))
else:
optimizer_e = getattr(keras.optimizers,
self.network_info['model_info']['optimizer'])(lr=self.train_models_lr['main']['lr'])
optimizer_e_adv = getattr(keras.optimizers,
self.network_info['model_info']['optimizer'])(lr=self.train_models_lr['discriminator']['lr'])
if self.reset_e:
self.reset_weights(self._encoder_base_model)
self.reset_weights(self._encoder_e_model)
self.reset_weights(self._discriminator_e_model)
# GAN model compile
self._encoder_b_model.trainable = False
self._encoder_e_model.trainable = False
self._encoder_base_model.trainable = False
self.decoder_model.trainable = False
self._discriminator_e_model.trainable = self.e_train
self.parallel_gan_model.compile(loss=getattr(loss_and_metric, self.network_info['model_info']['discriminator_loss']),
optimizer=optimizer_e_adv, options=self.run_options, run_metadata=self.run_metadata)
# WAE model compile
self.decoder_model.trainable = True
self._encoder_b_model.trainable = False
self._encoder_e_model.trainable = self.e_train
self._encoder_base_model.trainable = self.e_train
self._discriminator_e_model.trainable = False
self.parallel_main_model.compile(loss={'mean_recon_error':getattr(loss_and_metric, self.network_info['model_info']['main_loss']),
'penalty_e':getattr(loss_and_metric, self.network_info['model_info']['penalty_e']),
'penalty_hsic':getattr(loss_and_metric, self.network_info['model_info']['penalty_b']),
},
loss_weights=[1., self.lambda_e, self.lambda_hsic],
optimizer=optimizer_e, options=self.run_options, run_metadata=self.run_metadata)
if verbose:
for name, model in self.parallel_train_models.items():
self.log.info('%s model' % name)
model.summary(line_length=200, print_fn=self.log.info)
sys.stdout.flush()
self.log.info('Model compile done.')
def save(self, filepath, is_compile=True, overwrite=True, include_optimizer=True):
model_path = self.path_info['model_info']['weight']
for name, model in self.save_models.items():
model.save("%s/%s_%s" % (filepath, name, model_path), overwrite=overwrite, include_optimizer=include_optimizer)
self.log.debug('Save model at %s' % filepath)
def load(self, filepath, verbose=0):
model_path = self.path_info['model_info']['weight']
loss_list = [self.network_info['model_info']['main_loss'],
self.network_info['model_info']['penalty_e'],
self.network_info['model_info']['discriminator_loss']]
load_dict = dict([(loss_name, getattr(loss_and_metric, loss_name)) for loss_name in loss_list])
load_dict['SelfAttention2D'] = SelfAttention2D
load_dict['get_qz_trick_loss'] = get_qz_trick_loss
load_dict['get_qz_trick_with_weight_loss'] = get_qz_trick_with_weight_loss
load_dict['get_entropy_loss_with_logits'] = get_entropy_loss_with_logits
load_dict['mmd_penalty'] = mmd_penalty
load_dict['get_b'] = get_b
load_dict['get_b_estimation_var'] = get_b_estimation_var
load_dict['get_b_penalty_loss'] = get_b_penalty_loss
load_dict['mean_reconstruction_l2sq_loss'] = mean_reconstruction_l2sq_loss
load_dict['get_class_mean_by_class_index'] = get_class_mean_by_class_index
load_dict['concat_with_uniform_sample'] = concat_with_uniform_sample
load_dict['get_batch_covariance'] = get_batch_covariance
load_dict['get_mutual_information_from_gaussian_sample'] = get_mutual_information_from_gaussian_sample
# TODO : fix save & load
tmp_model = load_model("%s/%s_%s" % (filepath, "encoder_base", model_path), custom_objects=load_dict)
tmp_model.save_weights("%s/tmp_%s_%s" % (filepath, "encoder_base", model_path), overwrite=False)
self._encoder_base_model.load_weights("%s/tmp_%s_%s" % (filepath, "encoder_base", model_path), by_name=True)
os.remove("%s/tmp_%s_%s" % (filepath, "encoder_base", model_path))
tmp_model = load_model("%s/%s_%s" % (filepath, "encoder_b", model_path), custom_objects=load_dict)
tmp_model.save_weights("%s/tmp_%s_%s" % (filepath, "encoder_b", model_path), overwrite=False)
self._encoder_b_model.load_weights("%s/tmp_%s_%s" % (filepath, "encoder_b", model_path), by_name=True)
os.remove("%s/tmp_%s_%s" % (filepath, "encoder_b", model_path))
tmp_model = load_model("%s/%s_%s" % (filepath, "encoder_e", model_path), custom_objects=load_dict)
tmp_model.save_weights("%s/tmp_%s_%s" % (filepath, "encoder_e", model_path), overwrite=False)
self._encoder_e_model.load_weights("%s/tmp_%s_%s" % (filepath, "encoder_e", model_path), by_name=True)
os.remove("%s/tmp_%s_%s" % (filepath, "encoder_e", model_path))
tmp_model = load_model("%s/%s_%s" % (filepath, "decoder", model_path), custom_objects=load_dict)
tmp_model.save_weights("%s/tmp_%s_%s" % (filepath, "decoder", model_path), overwrite=False)
self.decoder_model.load_weights("%s/tmp_%s_%s" % (filepath, "decoder", model_path), by_name=True)
os.remove("%s/tmp_%s_%s" % (filepath, "decoder", model_path))
tmp_model = load_model("%s/%s_%s" % (filepath, "discriminator", model_path), custom_objects=load_dict)
tmp_model.save_weights("%s/tmp_%s_%s" % (filepath, "discriminator", model_path), overwrite=False)
self._discriminator_e_model.load_weights("%s/tmp_%s_%s" % (filepath, "discriminator", model_path), by_name=True)
os.remove("%s/tmp_%s_%s" % (filepath, "discriminator", model_path))
self._discriminator_e_model.name = 'discriminator_e'
# build blocks
self.image_shape = self._encoder_base_model.input_shape[1:]
if self.feature_b: self.feature_shape = self._encoder_b_model.input_shape[1:]
self.b_z_dim = self._encoder_b_model.output_shape[-1]
self.e_z_dim = self._encoder_e_model.output_shape[-1]
real_image = Input(shape=self.image_shape, name='real_image_input', dtype='float32')
cls_info = Input(shape=(1,), name='class_info_input', dtype='int32')
prior_e_noise = Input(shape=(self.e_z_dim,), name='prior_e_input', dtype='float32')
prior_b_noise = Input(shape=(self.b_z_dim,), name='prior_b_input', dtype='float32')
b_input = Input(shape=(self.b_z_dim,), name='b_input', dtype='float32')
if self.feature_b: feature_for_b = Input(shape=self.feature_shape, name='feature_b_input', dtype='float32')
# Encoder base
last_h = self._encoder_base_model([real_image])
# B_i ~ Q_B|X=x^i
if self.feature_b: b_j_given_x_j = self._encoder_b_model([feature_for_b])
else: b_j_given_x_j = self._encoder_b_model([last_h])
sample_b, b_given_x = Lambda(get_b, name='get_b_given_x')([b_j_given_x_j, cls_info])
if self.feature_b: self.encoder_b_model = Model([feature_for_b, cls_info], [sample_b, b_given_x, b_j_given_x_j], name='encoder_b_model')
else: self.encoder_b_model = Model([real_image, cls_info], [sample_b, b_given_x, b_j_given_x_j], name='encoder_b_model')
# E^i_j ~Q_E_0|X_0,B=X^i_j,B_i
e_given_x_b = self._encoder_e_model([last_h, b_input])
self.encoder_e_model = Model([real_image, b_input], [e_given_x_b], name='encoder_e_model')
# Z^i_j = (B_i, E^i_j)
noise_input = Input(shape=(self.e_z_dim,), name='noise_input', dtype='float32')
if self.e_weight != 1.: noise_weighted = Lambda(lambda x : self.e_weight*x, name='noise_weighted')(noise_input)
else: noise_weighted = noise_input
latent = Concatenate(axis=1, name='concat_latent')([b_input, noise_weighted])
self.z_encoder_model = Model([b_input, noise_input], [latent], name='encoder_z_model')
# Build connections
e_given_x_b = self.encoder_e_model([real_image, b_input])
fake_latent = self.z_encoder_model([b_input, e_given_x_b])
recon_image = self.decoder_model(fake_latent)
self.ae_model = Model(inputs=[real_image, b_input], outputs=[recon_image], name='ae_model')
if verbose==2:
self.log.info('Auto-Encoder model')
self.ae_model.summary(line_length=200, print_fn=self.log.info)
sys.stdout.flush()
# GAN model
p_e = self._discriminator_e_model(prior_e_noise)
q_e = self._discriminator_e_model(e_given_x_b)
output = Concatenate(name='mlp_concat')([p_e, q_e]) ## TODO : fix..
self.gan_model = Model(inputs=[real_image, b_input, prior_e_noise], outputs=[output], name='GAN_model')
# WAE model
recon_error = Lambda(mean_reconstruction_l2sq_loss, name='mean_recon_error')([real_image, recon_image])
penalty_e = Lambda(get_qz_trick_loss, name='penalty_e')(q_e)
penalty_hsic = Lambda(get_hsic, name="penalty_hsic")([e_given_x_b, b_input])
self.main_model = Model(inputs=[real_image, b_input, cls_info],
outputs=[recon_error, penalty_e, penalty_hsic], name='main_model')
# Blur information
prior_latent = Input(shape=(self.b_z_dim + self.e_z_dim,), name='prior_z_input', dtype='float32')
self.blurr_model = get_compute_blurriness_model(self.image_shape)
gen_image = self.decoder_model(prior_latent)
gen_sharpness = self.blurr_model(gen_image)
self.gen_blurr_model = Model(inputs=[prior_latent], outputs=[gen_sharpness], name='gen_blurr_model')
if verbose==2:
self.log.info('Generative sample blurr model')
self.gen_blurr_model.summary(line_length=200, print_fn=self.log.info)
sys.stdout.flush()
self.model_compile()
self.log.info('Loaded WAE model')
self.main_model.summary(line_length=200, print_fn=self.log.info)
sys.stdout.flush()
self.log.info('Loaded Discriminator model: GAN')
self.gan_model.summary(line_length=200, print_fn=self.log.info)
sys.stdout.flush()
def discriminator_sampler(self, x, y):
e_noise = self.noise_sampler(y.shape[0], self.e_z_dim, self.e_sd)
## TODO: not using feature_b
return [x[0], x[2], e_noise], [np.zeros([y.shape[0],2], dtype='float32')]
def main_sampler(self, x, y):
## TODO: not using feature_b
return [x[0], x[2], y[:,np.newaxis]], [np.zeros(y.shape[0], dtype='float32')]*3
def train_on_batch(self, x, y, sample_weight=None, class_weight=None, reset_metrics=True):
wx, wy = self.main_sampler(x, y)
main_outs = self.parallel_main_model.train_on_batch(wx, wy,
sample_weight=sample_weight, class_weight=class_weight, reset_metrics=reset_metrics)
dx, dy = self.discriminator_sampler(x, y)
if self.e_train: d_outs = self.parallel_gan_model.train_on_batch(dx, dy,
sample_weight=sample_weight, class_weight=class_weight, reset_metrics=reset_metrics)
else: d_outs = 0
return (main_outs + [d_outs]
)
def test_on_batch(self, x, y, sample_weight=None, reset_metrics=True):
wx, wy = self.main_sampler(x, y)
main_outs = self.parallel_main_model.test_on_batch(wx, wy,
sample_weight=sample_weight, reset_metrics = reset_metrics)
dx, dy = self.discriminator_sampler(x, y)
if self.e_train: d_outs = self.parallel_gan_model.test_on_batch(dx, dy,
sample_weight=sample_weight, reset_metrics = reset_metrics)
else: d_outs = 0
return (main_outs + [d_outs]
)
def get_reference_images(self, generator):
## TODO: not using feature_b
batches = [generator[i] for i in range(4)]
self.fixed_classes = np.concatenate([batch[1] for batch in batches])
if self.feature_b:
self.fixed_feature = np.concatenate([batch[0][1] for batch in batches])
return np.concatenate([batch[0][0] for batch in batches])
else:
return np.concatenate([batch[0] for batch in batches])
def on_train_begin(self, x):
self.fixed_images = x[self.fixed_classes == self.fixed_classes[0]]
if self.feature_b: self.fixed_feature = self.fixed_feature[self.fixed_classes == self.fixed_classes[0]]
self.fixed_classes = self.fixed_classes[self.fixed_classes == self.fixed_classes[0]]
real_image_blurriness = self.blurr_model.predict_on_batch(x)
self.fixed_noise = self.noise_sampler(x.shape[0], self.e_z_dim, self.e_sd)
self.log.info("Real image's sharpness = %.5f" % | np.min(real_image_blurriness) | numpy.min |
import sys
import numpy as np
from functools import partial
if sys.version_info[0] >= 3:
xrange = range
def pldist(point, start, end):
if np.all(np.equal(start, end)):
return np.linalg.norm(point - start)
return np.divide(
np.abs(np.linalg.norm(np.cross(end - start, start - point))),
np.linalg.norm(end - start))
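# Illustrative usage sketch (not part of the original module): pldist returns the
# perpendicular distance from `point` to the line through `start` and `end`, which is the
# quantity a Ramer-Douglas-Peucker simplification compares against its epsilon.
# The sample coordinates below are made up for demonstration only.
def _pldist_example():
    start = np.array([0.0, 0.0])
    end = np.array([10.0, 0.0])
    point = np.array([5.0, 3.0])
    # Distance from (5, 3) to the x-axis is 3.0.
    return pldist(point, start, end)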
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
def RadarFigure(scoreslist, linestylelist, modelnamelist):
# 模板数据和标签
dataLenth = 8
angles = np.linspace((1 / 4) * np.pi, (2 + 1 / 4) * np.pi, dataLenth, endpoint=False)
angles = np.concatenate((angles, [angles[0]]))
labels = np.array(['TS of \n Starting Position', 'Trajectories Pattern', 'Dis of \n Route Length'
, 'Dis of \n Travel Time', 'Speed Choice',
'Fundamental Diagram', 'Direction Choice', 'TS of \n Destination Position'])
# labels = np.array(['TS of \n Starting Position', 'Direction choice', 'Dis of \n Route Length'
# , 'Dis of \n Travel Time', 'Dis of \n Speed',
# 'Speed choice', 'TS of \n Speed', 'TS of \n Destination Position'])
fig = plt.figure(figsize=(3.4, 3.3), dpi=300, linewidth=0.5)
ax = fig.add_subplot(111, polar=True)
plt.rcParams.update({'font.size': 6.5})
plt.grid(linewidth=0.25, linestyle='--')
# # data
# scores = [index_speed, # speed choice
# index_Dis_RL, index_Dis_TT, index_Dis_Speed, # static distribution
# index_TS_OriPoint, index_TS_DestPoint, index_TS_Speed, # dynamic time series
# index_direction] # direction choice
linelist = []
for i in range(len(scoreslist)):
z = [scoreslist[i][4], scoreslist[i][7], scoreslist[i][1], scoreslist[i][2], scoreslist[i][3], scoreslist[i][0],
scoreslist[i][6], scoreslist[i][5]]
data = np.array(z)
data = np.concatenate((data, [data[0]]))
li, = ax.plot(angles, data, linestylelist[i], linewidth=0.75, markersize=1.5)  # line style for model i
linelist.append(li)
ax.set_thetagrids(angles * 180 / np.pi, labels, fontproperties="Calibri")
plt.ylim(-0.25, 1.25) # y axis size
plt.yticks(np.arange(0, 1.5, step=0.5))
# plt.yticks.grid = True
# label rotation
plt.gcf().canvas.draw()
# angles1 = np.linspace(0.5*np.pi + (1/8) * np.pi, 0.5*np.pi +(2+1/8) * np.pi, dataLenth, endpoint=False)
angles1 = angles + 0.5 * np.pi
angles1[np.cos(angles1) < 0] = angles1[np.cos(angles1) < 0] + np.pi
angles1 = np.rad2deg(angles1)
labels = []
for label, angle in zip(ax.get_xticklabels(), angles1):
x, y = label.get_position()
lab = ax.text(x, y, label.get_text(), transform=label.get_transform(), ha=label.get_ha(), va=label.get_va())
lab.set_rotation(angle)
labels.append(lab)
ax.set_xticklabels([])
# plt.subplots_adjust(top=0.68,bottom=0.32,left=0.05,right=0.95)
# Set the tick label size
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(25)
ax.grid(True)
# Configure the legend
plt.legend(handles=linelist, labels=modelnamelist, fontsize=7,
labelspacing=0.075, borderpad=None, edgecolor='white', # borderaxespad = None,
loc=5, bbox_to_anchor=(1.175, -0.06))
# plt.figure(figsize = (2,2))
plt.savefig("radar.jpg")
plt.show()
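# Illustrative call sketch (not from the original script): RadarFigure expects one
# 8-element score list per model (the indices are reordered internally), a matplotlib
# format string per model, and a legend label per model. The numbers below are placeholders.
def _radar_figure_example():
    scores_model_a = [0.8, 0.6, 0.7, 0.9, 0.5, 0.75, 0.65, 0.85]
    scores_model_b = [0.7, 0.55, 0.6, 0.8, 0.45, 0.7, 0.6, 0.8]
    RadarFigure([scores_model_a, scores_model_b], ['r-o', 'b--s'], ['Model A', 'Model B'])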
def SoloRadarFigure(scoreslist, linestylelist, modelnamelist):
for i in range(len(scoreslist)):
# Template data and labels
dataLenth = 8
angles = np.linspace((1 / 4) * np.pi, (2 + 1 / 4) * np.pi, dataLenth, endpoint=False)
angles = np.concatenate((angles, [angles[0]]))
labels = np.array(['TS of \n Starting Position', 'Trajectories Pattern', 'Dis of \n Route Length'
, 'Dis of \n Travel Time', 'Speed Choice',
'Fundamental Diagram', 'Direction Choice', 'TS of \n Destination Position'])
fig = plt.figure(figsize=(3.4, 3.3), dpi=300, linewidth=0.5)
ax = fig.add_subplot(111, polar=True)
plt.rcParams.update({'font.size': 6.5})
plt.grid(linewidth=0.25, linestyle='--')
### data
linelist = []
z = [scoreslist[i][4], scoreslist[i][7], scoreslist[i][1], scoreslist[i][2], scoreslist[i][3], scoreslist[i][0],
scoreslist[i][6], scoreslist[i][5]]
data = np.array(z)
data = np.concatenate((data, [data[0]]))
li, = ax.plot(angles, data, linestylelist[i], linewidth=0.75, markersize=1.5)  # line style for model i
linelist.append(li)
ax.set_thetagrids(angles * 180 / np.pi, labels, fontproperties="Calibri")
plt.ylim(-0.25, 1.25) # y axis size
plt.yticks(np.arange(0, 1.5, step=0.5))
#!/usr/bin/env python
# libraries
from __future__ import print_function, division
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
import os
import h5py
import re
import shutil
import copy
import time
import random
import warnings
import operator
from datetime import datetime
from tqdm import tqdm, tqdm_notebook
from matplotlib.pyplot import specgram
import torch
import torchvision
from torchvision import datasets, models, transforms
import torch.optim as optim
from torch.optim import lr_scheduler
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from torchtext import data
from sklearn.metrics import precision_score, f1_score, recall_score, accuracy_score, confusion_matrix
from sklearn.metrics.cluster import contingency_matrix
import joblib
import csv
import warnings
warnings.filterwarnings('ignore')
#%matplotlib inline
### 1. LOAD DATA:
### Manual inputs (denoted by #-markers throughout):
subject = sys.argv[1] ###### CHANGE: subject name (make sure name does NOT have "_" in it; used for subject col and exportname)
eegfileloading = 'sub-'+subject+'_eegdata.csv' ############################################################### CHANGE: filename here
### LOAD EEG DATA --- rows = channels, cols = timepoints
eegdir = sys.argv[2] ################################################################################ CHANGE: dir here
# load file, checking for header
input_csv_file = eegdir+'/'+eegfileloading
with open(input_csv_file, 'rb') as csvfile:
csv_test_bytes = csvfile.read(10) # grab sample of .csv for format detection
headertest = csv_test_bytes.decode("utf-8")
if any(c.isalpha() for c in headertest) == True:
data = pd.read_csv(input_csv_file, header=0)
channels = data.columns
else:
data = pd.read_csv(input_csv_file, header=None)
### quick check: transpose if not in proper format (rows = chans, cols = timepoints) - build on this later.
if len(data) > len(data.columns):
data = data.T
if type(data[0][0]) == str:
data = data.drop(data.columns[0], axis=1)
data = data.astype(float)
print('CHECK: Number of channels ~ %d' % len(data))
else:
data = data.astype(float)
### AUTO DUMP IED IMAGES: clears dir containing spectrograms if produced in previous iteration
spectdir = eegdir+'/SPECTS/IEDS/' ################################################################## CHANGE: dir here
os.makedirs(spectdir, exist_ok = True)
for filename in os.listdir(spectdir):
file_path = os.path.join(spectdir, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print('Failed to delete %s. Reason: %s' % (file_path, e))
### 2. LOAD TEMPLATE-MATCHING DETECTOR FUNCTIONS:
def detect_peaks(x, mph=None, mpd=1, threshold=0, edge='rising',
kpsh=False, valley=False, show=False, ax=None):
"""Detect peaks in data based on their amplitude and other features."""
x = np.atleast_1d(x).astype('float64')
if x.size < 3:
return np.array([], dtype=int)
if valley:
x = -x
# find indices of all peaks
dx = x[1:] - x[:-1]
indnan = np.where(np.isnan(x))[0]
if indnan.size:
x[indnan] = np.inf
dx[np.where(np.isnan(dx))[0]] = np.inf
ine, ire, ife = np.array([[], [], []], dtype=int)
if not edge:
ine = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) > 0))[0]
else:
if edge.lower() in ['rising', 'both']:
ire = np.where((np.hstack((dx, 0)) <= 0) & (np.hstack((0, dx)) > 0))[0]
if edge.lower() in ['falling', 'both']:
ife = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) >= 0))[0]
ind = np.unique(np.hstack((ine, ire, ife)))
if ind.size and indnan.size:
# NaN's and values close to NaN's cannot be peaks
ind = ind[np.in1d(ind, np.unique(np.hstack((indnan, indnan-1, indnan+1))), invert=True)]
# first and last values of x cannot be peaks
if ind.size and ind[0] == 0:
ind = ind[1:]
if ind.size and ind[-1] == x.size-1:
ind = ind[:-1]
# remove peaks < minimum peak height
if ind.size and mph is not None:
ind = ind[x[ind] >= mph]
# remove peaks - neighbors < threshold
if ind.size and threshold > 0:
dx = np.min(np.vstack([x[ind]-x[ind-1], x[ind]-x[ind+1]]), axis=0)
ind = np.delete(ind, np.where(dx < threshold)[0])
# detect small peaks closer than minimum peak distance
if ind.size and mpd > 1:
ind = ind[np.argsort(x[ind])][::-1] # sort ind by peak height
idel = np.zeros(ind.size, dtype=bool)
for i in range(ind.size):
if not idel[i]:
# keep peaks with the same height if kpsh is True
idel = idel | (ind >= ind[i] - mpd) & (ind <= ind[i] + mpd) \
& (x[ind[i]] > x[ind] if kpsh else True)
idel[i] = 0 # Keep current peak
# remove the small peaks and sort back the indices by their occurrence
ind = np.sort(ind[~idel])
if show:
if indnan.size:
x[indnan] = np.nan
if valley:
x = -x
_plot(x, mph, mpd, threshold, edge, valley, ax, ind)
return ind
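# Illustrative sketch (added for clarity, not part of the original detector): a quick
# check of detect_peaks on a synthetic signal. With mph (minimum peak height) and mpd
# (minimum peak distance in samples) it should return only the two sinusoid crests.
def _detect_peaks_example():
    t = np.linspace(0, 1, 200)
    x = np.sin(2 * np.pi * 2 * t)  # two cycles -> two positive crests
    return detect_peaks(x, mph=0.5, mpd=20)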
def _plot(x, mph, mpd, threshold, edge, valley, ax, ind):
"""Plot results of the detect_peaks function, see its help."""
try:
import matplotlib.pyplot as plt
except ImportError:
print('matplotlib is not available.')
else:
if ax is None:
_, ax = plt.subplots(1, 1, figsize=(8, 4))
ax.plot(x, 'b', lw=1)
if ind.size:
label = 'valley' if valley else 'peak'
label = label + 's' if ind.size > 1 else label
ax.plot(ind, x[ind], '+', mfc=None, mec='r', mew=2, ms=8,
label='%d %s' % (ind.size, label))
ax.legend(loc='best', framealpha=.5, numpoints=1)
ax.set_xlim(-.02*x.size, x.size*1.02-1)
ymin, ymax = x[np.isfinite(x)].min(), x[np.isfinite(x)].max()
yrange = ymax - ymin if ymax > ymin else 1
ax.set_ylim(ymin - 0.1*yrange, ymax + 0.1*yrange)
ax.set_xlabel('Data #', fontsize=14)
ax.set_ylabel('Amplitude', fontsize=14)
mode = 'Valley detection' if valley else 'Peak detection'
ax.set_title("%s (mph=%s, mpd=%d, threshold=%s, edge='%s')"
% (mode, str(mph), mpd, str(threshold), edge))
# plt.grid()
plt.show()
def locate_downsample_freq(sample_freq, min_freq=200, max_freq=340):
min_up_factor = np.inf
best_candidate_freq = None
for candidate in range(min_freq, max_freq+1):
down_samp_rate = sample_freq / float(candidate)
down_factor, up_factor = down_samp_rate.as_integer_ratio()
if up_factor <= min_up_factor:
min_up_factor = up_factor
best_candidate_freq = candidate
return best_candidate_freq
def butter_bandpass(low_limit, high_limit, samp_freq, order=5):
nyquist_limit = samp_freq / 2
low_prop = low_limit / nyquist_limit
high_prop = high_limit / nyquist_limit
b, a = signal.butter(order, [low_prop, high_prop], btype='band')
def bb_filter(data):
return signal.filtfilt(b, a, data)
return bb_filter
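# Illustrative usage sketch (not part of the original pipeline): butter_bandpass returns
# a closure that applies a zero-phase Butterworth band-pass, and locate_downsample_freq
# picks a target rate in [min_freq, max_freq] whose resampling ratio needs the smallest
# integer up-factor. The 512 Hz rate and 1-25 Hz band below are arbitrary examples.
def _filter_example():
    fs = 512
    bandpass_1_25 = butter_bandpass(1, 25, fs)
    noisy = np.random.randn(fs * 4)  # 4 s of white noise
    filtered = bandpass_1_25(noisy)
    return filtered, locate_downsample_freq(fs)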
def detect(channel, samp_freq, return_eeg=False, temp_func=None, signal_func=None):
# assume that eeg is [channels x samples]
# Round samp_freq to the nearest integer if it is large
if samp_freq > 100:
samp_freq = int(np.round(samp_freq))
down_samp_freq = locate_downsample_freq(samp_freq)
template = signal.triang(np.round(down_samp_freq * 0.06))
kernel = np.array([-2, -1, 1, 2]) / float(8)
template = np.convolve(kernel, np.convolve(template, kernel, 'valid') ,'full')
if temp_func:
template = temp_func(template, samp_freq)
if signal_func:
channel = signal_func(channel, samp_freq)
down_samp_rate = samp_freq / float(down_samp_freq)
down_samp_factor, up_samp_factor = down_samp_rate.as_integer_ratio()
channel = signal.detrend(channel, type='constant')
results = template_match(channel, template, down_samp_freq)
up_samp_results = [np.round(spikes * down_samp_factor / float(up_samp_factor)).astype(int) for spikes in results]
if return_eeg:
return up_samp_results, [channel[start:end] for start, end in results]
else:
return up_samp_results
def template_match(channel, template, down_samp_freq, thresh=7, min_spacing=0): #######@@@############################## CHANGE: d:7,0
template_len = len(template)
cross_corr = np.convolve(channel, template, 'valid')
cross_corr_std = med_std(cross_corr, down_samp_freq)
detections = []
# catch empty channels
if cross_corr_std > 0:
# normalize the cross-correlation
cross_corr_norm = ((cross_corr - np.mean(cross_corr)) / cross_corr_std)
cross_corr_norm[1] = 0
cross_corr_norm[-1] = 0
# find regions with high cross-corr
if np.any(np.abs(cross_corr_norm) > thresh):  # two-sided check: take abs of the values before comparing
peaks = detect_peaks(abs(cross_corr_norm), mph=thresh, mpd=template_len)
peaks += int(np.ceil(template_len / 2.)) # center detection on template
peaks = [peak for peak in peaks if peak > template_len and peak <= len(channel)-template_len]
if peaks:
# find peaks that are at least (min_spacing) secs away
distant_peaks = np.diff(peaks) > min_spacing * down_samp_freq
# always keep the first peak
to_keep = np.insert(distant_peaks, 0, True)
peaks = [peaks[x] for x in range(len(peaks)) if to_keep[x] == True]
detections = [(peak-template_len, peak+template_len) for peak in peaks]
return np.array(detections)
def med_std(signal, window_len):
window = np.zeros(window_len) + (1 / float(window_len))
std = np.sqrt(np.median(np.convolve(np.square(signal), window, 'valid') - np.square(np.convolve(signal, window, 'valid'))))
return std
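# Illustrative sketch (added example, not original): med_std estimates a robust standard
# deviation as the median of sliding-window variances, so a single large artifact inflates
# it far less than a global np.std would.
def _med_std_example():
    rng = np.random.RandomState(0)
    x = rng.randn(2000)
    x[1000] += 50.0  # one large artifact
    return med_std(x, 200), np.std(x)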
### 3. RUN TEMPLATE-MATCHING DETECTOR:
def autoDetect(eegdata, samp_freq = 200, subject = subject):
"""
AUTODETECT: DETECTS ALL SPIKES IN EACH CHANNEL
INPUT: raw eeg file (preprocessed signal)
OUTPUT: all_detections (list containing a list of arrays for all detections),
channel_names (eeg channel names corresponding to each detection list)
"""
### DETECT SPIKES:
all_detections = []
channel_names = []
for i in range(eegdata.shape[0]):
channel = eegdata.iloc[i,:].astype(float) # run on each row (chan)
detections = detect(channel, samp_freq, return_eeg=False, temp_func=None, signal_func=None)
all_detections.append(detections)
channel_names.append(int(float((eegdata.columns[i]))))
### REFORMAT SPIKES:
detections = pd.DataFrame(all_detections)
channels = pd.DataFrame(channel_names)
spikes = pd.concat([channels,detections], axis = 1)
newspikes = spikes.transpose()
newspikes.columns = newspikes.iloc[0]
newspikes = newspikes.iloc[1:] # remove duplicate channel_name row
### AUTO LONG-FORMATTING OF SPIKES
spikeDf = pd.DataFrame() # empty df to store final spikes and spikeTimes
for idx, col in enumerate(newspikes.columns):
# extract spikes for each column
tempSpikes = newspikes.iloc[:,idx].dropna() # column corresponding to channel with all spikes
tempSpikes2 = tempSpikes.tolist() # convert series to list
# extract channel name for each spike (duplicate based on the number of spikes)
tempName = tempSpikes.name # channel name
tempName2 = [tempName] * len(tempSpikes) # repeat col name by the number of spikes in this channel
tempDf = pd.DataFrame({'channel': tempName2, 'spikeTime': tempSpikes2})
# save and append to final df
spikeDf = spikeDf.append(tempDf)
spikeDf['fs'] = samp_freq
spikeDf['subject'] = subject
return(spikeDf)
spikes = autoDetect(data) ### eegfile, Fs, sessionname; kleen_fs=200, preprocess_fs=200
print("SPIKES DETECTED (TEMP MATCH) = ", len(spikes))
print("")
print(spikes[:3])
### 4. GENERATE INPUT IMAGES FOR CNN:
def spectimgs(eegdata, spikedf):
"""
SPECTS: GENERATE SPECTS FOR CNN
INPUT: 1) eegdata, 2) spikedf (df from automated template-matching spike detector)
OUTPUT: spects within ./SPECTS/IEDS
"""
for i in tqdm(range(0,len(spikedf))):
samp_freq = int(float(spikedf.fs.values[0]))
#######################################
pad = 1 # d:1 number of seconds for window
dpi_setting = 300 # d:300
Nfft = 128*(samp_freq/500) # d: 128
h = 3
w = 3
#######################################
try:
subject = spikedf.subject.values[0]
chan_name = int(spikedf.channel.values[i]) # zero idxed -1
spikestart = spikedf.spikeTime.values[i][0] # start spike
### select eeg data row
ecogclip = eegdata.iloc[chan_name]
### filter out line noise
b_notch, a_notch = signal.iirnotch(60.0, 30.0, samp_freq)
ecogclip = pd.Series(signal.filtfilt(b_notch, a_notch, ecogclip))
### trim eeg clip based on cushion
### mean imputation if missing indices
end = int(float((spikestart+int(float(pad*samp_freq)))))
start = int(float((spikestart-int(float(pad*samp_freq)))))
if end > max(ecogclip.index):
temp = list(ecogclip[list(range(spikestart-int(float(pad*samp_freq)), max(ecogclip.index)))])
cushend = [np.mean(ecogclip)]*(end - max(ecogclip.index))
temp = np.array(temp + cushend)
elif start < min(ecogclip.index):
temp = list(ecogclip[list(range(min(ecogclip.index), spikestart+pad*samp_freq))])
cushstart = [np.mean(ecogclip)]*(min(ecogclip.index) - start)
# -*- coding: utf-8 -*-
from warnings import warn
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats
from ..complexity import (complexity_lempelziv, entropy_approximate,
entropy_fuzzy, entropy_multiscale, entropy_sample,
entropy_shannon, fractal_correlation, fractal_dfa,
fractal_higuchi, fractal_katz)
from ..misc import NeuroKitWarning, find_consecutive
from ..signal import signal_zerocrossings
from .hrv_utils import _hrv_get_rri, _hrv_sanitize_input
def hrv_nonlinear(peaks, sampling_rate=1000, show=False, **kwargs):
"""**Computes nonlinear indices of Heart Rate Variability (HRV)**
Non-linear indices include features derived from the *Poincaré plot*, as well as other
:func:`.complexity` indices. Note that there exist many more complexity indices that are
available in NeuroKit2 and that could be applied to HRV. The :func:`.hrv_nonlinear` function
only includes the most commonly used indices.
The **Poincaré plot** is a graphical representation of each NN interval plotted against its
preceding NN interval. The ellipse that emerges is a visual quantification of the correlation
between successive NN intervals.
Indices derived from the Poincaré plot analysis are:
* **SD1**: Standard deviation perpendicular to the line of identity. It is an index of
short-term RR interval fluctuations, i.e., beat-to-beat variability. It is equivalent
(although on another scale) to RMSSD, and therefore it is redundant to report correlation
with both.
* **SD2**: Standard deviation along the identity line. Index of long-term HRV changes.
* **SD1/SD2**: ratio of *SD1* to *SD2*. Describes the ratio of short term to long term
variations in HRV.
* **S**: Area of ellipse described by *SD1* and *SD2* (``pi * SD1 * SD2``). It is
proportional to *SD1SD2*.
* **CSI**: The Cardiac Sympathetic Index (Toichi, 1997) is a measure of cardiac sympathetic
function independent of vagal activity, calculated by dividing the longitudinal variability of
the Poincaré plot (``4*SD2``) by its transverse variability (``4*SD1``).
* **CVI**: The Cardiac Vagal Index (Toichi, 1997) is an index of cardiac parasympathetic
      function (vagal activity unaffected by sympathetic activity), and is equal to the
logarithm of the product of longitudinal (``4*SD2``) and transverse variability (``4*SD1``).
* **CSI_Modified**: The modified CSI (Jeppesen, 2014) obtained by dividing the square of the
longitudinal variability by its transverse variability.
Indices of **Heart Rate Asymmetry** (HRA), i.e., asymmetry of the Poincaré plot (Yan, 2017),
include:
* **GI**: Guzik's Index, defined as the distance of points above line of identity (LI) to LI
divided by the distance of all points in Poincaré plot to LI except those that are located on
LI.
* **SI**: Slope Index, defined as the phase angle of points above LI divided by the phase angle
of all points in Poincaré plot except those that are located on LI.
* **AI**: Area Index, defined as the cumulative area of the sectors corresponding to the points
that are located above LI divided by the cumulative area of sectors corresponding to all
points in the Poincaré plot except those that are located on LI.
* **PI**: Porta's Index, defined as the number of points below LI divided by the total number
of points in Poincaré plot except those that are located on LI.
* **SD1d** and **SD1a**: short-term variance of contributions of decelerations (prolongations
of RR intervals) and accelerations (shortenings of RR intervals), respectively (Piskorski,
2011)
    * **C1d** and **C1a**: the contributions of heart rate decelerations and accelerations to
short-term HRV, respectively (Piskorski, 2011).
* **SD2d** and **SD2a**: long-term variance of contributions of decelerations (prolongations of
RR intervals) and accelerations (shortenings of RR intervals), respectively (Piskorski, 2011).
* **C2d** and **C2a**: the contributions of heart rate decelerations and accelerations to
long-term HRV, respectively (Piskorski, 2011).
* **SDNNd** and **SDNNa**: total variance of contributions of decelerations (prolongations of
RR intervals) and accelerations (shortenings of RR intervals), respectively (Piskorski, 2011).
* **Cd** and **Ca**: the total contributions of heart rate decelerations and accelerations to
HRV.
Indices of **Heart Rate Fragmentation** (Costa, 2017) include:
* **PIP**: Percentage of inflection points of the RR intervals series.
* **IALS**: Inverse of the average length of the acceleration/deceleration segments.
* **PSS**: Percentage of short segments.
    * **PAS**: Percentage of NN intervals in alternation segments.
Indices of **Complexity** and **Fractal Physiology** include:
* **ApEn**: See :func:`.entropy_approximate`.
* **SampEn**: See :func:`.entropy_sample`.
* **ShanEn**: See :func:`.entropy_shannon`.
* **FuzzyEn**: See :func:`.entropy_fuzzy`.
* **MSE**: See :func:`.entropy_multiscale`.
* **CMSE**: See :func:`.entropy_multiscale`.
* **RCMSE**: See :func:`.entropy_multiscale`.
* **CD**: See :func:`.fractal_correlation`.
* **HFD**: See :func:`.fractal_higuchi` (with ``kmax`` set to ``"default"``).
* **KFD**: See :func:`.fractal_katz`.
    * **LZC**: See :func:`.complexity_lempelziv`.
* **DFA_alpha1**: The monofractal detrended fluctuation analysis of the HR signal,
corresponding to short-term correlations. See :func:`.fractal_dfa`.
* **DFA_alpha2**: The monofractal detrended fluctuation analysis of the HR signal,
corresponding to long-term correlations. See :func:`.fractal_dfa`.
* **MFDFA indices**: Indices related to the :func:`multifractal spectrum <.fractal_dfa()>`.
Other non-linear indices include those based on Recurrence Quantification Analysis (RQA), but
are not implemented yet (but soon).
.. tip::
We strongly recommend checking our open-access paper `Pham et al. (2021)
<https://doi.org/10.3390/s21123998>`_ on HRV indices, as well as `Lau et al. (2021)
<https://psyarxiv.com/f8k3x/>`_ on complexity, for more information.
Parameters
----------
peaks : dict
Samples at which cardiac extrema (i.e., R-peaks, systolic peaks) occur.
Can be a list of indices or the output(s) of other functions such as :func:`.ecg_peaks`,
:func:`.ppg_peaks`, :func:`.ecg_process` or :func:`.bio_process`.
sampling_rate : int, optional
Sampling rate (Hz) of the continuous cardiac signal in which the peaks occur. Should be at
least twice as high as the highest frequency in vhf. By default 1000.
show : bool, optional
If ``True``, will return a Poincaré plot, a scattergram, which plots each RR interval
against the next successive one. The ellipse centers around the average RR interval. By
default ``False``.
**kwargs
Other arguments to be passed into :func:`.fractal_dfa` and :func:`.fractal_correlation`.
Returns
-------
DataFrame
Contains non-linear HRV metrics.
See Also
--------
ecg_peaks, ppg_peaks, hrv_frequency, hrv_time, hrv_summary
Examples
--------
.. ipython:: python
import neurokit2 as nk
# Download data
data = nk.data("bio_resting_5min_100hz")
# Find peaks
peaks, info = nk.ecg_peaks(data["ECG"], sampling_rate=100)
# Compute HRV indices
@savefig p_hrv_nonlinear1.png scale=100%
hrv = nk.hrv_nonlinear(peaks, sampling_rate=100, show=True)
@suppress
plt.close()
.. ipython:: python
hrv
References
----------
* <NAME>., <NAME>., <NAME>., & <NAME>. (2021). Heart Rate Variability in
Psychology: A Review of HRV Indices and an Analysis Tutorial. Sensors, 21(12), 3998.
https://doi.org/10.3390/s21123998
* <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2017). Area asymmetry of heart
rate variability signal. Biomedical engineering online, 16(1), 112.
* <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>.\
(2017). Reminder: RMSSD and SD1 are identical heart rate variability metrics. Muscle & nerve,
56 (4), 674-678.
* <NAME>., & <NAME>. (2017). An overview of heart rate variability metrics and
norms. Frontiers in public health, 5, 258.
* <NAME>., <NAME>., & <NAME>. (2017). Heart rate fragmentation: a new
approach to the analysis of cardiac interbeat interval dynamics. Front. Physiol. 8, 255.
* <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2014).
Using Lorenz plot and Cardiac Sympathetic Index of heart rate variability for detecting
      seizures for patients with epilepsy. In 2014 36th Annual International Conference of the IEEE
Engineering in Medicine and Biology Society (pp. 4563-4566). IEEE.
* <NAME>., & <NAME>. (2011). Asymmetric properties of long-term and total heart rate
variability. Medical & biological engineering & computing, 49(11), 1289-1297.
* <NAME>. (2002). Assessing heart rate variability from real-world Holter reports. Cardiac
electrophysiology review, 6(3), 239-244.
* <NAME>. et al. (2001). Do Existing Measures of Poincaré Plot Geometry Reflect Nonlinear
Features of Heart Rate Variability?. IEEE Transactions on Biomedical Engineering, 48(11),
1342-1347.
* <NAME>., <NAME>., <NAME>., & <NAME>. (1997). A new method of assessing cardiac
autonomic function and its comparison with spectral analysis and coefficient of variation of
R-R interval. Journal of the autonomic nervous system, 62(1-2), 79-84.
* <NAME>., <NAME>., & <NAME>. (2002). Heart rate variability analysis using
correlation dimension and detrended fluctuation analysis. Itbm-Rbm, 23(6), 333-339.
"""
# Sanitize input
peaks = _hrv_sanitize_input(peaks)
if isinstance(peaks, tuple): # Detect actual sampling rate
peaks, sampling_rate = peaks[0], peaks[1]
# Compute R-R intervals (also referred to as NN) in milliseconds
rri, _ = _hrv_get_rri(peaks, sampling_rate=sampling_rate, interpolate=False)
# Initialize empty container for results
out = {}
# Poincaré features (SD1, SD2, etc.)
out = _hrv_nonlinear_poincare(rri, out)
# Heart Rate Fragmentation
out = _hrv_nonlinear_fragmentation(rri, out)
# Heart Rate Asymmetry
out = _hrv_nonlinear_poincare_hra(rri, out)
# DFA
out = _hrv_dfa(peaks, rri, out, **kwargs)
# Complexity
tolerance = 0.2 * np.std(rri, ddof=1)
out["ApEn"], _ = entropy_approximate(rri, delay=1, dimension=2, tolerance=tolerance)
out["SampEn"], _ = entropy_sample(rri, delay=1, dimension=2, tolerance=tolerance)
out["ShanEn"], _ = entropy_shannon(rri)
out["FuzzyEn"], _ = entropy_fuzzy(rri, delay=1, dimension=2, tolerance=tolerance)
out["MSEn"], _ = entropy_multiscale(rri, dimension=2, tolerance=tolerance, method="MSEn")
out["CMSEn"], _ = entropy_multiscale(rri, dimension=2, tolerance=tolerance, method="CMSEn")
out["RCMSEn"], _ = entropy_multiscale(rri, dimension=2, tolerance=tolerance, method="RCMSEn")
out["CD"], _ = fractal_correlation(rri, delay=1, dimension=2, **kwargs)
out["HFD"], _ = fractal_higuchi(rri, k_max=10, **kwargs)
out["KFD"], _ = fractal_katz(rri)
out["LZC"], _ = complexity_lempelziv(rri, **kwargs)
if show:
_hrv_nonlinear_show(rri, out)
out = pd.DataFrame.from_dict(out, orient="index").T.add_prefix("HRV_")
return out
# =============================================================================
# Get SD1 and SD2
# =============================================================================
def _hrv_nonlinear_poincare(rri, out):
"""Compute SD1 and SD2.
- Do existing measures of Poincare plot geometry reflect nonlinear features of heart rate
variability? - Brennan (2001)
"""
# HRV and hrvanalysis
rri_n = rri[:-1]
rri_plus = rri[1:]
x1 = (rri_n - rri_plus) / np.sqrt(2) # Eq.7
x2 = (rri_n + rri_plus) / np.sqrt(2)
sd1 = np.std(x1, ddof=1)
    sd2 = np.std(x2, ddof=1)
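    # Hedged sketch of the remaining Poincaré indices, following the
    # definitions given in the hrv_nonlinear docstring above; the exact key
    # names and ordering in the released NeuroKit2 source may differ.
    out["SD1"] = sd1
    out["SD2"] = sd2
    out["SD1SD2"] = sd1 / sd2
    out["S"] = np.pi * sd1 * sd2  # area of the Poincaré ellipse
    T = 4 * sd1  # transverse variability
    L = 4 * sd2  # longitudinal variability
    out["CSI"] = L / T
    out["CVI"] = np.log10(L * T)
    out["CSI_Modified"] = L ** 2 / T
    return out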
# coding: utf-8
# In[1]:
import numpy as np
from matplotlib import pyplot as plt
get_ipython().magic(u'matplotlib inline')
np.random.seed(10)
# In[2]:
import os
# mask visible GPUs
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import tensorflow as tf
# In[3]:
# Define our points
# easy constant, separable points
X1 = [[-2,4], [4,1]]
Y1 = [-1, -1]
X2 = [[1,6], [2,4], [6,2]]
Y2 = [1, 1, 1]
# Random separable
# mean = [1,1]
# cov = [[1,0],[0,1]]
# size = 50
# X1 = np.random.multivariate_normal(mean, cov, size)
# Y1 = np.ones([size]) * -1
# mean = [6,6]
# cov = [[1,0],[0,1]]
# size = 50
# X2 = np.random.multivariate_normal(mean, cov, size)
# Y2 = np.ones([size])
# Random XOR
# mean = [1,1]
# cov = [[1,0],[0,1]]
# size = 50
# X1 = np.random.multivariate_normal(mean, cov, size)
# mean = [5,5]
# X1 = np.concatenate([X1, np.random.multivariate_normal(mean, cov, size)], axis=0)
# Y1 = np.ones([size*2]) * -1
# mean = [1,5]
# cov = [[1,0],[0,1]]
# size = 50
# X2 = np.random.multivariate_normal(mean, cov, size)
# Y2 = np.ones([size])
# mean = [5,1]
# X2 = np.concatenate([X2, np.random.multivariate_normal(mean, cov, size)], axis=0)
# Y2 = np.ones([size*2]) * 1
# In[4]:
# Helper visualization function
def visualize(points1, points2, line=[0,0,0]):
for ii, sample in enumerate(points1):
plt.scatter(sample[0], sample[1], s=120, marker='_', c='g')
for ii, sample in enumerate(points2):
plt.scatter(sample[0], sample[1], s=120, marker='+', c='r')
w1, w2, b = line
    plt.plot([-5, 10], [-(b - 5 * w1) / (w2 + 0.01), -(b + 10 * w1) / (w2 + 0.01)])
plt.ylim((-5,10))
plt.xlim((-5,10))
# In[5]:
# Initialize our perceptron
w = [0.,0.]
b = 0.
visualize(X1, X2, [w[0], w[1], b])
plt.show()
# In[6]:
# set our learning rate
lr = 1
epochs = 20
# combine our points and cast everything into numpy arrays
X = np.concatenate([X1, X2], axis=0)
Y = np.concatenate([Y1, Y2], axis=0)
w = np.array(w)
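# In[7]:
# A minimal sketch of the classic perceptron update implied by the setup above
# (lr, epochs, X, Y, w, b); an illustrative completion, not necessarily the
# cell that originally followed in the notebook.
for epoch in range(epochs):
    for x_i, y_i in zip(X, Y):
        # update only on misclassified samples (non-positive signed margin)
        if y_i * (np.dot(w, x_i) + b) <= 0:
            w = w + lr * y_i * x_i
            b = b + lr * y_i
visualize(X1, X2, [w[0], w[1], b])
plt.show()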
import numpy as np
from highway_env.road.road import Road, RoadNetwork
from highway_env.vehicle.controller import ControlledVehicle
from highway_env.vehicle.kinematics import Vehicle
from highway_env.road.lane import LineType, StraightLane, SineLane, CircularLane, AbstractLane
from highway_env.road.regulation import RegulatedRoad
from highway_env.vehicle.objects import Obstacle
from highway_env.vehicle.behavior import CustomVehicle
from highway_env import utils
from highway_env.road.lane import CircularLane
from highway_env.utils import near_split
from gym.utils import seeding
import random
import copy
class Scenario:
def __init__(self, env, scenario_number=0):
self.env = env
self.env.default_config = copy.deepcopy(env.config)
self.road = None
self.controlled_vehicles = None
# self.road_types = ["intersection", "roundabout", "highway","twoway","uturn","road_merge","road_exit"]
self.road_types = self.env.config['scenario']['road_types']
# self.road_types = ["road_exit"]
self.complex = self.env.config['scenario']['complex']
self.simple = self.env.config['scenario']['simple']
self.road_types_idx = -1
# self.road_missions = ["merging","exit"]
if scenario_number != 0:
if scenario_number == 2:
self.env.config.update(self.default_config_merge())
if scenario_number == 3:
self.env.config.update(self.default_config_exit())
self.random_scenario = self.env.config['scenario']['random_scenario']
if self.random_scenario:
# idx = np.random.randint(0, len(self.road_types))
            self.road_types_idx = idx = self.env.episode % len(self.road_types)
self.road_type = self.road_types[idx]
self.env.config['scenario']['road_type'] = self.road_type
if self.road_type == "road_merge":
self.mission_type ="merging"
self.env.config['screen_width'] = 2900
self.env.config['screen_height'] = 300
elif self.road_type == "road_exit":
self.mission_type = "exit"
self.env.config['screen_width'] = 2900
self.env.config['screen_height'] = 300
elif self.road_type == "intersection":
self.env.config['screen_width'] =900
self.env.config['screen_height'] = 900
self.env.config['controlled_vehicle']['controlled_vehicle_speed'] = 15
self.mission_type = "none"
elif self.road_type == "roundabout":
self.env.config['screen_width'] = 900
self.env.config['screen_height'] = 900
self.mission_type = "none"
elif self.road_type == "uturn":
self.env.config['screen_width'] = 1000
self.env.config['screen_height'] = 500
self.mission_type = "none"
else:
self.env.config['screen_width'] = 2900
self.env.config['screen_height'] = 300
self.mission_type = "none"
self.env.config['scenario']['mission_type'] = self.mission_type
else:
self.road_type = self.env.config['scenario']['road_type']
random_offset = copy.deepcopy(self.env.config['scenario']['random_offset'])
delta_before, delta_converging, delta_merge = (0, 0, 0)
if self.env.config['scenario']['randomize_before']:
delta_before = np.random.randint(low=random_offset[0], high=random_offset[1])
if self.env.config['scenario']['randomize_converging']:
delta_converging = np.random.randint(low=random_offset[0], high=random_offset[1])
if self.env.config['scenario']['randomize_merge']:
delta_merge = np.random.randint(low=random_offset[0], high=random_offset[1])
self.before_merging = self.env.config['scenario']['before_merging'] + delta_before
self.converging_merging = self.env.config['scenario']['converging_merging'] + delta_converging
self.during_merging = self.env.config['scenario']['during_merging'] + delta_merge
self.after_merging = self.env.config['scenario']['after_merging']
self.randomize_vehicles = self.env.config['scenario']['randomize_vehicles']
self.random_offset_vehicles = copy.deepcopy(self.env.config['scenario']['random_offset_vehicles'])
self.randomize_speed = self.env.config['scenario']['randomize_speed']
self.randomize_speed_offset = copy.deepcopy(self.env.config['scenario']['randomize_speed_offset'])
self.controlled_vehicles_count = self.env.config['controlled_vehicles']
self.random_controlled_vehicle = self.env.config['scenario']['random_controlled_vehicle']
# if self.env.config['scenario']['randomize_vehicles']:
# self.cruising_vehicles_count_rightmost_lane = self.env.config['vehicles_in_rightmost_lane'] - 1
# self.cruising_vehicles_count_other_lanes = self.env.config['vehicles_in_other_lanes']
# else:
# self.cruising_vehicles_count_rightmost_lane = self.env.config['vehicles_count'] - 1
self.cruising_vehicles_count = self.env.config['vehicles_count'] - 1
self.cruising_vehicles_front_count = self.env.config['cruising_vehicles_front_count']
self.cruising_vehicles_front = self.env.config['cruising_vehicles_front']
self.cruising_vehicles_front_random_everywhere = self.env.config['cruising_vehicles_front_random_everywhere']
self.cruising_vehicles_front_initial_position = self.env.config['cruising_vehicles_front_initial_position']
self.total_number_of_vehicles = self.env.config['scenario']['total_number_of_vehicles']
self.prob_of_controlled_vehicle = self.env.config['scenario']['prob_of_controlled_vehicle']
self.controlled_baseline_vehicle = self.env.config['controlled_baseline_vehicle']
# self.np_random, seed = seeding.np_random(seed)
if self.env.config['scenario']['random_lane_count']:
lane_interval = copy.deepcopy(self.env.config['scenario']['lane_count_interval'])
self.lanes_count = np.random.randint(low=lane_interval[0], high=lane_interval[1])
else:
self.lanes_count = self.env.config['lanes_count']
# self.cruising_vehicle = copy.deepcopy({"vehicles_type": self.env.config['cruising_vehicle']["vehicles_type"],
# "speed": self.env.config['cruising_vehicle']["speed"],
# "enable_lane_change": self.env.config['cruising_vehicle']['enable_lane_change'],
# 'length': self.env.config['cruising_vehicle']['length']
# })
self.cruising_vehicle = copy.deepcopy(self.env.config['cruising_vehicle'])
self.merging_vehicle = copy.deepcopy(self.env.config['merging_vehicle'])
self.baseline_vehicle = copy.deepcopy(self.env.config['baseline_vehicle'])
self.controlled_vehicle = copy.deepcopy(self.env.config['controlled_vehicle'])
# self.controlled_vehicle_speed = self.env.config['scenario']['controlled_vehicle_speed']
self.controlled_vehicle_speed = self.controlled_vehicle['controlled_vehicle_speed']
# self.merging_vehicle = copy.deepcopy({'id': self.env.config['merging_vehicle']['id'],
# 'speed': self.env.config['merging_vehicle']['speed'],
# 'initial_position': self.env.config['merging_vehicle']['initial_position'],
# 'random_offset_merging': self.env.config['merging_vehicle']['random_offset_merging'],
# 'controlled_vehicle': self.env.config['merging_vehicle']['controlled_vehicle'],
# 'vehicles_type': self.env.config['merging_vehicle']["vehicles_type"],
# 'set_route': self.env.config['merging_vehicle']['set_route'],
# 'randomize': self.env.config['merging_vehicle']['randomize'],
# 'randomize_speed_merging': self.env.config['merging_vehicle']['randomize_speed_merging'],
# 'min_speed': self.env.config['merging_vehicle']['min_speed'],
# 'max_speed': self.env.config['merging_vehicle'][ 'max_speed'],
# })
# #
self.exit_vehicle = copy.deepcopy({'id': self.env.config['exit_vehicle']['id'],
'speed': self.env.config['exit_vehicle']['speed'],
'initial_position': self.env.config['exit_vehicle']['initial_position'],
'controlled_vehicle': self.env.config['exit_vehicle']['controlled_vehicle'],
'vehicles_type': self.env.config['exit_vehicle']["vehicles_type"],
'set_route': self.env.config['exit_vehicle']['set_route'],
'random_offset_exit': self.env.config['exit_vehicle']['random_offset_exit'],
'randomize': self.env.config['exit_vehicle']['randomize']
})
self.other_vehicles_type = self.env.config["other_vehicles_type"]
self.record_history = self.env.config["show_trajectories"]
self.ego_spacing = self.env.config["ego_spacing"]
self.initial_lane_id = self.env.config["initial_lane_id"]
self.vehicles_density = self.env.config["vehicles_density"]
self.scenario_config = copy.deepcopy(self.env.config['scenario'])
# self.before_exit = self.env.config['scenario']['before_exit']
# self.converging_exit = self.env.config['scenario']['converging_exit']
# self.taking_exit = self.env.config['scenario']['taking_exit']
# self.during_exit = self.env.config['scenario']['during_exit']
# self.after_exit = self.env.config['scenario']['after_merging']
self.exit_humans = self.env.config['scenario']['exit_humans']
self.exit_controlled = self.env.config['scenario']['exit_controlled']
self.exit_length = self.env.config['scenario']['exit_length']
self.after_exit = self.env.config['scenario']['after_exit']
self.simulation_frequency = self.env.config["simulation_frequency"]
self.np_random = self.env.np_random
self._create_road(self.road_type)
self._create_vehicles(self.road_type)
def create_random(self,cruising_vehicle_class, from_options =None, speed: float = None, lane_id = None, spacing: float = 1, initial_possition = None, enable_lane_change = True, vehicle_id = 0 , right_lane = None) \
-> "Vehicle":
"""
Create a random vehicle on the road.
The lane and /or speed are chosen randomly, while longitudinal position is chosen behind the last
vehicle in the road with density based on the number of lanes.
:param road: the road where the vehicle is driving
:param speed: initial speed in [m/s]. If None, will be chosen randomly
:param lane_id: id of the lane to spawn in
:param spacing: ratio of spacing to the front vehicle, 1 being the default
:return: A vehicle with random position and/or speed
"""
if speed is None:
speed = self.road.np_random.uniform(Vehicle.DEFAULT_SPEEDS[0], Vehicle.DEFAULT_SPEEDS[1])
default_spacing = 1.5 * speed
if from_options is None:
_from = self.road.np_random.choice(list(self.road.network.graph.keys()))
else:
_from = self.road.np_random.choice(from_options)
_to = self.road.np_random.choice(list(self.road.network.graph[_from].keys()))
if _from == "a" or _from == "b":
lanes_count = len(self.road.network.graph[_from][_to]) -1
else:
lanes_count = len(self.road.network.graph[_from][_to])
_id = lane_id if lane_id is not None else self.road.np_random.choice(lanes_count)
# if right_lane:
# _id = min(_id, right_lane)
lane = self.road.network.get_lane((_from, _to, _id))
offset = spacing * default_spacing * np.exp(-5 / 30 * len(self.road.network.graph[_from][_to]))
if initial_possition:
x0 = initial_possition
else:
# x0 = np.max([lane.local_coordinates(v.position)[0] for v in self.road.vehicles]) \
# if len(self.road.vehicles) else 3 * offset
distances = []
for v in self.road.vehicles:
test = v.lane_index[2]
if v.lane_index[2] <= lanes_count - 1:
distances.append(lane.local_coordinates(v.position)[0])
x0 = np.max([distances]) if distances else 3 * offset
x0 += offset * self.road.np_random.uniform(0.9, 1.1)
x0 = max(0, x0)
vehicle = cruising_vehicle_class(self.road,
lane.position(x0, 0),
speed=speed, enable_lane_change=enable_lane_change,
config=self.env.config, v_type='cruising_vehicle', id=vehicle_id)
# v = cls(road, lane.position(x0, 0), lane.heading_at(x0), speed)
return vehicle
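    # Hypothetical usage of create_random (variable names assumed from this
    # file), e.g. to drop one extra cruising vehicle somewhere on the network:
    #   vehicle = scenario.create_random(cruising_vehicle_class,
    #                                    from_options=["a"], speed=25.0,
    #                                    vehicle_id=99)
    #   scenario.road.vehicles.append(vehicle)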
def default_config_merge(self) -> dict:
"""
:return: a configuration dict
"""
return {
'duration': 15, # 40
'scenario': {
'scenario_number': 2,
'road_type': "road_merge",
# 1-highway, 2-road_closed , 3-road_merge , 4-road_exit, Road types should match with is vehicle_type 1,2,3
# for merging road
'lane_count_interval': [1, 4], # random number of lane range
'random_offset': [-5, 5], # offset values for before, converging, merge -+
'before_merging': 100,
'randomize_before': False, # random before road size
# distance before converging, converging is the start of the lane with slope
'converging_merging': 200,
'randomize_converging': False, # random converging road size
# distance from converging to merge, merge start when the slope lane ends
'during_merging': 110, # distance of the merging lane, paralles to highway
'randomize_merge': False, # random merge road size
'random_lane_count': False, # random number of lane
'after_merging': 1100, # distance of the highway after that
# for exit road
'before_exit': 100,
'converging_exit': 50,
'taking_exit': 80,
'during_exit': 100,
'after_exit': 1100,
'randomize_vehicles': True, # if true vehicles will be randomize based on random_offset_vehicles values
'random_offset_vehicles': [-5, 5],
# 'vehicles_in_rightmost_lane':10, # will overide_vehicle_count if randomize_vehicles = True
# 'vehicles_in_other_lanes':10,
'random_controlled_vehicle': False,
# will chose controlled_vehicle based on prob_of_controlled_vehicle, override controlled_vehicle
'total_number_of_vehicles': 13,
# will be the total number of vehicles in the scenario, AV or cruising will be chosen based on the prob, overide vehicle_count
'prob_of_controlled_vehicle': 0.5,
'mission_type': 'merging',
# if shuffle_controlled_vehicle , from total_number_of_vehicles with probability prob_of_controlled_vehicle AV willl be chosen
},
# 'cruising_vehicle': {
# 'acc_max': 6, # """Maximum acceleration."""
# 'comfort_acc_max': 4, # """Desired maximum acceleration."""
# 'comfort_acc_min': -12, # """Desired maximum deceleration."""
# 'distance_wanted': 0.51, # """Desired jam distance to the front vehicle."""
# 'time_wanted': 0.5, # """Desired time gap to the front vehicle."""
# 'delta': 4, # """Exponent of the velocity term."""
# 'speed': 25, # Vehicle speed
# 'enable_lane_change': False, # allow lane change
#
# 'vehicles_type': "highway_env.vehicle.behavior.CustomVehicle",
# # chose different vehicle types from :
# # "highway_env.vehicle.behavior.CustomVehicle" ,"highway_env.vehicle.behavior.AggressiveVehicle","highway_env.vehicle.behavior.DefensiveVehicle", "highway_env.vehicle.behavior.LinearVehicle" "highway_env.vehicle.behavior.IDMVehicle"
# # if CustomVehicle is chosen it will load the previous configurations, other vehicles types has their own predefiened configurations.
# 'length': 5.0, # Vehicle length [m]
# 'width': 2.0, # Vehicle width [m]
# 'max_speed': 40 # Maximum reachable speed [m/s]
# },
'merging_vehicle': {
'acc_max': 6, # """Maximum acceleration.""" 6
'comfort_acc_max': 3, # """Desired maximum acceleration.""" 3
'comfort_acc_min': -5, # """Desired maximum deceleration.""" -5
'distance_wanted': 0.5, # """Desired jam distance to the front vehicle.""" 5
'time_wanted': 0.5, # """Desired time gap to the front vehicle.""" 1.5
'delta': 4, # """Exponent of the velocity term.""" 4
'speed': 25,
'initial_position': [78, 0],
'enable_lane_change': False,
'controlled_vehicle': False, # chose if merging vehicle is AV or human
'vehicles_type': "highway_env.vehicle.behavior.CustomVehicle",
'set_route': False, # predefine the route
# "highway_env.vehicle.behavior.CustomVehicle" ,"highway_env.vehicle.behavior.AggressiveVehicle","highway_env.vehicle.behavior.DefensiveVehicle", "highway_env.vehicle.behavior.LinearVehicle" "highway_env.vehicle.behavior.IDMVehicle"
# if CustomVehicle is chosen it will load the previous configurations, other vehicles types has their own predefiened configurations.
'randomize': True,
'id': -1, # id for the merging vehicle
'length': 5.0, # Vehicle length [m]
'width': 2.0, # Vehicle width [m]
'max_speed': 40 # Maximum reachable speed [m/s]
},
"reward": {
"coop_reward_type": "multi_agent_tuple",
"reward_type": "merging_reward", # merging_reward
"normalize_reward": True,
"reward_speed_range": [20, 40],
"collision_reward": -2, # -1
"on_desired_lane_reward": 0.3,
"high_speed_reward": 0.6, # 0.4
"lane_change_reward": -0.2,
"target_lane": 1,
"distance_reward": -0.1,
"distance_merged_vehicle_reward": 0,
"distance_reward_type": "min",
"successful_merging_reward": 5,
"continuous_mission_reward": True,
"cooperative_flag": True,
"sympathy_flag": True,
"cooperative_reward": 0.9,
# True : after merging will keep receiving the reward, False: just received the reward once
}
}
def default_config_exit(self) -> dict:
"""
:return: a configuration dict
"""
return {
'scenario': {
'scenario_number': 3,
'road_type': "road_exit",
# 1-highway, 2-road_closed , 3-road_merge , 4-road_exit, 5-test Road types should match with is vehicle_type 1,2,3
# for merging road
'lane_count_interval': [1, 4], # random number of lane range
'random_offset': [-5, 5], # offset values for before, converging, merge -+
'before_merging': 100,
'randomize_before': False, # random before road size
# distance before converging, converging is the start of the lane with slope
'converging_merging': 200,
'randomize_converging': False, # random converging road size
# distance from converging to merge, merge start when the slope lane ends
'during_merging': 110, # distance of the merging lane, paralles to highway
'randomize_merge': False, # random merge road size
'random_lane_count': False, # random number of lane
'after_merging': 1100, # distance of the highway after that
# for exit road
'before_exit': 100,
'converging_exit': 50,
'taking_exit': 40,
'during_exit': 100,
'after_exit': 1100,
'randomize_vehicles': True, # if true vehicles will be randomize based on random_offset_vehicles values
'random_offset_vehicles': [-5, 5],
'random_controlled_vehicle': False,
# will chose controlled_vehicle based on prob_of_controlled_vehicle, override controlled_vehicle
'total_number_of_vehicles': 13,
# will be the total number of vehicles in the scenario, AV or cruising will be chosen based on the prob, overide vehicle_count
'prob_of_controlled_vehicle': 0.5,
'mission_type': 'exit',
# if shuffle_controlled_vehicle , from total_number_of_vehicles with probability prob_of_controlled_vehicle AV willl be chosen
},
# 'cruising_vehicle': {
# 'acc_max': 6, # """Maximum acceleration."""
# 'comfort_acc_max': 4, # """Desired maximum acceleration."""
# 'comfort_acc_min': -12, # """Desired maximum deceleration."""
# 'distance_wanted': 0.51, # """Desired jam distance to the front vehicle."""
# 'time_wanted': 0.5, # """Desired time gap to the front vehicle."""
# 'delta': 4, # """Exponent of the velocity term."""
# 'speed': 25, # Vehicle speed
# 'enable_lane_change': False, # allow lane change
#
# 'vehicles_type': "highway_env.vehicle.behavior.CustomVehicle",
# # chose different vehicle types from :
# # "highway_env.vehicle.behavior.CustomVehicle" ,"highway_env.vehicle.behavior.AggressiveVehicle","highway_env.vehicle.behavior.DefensiveVehicle", "highway_env.vehicle.behavior.LinearVehicle" "highway_env.vehicle.behavior.IDMVehicle"
# # if CustomVehicle is chosen it will load the previous configurations, other vehicles types has their own predefiened configurations.
#
# 'length': 5.0, # Vehicle length [m]
# 'width': 2.0, # Vehicle width [m]
# 'max_speed': 40 # Maximum reachable speed [m/s]
# },
'exit_vehicle': {
'acc_max': 6, # """Maximum acceleration.""" 6
'comfort_acc_max': 3, # """Desired maximum acceleration.""" 3
'comfort_acc_min': -5, # """Desired maximum deceleration.""" -5
'distance_wanted': 0.5, # """Desired jam distance to the front vehicle.""" 5
'time_wanted': 0.5, # """Desired time gap to the front vehicle.""" 1.5
'delta': 4, # """Exponent of the velocity term.""" 4
'speed': 25,
'initial_position': [78, 0],
'enable_lane_change': True,
'controlled_vehicle': False, # chose if merging vehicle is AV or human
'vehicles_type': "highway_env.vehicle.behavior.CustomVehicle",
'set_route': True, # predefine the route
# "highway_env.vehicle.behavior.CustomVehicle" ,"highway_env.vehicle.behavior.AggressiveVehicle","highway_env.vehicle.behavior.DefensiveVehicle", "highway_env.vehicle.behavior.LinearVehicle" "highway_env.vehicle.behavior.IDMVehicle"
# if CustomVehicle is chosen it will load the previous configurations, other vehicles types has their own predefiened configurations.
'randomize': True,
'id': -1, # id for the merging vehicle
'length': 5.0, # Vehicle length [m]
'width': 2.0, # Vehicle width [m]
'max_speed': 40 # Maximum reachable speed [m/s]
},
"reward": {
"coop_reward_type": "multi_agent_tuple",
"reward_type": "exit_reward", # merging_reward
"normalize_reward": True,
"reward_speed_range": [20, 40],
"collision_reward": -2, # -1
"on_desired_lane_reward": 0.3,
"high_speed_reward": 0.6, # 0.4
"lane_change_reward": -0.2,
"target_lane": 1,
"distance_reward": -0.1,
"distance_merged_vehicle_reward": 0,
"distance_reward_type": "min",
"successful_merging_reward": 5,
"continuous_mission_reward": True,
"cooperative_flag": True,
"sympathy_flag": True,
"cooperative_reward": 0.9,
# True : after merging will keep receiving the reward, False: just received the reward once
}
}
def _create_road(self, road_type) -> None:
if road_type == "highway":
self._road_highway()
elif road_type == "road_merge":
self._road_merge()
elif road_type == "road_exit":
self._road_exit()
elif road_type == "intersection":
self._road_intersection()
elif road_type == "roundabout":
self._road_roundabout()
elif road_type == "twoway":
self._road_twoway()
elif road_type == "uturn":
self._road_uturn()
elif road_type == "road_closed":
# TODO , fix arguments
self._road_closed(end=self.before_merging + self.converging_merging, after=self.after_merging)
elif road_type == "test":
self._road_test()
def _create_vehicles(self, road_type):
if road_type == "road_merge":
if self.random_controlled_vehicle:
self._vehicles_merge_to_highway_prob()
else:
self._vehicles_merge_to_highway()
elif road_type == "road_exit":
self._vehicles_exit_highway()
elif road_type == "intersection":
self._vehicles_intersection()
elif road_type == "roundabout":
self._vehicles_roundabout()
elif road_type == "road_closed":
# TODO , fix arguments
self._vehicles_road_closed(controlled_vehicles=self.controlled_vehicles,
cruising_vehicles_count=self.cruising_vehicles_count)
elif road_type == "highway":
self._vehicles_highway()
elif road_type == "twoway":
self._vehicles_twoway()
elif road_type == "uturn":
self._vehicles_uturn()
elif road_type == "test":
self._vehicle_road_test()
def _road_merge(self):
"""Create a road composed of straight adjacent lanes."""
net = RoadNetwork()
# Highway lanes
ends = [self.before_merging, self.converging_merging, self.during_merging,
self.after_merging] # Before, converging, merge, after
c, s, n = LineType.CONTINUOUS_LINE, LineType.STRIPED, LineType.NONE
for lane in range(self.lanes_count):
line_types = [LineType.CONTINUOUS_LINE if lane == 0 else LineType.STRIPED,
LineType.CONTINUOUS_LINE if lane == self.lanes_count - 1 else LineType.NONE]
net.add_lane("a", "b", StraightLane([0, StraightLane.DEFAULT_WIDTH * (lane + 1)],
[sum(ends[:2]), StraightLane.DEFAULT_WIDTH * (lane + 1)],
line_types=line_types))
net.add_lane("b", "c",
StraightLane([sum(ends[:2]), StraightLane.DEFAULT_WIDTH * (lane + 1)],
[sum(ends[:3]), StraightLane.DEFAULT_WIDTH * (lane + 1)], line_types=line_types))
net.add_lane("c", "d", StraightLane([sum(ends[:3]), StraightLane.DEFAULT_WIDTH * (lane + 1)],
[sum(ends), StraightLane.DEFAULT_WIDTH * (lane + 1)],
line_types=line_types))
# Merging lane
amplitude = 3.25
ljk = StraightLane([0, 6.5 + 4 + self.lanes_count * 4], [ends[0], 6.5 + 4 + self.lanes_count * 4],
line_types=[c, c],
forbidden=True)
lkb = SineLane(ljk.position(ends[0], -amplitude), ljk.position(sum(ends[:2]), -amplitude),
amplitude, 2 * np.pi / (2 * ends[1]), np.pi / 2, line_types=[c, c], forbidden=True)
lbc = StraightLane(lkb.position(ends[1], 0), lkb.position(ends[1], 0) + [ends[2], 0],
line_types=[n, c], forbidden=True)
net.add_lane("j", "k", ljk)
net.add_lane("k", "b", lkb)
net.add_lane("b", "c", lbc)
road = Road(network=net, np_random=self.env.np_random, record_history=self.record_history)
road.objects.append(Obstacle(road, lbc.position(ends[2], 0)))
self.road = road
def _road_exit1(self):
"""Create a road composed of straight adjacent lanes."""
net = RoadNetwork()
# Highway lanes
ends = [self.before_exit + self.converging_exit, self.taking_exit,
self.after_exit] # Before, converging, merge, after
c, s, n = LineType.CONTINUOUS_LINE, LineType.STRIPED, LineType.NONE
for lane in range(self.lanes_count):
line_types = [LineType.CONTINUOUS_LINE if lane == 0 else LineType.STRIPED,
LineType.CONTINUOUS_LINE if lane == self.lanes_count - 1 else LineType.NONE]
net.add_lane("a", "b", StraightLane([0, StraightLane.DEFAULT_WIDTH * (lane + 1)],
[sum(ends[:1]), StraightLane.DEFAULT_WIDTH * (lane + 1)],
line_types=line_types))
net.add_lane("b", "c",
StraightLane([sum(ends[:1]), StraightLane.DEFAULT_WIDTH * (lane + 1)],
[sum(ends[:2]), StraightLane.DEFAULT_WIDTH * (lane + 1)], line_types=line_types))
net.add_lane("c", "d", StraightLane([sum(ends[:2]), StraightLane.DEFAULT_WIDTH * (lane + 1)],
[sum(ends), StraightLane.DEFAULT_WIDTH * (lane + 1)],
line_types=line_types))
# Exit lane
amplitude = 3.25 / 4
lbp = StraightLane([self.before_exit + self.converging_exit, 4 + self.lanes_count * 4],
[self.before_exit + self.converging_exit + self.taking_exit, 4 + self.lanes_count * 4],
line_types=[n, c], forbidden=True)
# ljk = StraightLane([0, 6.5 + 4 +self.lanes_count*4], [ends[0], 6.5 + 4 + self.lanes_count*4 ], line_types=[c, c],
# forbidden=True)
lpk = SineLane(lbp.position(self.taking_exit, amplitude),
lbp.position(self.taking_exit + self.during_exit, amplitude),
-amplitude, 2 * np.pi / (2 * ends[1]), np.pi / 2, line_types=[c, c], forbidden=True)
lkj = StraightLane(lpk.position(self.during_exit, 0), lpk.position(self.during_exit + self.after_exit, 0),
line_types=[c, c], forbidden=True)
net.add_lane("b", "p", lbp)
net.add_lane("p", "k", lpk)
net.add_lane("k", "j", lkj)
road = Road(network=net, np_random=self.env.np_random, record_history=self.record_history)
# road.objects.append(Obstacle(road, lbp.position(ends[2], 0)))
self.road = road
def _road_exit(self):
# road_length = 1000, exit_humans = 400, exit_length = 100
exit_position = self.exit_humans + self.exit_controlled
exit_length = self.exit_length
after_exit = self.after_exit
net = RoadNetwork.straight_road_networkv2(self.lanes_count, start=0,
length=exit_position, nodes_str=("0", "1"))
net = RoadNetwork.straight_road_networkv2(self.lanes_count+ 1, start=exit_position,
length=exit_length, nodes_str=("1", "2"), net=net)
net = RoadNetwork.straight_road_networkv2(self.lanes_count, start=exit_position + exit_length,
length=after_exit,
nodes_str=("2", "3"), net=net)
for _from in net.graph:
for _to in net.graph[_from]:
for _id in range(len(net.graph[_from][_to])):
net.get_lane((_from, _to, _id)).speed_limit = 26 - 3.4 * _id
exit_position = np.array([exit_position + exit_length, self.lanes_count * CircularLane.DEFAULT_WIDTH])
radius = 150
exit_center = exit_position + np.array([0, radius])
lane = CircularLane(center=exit_center,
radius=radius,
start_phase=3 * np.pi / 2,
end_phase=2 * np.pi,
forbidden=True)
net.add_lane("2", "exit", lane)
self.road = Road(network=net,
np_random=self.env.np_random)
def _road_closed(self, end=200, after=1000):
"""Create a road composed of straight adjacent lanes."""
net = RoadNetwork()
last_lane = 0
# Highway lanes
c, s, n = LineType.CONTINUOUS_LINE, LineType.STRIPED, LineType.NONE
y = [last_lane + StraightLane.DEFAULT_WIDTH, last_lane + 2 * StraightLane.DEFAULT_WIDTH]
line_type = [[c, s], [n, c]]
line_type_merge = [[c, s], [n, s]]
new_lane = StraightLane([0, last_lane], [end, last_lane], line_types=[c, n], forbidden=True)
net.add_lane("a", "b", new_lane)
        for i in range(self.lanes_count):
net.add_lane("a", "b", StraightLane([0, y[i]], [end, y[i]], line_types=line_type[i]))
net.add_lane("b", "c",
StraightLane([end, y[i]], [after, y[i]], line_types=line_type_merge[i]))
road = Road(network=net, np_random=self.env.np_random, record_history=self.record_history)
pos = new_lane.position(end, 0)
road.objects.append(Obstacle(road, pos))
self.road = road
def _road_highway(self) -> None:
"""Create a road composed of straight adjacent lanes."""
self.road = Road(network=RoadNetwork.straight_road_network(self.lanes_count),
np_random=self.env.np_random, record_history=self.record_history)
def _vehicles_highway(self) -> None:
"""Create some new random vehicles of a given type, and add them on the road."""
self.controlled_vehicles = []
back = False
basic = False
if back:
for _ in range(self.controlled_vehicles_count):
vehicle = self.env.action_type.vehicle_class.create_random(self.road,
speed=25,
spacing=self.ego_spacing)
self.controlled_vehicles.append(vehicle)
self.road.vehicles.append(vehicle)
# vehicles_type = cruising_vehicle = utils.class_from_path(self.cruising_vehicle["vehicles_type"])
cruising_vehicle = utils.class_from_path(self.cruising_vehicle["vehicles_type"])
vehicles_type = utils.class_from_path(self.other_vehicles_type)
for _ in range(self.cruising_vehicles_count):
self.road.vehicles.append(
vehicles_type.create_random(self.road, spacing=1 / self.vehicles_density))
elif basic:
other_vehicles_type = utils.class_from_path(self.other_vehicles_type)
other_per_controlled = near_split(self.cruising_vehicles_count,
num_bins=self.controlled_vehicles_count)
self.controlled_vehicles = []
for others in other_per_controlled:
controlled_vehicle = self.env.action_type.vehicle_class.create_random(
self.road,
speed=25,
spacing=self.ego_spacing
)
self.controlled_vehicles.append(controlled_vehicle)
self.road.vehicles.append(controlled_vehicle)
for _ in range(others):
self.road.vehicles.append(
other_vehicles_type.create_random(self.road, spacing=1 / self.vehicles_density)
)
else:
vehicle_id =1
cruising_vehicle = utils.class_from_path(self.cruising_vehicle["vehicles_type"])
other_per_controlled = near_split(self.cruising_vehicles_count,
num_bins=self.controlled_vehicles_count)
self.controlled_vehicles = []
speed_controlled = self.controlled_vehicle_speed
controlled_vehicle_id = self.cruising_vehicles_count +1
for others in other_per_controlled:
controlled_vehicle = self.env.action_type.vehicle_class.create_random(
self.road,
speed=speed_controlled,
spacing=self.ego_spacing, id=controlled_vehicle_id
)
controlled_vehicle_id+=1
for _ in range(others):
self.road.vehicles.append(
cruising_vehicle.create_random_custom(self.road, spacing=1 / self.vehicles_density,config=self.env.config, v_type='cruising_vehicle', id=vehicle_id)
)
vehicle_id += 1
self.controlled_vehicles.append(controlled_vehicle)
self.road.vehicles.append(controlled_vehicle)
for v in self.road.vehicles: # Prevent early collisions
if v is not controlled_vehicle and np.linalg.norm(v.position - controlled_vehicle.position) < 25:
self.road.vehicles.remove(v)
def _vehicles_road_closed(self, controlled_vehicles=4, cruising_vehicles_count=10) -> None:
"""Create some new random vehicles of a given type, and add them on the road."""
self.controlled_vehicles = []
road = self.road
vehicle_space = self.converging_merging / controlled_vehicles
pos = self.before_merging
for _ in range(controlled_vehicles):
vehicle = self.env.action_type.vehicle_class(road,
road.network.get_lane(("a", "b", 1)).position(pos, 0),
speed=30)
pos += vehicle_space
self.controlled_vehicles.append(vehicle)
road.vehicles.append(vehicle)
other_vehicles_type = utils.class_from_path(self.other_vehicles_type)
        # spawn vehicles in lane and position
road.vehicles.append(other_vehicles_type(road, road.network.get_lane(("a", "b", 0)).position(80, 0), speed=29))
# road.vehicles.append(other_vehicles_type(road, road.network.get_lane(("a", "b", 1)).position(30, 0), speed=31))
# road.vehicles.append(other_vehicles_type(road, road.network.get_lane(("a", "b", 0)).position(5, 0), speed=31.5))
# road.vehicles.append(other_vehicles_type(road, road.network.get_lane(("a", "b", 1)).position(90, 0), speed=29))
pos = 0
vehicle_space = self.before_merging / cruising_vehicles_count
for i in range(cruising_vehicles_count):
            # spawn vehicles in lane and position
road.vehicles.append(
CustomVehicle(road, road.network.get_lane(("a", "b", 1)).position(pos, 0), config=self.env.config,
speed=29,
enable_lane_change=False, id=i + 1))
# road.vehicles.append(other_vehicles_type(road, road.network.get_lane(("a", "b", 1)).position(30, 0), speed=31))
pos += vehicle_space
self.road = road
def _vehicles_merge_to_highway(self) -> None:
"""Create some new random vehicles of a given type, and add them on the road."""
self.controlled_vehicles = []
road = self.road
right_lane = len(self.road.network.graph['a']['b']) - 1
vehicle_id = 1
vehicle_position = 0
vehicle_space = self.before_merging / self.cruising_vehicles_count
if vehicle_space <= (abs(self.random_offset_vehicles[0]) + self.cruising_vehicle['length']):
            print(" warning, reduce the number of vehicles or the offset range")
            exit()
# TODO , define default for this case
# TODO , consider speed also for positioning
cruising_vehicle_class = utils.class_from_path(self.cruising_vehicle["vehicles_type"])
speed = self.cruising_vehicle["speed"]
enable_lane_change = self.cruising_vehicle["enable_lane_change"]
for i in range(self.cruising_vehicles_count-1):
if self.randomize_vehicles:
random_offset = self.random_offset_vehicles
delta = np.random.randint(low=random_offset[0], high=random_offset[1])
vehicle_position += delta
vehicle_position = max(0, vehicle_position)
# vehicle_position = min(vehicle_position, self.before)
if self.randomize_speed:
# speed = road.np_random.uniform(Vehicle.DEFAULT_SPEEDS[0], Vehicle.DEFAULT_SPEEDS[1])
random_offset = self.randomize_speed_offset
delta = np.random.randint(low=random_offset[0], high=random_offset[1])
speed += delta
vehicle = cruising_vehicle_class(road,
road.network.get_lane(("a", "b", right_lane)).position(vehicle_position,
0),
speed=speed, enable_lane_change=enable_lane_change,
config=self.env.config, v_type='cruising_vehicle', id=vehicle_id)
vehicle_position += vehicle_space
road.vehicles.append(vehicle)
vehicle_id += 1
# controlled vehicles
vehicle_space = self.converging_merging / self.controlled_vehicles_count
vehicle_position = max(vehicle_position + self.cruising_vehicle['length'], self.before_merging + self.random_offset_vehicles[1])
baseline_vehicle_class = utils.class_from_path(self.baseline_vehicle["vehicles_type"])
if self.controlled_baseline_vehicle:
speed = self.baseline_vehicle["speed"]
else:
speed = self.controlled_vehicle_speed
enable_lane_change = self.baseline_vehicle["enable_lane_change"]
if vehicle_space <= (abs(self.random_offset_vehicles[0]) + self.cruising_vehicle['length']):
            print(" warning, reduce the number of vehicles or the offset range")
            exit()
# TODO , define default for this case
# TODO , consider speed also for positioning
# count = 0
# TODO fix it
multi_agent_setting = True
controlled_vehicle_id = self.cruising_vehicles_count + 1
for _ in range(self.controlled_vehicles_count):
if self.randomize_vehicles:
random_offset = self.random_offset_vehicles
delta = np.random.randint(low=random_offset[0], high=random_offset[1])
vehicle_position += delta
vehicle_position = max(0, vehicle_position)
if self.randomize_speed:
# speed = road.np_random.uniform(Vehicle.DEFAULT_SPEEDS[0], Vehicle.DEFAULT_SPEEDS[1])
random_offset = self.randomize_speed_offset
delta = np.random.randint(low=random_offset[0], high=random_offset[1])
speed += delta
if self.controlled_baseline_vehicle:
vehicle = baseline_vehicle_class(road,
road.network.get_lane(("a", "b", right_lane)).position(
vehicle_position,
0),
speed=speed, enable_lane_change=enable_lane_change,
config=self.env.config, v_type='baseline_vehicle', id=vehicle_id)
else:
vehicle = self.env.action_type.vehicle_class(road,
road.network.get_lane(("a", "b", right_lane)).position(
vehicle_position, 0),
speed=speed, id=controlled_vehicle_id)
# count +=1
# if count<=2 or multi_agent_setting:
# vehicle = self.env.action_type.vehicle_class(road,
# road.network.get_lane(("a", "b", right_lane)).position(
# vehicle_position, 0),
# speed=speed, id=vehicle_id)
# if not multi_agent_setting:
# vehicle_space = vehicle_space
# else:
# vehicle = cruising_vehicle_class(road,
# road.network.get_lane(("a", "b", right_lane)).position(
# vehicle_position,
# 0),
# speed=speed, enable_lane_change=enable_lane_change,
# config=self.env.config, v_type='cruising_vehicle', id=vehicle_id)
vehicle_position += vehicle_space
self.controlled_vehicles.append(vehicle)
# if count <= 2 or multi_agent_setting:
# self.controlled_vehicles.append(vehicle)
road.vehicles.append(vehicle)
controlled_vehicle_id += 1
if self.cruising_vehicles_front:
# vehicle_position = max(vehicle_position, self.cruising_vehicles_front_initial_position)
lane = road.network.get_lane(("b", "c", right_lane))
last_vehicle_position= lane.local_coordinates(vehicle.position)[0]
vehicle_position = max(last_vehicle_position + self.ego_spacing * self.cruising_vehicle['length'], self.cruising_vehicles_front_initial_position)
# vehicle_position = self.cruising_vehicles_front_initial_position
vehicle_space = self.ego_spacing * self.cruising_vehicle['length']
enable_lane_change = self.cruising_vehicle["enable_lane_change"]
speed = self.cruising_vehicle["speed"]
if vehicle_space <= (abs(self.random_offset_vehicles[0]) + self.cruising_vehicle['length']):
print(" warning, reduce number of vehicle or offset range")
exit()
# TODO , define default for this case
# TODO , consider speed also for positioning
for i in range(self.cruising_vehicles_front_count):
if self.cruising_vehicles_front_random_everywhere:
vehicle = self.create_random(cruising_vehicle_class, from_options=["a"],enable_lane_change = self.cruising_vehicle["enable_lane_change"], vehicle_id =vehicle_id)
else:
if self.randomize_vehicles:
random_offset = self.random_offset_vehicles
delta = np.random.randint(low=random_offset[0], high=random_offset[1])
vehicle_position += delta
vehicle_position = max(0, vehicle_position)
# vehicle_position = min(vehicle_position, self.before)
if self.randomize_speed:
# speed = road.np_random.uniform(Vehicle.DEFAULT_SPEEDS[0], Vehicle.DEFAULT_SPEEDS[1])
random_offset = self.randomize_speed_offset
delta = np.random.randint(low=random_offset[0], high=random_offset[1])
speed += delta
vehicle = cruising_vehicle_class(road,
road.network.get_lane(("b", "c", right_lane)).position(
vehicle_position,
0),
speed=speed, enable_lane_change=self.cruising_vehicle["enable_lane_change"],
config=self.env.config, v_type='cruising_vehicle', id=vehicle_id)
vehicle_position += vehicle_space
road.vehicles.append(vehicle)
vehicle_id += 1
id_merging_vehicle = self.merging_vehicle['id']
speed = self.merging_vehicle['speed']
        # TODO check every time we change a var
initial_position = self.merging_vehicle['initial_position']
if self.complex:
self.merging_vehicle['randomize'] = True
# self.merging_vehicle['random_offset_merging'] = [-20,20]
self.merging_vehicle['random_offset_merging'] = [-100,50]
self.merging_vehicle['randomize_speed_merging'] = True
self.randomize_speed_offset =[-5,5]
if self.merging_vehicle['randomize']:
random_offset = self.merging_vehicle['random_offset_merging']
delta = np.random.randint(low=random_offset[0], high=random_offset[1])
# delta = np.random.normal(0, random_offset[1] / 3)
if delta > 0:
delta = min(delta, random_offset[1])
else:
delta = max(delta, random_offset[0])
initial_position[0] += delta
initial_position[0] = max(0, initial_position[0])
if self.merging_vehicle['randomize_speed_merging']:
# speed = road.np_random.uniform(Vehicle.DEFAULT_SPEEDS[0], Vehicle.DEFAULT_SPEEDS[1])
random_offset = self.randomize_speed_offset
delta = np.random.randint(low=random_offset[0], high=random_offset[1])
# delta = np.random.normal(0, random_offset[1]/3)
if delta > 0:
delta = min(delta,random_offset[1])
else:
delta = max(delta, random_offset[0])
speed += delta
speed = max(0, speed)
route = None
if self.merging_vehicle['controlled_vehicle']:
# if self.exit_vehicle['set_route']:
# route=[('j', 'k', 0), ('k', 'b', 0), ('b', 'c', 0),('c', 'd', 0)]
merging_v = self.env.action_type.vehicle_class(road,
road.network.get_lane(("j", "k", 0)).position(
initial_position[0],
initial_position[1]), speed=speed,
config=self.env.config, id=id_merging_vehicle, route=route , min_speed=self.merging_vehicle['min_speed'], max_speed=self.merging_vehicle['max_speed'])
else:
# route = [('j', 'k', 0), ('k', 'b', 0), ('b', 'c', 0), ('c', 'd', 0)]
merging_vehicle = utils.class_from_path(self.merging_vehicle['vehicles_type'])
merging_v = merging_vehicle(road, road.network.get_lane(("j", "k", 0)).position(initial_position[0],
initial_position[1]),
speed=speed,
config=self.env.config, v_type='merging_vehicle', id=id_merging_vehicle,
route=route)
road.vehicles.append(merging_v)
if self.merging_vehicle['controlled_vehicle']:
self.controlled_vehicles.append(merging_v)
self.road = road
def _vehicles_exit_highway1(self) -> None:
"""Create some new random vehicles of a given type, and add them on the road."""
        # TODO always in the same position ??
# random.seed(30)
self.controlled_vehicles = []
road = self.road
right_lane = len(self.road.network.graph['a']['b']) - 1
vehicle_space = self.converging_exit / self.controlled_vehicles_count
vehicle_position = self.before_exit
vehicle_id = 1
if self.randomize_vehicles:
random_offset = self.random_offset_vehicles
vehicle_position += random_offset[1]
baseline_vehicle_class = utils.class_from_path(self.baseline_vehicle["vehicles_type"])
speed = self.baseline_vehicle["speed"]
enable_lane_change = self.baseline_vehicle["enable_lane_change"]
for _ in range(self.controlled_vehicles_count):
if self.controlled_baseline_vehicle:
vehicle = baseline_vehicle_class(road,
road.network.get_lane(("a", "b", right_lane)).position(
vehicle_position,
0),
speed=speed, enable_lane_change=enable_lane_change,
config=self.env.config, v_type='baseline_vehicle', id=vehicle_id)
vehicle.is_controlled = 1
else:
vehicle = self.env.action_type.vehicle_class(road,
road.network.get_lane(("a", "b", right_lane)).position(
vehicle_position, 0),
speed=30, id=vehicle_id)
vehicle_position += vehicle_space
self.controlled_vehicles.append(vehicle)
road.vehicles.append(vehicle)
vehicle_id += 1
vehicle_position = 0
vehicle_space = self.before_exit / self.cruising_vehicles_count
cruising_vehicle = utils.class_from_path(self.cruising_vehicle["vehicles_type"])
# TODO ? speed random?
speed = self.cruising_vehicle['speed']
enable_lane_change = self.cruising_vehicle['enable_lane_change']
for i in range(self.cruising_vehicles_count):
            # spawn vehicles in lane and position
# if self.env.config['scenario']['randomize_vehicles']:
# # vehicle = cruising_vehicle.create_random(road,spacing=self.env.config["ego_spacing"],id=vehicle_id)
# vehicle_position
# else:
# vehicle = cruising_vehicle(road, road.network.get_lane(("a", "b", right_lane)).position(vehicle_position, 0), speed=speed,enable_lane_change=enable_lane_change,
# config=self.env.config,v_type='cruising_vehicle',id=vehicle_id)
# vehicle_position += vehicle_space
if self.randomize_vehicles:
random_offset = self.random_offset_vehicles
delta = np.random.randint(low=random_offset[0], high=random_offset[1])
vehicle_position += delta
vehicle_position = max(0, vehicle_position)
# vehicle_position = min(vehicle_position, self.before)
vehicle = cruising_vehicle(road,
road.network.get_lane(("a", "b", right_lane)).position(vehicle_position, 0),
speed=speed, enable_lane_change=enable_lane_change,
config=self.env.config, v_type='cruising_vehicle', id=vehicle_id)
vehicle_position += vehicle_space
road.vehicles.append(vehicle)
vehicle_id += 1
id_exit_vehicle = self.exit_vehicle['id']
speed = self.exit_vehicle['speed']
initial_position = self.exit_vehicle['initial_position']
if self.exit_vehicle['randomize']:
random_offset = self.random_offset_vehicles
delta = np.random.randint(low=random_offset[0], high=random_offset[1])
initial_position[0] += delta
initial_position[0] = max(0, initial_position[0])
route = None
if self.exit_vehicle['controlled_vehicle']:
if self.exit_vehicle['set_route']:
route = [('a', 'b', 0), ('b', 'p', 0), ('p', 'k', 0), ('k', 'j', 0)]
exit_v = self.env.action_type.vehicle_class(road,
road.network.get_lane(("a", "b", 0)).position(
initial_position[0],
initial_position[1]), speed=speed, config=self.env.config,
id=id_exit_vehicle, route=route)
else:
exit_vehicle = utils.class_from_path(self.exit_vehicle["vehicles_type"])
route = [('a', 'b', 0), ('b', 'p', 0), ('p', 'k', 0), ('k', 'j', 0)]
exit_v = exit_vehicle(road, road.network.get_lane(("a", "b", 0)).position(initial_position[0],
initial_position[1]),
speed=speed,
config=self.env.config, v_type='exit_vehicle', id=id_exit_vehicle,
route=route)
road.vehicles.append(exit_v)
if self.exit_vehicle['controlled_vehicle']:
self.controlled_vehicles.append(exit_v)
self.road = road
def _vehicles_exit_highwayv0(self):
"""Create some new random vehicles of a given type, and add them on the road."""
self.controlled_vehicles = []
road = self.road
for _ in range(self.controlled_vehicles_count):
vehicle = self.env.action_type.vehicle_class.create_randomv2(road,
speed=25,
lane_from="0",
lane_to="1",
lane_id=0,
spacing=self.ego_spacing)
vehicle.SPEED_MIN = 18
self.controlled_vehicles.append(vehicle)
road.vehicles.append(vehicle)
# vehicles_type = utils.class_from_path(self.config["other_vehicles_type"])
cruising_vehicle_class = vehicles_type = utils.class_from_path(self.cruising_vehicle["vehicles_type"])
for _ in range(self.cruising_vehicles_count):
lanes = np.arange(self.lanes_count)
lane_id = road.np_random.choice(lanes, size=1,
p=lanes / lanes.sum()).astype(int)[0]
lane = road.network.get_lane(("0", "1", lane_id))
vehicle = vehicles_type.create_randomv2(road,
lane_from="0",
lane_to="1",
lane_id=lane_id,
speed=lane.speed_limit,
spacing=1 / self.vehicles_density,
).plan_route_to("3")
vehicle.enable_lane_change = False
road.vehicles.append(vehicle)
self.road = road
def _vehicles_exit_highway(self):
"""Create some new random vehicles of a given type, and add them on the road."""
self.controlled_vehicles = []
road = self.road
right_lane = 1
vehicle_position = self.exit_humans + 10
vehicle_space = self.exit_controlled/ self.controlled_vehicles_count
vehicle_id = 1
controlled_vehicle_id = self.cruising_vehicles_count + 1
speed = self.controlled_vehicle_speed
for _ in range(self.controlled_vehicles_count):
vehicle = self.env.action_type.vehicle_class(road,
road.network.get_lane(("0", "1", right_lane)).position(
vehicle_position, 0),
speed=speed, id=controlled_vehicle_id)
vehicle_position += vehicle_space
controlled_vehicle_id += 1
self.controlled_vehicles.append(vehicle)
road.vehicles.append(vehicle)
# vehicles_type = utils.class_from_path(self.config["other_vehicles_type"])
cruising_vehicle_class= vehicles_type = utils.class_from_path(self.cruising_vehicle["vehicles_type"])
vehicle_position = 0
vehicle_space = (self.exit_humans) / self.cruising_vehicles_count
speed = self.cruising_vehicle['speed']
enable_lane_change = self.cruising_vehicle['enable_lane_change']
cruising_vehicle = utils.class_from_path(self.cruising_vehicle["vehicles_type"])
for _ in range(self.cruising_vehicles_count-1):
vehicle = cruising_vehicle(road,
road.network.get_lane(("0", "1", 1)).position(vehicle_position, 0),
speed=speed, enable_lane_change=enable_lane_change,
config=self.env.config, v_type='cruising_vehicle', id=vehicle_id)
vehicle_position += vehicle_space
road.vehicles.append(vehicle)
vehicle_id += 1
id_exit_vehicle = self.exit_vehicle['id']
speed = self.exit_vehicle['speed']
initial_position = self.exit_vehicle['initial_position']
route = None
if self.complex:
self.exit_vehicle['randomize'] = True
self.exit_vehicle['random_offset_exit'] =[-100,50]
# self.exit_vehicle['random_offset_exit'] =[-30,30]
if self.exit_vehicle['randomize']:
episode = self.env.episode
# initial_positions=[69,74]
# idx = episode % 2
# if episode%150 ==0:
# idx = 1
# else:
# idx =0
# initial_position[0] = initial_positions[idx]
random_offset = self.exit_vehicle['random_offset_exit']
delta = np.random.randint(low=random_offset[0], high=random_offset[1])
import sys, getopt
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import os
import ICA_support_lib as sup
import ICA_coupling_pattern as cp
import ICA_ising as ising
class astro_pp_ising_creator:
def __init__(self):
self.main_Path = os.getcwd()
self.ising_model_Path = self.main_Path + '/Ising_models/'
self.data_Path = self.main_Path + '/dataFiles/'
sup.check_create_save_dir(self.ising_model_Path)
def create_ising_model(self, ising_num, size_exp,shift,shrink,stretch,scaler,clusters = 500,diam_mean_var = (6, 1),amp_mean_var = (.1, 1)):
'''
:param ising_num: ISING MODEL NUMBER TO GENERATE
:param size_exp: CONTROLS SIZE OF ISING LATTICE
:param shift: SURFACE GENERATION PARAMETER, CONSTANT AT 0
:param shrink: SURFACE GENERATION PARAMETER, CONSTANT AT 0
:param stretch: SURFACE GENERATION PARAMETER, CONSTANT AT 0
:param scaler: SURFACE GENERATION PARAMETER, CONSTANT AT 0
:param clusters: SURFACE GENERATION PARAMETER, CONSTANT AT 500
:param diam_mean_var: MEAN AND VARIANCE FOR THE DIAMETER OF EACH RADIAL BASIS FUNCTION FORMING THE SURFACE
:param amp_mean_var: MEAN AND VARIANCE FOR THE AMPLITUDE OF EACH RADIAL BASIS FUNCTION FORMING THE SURFACE
:return: SAVES ISING MODEL DATA
'''
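# Hedged usage sketch (illustration only; the argument values below are assumptions
# based on the documented defaults, not a prescribed configuration):
#
#     creator = astro_pp_ising_creator()
#     creator.create_ising_model(ising_num=1, size_exp=6, shift=0, shrink=0, stretch=0, scaler=0,
#                                clusters=500, diam_mean_var=(6, 1), amp_mean_var=(.1, 1))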
np.random.seed(222)
# CREATE MODEL DIRECTORY
dataPath_model = self.ising_model_Path + '/Model_' + str(ising_num)
sup.check_create_save_dir(dataPath_model)
# SET SHAPE OF ISING 2D LATTICE
shp = (2 ** size_exp, 2 ** size_exp) # syn_space_size
# INITIALIZED NEED CLASSES
isi = ising.astro_pp_model_ising(synaptic_matrix_size=shp,shift=shift,shrink=shrink,stretch=stretch,scaler=scaler)
pat = cp.astro_pp_pattern_generator(space_dims=shp)
# CREATE LOG FOR MODEL PARAMETERS AND STATS
log_filename = 'Log_for_Ising_Model_' + str(ising_num)
log_fn = os.path.abspath(os.path.join(dataPath_model, log_filename))
with open(log_fn, 'w') as f:
f.write('LOG___ISING_MODEL_' + str(ising_num)+ '\n\n')
f.write('DATA PATH: ' + str(dataPath_model) + '\n\n\n')
f.write('INPUT PARAMETERS:\n\n')
f.write(' size_exp = ' + str(size_exp) + '\n')
f.write(' shape = ' + str(shp) + '\n\n')
f.write(' clusters = ' + str(clusters) + '\n')
f.write(' diam_mean_var = ' + str(diam_mean_var) + '\n')
f.write(' amp_mean_var = ' + str(amp_mean_var) + '\n')
f.write(' shift = ' + str(shift) + '\n')
f.write(' shrink = ' + str(shrink) + '\n')
f.write(' stretch = ' + str(stretch) + '\n')
f.write(' scaler = ' + str(scaler) + '\n')
# GENERATE 3D LANDSCAPE USING RADIAL BASIS FUNCTIONS
params = pat.generate_pattern_landscape_parameters_normal_dist(num_of_clusters=clusters,
diam_min_max=diam_mean_var,
amp_min_max=amp_mean_var)
out = pat.space_func_2d(pat.X, pat.Y, params[0], params[1], params[2], params[3])
f.write('Initial Out Landscape <M>, Min, Max : ' + str(
len(np.where(out >= 0)[0]) / np.size(out)) + ' , ' + str(np.amin(out)) + ' , ' + str(
np.amax(out)) + '\n')
# RESCALING SURFACE
out_rescaled = np.multiply(out, np.divide(1.0, np.maximum(np.absolute(np.amin(out)), np.absolute(np.amax(out)))))
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
import seaborn as sns
sns.set_style("whitegrid")
prop_cycle = plt.rcParams["axes.prop_cycle"]
colors = prop_cycle.by_key()["color"]
import ipywidgets as widgets
from consav import linear_interp
# local modules
import utility
######################
# decision functions #
######################
def _decision_functions(model,t,i_p,name):
if name == 'discrete':
_discrete(model,t,i_p)
elif name == 'adj':
_adj(model,t,i_p)
elif name == 'keep':
_keep(model,t,i_p)
elif name == 'post_decision' and t <= model.par.T-2:
_w(model,t,i_p)
def decision_functions(model):
widgets.interact(_decision_functions,
model=widgets.fixed(model),
t=widgets.Dropdown(description='t',
options=list(range(model.par.T)), value=0),
i_p=widgets.Dropdown(description='ip',
options=list(range(model.par.Np)), value=int(model.par.Np/2)),
name=widgets.Dropdown(description='name',
options=['discrete','adj','keep','post_decision'], value='discrete')
)
def _discrete(model,t,i_p):
par = model.par
# a. interpolation
n, m = np.meshgrid(par.grid_n, par.grid_m, indexing='ij')
import argparse
import os
import tarfile
import urllib.request
import numpy as np
import pyprind
import utils
"""
We use this script instead of the original ```create_ubuntu_dataset.py``` for the following reasons:
- ubuntu_dialogs.tgz is huge and only part of it is needed here, so instead of unpacking the whole archive we extract only the dialogues that meet the given conditions
- For this use case we do not need to go as far as generating (context, response, label) triplets; we only need the dialogues that meet the conditions
"""
URL = 'http://cs.mcgill.ca/~jpineau/datasets/ubuntu-corpus-1.0/ubuntu_dialogs.tgz'
ARCHIVE_NAME = 'ubuntu_dialogs.tgz'
def main(args):
archive_dir = args.input
output_dir = args.output
# Download archived dialogues (```ubuntu_dialogs.tgz```) to ```archive_dir```
prepare_data_maybe_download(archive_dir)
# Extract dialogues that meet the given conditions
dialogues = extract_dialogues(archive_path=os.path.join(archive_dir, ARCHIVE_NAME),
n_dialogues=args.n_dialogues,
min_dialogue_length=args.min_dialogue_length,
max_dialogue_length=args.max_dialogue_length,
max_utterance_length=args.max_utterance_length,
max_speakers=args.max_speakers)
assert len(dialogues) <= args.n_dialogues
# Save the extracted dialogues to ```output_dir```
save_dialogues(output_dir=output_dir, dialogues=dialogues)
utils.writelog("Done.")
def prepare_data_maybe_download(archive_dir):
"""
Download archived dialogues if necessary.
This functions is mainly copied from the following original repository:
https://github.com/rkadlec/ubuntu-ranking-dataset-creator
"""
# Check
filenames = os.listdir(archive_dir)
assert "generate.sh" in filenames
assert "create_ubuntu_dataset.py" in filenames
assert "download_punkt.py" in filenames
assert "meta" in filenames
# dialogs are missing
archive_path = os.path.join(archive_dir, ARCHIVE_NAME)
if not os.path.exists(archive_path):
# archive missing, download it
utils.writelog("Downloading %s to %s" % (URL, archive_path))
filepath, _ = urllib.request.urlretrieve(URL, archive_path)
utils.writelog("Successfully downloaded " + filepath)
else:
utils.writelog("Found archive: %s" % archive_path)
def extract_dialogues(
archive_path,
n_dialogues,
min_dialogue_length,
max_dialogue_length,
max_utterance_length,
max_speakers):
utils.writelog("Number of dialogues: %d" % n_dialogues)
utils.writelog("Min. dialogue length: %d" % min_dialogue_length)
utils.writelog("Max. dialogue length: %d" % max_dialogue_length)
utils.writelog("Max. utterance length: %d" % max_utterance_length)
utils.writelog("Max. speakers: %d" % max_speakers)
utils.writelog("Extracting dialogues from %s ..." % archive_path)
dialogues = []
with tarfile.open(name=archive_path, mode="r") as tar:
# Get archived files (including directories)
utils.writelog("Extracting archived information ...")
members = tar.getmembers() # May take several minutes
utils.writelog("Number of archived entries (files + directories): %d" % len(members))
members = [m for m in members if m.name.endswith(".tsv")]
utils.writelog("Number of archived TSV files: %d" % len(members))
count = 0
avg_dialogue_length = []
avg_utterance_length = []
avg_speakers = []
for member_i, member in enumerate(members):
# Content
with tar.extractfile(member) as f:
binary = f.read()
text = binary.decode("utf-8")
lines = text.split("\n")
lines = [line.split("\t") for line in lines]
# Clean lines
new_lines = []
for items in lines:
assert len(items) == 4 or len(items) == 1 or len(items) == 0
if len(items) == 4:
new_lines.append(items)
lines = new_lines
# Clean utterance
lines = [items for items in lines if len(items) == 4]
for i in range(len(lines)):
assert len(lines[i]) == 4
utterance = lines[i][3]
utterance = utterance.strip()
lines[i][3] = utterance
# If conditions are met, record this dialogue
avg_dialogue_length.append(len(lines))
if min_dialogue_length <= len(lines) <= max_dialogue_length:
# Dialogue length is OK
all_with_response = True
for items in lines[2:]:
_, _, listener, _ = items
if listener == "":
all_with_response = False
all_with_utterance = True
for items in lines:
_, _, _, utterance = items
if utterance == "":
all_with_utterance = False
if all_with_response and all_with_utterance:
# All utterances (except for the first one) are with response-to markers
temp_max_utterance_length = -1
speakers = []
for items in lines:
_, speaker, listener, utterance = items
n_tokens = len(utterance.split(" ")) # rough whitespace-based tokenization
temp_max_utterance_length = max(temp_max_utterance_length, n_tokens)
speakers.append(speaker)
speakers.append(listener)
speakers = set(speakers)
avg_utterance_length.append(temp_max_utterance_length)
avg_speakers.append(len(speakers))
if temp_max_utterance_length <= max_utterance_length and len(speakers) <= max_speakers:
# Utterance length and the number of speakers are OK
dialogues.append(lines)
count += 1
# Progress
if count % 1000 == 0:
utils.writelog("##### Extracted %d dialogues #####" % count)
if count == n_dialogues:
break
# Progress
if (member_i + 1) % 5000 == 0:
utils.writelog("Processed %d dialogues" % (member_i + 1))
utils.writelog("Avg. dialogue length: %f" % np.mean(avg_dialogue_length))
utils.writelog("Avg. max utterange length: %f" % np.mean(avg_utterance_length))
utils.writelog("Avg. number of speakers: %f" % | np.mean(avg_speakers) | numpy.mean |
# -*- coding: utf-8 -*-
r"""Define an instrument for resolution calculations
"""
import numpy as np
from scipy.linalg import block_diag as blkdiag
from ..crystal import Sample
from ..energy import Energy
from .analyzer import Analyzer
from .exceptions import ScatteringTriangleError
from .general import GeneralInstrument
from .monochromator import Monochromator
from .plot import PlotInstrument
from .tools import GetTau, _CleanArgs, _Dummy, _modvec, _scalar, _star, _voigt
class TripleAxisInstrument(GeneralInstrument, PlotInstrument):
u"""An object that represents a Triple Axis Spectrometer (TAS) instrument
experimental configuration, including a sample.
Parameters
----------
efixed : float, optional
Fixed energy, either ei or ef, depending on the instrument
configuration. Default: 14.7
sample : obj, optional
Sample lattice constants, parameters, mosaic, and orientation
(reciprocal-space orienting vectors). Default: A crystal with
a,b,c = 6,7,8 and alpha,beta,gamma = 90,90,90 and orientation
vectors u=[1 0 0] and v=[0 1 0].
hcol : list(4)
Horizontal Soller collimations in minutes of arc starting from the
neutron guide. Default: [40 40 40 40]
vcol : list(4), optional
Vertical Soller collimations in minutes of arc starting from the
neutron guide. Default: [120 120 120 120]
mono_tau : str or float, optional
The monochromator reciprocal lattice vector in Å\ :sup:`-1`,
given either as a float, or as a string for common monochromator types.
Default: 'PG(002)'
mono_mosaic : float, optional
The mosaic of the monochromator in minutes of arc. Default: 25
ana_tau : str or float, optional
The analyzer reciprocal lattice vector in Å\ :sup:`-1`,
given either as a float, or as a string for common analyzer types.
Default: 'PG(002)'
ana_mosaic : float, optional
The mosaic of the monochromator in minutes of arc. Default: 25
Attributes
----------
method
moncor
mono
ana
hcol
vcol
arms
efixed
sample
orient1
orient2
infin
beam
detector
monitor
Smooth
guide
description_string
Methods
-------
calc_resolution
calc_resolution_in_Q_coords
calc_projections
get_angles_and_Q
get_lattice
get_resolution_params
get_resolution
plot_projections
plot_ellipsoid
plot_instrument
resolution_convolution
resolution_convolution_SMA
plot_slice
"""
def __init__(self, efixed=14.7, sample=None, hcol=None, vcol=None, mono='PG(002)',
mono_mosaic=25, ana='PG(002)', ana_mosaic=25, **kwargs):
if sample is None:
sample = Sample(6, 7, 8, 90, 90, 90)
sample.u = [1, 0, 0]
sample.v = [0, 1, 0]
if hcol is None:
hcol = [40, 40, 40, 40]
if vcol is None:
vcol = [120, 120, 120, 120]
self.mono = Monochromator(mono, mono_mosaic)
self.ana = Analyzer(ana, ana_mosaic)
self.hcol = np.array(hcol)
self.vcol = np.array(vcol)
self.efixed = efixed
self.sample = sample
self.orient1 = np.array(sample.u)
self.orient2 = np.array(sample.v)
self.detector = _Dummy('Detector')
self.monitor = _Dummy('Monitor')
self.guide = _Dummy('Guide')
for key, value in kwargs.items():
setattr(self, key, value)
def __repr__(self):
return "Instrument('tas', engine='neutronpy', efixed={0})".format(self.efixed)
def __eq__(self, right):
self_parent_keys = sorted(list(self.__dict__.keys()))
right_parent_keys = sorted(list(right.__dict__.keys()))
if not np.all(self_parent_keys == right_parent_keys):
return False
for key, value in self.__dict__.items():
right_parent_val = getattr(right, key)
if not np.all(value == right_parent_val):
print(value, right_parent_val)
return False
return True
def __ne__(self, right):
return not self.__eq__(right)
@property
def mono(self):
u"""A structure that describes the monochromator.
Attributes
----------
tau : str or float
The monochromator reciprocal lattice vector in Å\ :sup:`-1`.
Instead of a numerical input one can use one of the following
keyword strings:
+------------------+--------------+-----------+
| String | τ | |
+==================+==============+===========+
| Be(002) | 3.50702 | |
+------------------+--------------+-----------+
| Co0.92Fe0.08(200)| 3.54782 | (Heusler) |
+------------------+--------------+-----------+
| Cu(002) | 3.47714 | |
+------------------+--------------+-----------+
| Cu(111) | 2.99913 | |
+------------------+--------------+-----------+
| Cu(220) | 4.91642 | |
+------------------+--------------+-----------+
| Cu2MnAl(111) | 1.82810 | (Heusler) |
+------------------+--------------+-----------+
| Ge(111) | 1.92366 | |
+------------------+--------------+-----------+
| Ge(220) | 3.14131 | |
+------------------+--------------+-----------+
| Ge(311) | 3.68351 | |
+------------------+--------------+-----------+
| Ge(511) | 5.76968 | |
+------------------+--------------+-----------+
| Ge(533) | 7.28063 | |
+------------------+--------------+-----------+
| PG(002) | 1.87325 | |
+------------------+--------------+-----------+
| PG(004) | 3.74650 | |
+------------------+--------------+-----------+
| PG(110) | 5.49806 | |
+------------------+--------------+-----------+
| Si(111) | 2.00421 | |
+------------------+--------------+-----------+
mosaic : int
The monochromator mosaic in minutes of arc.
vmosaic : int
The vertical mosaic of monochromator in minutes of arc. If
this field is left unassigned, an isotropic mosaic is assumed.
dir : int
Direction of the crystal (left or right, -1 or +1, respectively).
Default: -1 (left-handed coordinate frame).
rh : float
Horizontal curvature of the monochromator in cm.
rv : float
Vertical curvature of the monochromator in cm.
"""
return self._mono
@mono.setter
def mono(self, value):
self._mono = value
@property
def ana(self):
u"""A structure that describes the analyzer and contains fields as in
:attr:`mono` plus optional fields.
Attributes
----------
thickness: float
The analyzer thickness in cm for ideal-crystal reflectivity
corrections (Section II C 3). If no reflectivity corrections are to
be made, this field should remain unassigned or set to a negative
value.
Q : float
The kinematic reflectivity coefficient for this correction. It is
given by
.. math:: Q = \\frac{4|F|^2}{V_0} \\frac{(2\\pi)^3}{\\tau^3},
where V0 is the unit cell volume for the analyzer crystal, F is the
structure factor of the analyzer reflection, and τ is the analyzer
reciprocal lattice vector. For PG(002) Q = 0.1287. Leave this field
unassigned or make it negative if you don’t want the correction
done.
horifoc : bool
A flag that is set to 1 if a horizontally focusing analyzer is used
(Section II D). In this case ``hcol[2]`` (see below) is the angular
size of the analyzer, as seen from the sample position. If the
field is unassigned or equal to -1, a flat analyzer is assumed.
Note that this option is only available with the Cooper-Nathans
method.
dir : int
Direction of the crystal (left or right, -1 or +1, respectively).
Default: -1 (left-handed coordinate frame).
rh : float
Horizontal curvature of the analyzer in cm.
rv : float
Vertical curvature of the analyzer in cm.
"""
return self._ana
@ana.setter
def ana(self, value):
self._ana = value
@property
def method(self):
"""Selects the computation method.
If ``method=0`` or left undefined, a Cooper-Nathans calculation is
performed. For a Popovici calculation set ``method=1``.
"""
return self._method
@method.setter
def method(self, value):
self._method = value
@property
def moncor(self):
"""Selects the type of normalization used to calculate ``R0``
If ``moncor=1`` or left undefined, ``R0`` is calculated in
normalization to monitor counts (Section II C 2). 1/k\ :sub:`i` monitor
efficiency correction is included automatically. To normalize ``R0`` to
source flux (Section II C 1), use ``moncor=0``.
"""
return self._moncar
@moncor.setter
def moncor(self, value):
self._moncar = value
@property
def hcol(self):
r""" The horizontal Soller collimations in minutes of arc (FWHM beam
divergence) starting from the in-pile collimator. In case of a
horizontally-focusing analyzer ``hcol[2]`` is the angular size of the
analyzer, as seen from the sample position. If the beam divergence is
limited by a neutron guide, the corresponding element of :attr:`hcol`
is the negative of the guide’s *m*-value. For example, for a 58-Ni
guide ( *m* = 1.2 ) before the monochromator, ``hcol[0]`` should be
-1.2.
"""
return self._hcol
@hcol.setter
def hcol(self, value):
self._hcol = value
@property
def vcol(self):
"""The vertical Soller collimations in minutes of arc (FWHM beam
divergence) starting from the in-pile collimator. If the beam
divergence is limited by a neutron guide, the corresponding element of
:attr:`vcol` is the negative of the guide’s *m*-value. For example, for
a 58-Ni guide ( *m* = 1.2 ) before the monochromator, ``vcol[0]``
should be -1.2.
"""
return self._vcol
@vcol.setter
def vcol(self, value):
self._vcol = value
@property
def arms(self):
"""distances between the source and monochromator, monochromator
and sample, sample and analyzer, analyzer and detector, and
monochromator and monitor, respectively. The 5th element is only needed
if ``moncor=1``
"""
return self._arms
@arms.setter
def arms(self, value):
self._arms = value
@property
def efixed(self):
"""the fixed incident or final neutron energy, in meV.
"""
return self._efixed
@efixed.setter
def efixed(self, value):
self._efixed = value
@property
def sample(self):
"""A structure that describes the sample.
Attributes
----------
mosaic
FWHM sample mosaic in the scattering plane
in minutes of arc. If left unassigned, no sample
mosaic corrections (section II E) are performed.
vmosaic
The vertical sample mosaic in minutes of arc.
If left unassigned, isotropic mosaic is assumed.
dir
The direction of the crystal (left or right, -1 or +1,
respectively). Default: -1 (left-handed coordinate frame).
"""
return self._sample
@sample.setter
def sample(self, value):
self._sample = value
@property
def orient1(self):
"""Miller indexes of the first reciprocal-space orienting vector for
the S coordinate system, as explained in Section II G.
"""
return self._sample.u
@orient1.setter
def orient1(self, value):
self._sample.u = np.array(value)
@property
def orient2(self):
"""Miller indexes of the second reciprocal-space orienting vector
for the S coordinate system, as explained in Section II G.
"""
return self._sample.v
@orient2.setter
def orient2(self, value):
self._sample.v = np.array(value)
@property
def infin(self):
"""a flag set to -1 or left unassigned if the final energy is fixed, or
set to +1 in a fixed-incident setup.
"""
return self._infin
@infin.setter
def infin(self, value):
self._infin = value
@property
def guide(self):
r"""A structure that describes the source
"""
return self._guide
@guide.setter
def guide(self, value):
self._guide = value
@property
def detector(self):
"""A structure that describes the detector
"""
return self._detector
@detector.setter
def detector(self, value):
self._detector = value
@property
def monitor(self):
"""A structure that describes the monitor
"""
return self._monitor
@monitor.setter
def monitor(self, value):
self._monitor = value
@property
def Smooth(self):
u"""Defines the smoothing parameters as explained in Section II H. Leave this
field unassigned if you don’t want this correction done.
* ``Smooth.E`` is the smoothing FWHM in energy (meV). A small number
means “no smoothing along this direction”.
* ``Smooth.X`` is the smoothing FWHM along the first orienting vector
(x0 axis) in Å\ :sup:`-1`.
* ``Smooth.Y`` is the smoothing FWHM along the y axis in Å\ :sup:`-1`.
* ``Smooth.Z`` is the smoothing FWHM along the vertical direction in
Å\ :sup:`-1`.
"""
return self._Smooth
@Smooth.setter
def Smooth(self, value):
self._Smooth = value
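# Hedged usage sketch: calc_resolution only reads the E, X, Y and Z attributes of EXP.Smooth,
# so any simple namespace object should work here (the values below are placeholders):
#
#     from types import SimpleNamespace
#     instr.Smooth = SimpleNamespace(E=0.1, X=0.01, Y=0.01, Z=0.01)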
def get_lattice(self):
r"""Extracts lattice parameters from EXP and returns the direct and
reciprocal lattice parameters in the form used by _scalar.m, _star.m,
etc.
Returns
-------
[lattice, rlattice] : [class, class]
Returns the direct and reciprocal lattice sample classes
Notes
-----
Translated from ResLib 3.4c, originally authored by <NAME>,
1999-2007, Oak Ridge National Laboratory
"""
lattice = Sample(self.sample.a,
self.sample.b,
self.sample.c,
np.deg2rad(self.sample.alpha),
np.deg2rad(self.sample.beta),
np.deg2rad(self.sample.gamma))
rlattice = _star(lattice)[-1]
return [lattice, rlattice]
def _StandardSystem(self):
r"""Returns rotation matrices to calculate resolution in the sample view
instead of the instrument view
Attributes
----------
EXP : class
Instrument class
Returns
-------
[x, y, z, lattice, rlattice] : [array, array, array, class, class]
Returns the rotation matrices and real and reciprocal lattice
sample classes
Notes
-----
Translated from ResLib 3.4c, originally authored by <NAME>,
1999-2007, Oak Ridge National Laboratory
"""
[lattice, rlattice] = self.get_lattice()
orient1 = self.orient1
orient2 = self.orient2
modx = _modvec(orient1, rlattice)
x = orient1 / modx
proj = _scalar(orient2, x, rlattice)
y = orient2 - x * proj
mody = _modvec(y, rlattice)
if len(np.where(mody <= 0)[0]) > 0:
raise ScatteringTriangleError('Orienting vectors are collinear')
y /= mody
z = np.array([ x[1] * y[2] - y[1] * x[2],
x[2] * y[0] - y[2] * x[0],
-x[1] * y[0] + y[1] * x[0]], dtype=np.float64)
proj = _scalar(z, x, rlattice)
z -= x * proj
proj = _scalar(z, y, rlattice)
z -= y * proj
modz = _modvec(z, rlattice)
z /= modz
return [x, y, z, lattice, rlattice]
def calc_resolution_in_Q_coords(self, Q, W):
r"""For a momentum transfer Q and energy transfers W, given experimental
conditions specified in EXP, calculates the Cooper-Nathans or Popovici
resolution matrix RM and resolution prefactor R0 in the Q coordinate
system (defined by the scattering vector and the scattering plane).
Parameters
----------
Q : ndarray or list of ndarray
The Q vectors in reciprocal space at which resolution should be
calculated, in inverse angstroms
W : float or list of floats
The energy transfers at which resolution should be calculated in meV
Returns
-------
[R0, RM] : list(float, ndarray)
Resolution pre-factor (R0) and resolution matrix (RM) at the given
reciprocal lattice vectors and energy transfers
Notes
-----
Translated from ResLib 3.4c, originally authored by <NAME>,
1999-2007, Oak Ridge National Laboratory
"""
CONVERT1 = np.pi / 60. / 180. / np.sqrt(8 * np.log(2))
CONVERT2 = 2.072
[length, Q, W] = _CleanArgs(Q, W)
RM = np.zeros((length, 4, 4), dtype=np.float64)
R0 = np.zeros(length, dtype=np.float64)
RM_ = np.zeros((4, 4), dtype=np.float64)
# the method to use
method = 0
if hasattr(self, 'method'):
method = self.method
# Assign default values and decode parameters
moncor = 1
if hasattr(self, 'moncor'):
moncor = self.moncor
alpha = np.array(self.hcol) * CONVERT1
beta = np.array(self.vcol) * CONVERT1
mono = self.mono
etam = np.array(mono.mosaic) * CONVERT1
etamv = np.copy(etam)
if hasattr(mono, 'vmosaic') and (method == 1 or method == 'Popovici'):
etamv = np.array(mono.vmosaic) * CONVERT1
ana = self.ana
etaa = np.array(ana.mosaic) * CONVERT1
etaav = np.copy(etaa)
if hasattr(ana, 'vmosaic'):
etaav = np.array(ana.vmosaic) * CONVERT1
sample = self.sample
infin = -1
if hasattr(self, 'infin'):
infin = self.infin
efixed = self.efixed
monitorw = 1.
monitorh = 1.
beamw = 1.
beamh = 1.
monow = 1.
monoh = 1.
monod = 1.
anaw = 1.
anah = 1.
anad = 1.
detectorw = 1.
detectorh = 1.
sshapes = np.repeat(np.eye(3, dtype=np.float64)[np.newaxis].reshape((1, 3, 3)), length, axis=0)
sshape_factor = 12.
L0 = 1.
L1 = 1.
L1mon = 1.
L2 = 1.
L3 = 1.
monorv = 1.e6
monorh = 1.e6
anarv = 1.e6
anarh = 1.e6
if hasattr(self, 'guide'):
beam = self.guide
if hasattr(beam, 'width'):
beamw = beam.width ** 2 / 12.
if hasattr(beam, 'height'):
beamh = beam.height ** 2 / 12.
bshape = np.diag([beamw, beamh])
if hasattr(self, 'monitor'):
monitor = self.monitor
if hasattr(monitor, 'width'):
monitorw = monitor.width ** 2 / 12.
monitorh = monitorw
if hasattr(monitor, 'height'):
monitorh = monitor.height ** 2 / 12.
monitorshape = np.diag([monitorw, monitorh])
if hasattr(self, 'detector'):
detector = self.detector
if hasattr(detector, 'width'):
detectorw = detector.width ** 2 / 12.
if hasattr(detector, 'height'):
detectorh = detector.height ** 2 / 12.
dshape = np.diag([detectorw, detectorh])
if hasattr(mono, 'width'):
monow = mono.width ** 2 / 12.
if hasattr(mono, 'height'):
monoh = mono.height ** 2 / 12.
if hasattr(mono, 'depth'):
monod = mono.depth ** 2 / 12.
mshape = np.diag([monod, monow, monoh])
if hasattr(ana, 'width'):
anaw = ana.width ** 2 / 12.
if hasattr(ana, 'height'):
anah = ana.height ** 2 / 12.
if hasattr(ana, 'depth'):
anad = ana.depth ** 2 / 12.
ashape = np.diag([anad, anaw, anah])
if hasattr(sample, 'shape_type'):
if sample.shape_type == 'cylindrical':
sshape_factor = 16.
elif sample.shape_type == 'rectangular':
sshape_factor = 12.
if hasattr(sample, 'width') and hasattr(sample, 'depth') and hasattr(sample, 'height'):
_sshape = np.diag([sample.depth, sample.width, sample.height]).astype(np.float64) ** 2 / sshape_factor
sshapes = np.repeat(_sshape[np.newaxis].reshape((1, 3, 3)), length, axis=0)
elif hasattr(sample, 'shape'):
_sshape = sample.shape.astype(np.float64) / sshape_factor
if len(_sshape.shape) == 2:
sshapes = np.repeat(_sshape[np.newaxis].reshape((1, 3, 3)), length, axis=0)
else:
sshapes = _sshape
if hasattr(self, 'arms') and method == 1:
arms = self.arms
L0, L1, L2, L3 = arms[:4]
L1mon = np.copy(L1)
if len(arms) > 4:
L1mon = np.copy(arms[4])
if hasattr(mono, 'rv'):
monorv = mono.rv
if hasattr(mono, 'rh'):
monorh = mono.rh
if hasattr(ana, 'rv'):
anarv = ana.rv
if hasattr(ana, 'rh'):
anarh = ana.rh
taum = GetTau(mono.tau)
taua = GetTau(ana.tau)
horifoc = -1
if hasattr(self, 'horifoc'):
horifoc = self.horifoc
if horifoc == 1:
alpha[2] = alpha[2] * np.sqrt(8. * np.log(2.) / 12.)
sm = self.mono.dir
ss = self.sample.dir
sa = self.ana.dir
for ind in range(length):
sshape = sshapes[ind, :, :]
# Calculate angles and energies
w = W[ind]
q = Q[ind]
ei = efixed
ef = efixed
if infin > 0:
ef = efixed - w
else:
ei = efixed + w
ki = np.sqrt(ei / CONVERT2)
kf = np.sqrt(ef / CONVERT2)
thetam = np.arcsin(taum / (2. * ki)) * sm
thetaa = np.arcsin(taua / (2. * kf)) * sa
s2theta = np.arccos(complex((ki ** 2 + kf ** 2 - q ** 2) / (2. * ki * kf))) * ss
if np.abs(np.imag(s2theta)) > 1e-12:
raise ScatteringTriangleError(
'KI,KF,Q triangle will not close. Change the value of KFIX,FX,QH,QK or QL.')
else:
s2theta = np.real(s2theta)
# correct sign of curvatures
monorh = monorh * sm
monorv = monorv * sm
anarh = anarh * sa
anarv = anarv * sa
thetas = s2theta / 2.
phi = np.arctan2(-kf * np.sin(s2theta), ki - kf * np.cos(s2theta))
# Calculate beam divergences defined by neutron guides
alpha[alpha < 0] = -alpha[alpha < 0] * 0.1 * 60. * (2. * np.pi / ki) / 0.427 / np.sqrt(3.)
beta[beta < 0] = -beta[beta < 0] * 0.1 * 60. * (2. * np.pi / ki) / 0.427 / np.sqrt(3.)
# Redefine sample geometry
psi = thetas - phi # Angle from sample geometry X axis to Q
rot = np.matrix([[np.cos(psi), np.sin(psi), 0],
[-np.sin(psi), np.cos(psi), 0],
[0, 0, 1]], dtype=np.float64)
# sshape=rot'*sshape*rot
sshape = np.matrix(rot) * np.matrix(sshape) * np.matrix(rot).H
# Definition of matrix G
G = np.matrix(
np.diag(1. / np.array([alpha[:2], beta[:2], alpha[2:], beta[2:]], dtype=np.float64).flatten() ** 2))
# Definition of matrix F
F = np.matrix(np.diag(1. / np.array([etam, etamv, etaa, etaav], dtype=np.float64) ** 2))
# Definition of matrix A
A = np.matrix([[ki / 2. / np.tan(thetam), -ki / 2. / np.tan(thetam), 0, 0, 0, 0, 0, 0],
[0, ki, 0, 0, 0, 0, 0, 0],
[0, 0, 0, ki, 0, 0, 0, 0],
[0, 0, 0, 0, kf / 2. / np.tan(thetaa), -kf / 2. / np.tan(thetaa), 0, 0],
[0, 0, 0, 0, kf, 0, 0, 0],
[0, 0, 0, 0, 0, 0, kf, 0]], dtype=np.float64)
# Definition of matrix C
C = np.matrix([[0.5, 0.5, 0, 0, 0, 0, 0, 0],
[0., 0., 1. / (2. * np.sin(thetam)), -1. / (2. * np.sin(thetam)), 0, 0, 0, 0],
[0, 0, 0, 0, 0.5, 0.5, 0, 0],
[0, 0, 0, 0, 0, 0, 1. / (2. * np.sin(thetaa)), -1. / (2. * np.sin(thetaa))]],
dtype=np.float64)
# Definition of matrix Bmatrix
Bmatrix = np.matrix([[np.cos(phi), np.sin(phi), 0, -np.cos(phi - s2theta), -np.sin(phi - s2theta), 0],
[-np.sin(phi), np.cos(phi), 0, np.sin(phi - s2theta), -np.cos(phi - s2theta), 0],
[0, 0, 1, 0, 0, -1],
[2. * CONVERT2 * ki, 0, 0, -2. * CONVERT2 * kf, 0, 0]], dtype=np.float64)
# Definition of matrix S
Sinv = np.matrix(blkdiag(np.array(bshape, dtype=np.float64), mshape, sshape, ashape, dshape)) # S-1 matrix
S = Sinv.I
# Definition of matrix T
T = np.matrix([[-1. / (2. * L0), 0, np.cos(thetam) * (1. / L1 - 1. / L0) / 2.,
np.sin(thetam) * (1. / L0 + 1. / L1 - 2. / (monorh * np.sin(thetam))) / 2., 0,
np.sin(thetas) / (2. * L1), np.cos(thetas) / (2. * L1), 0, 0, 0, 0, 0, 0],
[0, -1. / (2. * L0 * np.sin(thetam)), 0, 0,
(1. / L0 + 1. / L1 - 2. * np.sin(thetam) / monorv) / (2. * np.sin(thetam)), 0, 0,
-1. / (2. * L1 * np.sin(thetam)), 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, np.sin(thetas) / (2. * L2), -np.cos(thetas) / (2. * L2), 0,
np.cos(thetaa) * (1. / L3 - 1. / L2) / 2.,
np.sin(thetaa) * (1. / L2 + 1. / L3 - 2. / (anarh * np.sin(thetaa))) / 2., 0,
1. / (2. * L3), 0],
[0, 0, 0, 0, 0, 0, 0, -1. / (2. * L2 * np.sin(thetaa)), 0, 0,
(1. / L2 + 1. / L3 - 2. * np.sin(thetaa) / anarv) / (2. * np.sin(thetaa)), 0,
-1. / (2. * L3 * np.sin(thetaa))]], dtype=np.float64)
# Definition of matrix D
# Lots of index mistakes in paper for matrix D
D = np.matrix([[-1. / L0, 0, -np.cos(thetam) / L0, np.sin(thetam) / L0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, np.cos(thetam) / L1, np.sin(thetam) / L1, 0, np.sin(thetas) / L1, np.cos(thetas) / L1,
0, 0, 0, 0, 0, 0],
[0, -1. / L0, 0, 0, 1. / L0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, -1. / L1, 0, 0, 1. / L1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, np.sin(thetas) / L2, -np.cos(thetas) / L2, 0, -np.cos(thetaa) / L2,
np.sin(thetaa) / L2, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, np.cos(thetaa) / L3, np.sin(thetaa) / L3, 0, 1. / L3, 0],
[0, 0, 0, 0, 0, 0, 0, -1. / L2, 0, 0, 1. / L2, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1. / L3, 0, 1. / L3]], dtype=np.float64)
# Definition of resolution matrix M
if method == 1 or method == 'popovici':
K = S + T.H * F * T
H = np.linalg.inv(D * np.linalg.inv(K) * D.H)
Ninv = A * np.linalg.inv(H + G) * A.H
else:
H = G + C.H * F * C
Ninv = A * np.linalg.inv(H) * A.H
# Horizontally focusing analyzer if needed
if horifoc > 0:
Ninv = np.linalg.inv(Ninv)
Ninv[3:5, 3:5] = np.matrix([[(np.tan(thetaa) / (etaa * kf)) ** 2, 0],
[0, (1 / (kf * alpha[2])) ** 2]], dtype=np.float64)
Ninv = np.linalg.inv(Ninv)
Minv = Bmatrix * Ninv * Bmatrix.H
M = np.linalg.inv(Minv)
RM_ = np.copy(M)
# Calculation of prefactor, normalized to source
Rm = ki ** 3 / np.tan(thetam)
Ra = kf ** 3 / np.tan(thetaa)
R0_ = Rm * Ra * (2. * np.pi) ** 4 / (64. * np.pi ** 2 * np.sin(thetam) * np.sin(thetaa))
if method == 1 or method == 'popovici':
# Popovici
R0_ = R0_ * np.sqrt(np.linalg.det(F) / np.linalg.det(H + G))
else:
# Cooper-Nathans (popovici Eq 5 and 9)
R0_ = R0_ * np.sqrt(np.linalg.det(F) / np.linalg.det(H))
# Normalization to flux on monitor
if moncor == 1:
g = G[:4, :4]
f = F[:2, :2]
c = C[:2, :4]
t = np.matrix([[-1. / (2. * L0), 0, np.cos(thetam) * (1. / L1mon - 1. / L0) / 2.,
np.sin(thetam) * (1. / L0 + 1. / L1mon - 2. / (monorh * np.sin(thetam))) / 2., 0, 0,
1. / (2. * L1mon)],
[0, -1. / (2. * L0 * np.sin(thetam)), 0, 0,
(1. / L0 + 1. / L1mon - 2. * np.sin(thetam) / monorv) / (2. * np.sin(thetam)), 0, 0]],
dtype=np.float64)
sinv = blkdiag(np.array(bshape, dtype=np.float64), mshape, monitorshape) # S-1 matrix
s = np.linalg.inv(sinv)
d = np.matrix([[-1. / L0, 0, -np.cos(thetam) / L0, np.sin(thetam) / L0, 0, 0, 0],
[0, 0, np.cos(thetam) / L1mon, np.sin(thetam) / L1mon, 0, 0, 1. / L1mon],
[0, -1. / L0, 0, 0, 1. / L0, 0, 0],
[0, 0, 0, 0, -1. / L1mon, 0, 0]], dtype=np.float64)
if method == 1 or method == 'popovici':
# Popovici
Rmon = Rm * (2 * np.pi) ** 2 / (8 * np.pi * np.sin(thetam)) * np.sqrt(
np.linalg.det(f) / np.linalg.det(np.linalg.inv(d * np.linalg.inv(s + t.H * f * t) * d.H) + g))
else:
# Cooper-Nathans
Rmon = Rm * (2 * np.pi) ** 2 / (8 * np.pi * np.sin(thetam)) * np.sqrt(
np.linalg.det(f) / np.linalg.det(g + c.H * f * c))
R0_ = R0_ / Rmon
R0_ = R0_ * ki # 1/ki monitor efficiency
# Transform prefactor to Chesser-Axe normalization
R0_ = R0_ / (2. * np.pi) ** 2 * np.sqrt(np.linalg.det(RM_))
# Include kf/ki part of cross section
R0_ = R0_ * kf / ki
# Take care of sample mosaic if needed
# [<NAME> & <NAME>, J. Appl. Phys. 42, 4736, (1971), eq 19]
if hasattr(sample, 'mosaic'):
etas = sample.mosaic * CONVERT1
etasv = np.copy(etas)
if hasattr(sample, 'vmosaic'):
etasv = sample.vmosaic * CONVERT1
R0_ = R0_ / np.sqrt((1 + (q * etas) ** 2 * RM_[2, 2]) * (1 + (q * etasv) ** 2 * RM_[1, 1]))
Minv[1, 1] = Minv[1, 1] + q ** 2 * etas ** 2
Minv[2, 2] = Minv[2, 2] + q ** 2 * etasv ** 2
RM_ = np.linalg.inv(Minv)
# Take care of analyzer reflectivity if needed [<NAME>, BNL]
if hasattr(ana, 'thickness') and hasattr(ana, 'Q'):
KQ = ana.Q
KT = ana.thickness
toa = (taua / 2.) / np.sqrt(kf ** 2 - (taua / 2.) ** 2)
smallest = alpha[3]
if alpha[3] > alpha[2]:
smallest = alpha[2]
Qdsint = KQ * toa
dth = (np.arange(1, 201) / 200.) * np.sqrt(2. * np.log(2.)) * smallest
wdth = np.exp(-dth ** 2 / 2. / etaa ** 2)
sdth = KT * Qdsint * wdth / etaa / np.sqrt(2. * np.pi)
rdth = 1. / (1 + 1. / sdth)
reflec = sum(rdth) / sum(wdth)
R0_ = R0_ * reflec
R0[ind] = R0_
RM[ind] = RM_.copy()
return [R0, RM]
def calc_resolution(self, hkle):
r"""For a scattering vector (H,K,L) and energy transfers W, given
experimental conditions specified in EXP, calculates the Cooper-Nathans
resolution matrix RMS and Cooper-Nathans Resolution prefactor R0 in a
coordinate system defined by the crystallographic axes of the sample.
Parameters
----------
hkle : list
Array of the scattering vector and energy transfer at which the
calculation should be performed
Notes
-----
Translated from ResLib, originally authored by <NAME>, 1999-2007,
Oak Ridge National Laboratory
"""
self.HKLE = hkle
[H, K, L, W] = hkle
[length, H, K, L, W] = _CleanArgs(H, K, L, W)
self.H, self.K, self.L, self.W = H, K, L, W
[x, y, z, sample, rsample] = self._StandardSystem()
del z, sample
Q = _modvec([H, K, L], rsample)
uq = np.vstack((H / Q, K / Q, L / Q))
xq = _scalar(x, uq, rsample)
yq = _scalar(y, uq, rsample)
tmat = np.array(
[np.array([[xq[i], yq[i], 0, 0], [-yq[i], xq[i], 0, 0], [0, 0, 1., 0], [0, 0, 0, 1.]], dtype=np.float64) for i in range(len(xq))])
RMS = np.zeros((length, 4, 4), dtype=np.float64)
rot = np.zeros((3, 3), dtype=np.float64)
# Sample shape matrix in coordinate system defined by scattering vector
sample = self.sample
if hasattr(sample, 'shape'):
samples = []
for i in range(length):
rot = tmat[i, :3, :3]
samples.append(np.matrix(rot) * np.matrix(sample.shape) * np.matrix(rot).H)
self.sample.shape = np.array(samples)
[R0, RM] = self.calc_resolution_in_Q_coords(Q, W)
for i in range(length):
RMS[i] = np.matrix(tmat[i]).H * np.matrix(RM[i]) * np.matrix(tmat[i])
e = np.identity(4)
for i in range(length):
if hasattr(self, 'Smooth'):
if self.Smooth.X:
mul = np.diag([1 / (self.Smooth.X ** 2 / 8 / np.log(2)),
1 / (self.Smooth.Y ** 2 / 8 / np.log(2)),
1 / (self.Smooth.E ** 2 / 8 / np.log(2)),
1 / (self.Smooth.Z ** 2 / 8 / np.log(2))])
R0[i] = R0[i] / np.sqrt(np.linalg.det(np.matrix(e) / np.matrix(RMS[i]))) * np.sqrt(
np.linalg.det(np.matrix(e) / np.matrix(mul) + np.matrix(e) / np.matrix(RMS[i])))
RMS[i] = np.matrix(e) / (
np.matrix(e) / np.matrix(mul) + np.matrix(e) / np.matrix(RMS[i]))
self.R0, self.RMS, self.RM = [np.squeeze(item) for item in (R0, RMS, RM)]
def get_angles_and_Q(self, hkle):
r"""Returns the Triple Axis Spectrometer angles and Q-vector given
position in reciprocal space
Parameters
----------
hkle : list
Array of the scattering vector and energy transfer at which the
calculation should be performed
Returns
-------
[A, Q] : list
The angles A (A1 -- A6 as a list of floats, in degrees) and Q (ndarray)
"""
# compute all TAS angles (in plane)
h, k, l, w = hkle
# compute angles
try:
fx = 2 * int(self.infin == -1) + int(self.infin == 1)
except AttributeError:
fx = 2
kfix = Energy(energy=self.efixed).wavevector
f = 0.4826 # f converts from energy units into k^2, f=0.4826 for meV
ki = np.sqrt(kfix ** 2 + (fx - 1) * f * w) # kinematical equations.
kf = np.sqrt(kfix ** 2 - (2 - fx) * f * w)
# compute the transversal Q component, and A3 (sample rotation)
# from McStas templateTAS.instr and TAS MAD ILL
a = np.array([self.sample.a, self.sample.b, self.sample.c]) / (2 * np.pi)
alpha = np.deg2rad([self.sample.alpha, self.sample.beta, self.sample.gamma])
cosa = np.cos(alpha)
sina = np.sin(alpha)
cc = np.sum(cosa * cosa)
cc = 1 + 2 * np.prod(cosa) - cc
cc = np.sqrt(cc)
b = sina / (a * cc)
c1 = np.roll(cosa[np.newaxis].T, -1)
c2 = np.roll(c1, -1)
s1 = np.roll(sina[np.newaxis].T, -1)
s2 = np.roll(s1, -1)
cosb = (c1 * c2 - cosa[np.newaxis].T) / (s1 * s2)
sinb = np.sqrt(1 - cosb * cosb)
bb = np.array([[b[0], 0, 0],
[b[1] * cosb[2], b[1] * sinb[2], 0],
[b[2] * cosb[1], -b[2] * sinb[1] * cosa[0], 1 / a[2]]])
bb = bb.T
aspv = np.hstack((self.orient1[np.newaxis].T, self.orient2[np.newaxis].T))
vv = np.zeros((3, 3))
vv[0:2, :] = np.transpose(np.dot(bb, aspv))
for m in range(2, 0, -1):
vt = np.roll(np.roll(vv, -1, axis=0), -1, axis=1) * np.roll(np.roll(vv, -2, axis=0), -2, axis=1) - np.roll(
np.roll(vv, -1, axis=0), -2, axis=1) * np.roll(np.roll(vv, -2, axis=0), -1, axis=1)
vv[m, :] = vt[m, :]
c = np.sqrt(np.sum(vv * vv, axis=0))
vv = vv / np.tile(c, (3, 1))
s = vv.T * bb
qt = np.squeeze(np.dot(np.array([h, k, l]).T, s.T))
qs = np.sum(qt ** 2)
Q = np.sqrt(qs)
sm = self.mono.dir
ss = self.sample.dir
sa = self.ana.dir
dm = 2 * np.pi / GetTau(self.mono.tau)
da = 2 * np.pi / GetTau(self.ana.tau)
thetaa = sa * np.arcsin(np.pi / (da * kf)) # theta angles for analyser
thetam = sm * np.arcsin(np.pi / (dm * ki)) # and monochromator.
thetas = ss * 0.5 * np.arccos((ki ** 2 + kf ** 2 - Q ** 2) / (2 * ki * kf)) # scattering angle from sample.
A3 = -np.arctan2(qt[1], qt[0]) - np.arccos(
(np.dot(kf, kf) - np.dot(Q, Q) - np.dot(ki, ki)) / (-2 * np.dot(Q, ki)))
A3 = ss * A3
A1 = thetam
A2 = 2 * A1
A4 = 2 * thetas
A5 = thetaa
A6 = 2 * A5
A = np.rad2deg([np.squeeze(a) for a in [A1, A2, A3, A4, A5, A6]])
return [A, Q]
def resolution_convolution(self, sqw, pref, nargout, hkle, METHOD='fix', ACCURACY=None, p=None, seed=None):
r"""Numerically calculate the convolution of a user-defined
cross-section function with the resolution function for a
3-axis neutron scattering experiment.
Parameters
----------
sqw : func
User-supplied "fast" model cross section.
pref : func
User-supplied "slow" cross section prefactor and background
function.
nargout : int
Number of arguments returned by the pref function
hkle : tup
Tuple of H, K, L, and W, specifying the wave vector and energy
transfers at which the convolution is to be calculated (i.e.
define $\mathbf{Q}_0$). H, K, and L are given in reciprocal
lattice units and W in meV.
EXP : obj
Instrument object containing all information on experimental setup.
METHOD : str
Specifies which 4D-integration method to use. 'fix' (Default):
sample the cross section on a fixed grid of points uniformly
distributed $\phi$-space. 2*ACCURACY[0]+1 points are sampled
along $\phi_1$, $\phi_2$, and $\phi_3$, and 2*ACCURACY[1]+1
along $\phi_4$ (vertical direction). 'mc': 4D Monte Carlo
integration. The cross section is sampled in 1000*ACCURACY
randomly chosen points, uniformly distributed in $\phi$-space.
ACCURACY : array(2) or int
Determines the number of sampling points in the integration.
p : list
A parameter that is passed on, without change to sqw and pref.
Returns
-------
conv : array
Calculated value of the cross section, folded with the resolution
function at the given $\mathbf{Q}_0$
Notes
-----
Translated from ResLib 3.4c, originally authored by <NAME>,
1999-2007, Oak Ridge National Laboratory
"""
self.calc_resolution(hkle)
[R0, RMS] = [np.copy(self.R0), self.RMS.copy()]
H, K, L, W = hkle
[length, H, K, L, W] = _CleanArgs(H, K, L, W)
[xvec, yvec, zvec] = self._StandardSystem()[:3]
Mxx = RMS[:, 0, 0]
Mxy = RMS[:, 0, 1]
Mxw = RMS[:, 0, 3]
Myy = RMS[:, 1, 1]
Myw = RMS[:, 1, 3]
Mzz = RMS[:, 2, 2]
Mww = RMS[:, 3, 3]
Mxx -= Mxw ** 2. / Mww
Mxy -= Mxw * Myw / Mww
Myy -= Myw ** 2. / Mww
MMxx = Mxx - Mxy ** 2. / Myy
detM = MMxx * Myy * Mzz * Mww
tqz = 1. / np.sqrt(Mzz)
tqx = 1. / np.sqrt(MMxx)
tqyy = 1. / np.sqrt(Myy)
tqyx = -Mxy / Myy / np.sqrt(MMxx)
tqww = 1. / np.sqrt(Mww)
tqwy = -Myw / Mww / np.sqrt(Myy)
tqwx = -(Mxw / Mww - Myw / Mww * Mxy / Myy) / np.sqrt(MMxx)
inte = sqw(H, K, L, W, p)
[modes, points] = inte.shape
if pref is None:
prefactor = np.ones((modes, points))
bgr = 0
else:
if nargout == 2:
[prefactor, bgr] = pref(H, K, L, W, self, p)
elif nargout == 1:
prefactor = pref(H, K, L, W, self, p)
bgr = 0
else:
raise ValueError('Invalid number or output arguments in prefactor function')
if METHOD == 'fix':
if ACCURACY is None:
ACCURACY = np.array([7, 0])
M = ACCURACY
step1 = np.pi / (2 * M[0] + 1)
step2 = np.pi / (2 * M[1] + 1)
dd1 = np.linspace(-np.pi / 2 + step1 / 2, np.pi / 2 - step1 / 2, (2 * M[0] + 1))
dd2 = np.linspace(-np.pi / 2 + step2 / 2, np.pi / 2 - step2 / 2, (2 * M[1] + 1))
convs = np.zeros((modes, length))
conv = np.zeros(length)
[cw, cx, cy] = np.meshgrid(dd1, dd1, dd1, indexing='ij')
tx = np.tan(cx)
ty = np.tan(cy)
tw = np.tan(cw)
tz = np.tan(dd2)
norm = np.exp(-0.5 * (tx ** 2 + ty ** 2)) * (1 + tx ** 2) * (1 + ty ** 2) * np.exp(-0.5 * (tw ** 2)) * (
1 + tw ** 2)
normz = np.exp(-0.5 * (tz ** 2)) * (1 + tz ** 2)
for iz in range(len(tz)):
for i in range(length):
dQ1 = tqx[i] * tx
dQ2 = tqyy[i] * ty + tqyx[i] * tx
dW = tqwx[i] * tx + tqwy[i] * ty + tqww[i] * tw
dQ4 = tqz[i] * tz[iz]
H1 = H[i] + dQ1 * xvec[0] + dQ2 * yvec[0] + dQ4 * zvec[0]
K1 = K[i] + dQ1 * xvec[1] + dQ2 * yvec[1] + dQ4 * zvec[1]
L1 = L[i] + dQ1 * xvec[2] + dQ2 * yvec[2] + dQ4 * zvec[2]
W1 = W[i] + dW
inte = sqw(H1, K1, L1, W1, p)
for j in range(modes):
add = inte[j, :] * norm * normz[iz]
convs[j, i] = convs[j, i] + np.sum(add)
conv[i] = np.sum(convs[:, i] * prefactor[:, i])
conv = conv * step1 ** 3 * step2 / np.sqrt(detM)
if M[1] == 0:
conv *= 0.79788
if M[0] == 0:
conv *= 0.79788 ** 3
elif METHOD == 'mc':
if isinstance(ACCURACY, (list, np.ndarray, tuple)):
if len(ACCURACY) == 1:
ACCURACY = ACCURACY[0]
else:
raise ValueError('ACCURACY must be an int when using Monte Carlo method: {0}'.format(ACCURACY))
if ACCURACY is None:
ACCURACY = 10
M = ACCURACY
convs = np.zeros((modes, length))
conv = np.zeros(length)
for i in range(length):
for MonteCarlo in range(M):
if seed is not None:
np.random.seed(seed)
r = np.random.randn(4, 1000) * np.pi - np.pi / 2
cx = r[0, :]
cy = r[1, :]
cz = r[2, :]
cw = r[3, :]
tx = np.tan(cx)
ty = np.tan(cy)
tz = np.tan(cz)
tw = np.tan(cw)
norm = np.exp(-0.5 * (tx ** 2 + ty ** 2 + tz ** 2 + tw ** 2)) * (1 + tx ** 2) * (1 + ty ** 2) * (
1 + tz ** 2) * (1 + tw ** 2)
dQ1 = tqx[i] * tx
dQ2 = tqyy[i] * ty + tqyx[i] * tx
dW = tqwx[i] * tx + tqwy[i] * ty + tqww[i] * tw
dQ4 = tqz[i] * tz
H1 = H[i] + dQ1 * xvec[0] + dQ2 * yvec[0] + dQ4 * zvec[0]
K1 = K[i] + dQ1 * xvec[1] + dQ2 * yvec[1] + dQ4 * zvec[1]
L1 = L[i] + dQ1 * xvec[2] + dQ2 * yvec[2] + dQ4 * zvec[2]
W1 = W[i] + dW
inte = sqw(H1, K1, L1, W1, p)
for j in range(modes):
add = inte[j, :] * norm
convs[j, i] = convs[j, i] + np.sum(add)
conv[i] = np.sum(convs[:, i] * prefactor[:, i])
conv = conv / M / 1000 * np.pi ** 4. / np.sqrt(detM)
else:
raise ValueError('Unknown METHOD: {0}. Valid options are: "fix", "mc"'.format(METHOD))
conv *= R0
conv += bgr
return conv
def resolution_convolution_SMA(self, sqw, pref, nargout, hkle, METHOD='fix', ACCURACY=None, p=None, seed=None):
r"""Numerically calculate the convolution of a user-defined single-mode
cross-section function with the resolution function for a 3-axis
neutron scattering experiment.
Parameters
----------
sqw : func
User-supplied "fast" model cross section.
pref : func
User-supplied "slow" cross section prefactor and background
function.
nargout : int
Number of arguments returned by the pref function
hkle : tup
Tuple of H, K, L, and W, specifying the wave vector and energy
transfers at which the convolution is to be calculated (i.e.
define $\mathbf{Q}_0$). H, K, and L are given in reciprocal
lattice units and W in meV.
EXP : obj
Instrument object containing all information on experimental setup.
METHOD : str
Specifies which 3D-integration method to use. 'fix' (Default):
sample the cross section on a fixed grid of points uniformly
distributed $\phi$-space. 2*ACCURACY[0]+1 points are sampled
along $\phi_1$, and $\phi_2$, and 2*ACCURACY[1]+1 along $\phi_3$
(vertical direction). 'mc': 3D Monte Carlo integration. The cross
section is sampled in 1000*ACCURACY randomly chosen points,
uniformly distributed in $\phi$-space.
ACCURACY : array(2) or int
Determines the number of sampling points in the integration.
p : list
A parameter that is passed on, without change to sqw and pref.
Returns
-------
conv : array
Calculated value of the cross section, folded with the resolution
function at the given $\mathbf{Q}_0$
Notes
-----
Translated from ResLib 3.4c, originally authored by <NAME>,
1999-2007, Oak Ridge National Laboratory
"""
self.calc_resolution(hkle)
[R0, RMS] = [np.copy(self.R0), self.RMS.copy()]
H, K, L, W = hkle
[length, H, K, L, W] = _CleanArgs(H, K, L, W)
[xvec, yvec, zvec] = self._StandardSystem()[:3]
Mww = RMS[:, 3, 3]
Mxw = RMS[:, 0, 3]
Myw = RMS[:, 1, 3]
GammaFactor = np.sqrt(Mww / 2)
OmegaFactorx = Mxw / np.sqrt(2 * Mww)
OmegaFactory = Myw / np.sqrt(2 * Mww)
Mzz = RMS[:, 2, 2]
Mxx = RMS[:, 0, 0]
Mxx -= Mxw ** 2 / Mww
Myy = RMS[:, 1, 1]
Myy -= Myw ** 2 / Mww
Mxy = RMS[:, 0, 1]
Mxy -= Mxw * Myw / Mww
detxy = np.sqrt(Mxx * Myy - Mxy ** 2)
detz = np.sqrt(Mzz)
tqz = 1. / detz
tqy = np.sqrt(Mxx) / detxy
tqxx = 1. / np.sqrt(Mxx)
tqxy = Mxy / np.sqrt(Mxx) / detxy
[disp, inte] = sqw(H, K, L, p)[:2]
[modes, points] = disp.shape
if pref is None:
prefactor = np.ones((modes, points))
#******************************************************************************
#
# tempoGAN: A Temporally Coherent, Volumetric GAN for Super-resolution Fluid Flow
# Copyright 2018 <NAME>, <NAME>, <NAME>, <NAME>
#
# This program is free software, distributed under the terms of the
# Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
#******************************************************************************
import os, math
import shutil, sys
from random import seed, random, randrange
import uniio
import numpy as np
import scipy.misc
import scipy.ndimage
import imageio
# check whether matplotlib is available to generate vector/quiver plots
import imp
try:
imp.find_module('matplotlib')
import matplotlib.pyplot
found_matplotlib = True
except ImportError:
found_matplotlib = False
#import matplotlib.pyplot as plt
# global channel keys, have to be one char
C_KEY_DEFAULT = 'd'
C_KEY_VELOCITY = 'v'
C_KEY_VORTICITY = 'x'
C_KEY_POSITION = 'p'
DATA_KEY_LOW = 0
DATA_KEY_HIGH= 1
#keys for augmentation operations
AOPS_KEY_ROTATE = 'rot'
AOPS_KEY_SCALE = 'scale'
AOPS_KEY_ROT90 = 'rot90'
AOPS_KEY_FLIP = 'flip'
seed( 42 )
# default channel layouts
C_LAYOUT = {
'dens':C_KEY_DEFAULT,
'dens_vel':'d,vx,vy,vz'
}
class TileCreator(object):
def __init__(self, tileSizeLow, simSizeLow=64, upres=2, dim=2, dim_t=1, overlapping=0, densityMinimum=0.02, premadeTiles=False, partTrain=0.8, partTest=0.2, partVal=0, channelLayout_low=C_LAYOUT['dens_vel'], channelLayout_high=C_LAYOUT['dens'], highIsLabel=False, loadPN=False, padding=0):
'''
tileSizeLow, simSizeLow: int, [int,int] if 2D, [int,int,int]
channelLayout: 'key,key,...'
the keys are NOT case sensitive and leading and trailing whitespace characters are REMOVED.
key:
default: d
velocity: v[label](x|y|z)
label can be arbitrary or empty,
key must be unique and x,y must exist while z is optional in 2D, x,y,z must exist in 3D.
if x does not exist y,z will be ignored (treaded as 'd').
rest is not yet supported
premadeTiles: cut regular tiles when loading data, can't use data augmentation
part(Train|Test|Val): relative size of the different data sets
highIsLabel: high data is not augmented
loadHigh:
simPath: path to the uni simulation files
loadPath: packed simulations are stored here
'''
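# Hedged construction sketch (the sizes and channel layouts below are illustrative assumptions):
#
#     tc = TileCreator(tileSizeLow=16, simSizeLow=64, upres=4, dim=2,
#                      channelLayout_low='d,vx,vy,vz', channelLayout_high='d')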
# DATA DIMENSION
self.dim_t = dim_t # same for hi_res or low_res
if dim!=2 and dim!=3:
self.TCError('Data dimension must be 2 or 3.')
self.dim = dim
# TILE SIZE
if np.isscalar(tileSizeLow):
self.tileSizeLow = [tileSizeLow, tileSizeLow, tileSizeLow]
elif len(tileSizeLow)==2 and self.dim==2:
self.tileSizeLow = [1]+tileSizeLow
elif len(tileSizeLow)==3:
self.tileSizeLow = tileSizeLow
else:
self.TCError('Tile size mismatch.')
self.tileSizeLow = np.asarray(self.tileSizeLow)
#SIM SIZE
if np.isscalar(simSizeLow):
self.simSizeLow = [simSizeLow, simSizeLow, simSizeLow]
elif len(simSizeLow)==2 and self.dim==2:
self.simSizeLow = [1]+simSizeLow
elif len(simSizeLow)==3:
self.simSizeLow = simSizeLow
else:
self.TCError('Simulation size mismatch.')
self.simSizeLow = np.asarray(self.simSizeLow)
if upres < 1:
self.TCError('Upres must be at least 1.')
self.upres = upres
if not highIsLabel:
self.tileSizeHigh = self.tileSizeLow*upres
self.simSizeHigh = self.simSizeLow*upres
else:
self.tileSizeHigh = np.asarray([1])
self.simSizeHigh = np.asarray([1])
if self.dim==2:
self.tileSizeLow[0]=1
self.tileSizeHigh[0]=1
self.simSizeLow[0]=1
self.simSizeHigh[0]=1
if np.less(self.simSizeLow, self.tileSizeLow).any():
self.TCError('Tile size {} can not be larger than sim size {}.'.format(self.tileSizeLow, self.simSizeLow))
if densityMinimum<0.:
self.TCError('densityMinimum can not be negative.')
self.densityMinimum = densityMinimum
self.premadeTiles = premadeTiles
self.useDataAug = False
#CHANNELS
self.c_lists = {}
self.c_low, self.c_lists[DATA_KEY_LOW] = self.parseChannels(channelLayout_low)
self.c_high, self.c_lists[DATA_KEY_HIGH] = self.parseChannels(channelLayout_high)
# print info
print('\n')
print('Dimension: {}, time dimension: {}'.format(self.dim,self.dim_t))
print('Low-res data:')
print(' channel layout: {}'.format(self.c_low))
print(' default channels: {}'.format(self.c_lists[DATA_KEY_LOW][C_KEY_DEFAULT]))
if len(self.c_lists[DATA_KEY_LOW][C_KEY_VELOCITY])>0:
print(' velocity channels: {}'.format(self.c_lists[DATA_KEY_LOW][C_KEY_VELOCITY]))
if len(self.c_lists[DATA_KEY_LOW][C_KEY_VORTICITY])>0:
print(' vorticity channels: {}'.format(self.c_lists[DATA_KEY_LOW][C_KEY_VORTICITY]))
print('High-res data:')
if highIsLabel:
print(' is Label')
print(' channel layout: {}'.format(self.c_high))
print(' default channels: {}'.format(self.c_lists[DATA_KEY_HIGH][C_KEY_DEFAULT]))
if len(self.c_lists[DATA_KEY_HIGH][C_KEY_VELOCITY])>0:
print(' velocity channels: {}'.format(self.c_lists[DATA_KEY_HIGH][C_KEY_VELOCITY]))
if len(self.c_lists[DATA_KEY_HIGH][C_KEY_VORTICITY])>0:
print(' vorticity channels: {}'.format(self.c_lists[DATA_KEY_HIGH][C_KEY_VORTICITY]))
#self.channels=len(self.c)
self.data_flags = {
DATA_KEY_LOW:{
'isLabel':False,
'channels':len(self.c_low),
C_KEY_VELOCITY:len(self.c_lists[DATA_KEY_LOW][C_KEY_VELOCITY])>0,
C_KEY_VORTICITY:len(self.c_lists[DATA_KEY_LOW][C_KEY_VORTICITY])>0,
C_KEY_POSITION:False
},
DATA_KEY_HIGH:{
'isLabel':highIsLabel,
'channels':len(self.c_high),
C_KEY_VELOCITY:len(self.c_lists[DATA_KEY_HIGH][C_KEY_VELOCITY])>0,
C_KEY_VORTICITY:len(self.c_lists[DATA_KEY_HIGH][C_KEY_VORTICITY])>0,
C_KEY_POSITION:False
}
}
if loadPN:
self.TCError('prev and next tiles not supported.')
self.hasPN = loadPN
self.padding=padding
#if self.hasPN:
#[z,y,x, velocities an/or position if enabled (density,vel,vel,vel, pos, pos [,pos])]
#DATA SHAPES
self.tile_shape_low = np.append(self.tileSizeLow, [self.data_flags[DATA_KEY_LOW]['channels']])
self.frame_shape_low = np.append(self.simSizeLow, [self.data_flags[DATA_KEY_LOW]['channels']])
if not self.data_flags[DATA_KEY_HIGH]['isLabel']:
self.tile_shape_high = np.append(self.tileSizeHigh, [self.data_flags[DATA_KEY_HIGH]['channels']])
self.frame_shape_high = np.append(self.simSizeHigh, [self.data_flags[DATA_KEY_HIGH]['channels']])
else:
self.tile_shape_high = self.tileSizeHigh[:]
self.frame_shape_high = self.simSizeHigh[:]
self.densityThreshold = (self.densityMinimum * self.tile_shape_low[0] * self.tile_shape_low[1] * self.tile_shape_low[2])
self.data = {
DATA_KEY_LOW:[],
DATA_KEY_HIGH:[]
}
all=partTrain+partTest+partVal
self.part_train=partTrain/all
self.part_test=partTest/all
self.part_validation=partVal/all
def initDataAugmentation(self, rot=2, minScale=0.85, maxScale=1.15 ,flip=True):
'''
set up data augmentation
rot: 1: 90 degree rotations; 2: full rotation; else: no rotation
minScale, maxScale: if both are 1, scaling is disabled
flip: enable random flipping along the axes
'''
self.useDataAug = True
"""
specify the special augmentation operations needed for some channel types here;
they are only applied if the specified channel type is present in the data.
** A tempo datum may have multiple channels as coherent frames, [batch, z, y, x, t*channels].
** They are reshaped to [batch, z, y, x, t, channels] before these aops and reshaped back afterwards.
** Because of this extra time dimension, all aops may only do per-element calculations, e.g. value scaling;
** any calculation relying on a neighborhood, e.g. spatial scaling (zoom), would be wrong here.
"""
self.aops = {
DATA_KEY_LOW:{
AOPS_KEY_ROTATE:{
C_KEY_VELOCITY:self.rotateVelocities,
C_KEY_VORTICITY:self.rotateVelocities
},
AOPS_KEY_SCALE:{
C_KEY_VELOCITY:self.scaleVelocities,
C_KEY_VORTICITY:self.scaleVelocities
},
AOPS_KEY_ROT90:{
C_KEY_VELOCITY:self.rotate90Velocities,
C_KEY_VORTICITY:self.rotate90Velocities
},
AOPS_KEY_FLIP:{
C_KEY_VELOCITY:self.flipVelocities,
C_KEY_VORTICITY:self.flipVelocities
}
},
DATA_KEY_HIGH:{
AOPS_KEY_ROTATE:{
C_KEY_VELOCITY:self.rotateVelocities,
C_KEY_VORTICITY:self.rotateVelocities
},
AOPS_KEY_SCALE:{
C_KEY_VELOCITY:self.scaleVelocities,
C_KEY_VORTICITY:self.scaleVelocities
},
AOPS_KEY_ROT90:{
C_KEY_VELOCITY:self.rotate90Velocities,
C_KEY_VORTICITY:self.rotate90Velocities
},
AOPS_KEY_FLIP:{
C_KEY_VELOCITY:self.flipVelocities,
C_KEY_VORTICITY:self.flipVelocities
}
}
}
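# self.aops maps data key -> augmentation operation -> channel type -> handler;
# vorticity channels reuse the velocity handlers, i.e. they are treated as plain
# 3-component vectors by these transforms.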
msg = 'data augmentation: '
if rot==2:
self.do_rotation = True
self.do_rot90 = False
msg += 'rotation, '
elif rot==1:
self.do_rotation = False
self.do_rot90 = True
msg += 'rot90, '
z=(2,1)
nz=(1,2)
x=(1,0)
y=(0,2)
nx=(0,1)
ny=(2,0)
# thanks to http://www.euclideanspace.com/maths/discrete/groups/categorise/finite/cube/
self.cube_rot = {2: [[],[z],[z,z],[nz]], 3: [[],[x],[y],[x,x],[x,y],[y,x],[y,y],[nx],[x,x,y],[x,y,x],[x,y,y],[y,x,x],[y,y,x],[ny],[nx,y],[x,x,y,x],[x,x,y,y],[x,y,x,x],[x,ny],[y,nx],[ny,x],[nx,y,x],[x,y,nx],[x,ny,x]]}
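# cube_rot enumerates the orientations reachable by 90-degree axis rotations:
# the 4 in-plane rotations for dim==2 and the 24 rotations of the cube for dim==3,
# each stored as a sequence of (from_axis, to_axis) pairs passed to rotate90/np.rot90.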
else:
self.do_rotation = False
self.do_rot90 = False
self.scaleFactor = [minScale, maxScale]
if (self.scaleFactor[0]==1 and self.scaleFactor[1]==1):
self.do_scaling = False
else:
self.do_scaling = True
msg += 'scaling, '
self.do_flip = flip
if self.do_flip:
msg += 'flip'
msg += '.'
print(msg)
self.interpolation_order = 1
self.fill_mode = 'constant'
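# Typical usage (sketch; the class and constructor argument names are assumed from
# the parameters referenced above, not taken verbatim from this file):
#   tc = TileCreator(tileSizeLow=16, simSizeLow=64, upres=4, dim=3, ...)
#   tc.addData(low_frames, high_frames)
#   tc.initDataAugmentation(rot=2, minScale=0.85, maxScale=1.15, flip=True)
#   batch_low, batch_high = tc.selectRandomTiles(32, isTraining=True, augment=True)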
def addData(self, low, high):
'''
add data: tiles if premadeTiles is set, whole frames otherwise.
low, high: a single 3D data array or a list of them; format ([batch,] z, y, x, channels)
'''
# check data shape
low = np.asarray(low)
high = np.asarray(high)
if not self.data_flags[DATA_KEY_HIGH]['isLabel']:
if len(low.shape)!=len(high.shape): #high-low mismatch
self.TCError('Data shape mismatch. Dimensions: {} low vs {} high. Dimensions must match or use highIsLabel.'.format(len(low.shape),len(high.shape)) )
if not (len(low.shape)==4 or len(low.shape)==5): #not single frame or sequence of frames
self.TCError('Input must be single 3D data or sequence of 3D data. Format: ([batch,] z, y, x, channels). For 2D use z=1.')
if (low.shape[-1]!=(self.dim_t * self.data_flags[DATA_KEY_LOW]['channels'])):
self.TCError('Dim_t ({}) * Channels ({}, {}) configured for LOW-res data don\'t match channels ({}) of input data.'.format(self.dim_t, self.data_flags[DATA_KEY_LOW]['channels'], self.c_low, low.shape[-1]) )
if not self.data_flags[DATA_KEY_HIGH]['isLabel']:
if (high.shape[-1]!=(self.dim_t * self.data_flags[DATA_KEY_HIGH]['channels'])):
self.TCError('Dim_t ({}) * Channels ({}, {}) configured for HIGH-res data don\'t match channels ({}) of input data.'.format(self.dim_t, self.data_flags[DATA_KEY_HIGH]['channels'], self.c_high, high.shape[-1]) )
low_shape = low.shape
high_shape = high.shape
if len(low.shape)==5: #sequence
if low.shape[0]!=high.shape[0]: #check amount
self.TCError('Unequal amount of low ({}) and high ({}) data.'.format(low.shape[0], high.shape[0]))
# get single data shape
low_shape = low_shape[1:]
if not self.data_flags[DATA_KEY_HIGH]['isLabel']:
high_shape = high_shape[1:]
else: high_shape = [1]
else: #single
low = [low]
high = [high]
if self.premadeTiles:
if not (self.dim_t == 1):
self.TCError('Currently, Dim_t = {} > 1 is not supported by premade tiles'.format(self.dim_t))
if not np.array_equal(low_shape, self.tile_shape_low) or not np.array_equal(high_shape,self.tile_shape_high):
self.TCError('Tile shape mismatch: is - specified\n\tlow: {} - {}\n\thigh {} - {}'.format(low_shape, self.tile_shape_low, high_shape,self.tile_shape_high))
else:
single_frame_low_shape = list(low_shape)
single_frame_high_shape = list(high_shape)
single_frame_low_shape[-1] = low_shape[-1] // self.dim_t
if not self.data_flags[DATA_KEY_HIGH]['isLabel']:
single_frame_high_shape[-1] = high_shape[-1] // self.dim_t
if not np.array_equal(single_frame_low_shape, self.frame_shape_low) or not np.array_equal(single_frame_high_shape,self.frame_shape_high):
self.TCError('Frame shape mismatch: is - specified\n\tlow: {} - {}\n\thigh: {} - {}, given dim_t as {}'.format(single_frame_low_shape, self.frame_shape_low, single_frame_high_shape,self.frame_shape_high, self.dim_t))
self.data[DATA_KEY_LOW].extend(low)
self.data[DATA_KEY_HIGH].extend(high)
print('\n')
print('Added {} datasets. Total: {}'.format(len(low), len(self.data[DATA_KEY_LOW])))
self.splitSets()
def splitSets(self):
'''
calculate the set borders for training, testing and validation set
'''
length = len(self.data[DATA_KEY_LOW])
end_train = int( length * self.part_train )
end_test = end_train + int( length * self.part_test )
#just store the borders of the different sets to avoid data duplication
self.setBorders = [end_train, end_test, length]
print('Training set: {}'.format(self.setBorders[0]))
print('Testing set: {}'.format(self.setBorders[1]-self.setBorders[0]))
print('Validation set: {}'.format(self.setBorders[2]-self.setBorders[1]))
def clearData(self):
'''
clears the data buffer
'''
self.data = {
DATA_KEY_LOW:[],
DATA_KEY_HIGH:[]
}
def createTiles(self, data, tileShape, strides=-1):
'''
create tiles from a single frame. fixed, regular pattern
strides: <=0 or tileShape is normal, otherwise create overlapping tiles
'''
dataShape = data.shape #2D sim: [1,res,res,channels]
pad = [self.padding,self.padding,self.padding,0]
if np.isscalar(strides):
if strides <= 0:
strides = tileShape
else:
strides = [strides,strides,strides]
if dataShape[0]<=1:
pad[0] = 0
strides[0] = 1
channels = dataShape[3]
noTiles = [ (dataShape[0]-tileShape[0])//strides[0]+1, (dataShape[1]-tileShape[1])//strides[1]+1, (dataShape[2]-tileShape[2])//strides[2]+1 ]
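# noTiles: number of tiles that fit along each axis for the given stride
# (non-overlapping tiles when strides equals tileShape).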
tiles = []
for tileZ in range(0, noTiles[0]):
for tileY in range(0, noTiles[1]):
for tileX in range(0, noTiles[2]):
idx_from=[tileZ*strides[0], tileY*strides[1], tileX*strides[2]]
idx_to=[idx_from[0]+tileShape[0], idx_from[1]+tileShape[1], idx_from[2]+tileShape[2]]
currTile=data[ idx_from[0]:idx_to[0], idx_from[1]:idx_to[1], idx_from[2]:idx_to[2], :]
if self.padding > 0:
currTile = np.pad(currTile, pad, 'edge')
tiles.append(currTile)
return np.array(tiles)
def cutTile(self, data, tileShape, offset=[0,0,0]):
'''
cut a tile with the given shape and offset from data
'''
offset = np.asarray(offset)
tileShape = np.asarray(tileShape)
tileShape[-1] = data.shape[-1]
if np.less(data.shape[:3], tileShape[:3]+offset[:3]).any():
self.TCError('Can\'t cut tile with shape {} and offset{} from data with shape {}.'.format(tileShape, offset, data.shape))
tile = data[offset[0]:offset[0]+tileShape[0], offset[1]:offset[1]+tileShape[1], offset[2]:offset[2]+tileShape[2], :]
if not np.array_equal(tile.shape,tileShape):
self.TCError('Wrong tile shape after cutting. is: {}. goal: {}.'.format(tile.shape,tileShape))
return tile
#####################################################################################
# batch creation
#####################################################################################
def selectRandomTiles(self, selectionSize, isTraining=True, augment=False, tile_t = 1):
'''
main method to create batches
Return:
shape: [selectionSize, z, y, x, channels * tile_t]
if 2D z = 1
channels: density, [vel x, vel y, vel z], [pos x, pos y, pos z]
'''
if isTraining:
if self.setBorders[0]<1:
self.TCError('no training data.')
else:
if (self.setBorders[1] - self.setBorders[0])<1:
self.TCError('no test data.')
if(tile_t > self.dim_t):
self.TCError('not enough coherent frames. Requested {}, available {}'.format(tile_t, self.dim_t))
batch_low = []
batch_high = []
for i in range(selectionSize):
if augment and self.useDataAug: #data augmentation
low, high = self.generateTile(isTraining, tile_t)
else: #cut random tile without augmentation
low, high = self.getRandomDatum(isTraining, tile_t)
if not self.premadeTiles:
low, high = self.getRandomTile(low, high)
batch_low.append(low)
batch_high.append(high)
return np.asarray(batch_low), np.asarray(batch_high)
def generateTile(self, isTraining=True, tile_t = 1):
'''
generates a random low-high pair of tiles (data augmentation)
'''
# get a frame; it is a copy, so transformations do not affect the raw dataset
data = {}
data[DATA_KEY_LOW], data[DATA_KEY_HIGH] = self.getRandomDatum(isTraining, tile_t)
if not self.premadeTiles:
#cut a tile for faster transformation
if self.do_scaling or self.do_rotation:
factor = 1
if self.do_rotation: # or self.do_scaling:
factor*=1.5 # enlarge the cut region so a later rotation still has valid data at the corners, and to avoid size errors caused by rounding
if self.do_scaling:
scaleFactor = np.random.uniform(self.scaleFactor[0], self.scaleFactor[1])
factor/= scaleFactor
tileShapeLow = np.ceil(self.tile_shape_low*factor)
if self.dim==2:
tileShapeLow[0] = 1
data[DATA_KEY_LOW], data[DATA_KEY_HIGH] = self.getRandomTile(data[DATA_KEY_LOW], data[DATA_KEY_HIGH], tileShapeLow.astype(int))
#random scaling, changes resolution
if self.do_scaling:
data = self.scale(data, scaleFactor)
bounds = np.zeros(4)
#rotate
if self.do_rotation:
bounds = np.array(data[DATA_KEY_LOW].shape)*0.16 #bounds applied on all sides, 1.5*(1-2*0.16)~1
data = self.rotate(data)
#get a tile
data[DATA_KEY_LOW], data[DATA_KEY_HIGH] = self.getRandomTile(data[DATA_KEY_LOW], data[DATA_KEY_HIGH], bounds=bounds) #includes "shifting"
if self.do_rot90:
rot = np.random.choice(self.cube_rot[self.dim])
for axis in rot:
data = self.rotate90(data, axis)
#flip once
if self.do_flip:
axis = np.random.choice(4)
if axis < 3: # axis < self.dim
data = self.flip(data, [axis])
# check tile size
target_shape_low = np.copy(self.tile_shape_low)
target_shape_high = np.copy(self.tile_shape_high)
target_shape_low[-1] *= tile_t
target_shape_high[-1] *= tile_t
if not np.array_equal(data[DATA_KEY_LOW].shape,target_shape_low) or (not np.array_equal(data[DATA_KEY_HIGH].shape,target_shape_high) and not self.data_flags[DATA_KEY_HIGH]['isLabel']):
self.TCError('Wrong tile shape after data augmentation. is: {},{}. goal: {},{}.'.format(data[DATA_KEY_LOW].shape, data[DATA_KEY_HIGH].shape, target_shape_low, target_shape_high))
return data[DATA_KEY_LOW], data[DATA_KEY_HIGH]
def getRandomDatum(self, isTraining=True, tile_t = 1):
'''returns a copy of a random frame'''
if isTraining:
randNo = randrange(0, self.setBorders[0])
else:
randNo = randrange(self.setBorders[0], self.setBorders[1])
randFrame = 0
if tile_t<self.dim_t:
randFrame = randrange(0, self.dim_t - tile_t)
else:
tile_t = self.dim_t
return self.getDatum(randNo*self.dim_t+randFrame, tile_t)
def getDatum(self, index, tile_t = 1):
'''returns a copy of the indicated frame or tile'''
begin_ch = 0
if(self.dim_t > 1):
begin_ch = (index % self.dim_t) * self.tile_shape_low[-1]
end_ch = begin_ch + tile_t * self.tile_shape_low[-1]
begin_ch_y = 0
if(self.dim_t > 1):
begin_ch_y = (index % self.dim_t) * self.tile_shape_high[-1]
end_c_h_y = begin_ch_y + tile_t * self.tile_shape_high[-1]
if not self.data_flags[DATA_KEY_HIGH]['isLabel']:
return np.copy(self.data[DATA_KEY_LOW][index//self.dim_t][:,:,:,begin_ch:end_ch]), np.copy(self.data[DATA_KEY_HIGH][index//self.dim_t][:,:,:,begin_ch_y:end_c_h_y])
else:
return np.copy(self.data[DATA_KEY_LOW][index//self.dim_t][:,:,:,begin_ch:end_ch]), np.copy(self.data[DATA_KEY_HIGH][index//self.dim_t])
def getRandomTile(self, low, high, tileShapeLow=None, bounds=[0,0,0,0]): #bounds to avoid mirrored parts
'''
cut a random tile (low and high) from a given frame, considers densityMinimum
bounds: ignore edges of frames, used to discard mirrored parts after rotation
'''
if tileShapeLow is None:
tileShapeLow = np.copy(self.tile_shape_low) # use copy is very important!!!
tileShapeHigh = tileShapeLow*self.upres
frameShapeLow = np.asarray(low.shape)
if len(low.shape)!=4 or len(tileShapeLow)!=4:
self.TCError('Data shape mismatch.')
if len(high.shape)!=4 and not self.data_flags[DATA_KEY_HIGH]['isLabel']:
self.TCError('Data shape mismatch.')
start = np.ceil(bounds)
end = frameShapeLow - tileShapeLow + np.ones(4) - start
offset_up = np.array([self.upres, self.upres, self.upres])
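# [start, end) is the valid range of low-res tile offsets per axis; the matching
# high-res offset is obtained later by multiplying with offset_up (the upres factor).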
if self.dim==2:
start[0] = 0
end[0] = 1
offset_up[0] = 1
tileShapeHigh[0] = 1
# check if possible to cut tile
if np.amin((end-start)[:3]) < 0:
self.TCError('Can\'t cut tile {} from frame {} with bounds {}.'.format(tileShapeLow, frameShapeLow, start))
# cut tile
hasMinDensity = False
i = 1
while (not hasMinDensity) and i<20:
offset = np.asarray([randrange(start[0], end[0]), randrange(start[1], end[1]), randrange(start[2], end[2])])
lowTile = self.cutTile(low, tileShapeLow, offset)
offset *= offset_up
if not self.data_flags[DATA_KEY_HIGH]['isLabel']:
highTile = self.cutTile(high, tileShapeHigh, offset)
else:
highTile = high
hasMinDensity = self.hasMinDensity(lowTile)
i+=1
return lowTile, highTile
#####################################################################################
# AUGMENTATION
#####################################################################################
def special_aug(self, data, ops_key, param):
"""
wrapper to call the augmentation operations specified in self.aops in initDataAugmentation
"""
for data_key in data:
if self.data_flags[data_key]['isLabel']: continue
orig_shape = data[data_key].shape
tile_t = orig_shape[-1] // self.data_flags[data_key]['channels']
data_array = data[data_key]
if(tile_t > 1): data_array = data[data_key].reshape( (-1, tile_t, self.data_flags[data_key]['channels']) )
for c_key, op in self.aops[data_key][ops_key].items():
if self.data_flags[data_key][c_key]:
data_array = op(data_array, self.c_lists[data_key][c_key], param)
if (tile_t > 1): data[data_key] = data_array.reshape(orig_shape)
return data
def rotate(self, data):
'''
random uniform rotation of low and high data of a given frame
'''
#check if single frame
#2D:
if self.dim==2:
theta = np.pi * np.random.uniform(0, 2)
rotation_matrix = np.array([[1, 0, 0, 0 ],
[0, np.cos(theta), -np.sin(theta), 0],
[0, np.sin(theta), np.cos(theta) , 0],
[0, 0, 0, 1] ])
#3D:
elif self.dim==3:
# random uniform rotation in 3D
quat = np.random.normal(size=4)
quat/= np.linalg.norm(quat)
q = np.outer(quat, quat)*2
rotation_matrix = np.array([[1-q[2, 2]-q[3, 3], q[1, 2]-q[3, 0], q[1, 3]+q[2, 0], 0],
[ q[1, 2]+q[3, 0], 1-q[1, 1]-q[3, 3], q[2, 3]-q[1, 0], 0],
[ q[1, 3]-q[2, 0], q[2, 3]+q[1, 0], 1-q[1, 1]-q[2, 2], 0],
[ 0, 0, 0, 1]])
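# a random normal 4-vector, normalized, is uniformly distributed on the unit sphere S^3,
# so the quaternion above yields a uniformly distributed 3D rotation; the matrix is kept
# 4x4 (homogeneous) so it can be used with applyTransform.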
data = self.special_aug(data, AOPS_KEY_ROTATE, rotation_matrix)
for data_key in data:
if not self.data_flags[data_key]['isLabel']:
data[data_key] = self.applyTransform(data[data_key], rotation_matrix.T)
return data
def rotate_simple(self, low, high, angle):
'''
use a different method for rotation. about 30-40% faster than with rotation matrix, but only one axis.
'''
if len(low.shape)!=4 or len(high.shape)!=4:
self.TCError('Data shape mismatch.')
#test rot around z (axis order z,y,x,c)
low = scipy.ndimage.rotate(low, angle, [1,2] , reshape=False, order=self.interpolation_order, mode=self.fill_mode, cval=1.0)
high = scipy.ndimage.rotate(high, angle, [1,2] , reshape=False, order=self.interpolation_order, mode=self.fill_mode, cval=1.0)
return low, high
def rotateVelocities(self, datum, c_list, rotationMatrix):
'''
rotate vel vectors (channel 1-3)
'''
rotation3 = rotationMatrix[:3, :3]
rotation2 = rotationMatrix[1:3, 1:3]
channels = np.split(datum, datum.shape[-1], -1)
for v in c_list:
if len(v) == 3: # currently always ends here!! even for 2D, #z,y,x to match rotation matrix
vel = np.stack([channels[v[2]].flatten(),channels[v[1]].flatten(),channels[v[0]].flatten()])
vel = rotation3.dot(vel)
channels[v[2]] = np.reshape(vel[0], channels[v[2]].shape)
channels[v[1]] = np.reshape(vel[1], channels[v[1]].shape)
channels[v[0]] = np.reshape(vel[2], channels[v[0]].shape)
if len(v) == 2:
vel = np.concatenate([channels[v[1]],channels[v[0]]], -1) #y,x to match rotation matrix
shape = vel.shape
vel = np.reshape(vel, (-1, 2))
vel = np.reshape(rotation2.dot(vel.T).T, shape)
vel = np.split(vel, 2, -1)
channels[v[1]] = vel[0]
channels[v[0]] = vel[1]
return np.concatenate(channels, -1)
def rotate90(self, data, axes):
'''
rotate the frame by 90 degrees from the first axis counterclockwise to the second
axes: 2 int, from axis to axis; see np.rot90
0,1,2 -> z,y,x
'''
if len(axes)!=2:
self.TCError('need 2 axes for rotate90.')
for data_key in data:
if not self.data_flags[data_key]['isLabel']:
data[data_key] = np.rot90(data[data_key], axes=axes)
data = self.special_aug(data, AOPS_KEY_ROT90, axes)
return data
def rotate90Velocities(self, datum, c_list, axes):
if len(axes)!=2:
self.TCError('need 2 axes for rotate90.')
channels = np.split(datum, datum.shape[-1], -1)
for v in c_list: #axes z,y,x -> vel x,y,z: 0,1,2 -> 2,1,0
channels[v[-axes[0]+2]], channels[v[-axes[1]+2]] = -channels[v[-axes[1]+2]], channels[v[-axes[0]+2]]
return np.concatenate(channels, -1)
def flip(self, data, axes, isFrame=True): #axes=list, flip multiple at once
'''
flip low and high data (single frame/tile) along the specified axes
low, high: data format: (z,y,x,c)
axes: list of axis indices 0,1,2-> z,y,x
'''
# axis: 0,1,2 -> z,y,x
if not isFrame:
axes = np.asarray(axes) + 1 # shift axis indices by one (np.ones(axes.shape) fails on a plain list)
#flip tiles/frames
for axis in axes:
for data_key in data:
if not self.data_flags[data_key]['isLabel']:
data[data_key] = np.flip(data[data_key], axis)
data = self.special_aug(data, AOPS_KEY_FLIP, axes)
return data
def flipVelocities(self, datum, c_list, axes):
'''
flip velocity vectors along the specified axes
low: data with velocity to flip (4 channels: d,vx,vy,vz)
axes: list of axis indices 0,1,2-> z,y,x
'''
# !axis order: data z,y,x
channels = np.split(datum, datum.shape[-1], -1)
for v in c_list: # x,y,[z], 2,1,0
if 2 in axes: # flip vel x
channels[v[0]] *= (-1)
if 1 in axes:
channels[v[1]] *= (-1)
if 0 in axes and len(v)==3:
channels[v[2]] *= (-1)
return np.concatenate(channels, -1)
def scale(self, data, factor):
'''
changes frame resolution to "round((factor) * (original resolution))"
'''
# only same factor in every dim for now. how would it affect vel scaling?
# check for 2D
scale = [factor, factor, factor, 1] #single frame
if self.dim==2:
scale[0] = 1
# to ensure high/low ration stays the same
scale = np.round(np.array(data[DATA_KEY_LOW].shape) * scale )/np.array(data[DATA_KEY_LOW].shape)
if len(data[DATA_KEY_LOW].shape)==5: #frame sequence
scale = np.append([1],scale)
#apply transform
#low = self.applyTransform(low, zoom_matrix)
#high = self.applyTransform(high, zoom_matrix)
#changes the size of the frame. should work well with getRandomTile(), no bounds needed
for data_key in data:
if not self.data_flags[data_key]['isLabel']:
data[data_key] = scipy.ndimage.zoom( data[data_key], scale, order=self.interpolation_order, mode=self.fill_mode, cval=0.0)
#necessary?
data = self.special_aug(data, AOPS_KEY_SCALE, factor)
return data
def scaleVelocities(self, datum, c_list, factor):
#scale vel? vel*=factor
channels = np.split(datum, datum.shape[-1], -1)
for v in c_list: # x,y,[z]; 2,1,0
channels[v[0]] *= factor
channels[v[1]] *= factor
if len(v)==3:
channels[v[2]] *= factor
return np.concatenate(channels, -1)
def applyTransform(self, data, transform_matrix, data_dim=3):
# change axis order from z,y,x to x,y,z? (invert axis order +channel)
if len(data.shape)!=4:
self.TCError('Data shape mismatch.')
#set transform to center; from fluiddatagenerator.py
offset = np.array(data.shape) / 2 - np.array([0.5, 0.5, 0.5, 0])
offset_matrix = np.array([[1, 0, 0, offset[0]], [0, 1, 0, offset[1]], [0, 0, 1, offset[2]], [0, 0, 0, 1]])
reset_matrix = np.array([[1, 0, 0,-offset[0]], [0, 1, 0,-offset[1]], [0, 0, 1,-offset[2]], [0, 0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, transform_matrix), reset_matrix)
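# compose: shift the volume center to the origin, apply the transform, then shift back,
# so rotations/scalings act about the center of the data rather than its corner.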
data = np.rollaxis(data, 3, 0) #channel to front
channel_data = [scipy.ndimage.interpolation.affine_transform(
channel,
transform_matrix[:data_dim,:data_dim],
transform_matrix[:data_dim, data_dim],
order=self.interpolation_order,
mode=self.fill_mode,
cval=0.) for channel in data]
data = np.stack(channel_data, axis=-1) # stack the channels back onto the last axis
return data
#####################################################################################
# HELPER METHODS
#####################################################################################
def concatTiles(self, tiles, frameShape ,tileBorder=[0,0,0,0]):
'''
build a frame by concatenation of the given tiles.
tiles: numpy array of same shaped tiles [batch,z,y,x,c]
frameShape: the shape of the frame in tiles [z,y,x]
tileBorder: cut off borders of the tiles. [z,y,x,c]
'''
if len(tiles.shape)!=5 or len(frameShape)!=3 or len(tileBorder)!=4:
self.TCError('Data shape mismatch.')
tiles_in_frame = frameShape[0]*frameShape[1]*frameShape[2]
if tiles_in_frame != len(tiles):
self.TCError('given tiles do not match required tiles.')
# cut borders
tileBorder = np.asarray(tileBorder)
if np.less(np.zeros(4),tileBorder).any():
tileShape = tiles.shape[1:] - 2*tileBorder
tiles_cut = []
for tile in tiles:
tiles_cut.append(self.cutTile(tile, tileShape, tileBorder))
tiles = tiles_cut
#combine tiles to image
frame = []
for z in range(frameShape[0]):
frame_slices = []
for y in range(frameShape[1]):
offset=z*frameShape[1]*frameShape[2] + y*frameShape[2]
frame_slices.append(np.concatenate(tiles[offset:offset+frameShape[2]],axis=2)) #combine x
frame.append(np.concatenate(frame_slices, axis=1)) #combine y
"""
desispec.sky
============
Utility functions to compute a sky model and subtract it.
"""
import numpy as np
from desispec.resolution import Resolution
from desispec.linalg import cholesky_solve
from desispec.linalg import cholesky_invert
from desispec.linalg import spline_fit
from desiutil.log import get_logger
from desispec import util
from desiutil import stats as dustat
import scipy,scipy.sparse,scipy.stats,scipy.ndimage
import sys
def compute_sky(frame, nsig_clipping=4.,max_iterations=100,model_ivar=False,add_variance=True,angular_variation_deg=0,chromatic_variation_deg=0) :
"""Compute a sky model.
Input flux are expected to be flatfielded!
We don't check this in this routine.
Args:
frame : Frame object, which includes attributes
- wave : 1D wavelength grid in Angstroms
- flux : 2D flux[nspec, nwave] density
- ivar : 2D inverse variance of flux
- mask : 2D mask of flux (0=good)
- resolution_data : 3D[nspec, ndiag, nwave] (only sky fibers)
nsig_clipping : [optional] sigma clipping value for outlier rejection
Optional:
max_iterations : int , number of iterations
model_ivar : replace ivar by a model to avoid bias due to correlated flux and ivar. this has a negligible effect on sims.
add_variance : evaluate calibration error and add this to the sky model variance
angular_variation_deg : Degree of polynomial for sky flux variation with focal plane coordinates (default=0, i.e. no correction, a uniform sky)
chromatic_variation_deg : Wavelength degree for the chromatic x angular terms. If negative, use as many 2D polynomials of x and y as wavelength entries.
returns SkyModel object with attributes wave, flux, ivar, mask
"""
if angular_variation_deg == 0 :
return compute_uniform_sky(frame, nsig_clipping=nsig_clipping,max_iterations=max_iterations,model_ivar=model_ivar,add_variance=add_variance)
else :
if chromatic_variation_deg < 0 :
return compute_non_uniform_sky(frame, nsig_clipping=nsig_clipping,max_iterations=max_iterations,model_ivar=model_ivar,add_variance=add_variance,angular_variation_deg=angular_variation_deg)
else :
return compute_polynomial_times_sky(frame, nsig_clipping=nsig_clipping,max_iterations=max_iterations,model_ivar=model_ivar,add_variance=add_variance,angular_variation_deg=angular_variation_deg,chromatic_variation_deg=chromatic_variation_deg)
def _model_variance(frame,cskyflux,cskyivar,skyfibers) :
"""look at chi2 per wavelength and increase sky variance to reach chi2/ndf=1
"""
log = get_logger()
tivar = util.combine_ivar(frame.ivar[skyfibers], cskyivar[skyfibers])
# the chi2 at a given wavelength can be large because of a cosmic ray
# and not because of a PSF error or sky non-uniformity
# so we need to consider only waves for which
# a reasonable sky model error can be computed
# mean sky
msky = np.mean(cskyflux,axis=0)
dwave = np.mean(np.gradient(frame.wave))
dskydw = np.zeros(msky.shape)
dskydw[1:-1]=(msky[2:]-msky[:-2])/(frame.wave[2:]-frame.wave[:-2])
dskydw = np.abs(dskydw)
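# dskydw: |d(mean sky)/d(wavelength)| from central differences; used below to
# translate a wavelength calibration error into an equivalent flux error.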
# now we consider a worst possible sky model error (20% error on flat, 0.5A )
max_possible_var = 1./(tivar+(tivar==0)) + (0.2*msky)**2 + (0.5*dskydw)**2
# exclude residuals inconsistent with this max possible variance (at 3 sigma)
bad = (frame.flux[skyfibers]-cskyflux[skyfibers])**2 > 3**2*max_possible_var
tivar[bad]=0
ndata = np.sum(tivar>0,axis=0)
ok=np.where(ndata>1)[0]
chi2 = np.zeros(frame.wave.size)
chi2[ok] = np.sum(tivar*(frame.flux[skyfibers]-cskyflux[skyfibers])**2,axis=0)[ok]/(ndata[ok]-1)
chi2[ndata<=1] = 1. # default
# now we are going to evaluate a sky model error based on this chi2,
# but only around sky flux peaks (>0.1*max)
tmp = np.zeros(frame.wave.size)
tmp = (msky[1:-1]>msky[2:])*(msky[1:-1]>msky[:-2])*(msky[1:-1]>0.1*np.max(msky))
peaks = np.where(tmp)[0]+1
dpix = int(np.ceil(3/dwave)) # +- n Angstrom around each peak
skyvar = 1./(cskyivar+(cskyivar==0))
# loop on peaks
for peak in peaks :
b=peak-dpix
e=peak+dpix+1
mchi2 = np.mean(chi2[b:e]) # mean reduced chi2 around peak
mndata = np.mean(ndata[b:e]) # mean number of fibers contributing
# sky model variance = sigma_flat * msky + sigma_wave * dmskydw
sigma_flat=0.000 # the fiber flat error is already included in the flux ivar
sigma_wave=0.005 # A, minimum value
res2=(frame.flux[skyfibers,b:e]-cskyflux[skyfibers,b:e])**2
var=1./(tivar[:,b:e]+(tivar[:,b:e]==0))
nd=np.sum(tivar[:,b:e]>0)
while(sigma_wave<2) :
pivar=1./(var+(sigma_flat*msky[b:e])**2+(sigma_wave*dskydw[b:e])**2)
pchi2=np.sum(pivar*res2)/nd
if pchi2<=1 :
log.info("peak at {}A : sigma_wave={}".format(int(frame.wave[peak]),sigma_wave))
skyvar[:,b:e] += ( (sigma_flat*msky[b:e])**2 + (sigma_wave*dskydw[b:e])**2 )
break
sigma_wave += 0.005
return (cskyivar>0)/(skyvar+(skyvar==0))
def compute_uniform_sky(frame, nsig_clipping=4.,max_iterations=100,model_ivar=False,add_variance=True) :
"""Compute a sky model.
Sky[fiber,i] = R[fiber,i,j] Flux[j]
Input flux are expected to be flatfielded!
We don't check this in this routine.
Args:
frame : Frame object, which includes attributes
- wave : 1D wavelength grid in Angstroms
- flux : 2D flux[nspec, nwave] density
- ivar : 2D inverse variance of flux
- mask : 2D mask of flux (0=good)
- resolution_data : 3D[nspec, ndiag, nwave] (only sky fibers)
nsig_clipping : [optional] sigma clipping value for outlier rejection
Optional:
max_iterations : int , number of iterations
model_ivar : replace ivar by a model to avoid bias due to correlated flux and ivar. this has a negligible effect on sims.
add_variance : evaluate calibration error and add this to the sky model variance
returns SkyModel object with attributes wave, flux, ivar, mask
"""
log=get_logger()
log.info("starting")
# Grab sky fibers on this frame
skyfibers = np.where(frame.fibermap['OBJTYPE'] == 'SKY')[0]
assert np.max(skyfibers) < 500 #- indices, not fiber numbers
nwave=frame.nwave
nfibers=len(skyfibers)
current_ivar=frame.ivar[skyfibers].copy()*(frame.mask[skyfibers]==0)
flux = frame.flux[skyfibers]
Rsky = frame.R[skyfibers]
input_ivar=None
if model_ivar :
log.info("use a model of the inverse variance to remove bias due to correlated ivar and flux")
input_ivar=current_ivar.copy()
median_ivar_vs_wave = np.median(current_ivar,axis=0)
median_ivar_vs_fiber = np.median(current_ivar,axis=1)
median_median_ivar = np.median(median_ivar_vs_fiber)
for f in range(current_ivar.shape[0]) :
threshold=0.01
current_ivar[f] = median_ivar_vs_fiber[f]/median_median_ivar * median_ivar_vs_wave
# keep input ivar for very low weights
ii=(input_ivar[f]<=(threshold*median_ivar_vs_wave))
#log.info("fiber {} keep {}/{} original ivars".format(f,np.sum(ii),current_ivar.shape[1]))
current_ivar[f][ii] = input_ivar[f][ii]
sqrtw=np.sqrt(current_ivar)
sqrtwflux=sqrtw*flux
chi2=np.zeros(flux.shape)
nout_tot=0
for iteration in range(max_iterations) :
# the matrix A is 1/2 of the second derivative of the chi2 with respect to the parameters
# A_ij = 1/2 d2(chi2)/di/dj
# A_ij = sum_fiber sum_wave_w ivar[fiber,w] d(model)/di[fiber,w] * d(model)/dj[fiber,w]
# the vector B is 1/2 of the first derivative of the chi2 with respect to the parameters
# B_i = 1/2 d(chi2)/di
# B_i = sum_fiber sum_wave_w ivar[fiber,w] d(model)/di[fiber,w] * (flux[fiber,w]-model[fiber,w])
# the model is model[fiber]=R[fiber]*sky
# and the parameters are the unconvolved sky flux at the wavelength i
# so, d(model)/di[fiber,w] = R[fiber][w,i]
# this gives
# A_ij = sum_fiber sum_wave_w ivar[fiber,w] R[fiber][w,i] R[fiber][w,j]
# A = sum_fiber ( diag(sqrt(ivar))*R[fiber] ) ( diag(sqrt(ivar))* R[fiber] )^t
# A = sum_fiber sqrtwR[fiber] sqrtwR[fiber]^t
# and
# B = sum_fiber sum_wave_w ivar[fiber,w] R[fiber][w] * flux[fiber,w]
# B = sum_fiber sum_wave_w sqrt(ivar)[fiber,w]*flux[fiber,w] sqrtwR[fiber,wave]
#A=scipy.sparse.lil_matrix((nwave,nwave)).tocsr()
A=np.zeros((nwave,nwave))
B=np.zeros((nwave))
# diagonal sparse matrix with content = sqrt(ivar)*flat of a given fiber
SD=scipy.sparse.lil_matrix((nwave,nwave))
# loop on fiber to handle resolution
for fiber in range(nfibers) :
if fiber%10==0 :
log.info("iter %d sky fiber %d/%d"%(iteration,fiber,nfibers))
R = Rsky[fiber]
# diagonal sparse matrix with content = sqrt(ivar)
SD.setdiag(sqrtw[fiber])
sqrtwR = SD*R # each row r of R is multiplied by sqrtw[r]
A += (sqrtwR.T*sqrtwR).todense()
B += sqrtwR.T*sqrtwflux[fiber]
log.info("iter %d solving"%iteration)
w = A.diagonal()>0
A_pos_def = A[w,:]
A_pos_def = A_pos_def[:,w]
parameters = B*0
try:
parameters[w]=cholesky_solve(A_pos_def,B[w])
except:
log.info("cholesky failed, trying svd in iteration {}".format(iteration))
parameters[w]=np.linalg.lstsq(A_pos_def,B[w])[0]
log.info("iter %d compute chi2"%iteration)
for fiber in range(nfibers) :
# the parameters are directly the unconvolved sky flux
# so we simply have to reconvolve it
fiber_convolved_sky_flux = Rsky[fiber].dot(parameters)
chi2[fiber]=current_ivar[fiber]*(flux[fiber]-fiber_convolved_sky_flux)**2
log.info("rejecting")
nout_iter=0
if iteration<1 :
# only remove worst outlier per wave
# apply rejection iteratively, only one entry per wave among fibers
# find waves with outlier (fastest way)
nout_per_wave=np.sum(chi2>nsig_clipping**2,axis=0)
selection=np.where(nout_per_wave>0)[0]
for i in selection :
worst_entry=np.argmax(chi2[:,i])
current_ivar[worst_entry,i]=0
sqrtw[worst_entry,i]=0
sqrtwflux[worst_entry,i]=0
nout_iter += 1
else :
# remove all of them at once
bad=(chi2>nsig_clipping**2)
current_ivar *= (bad==0)
sqrtw *= (bad==0)
sqrtwflux *= (bad==0)
nout_iter += np.sum(bad)
nout_tot += nout_iter
sum_chi2=float(np.sum(chi2))
ndf=int(np.sum(chi2>0)-nwave)
chi2pdf=0.
if ndf>0 :
chi2pdf=sum_chi2/ndf
log.info("iter #%d chi2=%f ndf=%d chi2pdf=%f nout=%d"%(iteration,sum_chi2,ndf,chi2pdf,nout_iter))
if nout_iter == 0 :
break
log.info("nout tot=%d"%nout_tot)
# we now have to compute the sky model for all fibers
# and propagate the uncertainties
# no need to restore the original ivar to compute the model errors when modeling ivar
# the sky inverse variances are very similar
log.info("compute the parameter covariance")
# we may have to use a different method to compute this
# covariance
try :
parameter_covar=cholesky_invert(A)
# the above is too slow
# maybe invert per block, sandwich by R
except np.linalg.linalg.LinAlgError :
log.warning("cholesky_solve_and_invert failed, switching to np.linalg.lstsq and np.linalg.pinv")
parameter_covar = np.linalg.pinv(A)
log.info("compute mean resolution")
# we make an approximation for the variance to save CPU time
# we use the average resolution of all fibers in the frame:
mean_res_data=np.mean(frame.resolution_data,axis=0)
Rmean = Resolution(mean_res_data)
log.info("compute convolved sky and ivar")
# The parameters are directly the unconvolved sky
# First convolve with average resolution :
convolved_sky_covar=Rmean.dot(parameter_covar).dot(Rmean.T.todense())
# and keep only the diagonal
convolved_sky_var=np.diagonal(convolved_sky_covar)
# inverse
convolved_sky_ivar=(convolved_sky_var>0)/(convolved_sky_var+(convolved_sky_var==0))
# and simply consider it's the same for all spectra
cskyivar = np.tile(convolved_sky_ivar, frame.nspec).reshape(frame.nspec, nwave)
# The sky model for each fiber (simple convolution with resolution of each fiber)
cskyflux = np.zeros(frame.flux.shape)
for i in range(frame.nspec):
cskyflux[i] = frame.R[i].dot(parameters)
# look at chi2 per wavelength and increase sky variance to reach chi2/ndf=1
if skyfibers.size > 1 and add_variance :
modified_cskyivar = _model_variance(frame,cskyflux,cskyivar,skyfibers)
else :
modified_cskyivar = cskyivar.copy()
# need to do better here
mask = (cskyivar==0).astype(np.uint32)
return SkyModel(frame.wave.copy(), cskyflux, modified_cskyivar, mask,
nrej=nout_tot, stat_ivar = cskyivar) # keep a record of the statistical ivar for QA
def compute_polynomial_times_sky(frame, nsig_clipping=4.,max_iterations=30,model_ivar=False,add_variance=True,angular_variation_deg=1,chromatic_variation_deg=1) :
"""Compute a sky model.
Sky[fiber,i] = R[fiber,i,j] Polynomial(x[fiber],y[fiber],wavelength[j]) Flux[j]
Input flux are expected to be flatfielded!
We don't check this in this routine.
Args:
frame : Frame object, which includes attributes
- wave : 1D wavelength grid in Angstroms
- flux : 2D flux[nspec, nwave] density
- ivar : 2D inverse variance of flux
- mask : 2D mask of flux (0=good)
- resolution_data : 3D[nspec, ndiag, nwave] (only sky fibers)
nsig_clipping : [optional] sigma clipping value for outlier rejection
Optional:
max_iterations : int , number of iterations
model_ivar : replace ivar by a model to avoid bias due to correlated flux and ivar. this has a negligible effect on sims.
add_variance : evaluate calibration error and add this to the sky model variance
returns SkyModel object with attributes wave, flux, ivar, mask
"""
log=get_logger()
log.info("starting")
# Grab sky fibers on this frame
skyfibers = np.where(frame.fibermap['OBJTYPE'] == 'SKY')[0]
assert np.max(skyfibers) < 500 #- indices, not fiber numbers
nwave=frame.nwave
nfibers=len(skyfibers)
current_ivar=frame.ivar[skyfibers].copy()*(frame.mask[skyfibers]==0)
flux = frame.flux[skyfibers]
Rsky = frame.R[skyfibers]
input_ivar=None
if model_ivar :
log.info("use a model of the inverse variance to remove bias due to correlated ivar and flux")
input_ivar=current_ivar.copy()
median_ivar_vs_wave = np.median(current_ivar,axis=0)
median_ivar_vs_fiber = np.median(current_ivar,axis=1)
median_median_ivar = np.median(median_ivar_vs_fiber)
for f in range(current_ivar.shape[0]) :
threshold=0.01
current_ivar[f] = median_ivar_vs_fiber[f]/median_median_ivar * median_ivar_vs_wave
# keep input ivar for very low weights
ii=(input_ivar[f]<=(threshold*median_ivar_vs_wave))
#log.info("fiber {} keep {}/{} original ivars".format(f,np.sum(ii),current_ivar.shape[1]))
current_ivar[f][ii] = input_ivar[f][ii]
# need focal plane coordinates
x = frame.fibermap["X_TARGET"]
y = frame.fibermap["Y_TARGET"]
# normalize for numerical stability
xm = np.mean(x)
#!/usr/bin/env python
from __future__ import print_function
import unittest
import os
import numpy as np
import numpy.random as rng
import subprocess
from mock import patch
from mock import MagicMock
from mock import Mock
from mock import mock_open
from mock import call
from nnjm_data_iterator import openShuffleNoTempFile
from nnjm_data_iterator import InfiniteIterator
from nnjm_data_iterator import DataIterator
from nnjm_data_iterator import HardElideDataIterator
from nnjm_data_iterator import SoftElideDataIterator
class TestOpenShuffleNoTempFile(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestOpenShuffleNoTempFile, self).__init__(*args, **kwargs)
self.data_filename = os.path.join('tests', 'sample.data')
def test_01_default_usage(self):
with open(self.data_filename, 'r') as f:
data = f.readlines()
with openShuffleNoTempFile(self.data_filename, 'r') as f:
shuffled_data = f.readlines()
self.assertEqual(len(data), len(shuffled_data), 'There should the same number of samples.')
data = np.asarray(data)
shuffled_data = np.asarray(shuffled_data)
self.assertTrue(np.all(data.shape == shuffled_data.shape), 'The data should retain its shape.')
self.assertFalse(np.all(data == shuffled_data), 'The samples are not supposed to be in the same order.')
self.assertTrue(np.all(data.sort() == shuffled_data.sort()), 'We should have exactly the same samples.')
@patch('nnjm_data_iterator.Popen', autospec=True)
def test_02_terminate_should_be_called(self, mocked_popen):
with open(self.data_filename, 'r') as g:
mocked_popen.return_value.stdout = g
with openShuffleNoTempFile(self.data_filename, 'r') as f:
_ = f.readlines()
#print(mocked_popen.mock_calls)
#print(mocked_popen.return_value.terminate.call_count)
self.assertTrue(mocked_popen.return_value.terminate.call_count == 1, 'A call to terminate should have happened.')
mocked_popen.return_value.terminate.assert_called_once_with()
expected_calls = [ call('gzip -cqdf {} | shuf 2> /dev/null'.format(self.data_filename), shell=True, stdout=-1),
call().terminate() ]
# From mock==1.0.1 to mock==2.0.0 the assert_has_calls has changed
# behavior and we can no longer use it for our purpose.
# See: https://github.com/testing-cabal/mock/issues/353
#from pudb import set_trace; set_trace()
#mocked_popen.assert_has_calls('gzip -cqdf {} | shuf 2> /dev/null'.format(self.data_filename), shell=True, stdout=-1)
for e, c in zip(expected_calls, mocked_popen.mock_calls):
self.assertEqual(e, c)
class TestInfiniteIterator(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestInfiniteIterator, self).__init__(*args, **kwargs)
self.data_filename = os.path.join('tests', 'sample.data')
def test_01_default_usage(self):
iteratable = InfiniteIterator(self.data_filename)
# The file has 100 samples, thus using 150 will force a reopen.
for _ in range(150):
sample = next(iteratable)
self.assertEqual(len(sample.split()), 17, 'Invalid format')
@patch('__builtin__.open', spec=open)
def test_02_file_reopened(self, my_mock_open):
def setupMock():
data_file_1 = MagicMock(name='samples', spec=file)
data_file_1.__enter__.return_value = data_file_1
data_file_1.__iter__.return_value = iter(['a', 'b'])
data_file_2 = MagicMock(name='samples', spec=file)
data_file_2.__enter__.return_value = data_file_2
data_file_2.__iter__.return_value = iter(['a', 'b'])
my_mock_open.side_effect = (data_file_1, data_file_2)
return data_file_1, data_file_2
d1, d2 = setupMock()
iteratable = InfiniteIterator(self.data_filename, opener=my_mock_open)
# The file has 2 samples, thus using 3 will force a reopen.
for _ in range(3):
sample = next(iteratable)
self.assertEqual(my_mock_open.call_count, 2, 'We should have opened the file twice.')
expected_calls = [ call('tests/sample.data', 'r') ] * 2
my_mock_open.assert_has_calls(expected_calls)
#print(d1.mock_calls)
#print(d2.mock_calls) # We are not closing the file?!
class TestDataIterator(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestDataIterator, self).__init__(*args, **kwargs)
self.data_filename = os.path.join('tests', 'sample.data')
def test_01_default_usage(self):
# The total isn't 100 samples since the first sample is used to validate the format.
expected_block_sizes = (30, 30, 30, 9)
with open(self.data_filename, 'r') as f:
iteratable = DataIterator(f, block_size = 30)
for i, (X, y) in enumerate(iteratable):
self.assertEqual(X.shape[0], y.shape[0], 'There should be as many samples as labels.')
self.assertEqual(X.shape[0], expected_block_sizes[i], 'Invalid block size {}'.format(i))
self.assertEqual(i+1, len(expected_block_sizes), 'Not the expected block count')
def test_02_changing_block_size(self):
with open(self.data_filename, 'r') as f:
iteratable = DataIterator(f, block_size = 30)
X, y = next(iteratable)
self.assertEqual(X.shape[0], y.shape[0], 'There should be as many samples as labels.')
self.assertEqual(X.shape[0], 30, 'Invalid block size {}'.format(0))
iteratable.block_size = 20
X, y = next(iteratable)
self.assertEqual(X.shape[0], y.shape[0], 'There should be as many samples as labels.')
self.assertEqual(X.shape[0], 20, 'Invalid block size {}'.format(1))
def test_03_block_size_too_big(self):
with open(self.data_filename, 'r') as f:
iteratable = DataIterator(f, block_size = 130)
X, y = next(iteratable)
self.assertEqual(X.shape[0], 100-1, 'The block size should be the maximum number of available samples.')
# Trying to read again should raise a StopIteration since DataIterator is an iterator.
with self.assertRaises(StopIteration) as cm:
next(iteratable)
def test_04_with_infinite_shuffle(self):
"""
This is our use case scenario that we are mainly interested in.
"""
iteratable = DataIterator(InfiniteIterator(self.data_filename, opener=openShuffleNoTempFile),
block_size = 33)
samples = []
# Asking for 6 blocks means that we will trigger a reopen since there 3 blocks per file.
for i in range(6):
samples.append(next(iteratable))
# The data should be shuffle each time we go through the file thus no block should be the same.
for i in range(3):
self.assertFalse(np.all(samples[i][0] == samples[3+i][0]))
# Technically, the first 3 sample blocks should contain the same samples
# as the last three blocks but since we are losing a sample to validate
# the format, I'm not quite sure how to test the equality.
class TestHardElideDataIterator(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestHardElideDataIterator, self).__init__(*args, **kwargs)
self.data = [
'6 104 11 193 388 453 210 10 3 3 3 / 16 252 14 / 333',
'206 7 205 339 11 6 207 380 269 309 10 / 329 23 133 / 37',
'339 11 6 207 380 269 309 10 3 3 3 / 133 37 12 / 334',
'5 12 396 7 285 380 269 5 174 269 5 / 156 324 16 / 226',
'2 2 2 2 6 308 436 275 296 294 483 / 2 2 15 / 364'
]
def testNoElide(self):
iteratable = DataIterator(
iter(self.data),
block_size = 4,
swin_size = 11,
thist_size = 3)
X_expected = [
[206, 7, 205, 339, 11, 6, 207, 380, 269, 309, 10, 329, 23, 133],
[339, 11, 6, 207, 380, 269, 309, 10, 3, 3, 3, 133, 37, 12],
[ 5, 12, 396, 7, 285, 380, 269, 5, 174, 269, 5, 156, 324, 16],
[ 2, 2, 2, 2, 6, 308, 436, 275, 296, 294, 483, 2, 2, 15]]
y_expected = [ 37, 334, 226, 364 ]
iteratable = HardElideDataIterator(iteratable, thist_size = 3, thist_elide_size = 0)
X, y = iteratable.next()
self.assertTrue(np.all(X == X_expected))
self.assertTrue(np.all(y == y_expected))
def testMaxElide(self):
X_expected = [
[206, 7, 205, 339, 11, 6, 207, 380, 269, 309, 10, 0, 0, 0],
[339, 11, 6, 207, 380, 269, 309, 10, 3, 3, 3, 0, 0, 0],
[ 5, 12, 396, 7, 285, 380, 269, 5, 174, 269, 5, 0, 0, 0],
[ 2, 2, 2, 2, 6, 308, 436, 275, 296, 294, 483, 0, 0, 0]]
y_expected = [ 37, 334, 226, 364 ]
iteratable = DataIterator(
iter(self.data),
block_size = 4,
swin_size = 11,
thist_size = 3)
iteratable = HardElideDataIterator(iteratable, thist_size = 3, thist_elide_size = 3)
X, y = iteratable.next()
self.assertTrue(np.all(X == X_expected))
self.assertTrue(np.all(y == y_expected))
def testTwoElide(self):
X_expected = [
[206, 7, 205, 339, 11, 6, 207, 380, 269, 309, 10, 0, 0, 133],
[339, 11, 6, 207, 380, 269, 309, 10, 3, 3, 3, 0, 0, 12],
[ 5, 12, 396, 7, 285, 380, 269, 5, 174, 269, 5, 0, 0, 16],
[ 2, 2, 2, 2, 6, 308, 436, 275, 296, 294, 483, 0, 0, 15]]
y_expected = [ 37, 334, 226, 364 ]
iteratable = DataIterator(
iter(self.data),
block_size = 4,
swin_size = 11,
thist_size = 3)
iteratable = HardElideDataIterator(iteratable, thist_size = 3, thist_elide_size = 2)
X, y = iteratable.next()
self.assertTrue(np.all(X == X_expected))
self.assertTrue(np.all(y == y_expected))
def testOneElide(self):
X_expected = [
[206, 7, 205, 339, 11, 6, 207, 380, 269, 309, 10, 0, 23, 133],
[339, 11, 6, 207, 380, 269, 309, 10, 3, 3, 3, 0, 37, 12],
[ 5, 12, 396, 7, 285, 380, 269, 5, 174, 269, 5, 0, 324, 16],
[ 2, 2, 2, 2, 6, 308, 436, 275, 296, 294, 483, 0, 2, 15]]
y_expected = [ 37, 334, 226, 364 ]
iteratable = DataIterator(
iter(self.data),
block_size = 4,
swin_size = 11,
thist_size = 3)
iteratable = HardElideDataIterator(iteratable, thist_size = 3, thist_elide_size = 1)
X, y = iteratable.next()
self.assertTrue(np.all(X == X_expected))
self.assertTrue(np.all(y == y_expected))
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 5 13:04:16 2018
Copyright (c) <NAME>
Author email: <<EMAIL>>
Description:
This program was originally created to calculate the evaluation
statistics of deblurred images. However, this program can be used
to evaluate any image with another image.
Inputs:
input_dir = path of the directory where the blur image files are present
out_dir = path of directory where the output deblurred images have to be saved
GT_dir = path of directory where ground truth sharp images are present
Outputs:
One output is a text file containing the evaluation metrics. Another output
is image files showing the local difference map between images of interest
Literature used:
[1] https://github.com/RaphaelMeudec/deblur-gan
-------------------------------------------------------------------------------
"""
# Program starts here
from PIL import Image
import time
import os
import matplotlib.cm as cm
import numpy as np
import math
from skimage.measure import compare_ssim as ssim
from matplotlib import pyplot as plt
import pickle
# Secondary Functions
def load_imgRGB(img_path):
img = Image.open(img_path)
return img
def save_image(img, path):
img.save(path)
def is_an_image(filename):
img_Ext = ['.png', '.jpg', '.jpeg']
for ext in img_Ext:
if ext in filename:
return True
return False
def list_img_files(directory):
files = sorted(os.listdir(directory))
return [os.path.join(directory, f) for f in files if is_an_image(f)]
def PSNR(img1, img2):
mse = np.mean( (img1/255. - img2/255.) ** 2 )
if mse == 0:
return 100
pix_max = 1
return 20 * math.log10(pix_max / math.sqrt(mse))
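# PSNR in dB: 20*log10(MAX/RMSE); pixel values are rescaled to [0,1] above,
# so MAX (pix_max) is 1 and identical images are capped at 100 dB.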
def computeMetrics(input_dir,out_dir,GT_dir):
listimgs = list_img_files(input_dir)
if not GT_dir:
gt_flag=0
else:
gt_flag=1
if gt_flag==1:
comparison_dir_GT = os.path.join(out_dir,'Compare_with_GTImg')
if not os.path.exists(comparison_dir_GT):
os.makedirs(comparison_dir_GT)
comparison_dir_Blur = os.path.join(out_dir,'Compare_with_InputBlurImg')
if not os.path.exists(comparison_dir_Blur):
os.makedirs(comparison_dir_Blur)
count=0
for img_path in listimgs:
base = os.path.basename(img_path)
filename = os.path.splitext(base)[0]
fileext = os.path.splitext(base)[1]
current_img = load_imgRGB(img_path)
root_filename = filename[:-7]
if filename[-6:] == 'fake_B':
fake_img = current_img
blur_img = load_imgRGB(os.path.join(input_dir,root_filename+"_real_A"+fileext))
if gt_flag==1:
gt_img = load_imgRGB(os.path.join(GT_dir,root_filename+fileext))
else:
continue
# Comparison with blurred image
ss_blur,ss_blur_map = ssim(np.array(fake_img), np.array(blur_img), multichannel=True, full=True) # multichannel/full assumed: return the mean SSIM and the local SSIM map for RGB images
#!/usr/bin/env python
# coding: utf-8
# # Blauth-Arimotho Algorithm
# Assume X and Y are the input and output variables of the channel, respectively, and that r(x) is the input distribution. <br>
# The capacity of a channel is defined by <br>
# $C = \max_{r(x)} I(X;Y) = \max_{r(x)} \sum_{x} \sum_{y} r(x) p(y|x) \log \frac{p(y|x)}{\sum_{\tilde{x}} r(\tilde{x})\,p(y|\tilde{x})}$
# In[79]:
import numpy as np
def blahut_arimoto(p_y_x: np.ndarray, log_base: float = 2, thresh: float = 1e-12, max_iter: int = 1e3) -> tuple:
'''
Maximize the capacity between I(X;Y)
p_y_x: 2D array where each row is the conditional probability assignment p(y|x) for one input symbol x
log_base: the base of the log when calculating the capacity
thresh: convergence threshold on the update; stop iterating once the change falls below it
max_iter: the maximum number of iterations
'''
# Input test
assert np.abs(p_y_x.sum(axis=1).mean() - 1) < 1e-6
assert p_y_x.shape[0] > 1
# The number of inputs: size of |X|
m = p_y_x.shape[0]
# The number of outputs: size of |Y|
n = p_y_x.shape[1]
# Initialize the prior uniformly
r = np.ones((1, m))
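    # normalize the prior so that r(x) sums to one
    r = r / m
    # --- The rest of the iteration below is a reconstruction of the standard
    # --- Blahut-Arimoto update (the original cell is truncated at this point);
    # --- variable names other than those in the signature are assumptions.
    q = np.zeros((m, n))
    for _ in range(int(max_iter)):
        # q(x|y) is proportional to r(x) p(y|x), normalized over x for each y
        q = r.T * p_y_x
        q = q / q.sum(axis=0)
        # updated prior: normalized geometric mean of q weighted by p(y|x)
        r1 = np.prod(np.power(q, p_y_x), axis=1)
        r1 = r1 / r1.sum()
        tolerance = np.linalg.norm(r1 - r)
        r = r1.reshape(1, m)
        if tolerance < thresh:
            break
    # capacity estimate: C = sum_x sum_y r(x) p(y|x) log( q(x|y) / r(x) )
    c = 0.0
    for i in range(m):
        if r[0, i] > 0:
            c += np.sum(r[0, i] * p_y_x[i, :] * np.log(q[i, :] / r[0, i] + 1e-16))
    c = c / np.log(log_base)
    return c, r.flatten()

# Example (sketch): a binary symmetric channel with error probability 0.1;
# the capacity should come out close to 1 - H(0.1) ~ 0.531 bits.
# e = 0.1
# C, r_opt = blahut_arimoto(np.array([[1 - e, e], [e, 1 - e]]))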
# -*- coding: utf-8 -*-
"""
Created on Thu May 27 11:58:31 2021
@author: janousu
"""
import sys
sys.path.append(r'C:\SpaFHy_v1_Pallas')
import os
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import matplotlib.colors as mplcolors
import matplotlib.cm as mplcm
from spafhy_io import read_AsciiGrid
import pickle
from netCDF4 import Dataset #, date2num
from datetime import date
saveplots = True
today = date.today()
# import model
# import spafhy
eps = np.finfo(float).eps
# change working dir
os.chdir(r'C:\SpaFHy_v1_Pallas\FigsC3')
# results file
ncf_file = r'C:\SpaFHy_v1_Pallas\results\C3.nc'
# pickled model object
pk_file = r'C:\SpaFHy_v1_Pallas\results\C3model.pk'
""" load pre-computed results for C3 """
# spa instance
with open(pk_file, 'rb') as ff:
spa, Qmeas, FORC = pickle.load(ff)
# get time-index when results start
ix = 1 + np.where(FORC.index == spa.pgen['spinup_end'])[0][0]
tvec = FORC.index # date vector
gis = spa.GisData
twi = gis['twi']
LAIc = gis['LAI_conif']
LAId = gis['LAI_decid']
LAIs = gis['LAI_shrub']
LAIg = gis['LAI_grass']
soil = gis['soilclass']
# soil type indexes
peat_ix = np.where(soil == 4)
med_ix = np.where(soil == 2)
coarse_ix = np.where(soil == 1)
# indices for high and low twi
htwi_ix = np.where(twi > 12)
ltwi_ix = np.where(twi < 7)
# open link to results in netcdf:
dat = Dataset(ncf_file, 'r')
# results per sub-model
cres = dat['cpy'] # canopy -submodel
bres = dat['bu'] # bucket -submodel
tres = dat['top'] # topmodel - submodel
cmask = gis['cmask']
#%%
# sar soil moisture plots
import pandas as pd
# cell locations of kenttarova
kenttarova, _, _, _, _ = read_AsciiGrid(r'C:\PALLAS_RAW_DATA\Lompolonjanka\16b\sve_kenttarova_soilmoist.asc')
kenttarova_loc = np.where(kenttarova == 0)
kenttarova_loc = list([int(kenttarova_loc[0]), int(kenttarova_loc[1])])
# reading sar data
sar_path = r'C:\PALLAS_RAW_DATA\SAR_maankosteus\processed\16m_nc_spafhy_pallas\SAR_PALLAS_2019_mask2_16m_direct_catchment_mean4.nc'
sar = Dataset(sar_path, 'r')
sar_wliq = sar['soilmoisture']*cmask/100
spa_wliq = bres['Wliq']
spa_wliq_top = bres['Wliq_top']
dates_sar = sar['time'][:]
dates_sar = pd.to_datetime(dates_sar, format='%Y%m%d')
dates_spa = pd.to_datetime(tvec[1:], format='%Y%m%d')
#spa dates to match sar dates
date_in_spa = []
for i in range(len(dates_sar)):
ix = np.where(dates_spa == dates_sar[i])[0][0]
date_in_spa.append(ix)
spa_wliq = spa_wliq[date_in_spa,:,:]
spa_wliq_top = spa_wliq_top[date_in_spa,:,:]
# driest and wettest days
spasum = np.nansum(spa_wliq, axis=(1,2))
# index in sar data
day_low = int(np.where(spasum == np.nanmin(spasum))[0])
day_hi = int(np.where(spasum == np.nanmax(spasum))[0])
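# day_low / day_hi index the SAR-matched time axis (dates_sar / date_in_spa): the days
# with the smallest and largest total SpaFHy root-zone moisture, used below as the
# representative dry and wet days for the comparison maps.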
#sar_low = 43
#sar_hi = 20
# day in sar data
low_date = dates_sar[day_low].strftime("%Y-%m-%d")
hi_date = dates_sar[day_hi].strftime("%Y-%m-%d")
# cropping for plots
xcrop = np.arange(20,170)
ycrop = np.arange(20,250)
sar_wliq = sar_wliq[:,ycrop,:]
sar_wliq = sar_wliq[:,:,xcrop]
spa_wliq = spa_wliq[:,ycrop,:]
spa_wliq = spa_wliq[:,:,xcrop]
spa_wliq_top = spa_wliq_top[:,ycrop,:]
spa_wliq_top = spa_wliq_top[:,:,xcrop]
# Plotting
fig, axs = plt.subplots(nrows=2, ncols=3, figsize=(16,12));
ax1 = axs[0][0]
ax2 = axs[0][1]
ax3 = axs[1][0]
ax4 = axs[1][1]
ax5 = axs[0][2]
ax6 = axs[1][2]
#fig.suptitle('Volumetric water content', fontsize=15)
im1 = ax1.imshow(sar_wliq[day_hi,:,:], cmap='coolwarm_r', vmin=0.0, vmax=1.0, aspect='equal')
ax1.title.set_text('SAR')
#ax1.plot(kenttarova_loc[0], kenttarova_loc[1], marker='o', mec='b', mfc='k', alpha=0.8, ms=6.0)
im2 = ax2.imshow(spa_wliq[day_hi, :,:], cmap='coolwarm_r', vmin=0.0, vmax=1.0, aspect='equal')
ax2.title.set_text('SPAFHY rootzone')
ax2.text(10, -15, f'Wet day : {hi_date}', fontsize=15)
#ax2.plot(kenttarova_loc[0], kenttarova_loc[1], marker='o', mec='b', mfc='k', alpha=0.8, ms=6.0)
im3 = ax3.imshow(sar_wliq[day_low, :,:], cmap='coolwarm_r', vmin=0.0, vmax=1.0, aspect='equal')
ax3.title.set_text('SAR')
im4 = ax4.imshow(spa_wliq[day_low, :,:], cmap='coolwarm_r', vmin=0.0, vmax=1.0, aspect='equal')
ax4.title.set_text('SPAFHY rootzone')
ax4.text(10, -15, f'Dry day : {low_date}', fontsize=15)
im5 = ax5.imshow(spa_wliq_top[day_hi, :, :], cmap='coolwarm_r', vmin=0.0, vmax=1.0, aspect='equal')
ax5.title.set_text('SPAFHY topsoil')
im6 = ax6.imshow(spa_wliq_top[day_low, :,:], cmap='coolwarm_r', vmin=0.0, vmax=1.0, aspect='equal')
ax6.title.set_text('SPAFHY topsoil')
ax1.axis("off")
ax2.axis("off")
ax3.axis("off")
ax4.axis("off")
ax5.axis("off")
ax6.axis("off")
plt.tight_layout()
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.83, 0.15, 0.015, 0.7])
bar1 = fig.colorbar(im1, cax=cbar_ax)
fig.suptitle('SpaFHy v1')
#fig.subplots_adjust(right=0.8)
#cbar_ax = fig.add_axes([0.8, 0.2, 0.02, 0.6])
#bar2 = fig.colorbar(im6, cax=cbar_ax)
#ax5.plot([1.1, 1.1], [1.1, -1.2], color='black', lw=1, transform=ax2.transAxes, clip_on=False)
if saveplots == True:
plt.savefig(f'SAR_vs.SPAFHY_soilmoist_{today}.pdf')
plt.savefig(f'SAR_vs.SPAFHY_soilmoist_{today}.png')
#%%
# normalized by mean plots
# mean of each pixel
#spamean = np.nanmean(spa_wliq, axis=0)
#spatopmean = np.nanmean(spa_wliq_top, axis=0)
#sarmean = np.nanmean(sar_wliq, axis=0)
# mean of wet and dry days
spamean_wet = np.nanmean(spa_wliq[day_hi,:,:])
spatopmean_wet = np.nanmean(spa_wliq_top[day_hi,:,:])
sarmean_wet = np.nanmean(sar_wliq[day_hi,:,:])
spamean_dry = np.nanmean(spa_wliq[day_low,:,:])
spatopmean_dry = np.nanmean(spa_wliq_top[day_low,:,:])
sarmean_dry = np.nanmean(sar_wliq[day_low,:,:])
# Plotting
fig, axs = plt.subplots(nrows=2, ncols=3, figsize=(16,12));
ax1 = axs[0][0]
ax2 = axs[0][1]
ax3 = axs[1][0]
ax4 = axs[1][1]
ax5 = axs[0][2]
ax6 = axs[1][2]
#fig.suptitle('Volumetric water content', fontsize=15)
im1 = ax1.imshow(sar_wliq[day_hi,:,:]/sarmean_wet, vmin=0.0, vmax=2.0, cmap='coolwarm_r', aspect='equal')
ax1.title.set_text('SAR/SARwet_mean')
#ax1.plot(kenttarova_loc[0], kenttarova_loc[1], marker='o', mec='b', mfc='k', alpha=0.8, ms=6.0)
im2 = ax2.imshow(spa_wliq[day_hi, :,:]/spamean_wet, vmin=0.0, vmax=2.0, cmap='coolwarm_r', aspect='equal')
ax2.title.set_text('SPAFHYROOT/SPAFHYROOTwet_mean')
ax2.text(10, -15, f'Wet day : {hi_date}', fontsize=15)
#ax2.plot(kenttarova_loc[0], kenttarova_loc[1], marker='o', mec='b', mfc='k', alpha=0.8, ms=6.0)
im3 = ax3.imshow(sar_wliq[day_low, :,:]/sarmean_dry, vmin=0.0, vmax=2.0, cmap='coolwarm_r', aspect='equal')
ax3.title.set_text('SAR/SARdry_mean')
im4 = ax4.imshow(spa_wliq[day_low, :,:]/spamean_dry, vmin=0.0, vmax=2.0, cmap='coolwarm_r', aspect='equal')
ax4.title.set_text('SPAFHYROOT/SPAFHYROOTdry_mean')
ax4.text(10, -15, f'Dry day : {low_date}', fontsize=15)
im5 = ax5.imshow(spa_wliq_top[day_hi, :, :]/spatopmean_wet, vmin=0.0, vmax=2.0, cmap='coolwarm_r', aspect='equal')
ax5.title.set_text('SPAFHYTOP/SPAFHYTOPwet_mean')
im6 = ax6.imshow(spa_wliq_top[day_low, :,:]/spatopmean_dry, cmap='coolwarm_r', vmin=0.0, vmax=2.0, aspect='equal')
ax6.title.set_text('SPAFHYTOP/SPAFHYTOPdry_mean')
ax1.axis("off")
ax2.axis("off")
ax3.axis("off")
ax4.axis("off")
ax5.axis("off")
ax6.axis("off")
plt.tight_layout()
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.83, 0.15, 0.015, 0.7])
bar1 = fig.colorbar(im1, cax=cbar_ax)
#ax1.text(10, -15, 'norm by mean of the day', fontsize=10)
fig.suptitle('SpaFHy v1')
#fig.subplots_adjust(right=0.8)
#cbar_ax = fig.add_axes([0.8, 0.2, 0.02, 0.6])
#bar2 = fig.colorbar(im6, cax=cbar_ax)
#ax5.plot([1.1, 1.1], [1.1, -1.2], color='black', lw=1, transform=ax2.transAxes, clip_on=False)
if saveplots == True:
plt.savefig(f'SAR_vs.SPAFHY_soilmoist_normmean_{today}.pdf')
plt.savefig(f'SAR_vs.SPAFHY_soilmoist_normmean_{today}.png')
#%%
# normalized by median day plots
# mean of each pixel
#spamean = np.nanmean(spa_wliq, axis=0)
#spatopmean = np.nanmean(spa_wliq_top, axis=0)
#sarmean = np.nanmean(sar_wliq, axis=0)
# median day of total sums
spasum = np.nansum(spa_wliq, axis=(1,2))
spamedian = spa_wliq[np.where(np.sort(np.nansum(spa_wliq, axis=(1,2)))[(int(len(spasum)/2))] == spasum)[0][0],:,:]
sarsum = np.nansum(sar_wliq, axis=(1,2))
sarmedian = sar_wliq[np.where(np.sort(np.nansum(sar_wliq, axis=(1,2)))[(int(len(sarsum)/2))] == sarsum)[0][0],:,:]
spatopsum = np.nansum(spa_wliq_top, axis=(1,2))
spatopmedian = spa_wliq_top[np.where(np.sort(np.nansum(spa_wliq_top, axis=(1,2)))[(int(len(spatopsum)/2))] == spatopsum)[0][0],:,:]
# Plotting
fig, axs = plt.subplots(nrows=2, ncols=3, figsize=(16,12));
ax1 = axs[0][0]
ax2 = axs[0][1]
ax3 = axs[1][0]
ax4 = axs[1][1]
ax5 = axs[0][2]
ax6 = axs[1][2]
#fig.suptitle('Volumetric water content', fontsize=15)
im1 = ax1.imshow(sar_wliq[day_hi,:,:]/sarmedian, vmin=0.0, vmax=2.0, cmap='coolwarm_r', aspect='equal')
ax1.title.set_text('SAR/SAR_median')
#ax1.plot(kenttarova_loc[0], kenttarova_loc[1], marker='o', mec='b', mfc='k', alpha=0.8, ms=6.0)
im2 = ax2.imshow(spa_wliq[day_hi, :,:]/spamedian, vmin=0.0, vmax=2.0, cmap='coolwarm_r', aspect='equal')
ax2.title.set_text('SPAFHYROOT/SPAFHYROOT_median')
ax2.text(10, -15, f'Wet day : {hi_date}', fontsize=15)
#ax2.plot(kenttarova_loc[0], kenttarova_loc[1], marker='o', mec='b', mfc='k', alpha=0.8, ms=6.0)
im3 = ax3.imshow(sar_wliq[day_low, :,:]/sarmedian, vmin=0.0, vmax=2.0, cmap='coolwarm_r', aspect='equal')
ax3.title.set_text('SAR/SAR_median')
im4 = ax4.imshow(spa_wliq[day_low, :,:]/spamedian, vmin=0.0, vmax=2.0, cmap='coolwarm_r', aspect='equal')
ax4.title.set_text('SPAFHYROOT/SPAFHYROOT_median')
ax4.text(10, -15, f'Dry day : {low_date}', fontsize=15)
im5 = ax5.imshow(spa_wliq_top[day_hi, :, :]/spatopmedian, vmin=0.0, vmax=2.0, cmap='coolwarm_r', aspect='equal')
ax5.title.set_text('SPAFHYTOP/SPAFHYTOP_median')
im6 = ax6.imshow(spa_wliq_top[day_low, :,:]/spatopmedian, cmap='coolwarm_r', vmin=0.0, vmax=2.0, aspect='equal')
ax6.title.set_text('SPAFHYTOP/SPAFHYTOP_median')
ax1.axis("off")
ax2.axis("off")
ax3.axis("off")
ax4.axis("off")
ax5.axis("off")
ax6.axis("off")
plt.tight_layout()
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.83, 0.15, 0.015, 0.7])
bar1 = fig.colorbar(im1, cax=cbar_ax)
#ax1.text(10, -15, 'norm by mean of the day', fontsize=10)
fig.suptitle('SpaFHy v1')
#fig.subplots_adjust(right=0.8)
#cbar_ax = fig.add_axes([0.8, 0.2, 0.02, 0.6])
#bar2 = fig.colorbar(im6, cax=cbar_ax)
#ax5.plot([1.1, 1.1], [1.1, -1.2], color='black', lw=1, transform=ax2.transAxes, clip_on=False)
if saveplots == True:
plt.savefig(f'SAR_vs.SPAFHY_soilmoist_normmedian_{today}.pdf')
plt.savefig(f'SAR_vs.SPAFHY_soilmoist_normmedian_{today}.png')
#%%
# normalized by pixel mean
# mean of each pixel
spamean = np.nanmean(spa_wliq, axis=0)
spatopmean = np.nanmean(spa_wliq_top, axis=0)
sarmean = np.nanmean(sar_wliq, axis=0)
# Plotting
fig, axs = plt.subplots(nrows=2, ncols=3, figsize=(16,12));
ax1 = axs[0][0]
ax2 = axs[0][1]
ax3 = axs[1][0]
ax4 = axs[1][1]
ax5 = axs[0][2]
ax6 = axs[1][2]
#fig.suptitle('Volumetric water content', fontsize=15)
im1 = ax1.imshow(sar_wliq[day_hi,:,:]/sarmean, vmin=0.0, vmax=2.0, cmap='coolwarm_r', aspect='equal')
ax1.title.set_text('SAR/SARpixel_mean')
#ax1.plot(kenttarova_loc[0], kenttarova_loc[1], marker='o', mec='b', mfc='k', alpha=0.8, ms=6.0)
im2 = ax2.imshow(spa_wliq[day_hi, :,:]/spamean, vmin=0.0, vmax=2.0, cmap='coolwarm_r', aspect='equal')
ax2.title.set_text('SPAFHYROOT/SPAFHYROOTpixel_mean')
ax2.text(10, -15, f'Wet day : {hi_date}', fontsize=15)
#ax2.plot(kenttarova_loc[0], kenttarova_loc[1], marker='o', mec='b', mfc='k', alpha=0.8, ms=6.0)
im3 = ax3.imshow(sar_wliq[day_low, :,:]/sarmean, vmin=0.0, vmax=2.0, cmap='coolwarm_r', aspect='equal')
ax3.title.set_text('SAR/SARpixel_mean')
im4 = ax4.imshow(spa_wliq[day_low, :,:]/spamean, vmin=0.0, vmax=2.0, cmap='coolwarm_r', aspect='equal')
ax4.title.set_text('SPAFHYROOT/SPAFHYROOTpixel_mean')
ax4.text(10, -15, f'Dry day : {low_date}', fontsize=15)
im5 = ax5.imshow(spa_wliq_top[day_hi, :, :]/spatopmean, vmin=0.0, vmax=2.0, cmap='coolwarm_r', aspect='equal')
ax5.title.set_text('SPAFHYTOP/SPAFHYTOPpixel_mean')
im6 = ax6.imshow(spa_wliq_top[day_low, :,:]/spatopmean, cmap='coolwarm_r', vmin=0.0, vmax=2.0, aspect='equal')
ax6.title.set_text('SPAFHYTOP/SPAFHYTOPpixel_mean')
ax1.axis("off")
ax2.axis("off")
ax3.axis("off")
ax4.axis("off")
ax5.axis("off")
ax6.axis("off")
plt.tight_layout()
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.83, 0.15, 0.015, 0.7])
bar1 = fig.colorbar(im1, cax=cbar_ax)
#ax1.text(10, -15, 'norm by mean of the day', fontsize=10)
fig.suptitle('SpaFHy v1')
#fig.subplots_adjust(right=0.8)
#cbar_ax = fig.add_axes([0.8, 0.2, 0.02, 0.6])
#bar2 = fig.colorbar(im6, cax=cbar_ax)
#ax5.plot([1.1, 1.1], [1.1, -1.2], color='black', lw=1, transform=ax2.transAxes, clip_on=False)
if saveplots == True:
plt.savefig(f'SAR_vs.SPAFHY_soilmoist_normpixel_{today}.pdf')
plt.savefig(f'SAR_vs.SPAFHY_soilmoist_normpixel_{today}.png')
#%%
# point examples from mineral and openmire
# soilscouts at Kenttarova
folder = r'C:\SpaFHy_v1_Pallas\data\obs'
soil_file = 'soilscouts_s3_s5_s18.csv'
fp = os.path.join(folder, soil_file)
soilscout = pd.read_csv(fp, sep=';', parse_dates=['time'])
soilscout['time'] = pd.to_datetime(soilscout['time'])
# ec observation data
ec_fp = r'C:\SpaFHy_v1_Pallas\data\obs\ec_soilmoist.csv'
ecmoist = pd.read_csv(ec_fp, sep=';', parse_dates=['time'])
ecmoist['time'] = pd.to_datetime(ecmoist['time'])
soilm = soilscout.merge(ecmoist)
# cell locations of kenttarova
kenttarova, _, _, _, _ = read_AsciiGrid(r'C:\PALLAS_RAW_DATA\Lompolonjanka\16b\sve_kenttarova_soilmoist.asc')
kenttarova_loc = np.where(kenttarova == 0)
k_loc = list([int(kenttarova_loc[0]), int(kenttarova_loc[1])])
l_loc = [60, 60]
sar_wliq = sar['soilmoisture']*cmask/100
spa_wliq = bres['Wliq']
spa_wliq_top = bres['Wliq_top']
dates_sar = sar['time'][:]
dates_sar = pd.to_datetime(dates_sar, format='%Y%m%d')
#spa dates to match sar dates
date_in_spa = []
date_in_soilm = []
for i in range(len(dates_sar)):
ix = np.where(dates_spa == dates_sar[i])[0][0]
date_in_spa.append(ix)
yx = np.where(soilm['time'] == dates_sar[i])[0][0]
date_in_soilm.append(yx)
spa_wliq = spa_wliq[date_in_spa,:,:]
spa_wliq_top = spa_wliq_top[date_in_spa,:,:]
soilm = soilm.loc[date_in_soilm]
soilm = soilm.reset_index()
# Plotting
fig, axs = plt.subplots(nrows=2, ncols=1, figsize=(12,8));
ax1 = axs[0]
ax2 = axs[1]
#fig.suptitle('Volumetric water content', fontsize=15)
im1 = ax1.plot(sar_wliq[:,k_loc[0],k_loc[1]])
ax1.plot(spa_wliq[:,k_loc[0],k_loc[1]])
ax1.plot(spa_wliq_top[:,k_loc[0],k_loc[1]])
ax1.plot(soilm['s3'])
ax1.plot(soilm['s5'])
ax1.plot(soilm['s18'])
ax1.title.set_text('Mineral')
ax1.legend(['SAR', 'SpaFHy rootzone', 'SpaFHy top', 's3 = -0.05 m', 's5 = -0.60 m', 's18 = -0.30 m'], ncol = 6)
im2 = ax2.plot(sar_wliq[:,l_loc[0],l_loc[1]])
ax2.plot(spa_wliq[:,l_loc[0],l_loc[1]])
ax2.plot(spa_wliq_top[:,l_loc[0],l_loc[1]])
ax2.title.set_text('Open mire')
ax2.legend(['SAR', 'SpaFHy rootzone', 'SpaFHy top'], ncol = 3)
#%%
# point examples from mineral and openmire without SAR
# soilscouts at Kenttarova
folder = r'C:\SpaFHy_v1_Pallas\data\obs'
soil_file = 'soilscouts_s3_s5_s18.csv'
fp = os.path.join(folder, soil_file)
soilscout = pd.read_csv(fp, sep=';', parse_dates=['time'])
soilscout['time'] = pd.to_datetime(soilscout['time'])
# ec observation data
ec_fp = r'C:\SpaFHy_v1_Pallas\data\obs\ec_soilmoist.csv'
ecmoist = pd.read_csv(ec_fp, sep=';', parse_dates=['time'])
ecmoist['time'] = pd.to_datetime(ecmoist['time'])
soilm = soilscout.merge(ecmoist)
# cell locations of kenttarova
kenttarova, _, _, _, _ = read_AsciiGrid(r'C:\PALLAS_RAW_DATA\Lompolonjanka\16b\sve_kenttarova_soilmoist.asc')
kenttarova_loc = np.where(kenttarova == 0)
k_loc = list([int(kenttarova_loc[0]), int(kenttarova_loc[1])])
l_loc = [60, 60]
sar_wliq = sar['soilmoisture']*cmask/100
spa_wliq = bres['Wliq']
spa_wliq_top = bres['Wliq_top']
dates_sar = sar['time'][:]
dates_sar = pd.to_datetime(dates_sar, format='%Y%m%d')
'''
#spa dates to match sar dates
date_in_spa = []
date_in_soilm = []
for i in range(len(dates_sar)):
ix = np.where(dates_spa == dates_sar[i])[0][0]
date_in_spa.append(ix)
yx = np.where(soilm['time'] == dates_sar[i])[0][0]
date_in_soilm.append(yx)
'''
spa_wliq_df = pd.DataFrame()
spa_wliq_df['spa_k'] = spa_wliq[:,k_loc[0],k_loc[1]]
spa_wliq_df['spa_l'] = spa_wliq[:,l_loc[0],l_loc[1]]
spa_wliq_df['spatop_k'] = spa_wliq_top[:,k_loc[0],k_loc[1]]
spa_wliq_df['spatop_l'] = spa_wliq_top[:,l_loc[0],l_loc[1]]
spa_wliq_df['time'] = dates_spa
soilm = soilm.merge(spa_wliq_df)
soilm.index = soilm['time']
soilm = soilm[['s3', 's5', 's18', 'SH-5A', 'SH-5B', 'SH-20A', 'SH-20B', 'spa_k', 'spa_l', 'spatop_k', 'spatop_l']]
soilm = soilm.loc[(soilm.index > '2018-04-01') & (soilm.index < '2019-12-01')]
# Plotting
fig, axs = plt.subplots(nrows=2, ncols=1, figsize=(12,8));
ax1 = axs[0]
ax2 = axs[1]
#fig.suptitle('Volumetric water content', fontsize=15)
im1 = ax1.plot(soilm['spa_k'])
ax1.plot(soilm['spatop_k'])
ax1.plot(soilm['s3'], alpha=0.4)
#ax1.plot(soilm['s5'], alpha=0.4)
ax1.plot(soilm['s18'], alpha=0.4)
#ax1.plot(soilm['SH-5A'], alpha=0.4)
ax1.plot(soilm['SH-5B'], alpha=0.4)
#ax1.plot(soilm['SH-20A'], alpha=0.4)
ax1.plot(soilm['SH-20B'], alpha=0.4)
ax1.title.set_text('Mineral')
ax1.legend(['spa root', 'spa top', 's3 = -0.05 m', 's18 = -0.30 m', 'SH-5B', 'SH-20B'], ncol = 6)  # labels match the series actually plotted
ax1.set_ylim(0,0.6)
im2 = ax2.plot(soilm['spa_l'])
ax2.plot(soilm['spatop_l'])
ax2.title.set_text('Mire')
ax2.legend(['spa root', 'spa top'], ncol = 2)
fig.suptitle('SpaFHy v1')
if saveplots == True:
plt.savefig(f'pointplots_soilmoist_{today}.pdf')
plt.savefig(f'pointplots_soilmoist_{today}.png')
#%%
# Q-Q plots of dry, wet and inbetween day
import numpy as np
import pandas as pd
norm = False
sar_wliq = sar['soilmoisture']*cmask/100
spa_wliq = bres['Wliq']
spa_wliq_top = bres['Wliq_top']
dates_sar = sar['time'][:]
dates_sar = pd.to_datetime(dates_sar, format='%Y%m%d')
#spa dates to match sar dates
date_in_spa = []
for i in range(len(dates_sar)):
ix = np.where(dates_spa == dates_sar[i])[0][0]
date_in_spa.append(ix)
spa_wliq = spa_wliq[date_in_spa,:,:]
spa_wliq_top = spa_wliq_top[date_in_spa,:,:]
if norm == True:
spa_wliq = spa_wliq/(np.nanmean(spa_wliq))
spa_wliq_top = spa_wliq_top/(np.nanmean(spa_wliq_top))
sar_wliq = sar_wliq/(np.nanmean(sar_wliq))
sar_flat_dry = sar_wliq[day_low,:,:].flatten()
#sar_flat[np.where(sar_flat <= 0)] = np.nan
spa_flat_dry = spa_wliq[day_low,:,:].flatten()
spa_top_flat_dry = spa_wliq_top[day_low,:,:].flatten()
flat_pd = pd.DataFrame()
flat_pd['sar_dry'] = sar_flat_dry
flat_pd['spa_dry'] = spa_flat_dry
flat_pd['spa_top_dry'] = spa_top_flat_dry
sar_flat_wet = sar_wliq[day_hi,:,:].flatten()
#sar_flat[np.where(sar_flat <= 0)] = np.nan
spa_flat_wet = spa_wliq[day_hi,:,:].flatten()
spa_top_flat_wet = spa_wliq_top[day_hi,:,:].flatten()
inb = int((day_hi+day_low)/2)
sar_flat_inb = sar_wliq[inb,:,:].flatten()
spa_flat_inb = spa_wliq[inb,:,:].flatten()
spa_top_flat_inb = spa_wliq_top[inb,:,:].flatten()
flat_pd['sar_wet'] = sar_flat_wet
flat_pd['spa_wet'] = spa_flat_wet
flat_pd['spa_top_wet'] = spa_top_flat_wet
flat_pd['sar_inb'] = sar_flat_inb
flat_pd['spa_inb'] = spa_flat_inb
flat_pd['spa_top_inb'] = spa_top_flat_inb
flat_pd = flat_pd.loc[np.isfinite(flat_pd['sar_dry']) & np.isfinite(flat_pd['spa_dry']) & np.isfinite(flat_pd['spa_top_dry'])]
#flat_pd = flat_pd.loc[(flat_pd['sar'] > 0) & (flat_pd['sar'] < 1)]
#g = sns.scatterplot(flat_pd['sar'], flat_pd['spa'], alpha=0.0001, s=2)
#g.set(ylim=(-0.1, 1.0))
#g.set(xlim=(-0.1, 1.0))
# Plotting
fig, axs = plt.subplots(nrows=2, ncols=3, figsize=(12,8));
ax1 = axs[0][0]
ax2 = axs[0][1]
ax3 = axs[1][0]
ax4 = axs[1][1]
ax5 = axs[0][2]
ax6 = axs[1][2]
x1 = sns.regplot(ax=ax1, x=flat_pd['sar_dry'], y=flat_pd['spa_dry'], scatter_kws={'s':10, 'alpha':0.1}, line_kws={"color": "red"})
if norm == False:
ax1.set(ylim=(0, 1))
ax1.set(xlim=(0, 1))
else:
ax1.set(ylim=(0, 2.5))
ax1.set(xlim=(0, 2.5))
#ax1.set_title('Dry day')
x2 = sns.regplot(ax=ax2, x=flat_pd['sar_wet'], y=flat_pd['spa_wet'], scatter_kws={'s':10, 'alpha':0.1}, line_kws={"color": "red"})
if norm == False:
ax2.set(ylim=(0, 1))
ax2.set(xlim=(0, 1))
else:
ax2.set(ylim=(0, 2.5))
ax2.set(xlim=(0, 2.5))
#ax2.set_title('Wet day')
x3 = sns.regplot(ax=ax3, x=flat_pd['sar_dry'], y=flat_pd['spa_top_dry'], scatter_kws={'s':10, 'alpha':0.1}, line_kws={"color": "red"})
if norm == False:
ax3.set(ylim=(0, 1))
ax3.set(xlim=(0, 1))
else:
ax3.set(ylim=(0, 2.5))
ax3.set(xlim=(0, 2.5))
#ax3.set_title('Dry day')
x4 = sns.regplot(ax=ax4, x=flat_pd['sar_wet'], y=flat_pd['spa_top_wet'], scatter_kws={'s':10, 'alpha':0.1}, line_kws={"color": "red"})
if norm == False:
ax4.set(ylim=(0, 1))
ax4.set(xlim=(0, 1))
else:
ax4.set(ylim=(0, 2.5))
ax4.set(xlim=(0, 2.5))
#ax4.set_title('Wet day')
x5 = sns.regplot(ax=ax5, x=flat_pd['sar_inb'], y=flat_pd['spa_inb'], scatter_kws={'s':10, 'alpha':0.1}, line_kws={"color": "red"})
if norm == False:
ax5.set(ylim=(0, 1))
ax5.set(xlim=(0, 1))
else:
ax5.set(ylim=(0, 2.5))
ax5.set(xlim=(0, 2.5))
#ax4.set_title('Wet day')
x6 = sns.regplot(ax=ax6, x=flat_pd['sar_inb'], y=flat_pd['spa_top_inb'], scatter_kws={'s':10, 'alpha':0.1}, line_kws={"color": "red"})
if norm == False:
ax6.set(ylim=(0, 1))
ax6.set(xlim=(0, 1))
else:
ax6.set(ylim=(0, 2.5))
ax6.set(xlim=(0, 2.5))
#ax4.set_title('Wet day')
if norm == True:
fig.suptitle('SpaFHy v1, norm by total means of each')
else:
fig.suptitle('SpaFHy v1')
if norm == False:
if saveplots == True:
plt.savefig(f'sar_spa_qq_wetdry_{today}.pdf')
plt.savefig(f'sar_spa_qq_wetdry_{today}.png')
else:
if saveplots == True:
plt.savefig(f'sar_spa_qq_wetdry_norm_{today}.pdf')
plt.savefig(f'sar_spa_qq_wetdry_norm_{today}.png')
#%%
# QQ plots of the whole season
norm = False
sar_wliq = sar['soilmoisture']*cmask/100
spa_wliq = bres['Wliq']
spa_wliq_top = bres['Wliq_top']
dates_sar = sar['time'][:]
dates_sar = pd.to_datetime(dates_sar, format='%Y%m%d')
#spa dates to match sar dates
date_in_spa = []
for i in range(len(dates_sar)):
ix = np.where(dates_spa == dates_sar[i])[0][0]
date_in_spa.append(ix)
spa_wliq = spa_wliq[date_in_spa,:,:]
spa_wliq_top = spa_wliq_top[date_in_spa,:,:]
sar_flat = sar_wliq[:,:,:].flatten()
spa_flat = spa_wliq[:,:,:].flatten()
spa_top_flat = spa_wliq_top[:,:,:].flatten()
flat_pd = pd.DataFrame()
flat_pd['sar'] = sar_flat
flat_pd['spa'] = spa_flat
flat_pd['spa_top'] = spa_top_flat
flat_pd = flat_pd.loc[np.isfinite(flat_pd['sar']) & np.isfinite(flat_pd['spa']) & np.isfinite(flat_pd['spa_top'])]
import pandas as pd
import numpy as np
import json
from tqdm import tqdm
from scipy.optimize import minimize
from utils import get_next_gw, time_decay
from ranked_probability_score import ranked_probability_score, match_outcome
class Bradley_Terry:
""" Model game outcomes using logistic distribution """
def __init__(
self,
games,
threshold=0.1,
scale=1,
parameters=None,
decay=True):
"""
Args:
games (pd.DataFrame): Finished games to used for training.
threshold (float): Threshold to differentiate team performances
scale (float): Variance of strength ratings
parameters (array): Initial parameters to use
decay (boolean): Apply time decay
"""
self.games = games.loc[:, [
"score1", "score2", "team1", "team2", "date"]]
self.games = self.games.dropna()
self.games["date"] = pd.to_datetime(self.games["date"])
self.games["days_since"] = (
self.games["date"].max() - self.games["date"]).dt.days
self.games["weight"] = (
time_decay(0.0026, self.games["days_since"]) if decay else 1)
self.decay = decay
self.games["score1"] = self.games["score1"].astype(int)
self.games["score2"] = self.games["score2"].astype(int)
self.teams = np.sort(np.unique(self.games["team1"]))
self.league_size = len(self.teams)
self.threshold = threshold
self.scale = scale
# Initial parameters
if parameters is None:
self.parameters = np.concatenate((
np.random.uniform(0, 1, (self.league_size)), # Strength
[.1], # Home advantage
))
else:
self.parameters = parameters
def likelihood(self, parameters, games):
""" Perform sample prediction and compare with outcome
Args:
parameters (pd.DataFrame): Current estimate of the parameters
games (pd.DataFrame): Fixtures
Returns:
(float): Likelihood of the estimated parameters
"""
parameter_df = (
pd.DataFrame()
.assign(rating=parameters[:self.league_size])
.assign(team=self.teams)
)
fixtures_df = (
pd.merge(
games,
parameter_df,
left_on='team1',
right_on='team')
.rename(columns={"rating": "rating1"})
.merge(parameter_df, left_on='team2', right_on='team')
.rename(columns={"rating": "rating2"})
.drop("team_y", axis=1)
.drop("team_x", axis=1)
)
outcome = match_outcome(fixtures_df)
outcome_ma = np.ones((fixtures_df.shape[0], 3))
outcome_ma[np.arange(0, fixtures_df.shape[0]), outcome] = 0
odds = np.zeros((fixtures_df.shape[0], 3))
odds[:, 0] = (
1 / (1 + np.exp(
-(
fixtures_df["rating1"] + parameters[-1] -
fixtures_df["rating2"] - self.threshold
) / self.scale)
)
)
odds[:, 2] = (
1 / (1 + np.exp(
-(
fixtures_df["rating2"] - parameters[-1] -
fixtures_df["rating1"] - self.threshold
) / self.scale)
)
)
odds[:, 1] = 1 - odds[:, 0] - odds[:, 2]
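        # Illustrative check of the three-outcome logistic model above (values
        # assumed): with rating1 == rating2, home advantage 0.1, threshold 0.1
        # and scale 1, P(home win) = 1 / (1 + exp(0)) = 0.50,
        # P(away win) = 1 / (1 + exp(0.2)) ~= 0.45, and the draw takes the
        # remaining ~0.05.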
        return - np.power(
            np.ma.masked_array(odds, outcome_ma),
            fixtures_df["weight"].values.reshape(-1, 1)  # time-decay match weight as exponent (assumed)
        ).sum()
"""
Factor Analysis-regularized logistic regression.
Is `linear_layer` necessary?
"""
__date__ = "June - December 2021"
import numpy as np
import os
from sklearn.base import BaseEstimator
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
import torch
from torch.distributions import Categorical, Normal, kl_divergence
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader, WeightedRandomSampler
import warnings
from ..utils.utils import get_weights
# https://stackoverflow.com/questions/53014306/
if tuple(int(v) for v in torch.__version__.split('+')[0].split('.')[:2]) >= (1, 9):
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
FLOAT = torch.float32
INT = torch.int64
MAX_LABEL = 1000
EPSILON = 1e-6
FIT_ATTRIBUTES = ['classes_']
class FaSae(torch.nn.Module, BaseEstimator):
def __init__(self, reg_strength=1.0, z_dim=32, weight_reg=0.0,
nonnegative=True, variational=False, kl_factor=1.0, n_iter=50000,
lr=1e-3, batch_size=256, beta=0.5, device='auto'):
"""
A supervised autoencoder with nonnegative and variational options.
Notes
-----
* The `labels` argument to `fit` and `score` is a bit hacky so that the
model can work nicely with the sklearn model selection tools. The
labels should be an array of integers with `label // 1000` encoding
the individual and `label % 1000` encoding the behavioral label.
Parameters
----------
reg_strength : float, optional
This controls how much the classifier is regularized. This should
be positive, and larger values indicate more regularization.
z_dim : int, optional
Latent dimension/number of networks.
weight_reg : float, optional
Model L2 weight regularization.
nonnegative : bool, optional
Use nonnegative factorization.
variational : bool, optional
Whether a variational autoencoder is used.
kl_factor : float, optional
How much to weight the KL divergence term in the variational
autoencoder (VAE). The standard setting is `1.0`. This is a distinct
regularization parameter from `reg_strength` that can be
independently set. This parameter is only used if `variational` is
`True`.
n_iter : int, optional
Number of gradient steps during training.
lr : float, optional
Learning rate.
        batch_size : int, optional
            Minibatch size.
        beta : float, optional
            Class-weight exponent in [0, 1]: class weights are raised to
            `beta` and sampling weights to `1 - beta`.
        device : str, optional
            Torch device; 'auto' picks CUDA when available.
        """
super(FaSae, self).__init__()
assert kl_factor >= 0.0, f"{kl_factor} < 0"
# Set parameters.
assert isinstance(reg_strength, (int, float))
assert reg_strength >= 0.0
self.reg_strength = float(reg_strength)
assert isinstance(z_dim, int)
assert z_dim >= 1
self.z_dim = z_dim
assert isinstance(weight_reg, (int, float))
assert weight_reg >= 0.0
self.weight_reg = float(weight_reg)
assert isinstance(nonnegative, bool)
self.nonnegative = nonnegative
assert isinstance(variational, bool)
self.variational = variational
assert isinstance(kl_factor, (int, float))
assert kl_factor >= 0.0
self.kl_factor = float(kl_factor)
assert isinstance(n_iter, int)
assert n_iter > 0
self.n_iter = n_iter
assert isinstance(lr, (int, float))
assert lr > 0.0
self.lr = float(lr)
assert isinstance(batch_size, int)
assert batch_size > 0
self.batch_size = batch_size
assert isinstance(beta, (int, float))
assert beta >= 0.0 and beta <= 1.0
self.beta = float(beta)
if device == 'auto':
device = 'cuda' if torch.cuda.is_available() else 'cpu'
self.device = device
self.classes_ = None
def _initialize(self, n_features):
"""Initialize parameters of the networks before training."""
# Check arguments.
n_classes = len(self.classes_)
assert n_classes <= self.z_dim, f"{n_classes} > {self.z_dim}"
if self.nonnegative and self.weight_reg > 0.0:
self.weight_reg = 0.0
warnings.warn(
f"Weight regularization should be 0.0 "
f"for nonnegative factorization"
)
# Make the networks.
self.recognition_model = torch.nn.Linear(n_features, self.z_dim)
self.rec_model_1 = torch.nn.Linear(n_features, self.z_dim)
self.rec_model_2 = torch.nn.Linear(n_features, self.z_dim)
self.linear_layer = torch.nn.Linear(self.z_dim, self.z_dim)
prior_mean = torch.zeros(self.z_dim).to(self.device)
prior_std = torch.ones(self.z_dim).to(self.device)
self.prior = Normal(prior_mean, prior_std)
self.model = torch.nn.Linear(self.z_dim, n_features)
self.logit_bias = torch.nn.Parameter(torch.zeros(1,n_classes))
self.to(self.device)
def fit(self, features, labels, print_freq=500):
"""
Train the model on the given dataset.
Parameters
----------
features : numpy.ndarray
Shape: [n_data, n_features]
labels : numpy.ndarray
Shape: [n_data]
        print_freq : None or int, optional
            How often (in epochs) to print the training loss. `None` disables
            printing.
"""
# Check arguments.
features, labels = check_X_y(features, labels)
# Derive groups, labels, and weights from labels.
groups, labels, weights = _derive_groups(labels)
self.classes_, labels = np.unique(labels, return_inverse=True)
if features.shape[0] != labels.shape[0]:
raise ValueError(f"{features.shape}[0] != {labels.shape}[0]")
if len(features.shape) != 2:
raise ValueError(f"len({features.shape}) != 2")
if len(labels.shape) != 1:
raise ValueError(f"len({labels.shape}) != 1")
self._initialize(features.shape[1])
# NumPy arrays to PyTorch tensors.
features = torch.tensor(features, dtype=FLOAT).to(self.device)
labels = torch.tensor(labels, dtype=INT).to(self.device)
weights = torch.tensor(weights, dtype=FLOAT).to(self.device)
sampler_weights = torch.pow(weights, 1.0 - self.beta)
weights = torch.pow(weights, self.beta)
# Make some loaders and an optimizer.
dset = TensorDataset(features, labels, weights)
sampler = WeightedRandomSampler(
sampler_weights,
num_samples=self.batch_size,
replacement=True,
)
loader = DataLoader(
dset,
sampler=sampler,
batch_size=self.batch_size,
)
optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
# Train.
for epoch in range(1,self.n_iter+1):
epoch_loss = 0.0
for batch in loader:
self.zero_grad()
loss = self(*batch)
epoch_loss += loss.item()
loss.backward()
optimizer.step()
if print_freq is not None and epoch % print_freq == 0:
print(f"iter {epoch:04d}, loss: {loss:3f}")
return self
def forward(self, features, labels, weights):
"""
Calculate a loss for the features and labels.
Parameters
----------
features : torch.Tensor
Shape: [batch,n_features]
labels : torch.Tensor
Shape: [batch]
weights : None or torch.Tensor
Shape: [batch]
Returns
-------
loss : torch.Tensor
Shape: []
"""
if self.variational:
# Feed through the recognition network to get latents.
z_mus = self.rec_model_1(features)
z_log_stds = self.rec_model_2(features)
# Make the variational posterior and get a KL from the prior.
dist = Normal(z_mus, EPSILON + z_log_stds.exp())
kld = kl_divergence(dist, self.prior).sum(dim=1) # [b]
# Sample.
zs = dist.rsample() # [b,z]
# Project.
zs = self.linear_layer(zs)
else: # deterministic autoencoder
# Feed through the recognition network to get latents.
zs = self.recognition_model(features)
# Reconstruct the features.
if self.nonnegative:
A = F.softplus(self.model.weight)
features_rec = A.unsqueeze(0) @ F.softplus(zs).unsqueeze(-1)
features_rec = features_rec.squeeze(-1)
else:
A = self.model.weight
features_rec = self.model(zs)
# Calculate a reconstruction loss.
rec_loss = torch.mean((features - features_rec).pow(2), dim=1) # [b]
rec_loss = self.reg_strength * rec_loss
# Predict the labels.
logits = zs[:,:len(self.classes_)-1]
ones = torch.ones(
logits.shape[0],
1,
dtype=logits.dtype,
device=logits.device,
)
logits = torch.cat([logits, ones], dim=1) + self.logit_bias
log_probs = Categorical(logits=logits).log_prob(labels) # [b]
# Weight label log likes by class weights.
if weights is not None:
assert weights.shape == labels.shape
log_probs = weights * log_probs
# Regularize the model weights.
l2_loss = self.weight_reg * torch.norm(A)
# Combine all the terms into a composite loss.
loss = rec_loss - log_probs
if self.variational:
loss = loss + self.kl_factor * kld
loss = torch.mean(loss) + l2_loss
return loss
@torch.no_grad()
def predict_proba(self, features, to_numpy=True, stochastic=False):
"""
Probability estimates.
Note
----
* This should be consistent with `self.forward`.
Parameters
----------
        features : torch.Tensor
            Shape: [batch, n_features]
to_numpy : bool, optional
stochastic : bool, optional
Returns
-------
probs : numpy.ndarray
Shape: [batch, n_classes]
"""
if self.variational:
# Feed through the recognition network to get latents.
z_mus = self.rec_model_1(features)
z_log_stds = self.rec_model_2(features)
if stochastic:
# Make the variational posterior and sample.
dist = Normal(z_mus, EPSILON + z_log_stds.exp())
zs = dist.rsample() # [b,z]
else:
zs = z_mus
# Project.
zs = self.linear_layer(zs)
else: # deterministic autoencoder
# Feed through the recognition network to get latents.
zs = self.recognition_model(features)
# Get class predictions.
logits = zs[:,:len(self.classes_)-1]
ones = torch.ones(
logits.shape[0],
1,
dtype=logits.dtype,
device=logits.device,
)
logits = torch.cat([logits, ones], dim=1) + self.logit_bias
probs = F.softmax(logits, dim=1) # [b, n_classes]
if to_numpy:
return probs.cpu().numpy()
return probs
@torch.no_grad()
def predict(self, X):
"""
Predict class labels for the features.
Parameters
----------
X : numpy.ndarray
Features
Shape: [batch, n_features]
Returns
-------
predictions : numpy.ndarray
Shape: [batch]
"""
# Checks
check_is_fitted(self, attributes=FIT_ATTRIBUTES)
X = check_array(X)
# Feed through model.
X = torch.tensor(X, dtype=FLOAT).to(self.device)
probs = self.predict_proba(X, to_numpy=False)
predictions = torch.argmax(probs, dim=1)
return self.classes_[predictions.cpu().numpy()]
@torch.no_grad()
def score(self, features, labels):
"""
Get a class weighted accuracy.
This is the objective we really care about, which doesn't contain the
regularization in FA's `forward` method.
Parameters
----------
features : numpy.ndarray
Shape: [n_datapoints, n_features]
labels : numpy.ndarray
Shape: [n_datapoints]
Return
------
weighted_acc : float
"""
# Derive groups, labels, and weights from labels.
groups, labels, weights = _derive_groups(labels)
predictions = self.predict(features)
scores = np.zeros(len(features))
scores[predictions == labels] = 1.0
scores = scores * weights
weighted_acc = np.mean(scores)
return weighted_acc
def get_params(self, deep=True):
"""Get parameters for this estimator."""
params = {
'reg_strength': self.reg_strength,
'z_dim': self.z_dim,
'weight_reg': self.weight_reg,
'nonnegative': self.nonnegative,
'variational': self.variational,
'kl_factor': self.kl_factor,
'n_iter': self.n_iter,
'lr': self.lr,
'batch_size': self.batch_size,
'beta': self.beta,
'device': self.device,
'classes_': self.classes_,
}
if deep:
params['model_state_dict'] = self.state_dict()
return params
def set_params(self, reg_strength=None, z_dim=None, weight_reg=None,
nonnegative=None, variational=None, kl_factor=None, n_iter=None,
lr=None, batch_size=None, beta=None, device=None, classes_=None,
model_state_dict=None):
"""
Set the parameters of this estimator.
Parameters
----------
...
"""
if reg_strength is not None:
self.reg_strength = reg_strength
if z_dim is not None:
self.z_dim = z_dim
if weight_reg is not None:
self.weight_reg = weight_reg
if nonnegative is not None:
self.nonnegative = nonnegative
if variational is not None:
self.variational = variational
if kl_factor is not None:
self.kl_factor = kl_factor
if n_iter is not None:
self.n_iter = n_iter
if lr is not None:
self.lr = lr
if batch_size is not None:
self.batch_size = batch_size
if beta is not None:
self.beta = beta
if device is not None:
self.device = device
if classes_ is not None:
self.classes_ = classes_
if model_state_dict is not None:
assert 'model.bias' in model_state_dict, \
f"'model.bias' not in {list(model_state_dict.keys())}"
n_features = len(model_state_dict['model.bias'].view(-1))
self._initialize(n_features)
self.load_state_dict(model_state_dict)
return self
def save_state(self, fn):
"""Save parameters for this estimator."""
np.save(fn, self.get_params(deep=True))
def load_state(self, fn):
"""Load and set the parameters for this estimator."""
        self.set_params(**np.load(fn, allow_pickle=True).item())  # .item() unwraps the saved dict
''' This is the code I use to make animations of the RG flow.
The code reads in data in the same format RG_<integration type>.py
outputs.
You may get some warnings for taking log of zero and such. So far these
have not ruined any of the output.
<NAME> (c) 2016
'''
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import sys
# load data here
#name='data file to load'
#save_name=' animation to save , must be a .mp4 extension'
#title=r'what ever you like '
name='RG_STS_001_kmax10_ds1.npy'
save_name='test.mp4'
title=r'RG test'
d = np.load(name)
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Inception Score (IS) from the paper
"Improved techniques for training GANs"."""
import pickle
import numpy as np
import tensorflow as tf
import dnnlib
import dnnlib.tflib as tflib
from metrics import metric_base
# ----------------------------------------------------------------------------
class IS(metric_base.MetricBase):
def __init__(self, num_images, num_splits, minibatch_per_gpu, **kwargs):
super().__init__(**kwargs)
self.num_images = num_images
self.num_splits = num_splits
self.minibatch_per_gpu = minibatch_per_gpu
def _evaluate(self, Gs, G_kwargs, num_gpus, **_kwargs): # pylint: disable=arguments-differ
minibatch_size = num_gpus * self.minibatch_per_gpu
with dnnlib.util.open_url(
'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/inception_v3_softmax.pkl') as f:
inception = pickle.load(f)
activations = np.empty([self.num_images, inception.output_shape[1]], dtype=np.float32)
# Construct TensorFlow graph.
result_expr = []
for gpu_idx in range(num_gpus):
with tf.device(f'/gpu:{gpu_idx}'):
Gs_clone = Gs.clone()
inception_clone = inception.clone()
latents = tf.random_normal([self.minibatch_per_gpu] + Gs_clone.input_shape[1:])
labels = self._get_random_labels_tf(self.minibatch_per_gpu)
images = Gs_clone.get_output_for(latents, labels, **G_kwargs)
if images.shape[1] == 1: images = tf.tile(images, [1, 3, 1, 1])
images = tflib.convert_images_to_uint8(images)
result_expr.append(inception_clone.get_output_for(images))
# Calculate activations for fakes.
for begin in range(0, self.num_images, minibatch_size):
self._report_progress(begin, self.num_images)
end = min(begin + minibatch_size, self.num_images)
activations[begin:end] = np.concatenate(tflib.run(result_expr), axis=0)[:end - begin]
# Calculate IS.
scores = []
for i in range(self.num_splits):
part = activations[i * self.num_images // self.num_splits: (i + 1) * self.num_images // self.num_splits]
kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
            kl = np.mean(np.sum(kl, 1))
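            # The per-split Inception Score is exp(E_x[KL(p(y|x) || p(y))]); `kl`
            # above is that expectation over the split, so a typical next step
            # (assumed here) is scores.append(np.exp(kl)).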
## author: <NAME>
## modify: xin luo, 2021.8.14
"""
notes:
For external tile processing please use "tile.py" with the same extent
for the A and D files. This as the program uses the tile numbering to
determine which of the tiles should be crossed together.
When running in external tile-mode the saved file with crossovers
will be appended with "_XOVERS_AD/DA". Please use "_A" or "_D" in the
filename to indicate Asc or Des tracks when running in tile mode.
example:
python xover.py a.h5 d.h5 -o xover.h5 -p 3031 -d 100 -k 1 1\
-v orb lon lat time height -b 10
python xover.py ./tiles/*_a.h5 ./tiles/*_d.h5 -p 3031 -d 100 -k\
1 1 -v orb lon lat time height -f -b 10
"""
import os
import glob
import numpy as np
import pyproj
import h5py
import argparse
import warnings
from scipy import stats
# Ignore all warnings
warnings.filterwarnings("ignore")
def get_args():
""" Get command-line arguments. """
parser = argparse.ArgumentParser(
description='Program for computing satellite/airborne crossovers.')
parser.add_argument(
'input', metavar='ifile', type=str, nargs=2,
help='name of two input files to cross (HDF5)')
parser.add_argument(
'-o', metavar='ofile', dest='output', type=str, nargs=1,
help='name of output file (HDF5)',
default=[None])
parser.add_argument(
'-p', metavar=('epsg_num'), dest='proj', type=str, nargs=1,
help=('projection: EPSG number (AnIS=3031, GrIS=3413)'),
default=['4326'],)
parser.add_argument(
'-d', metavar=('tile_size'), dest='tile_dxy', type=int, nargs=1,
help='tile size (km)',
default=[None],)
parser.add_argument(
'-k', metavar=('n_res'), dest='nres', type=int, nargs=1,
help='along-track subsampling every k:th pnt for each file',
default=[1],)
parser.add_argument(
'-b', metavar=('buffer'), dest='buff', type=int, nargs=1,
help=('tile buffer (km)'),
default=[0],)
parser.add_argument(
'-v', metavar=('spot','x','y','t','h'), dest='vnames', type=str, nargs=5,
help=('main vars: names if HDF5, spot/lon/lat/time/height'),
default=[None],)
parser.add_argument( #
'-f', dest='tile', action='store_true',
help=('run in tile mode'),
default=False)
return parser.parse_args()
def intersect(x_up, y_up, x_down, y_down, t_up, \
t_down, z_up =None, z_down=None):
"""
    !!! faster, but harder to follow; note that time and height inputs are added here
reference:
https://stackoverflow.com/questions/17928452/
find-all-intersections-of-xy-data-point-graph-with-numpy
des: Find orbit crossover locations through solving the equation:
p0 + s*(p1-p0) = q0 + t*(q1-q0); p and q are descending and ascending points respectively.
---> s*(p1-p0)-t*(q1-q0) = q0-p0
if s and t belong to [0,1], p and q actually do intersect.
!! in order to speed up calculation, this code vectorizing solution of the 2x2 linear systems
input:
x_down, y_down: coord_x and coord_y of the descending points
x_up, y_up: coord_x, coord_y of the ascending points.
t_down, t_up: time of down track and up track, respectively.
z_down, z_up: height of down track and up track, respectively.
retrun:
np.array(shape: (n,2)), coordinates (x,y) of the intersection points.
n is number of intersection points
"""
p = np.column_stack((x_down, y_down)) # coords of the descending points
q = np.column_stack((x_up, y_up)) # coords of the ascending points
(p0, p1, q0, q1) = p[:-1], p[1:], q[:-1], q[1:] # remove first/last row respectively
# (num_uppoints, 2) - (num_dpoints, 1, 2), array broadcast, dim: (num_dpoints, num_uppoints, 2)
rhs = q0 - p0[:, np.newaxis, :]
mat = np.empty((len(p0), len(q0), 2, 2)) # dim: (p_num, q_num, dif((x, y)), orbit(down,up))
mat[..., 0] = (p1 - p0)[:, np.newaxis] # dif (x_down,y_down) between point_down and previous point_down
mat[..., 1] = q0 - q1 # dif (x_up, y_up) between point_up and previous point_up
mat_inv = -mat.copy()
mat_inv[..., 0, 0] = mat[..., 1, 1] # exchange between x_dif and y_dif, down and up
mat_inv[..., 1, 1] = mat[..., 0, 0]
det = mat[..., 0, 0] * mat[..., 1, 1] - mat[..., 0, 1] * mat[..., 1, 0]
    mat_inv /= det[..., np.newaxis, np.newaxis]  # adjugate / determinant = 2x2 inverse
    params = mat_inv @ rhs[..., np.newaxis]  # solve every 2x2 system for the segment parameters (s, t)
    intersection = np.all((params >= 0) & (params <= 1), axis=(-1, -2))  # s and t both in [0, 1] -> segments truly cross
p0_s = params[intersection, 0, :] * mat[intersection, :, 0]
xover_coords = p0_s + p0[np.where(intersection)[0]]
## interplate the xover time corresponding to down and up tracks, respectively.
## -- get the previous point of xover point (down and up, respectively)
p_start_idx = np.where(intersection)[0] # down track
    q_start_idx = np.where(intersection)[1]  # up track
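    # Illustrative example of the solve above (assumed values): the descending
    # segment (0, 0)->(1, 1) and the ascending segment (0, 1)->(1, 0) give
    # s = t = 0.5, i.e. they cross at p0 + 0.5 * (p1 - p0) = (0.5, 0.5).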
import copy
import warnings
import astropy.units as u
import numpy as np
from scipy import interpolate, stats
from sunpy.data import manager
from sunxspex_dan.io import load_chianti_continuum, load_chianti_lines_lite, load_xray_abundances
__all__ = ['thermal_emission', 'continuum_emission', 'line_emission',
'setup_continuum_parameters', 'setup_line_parameters', 'setup_default_abundances']
doc_string_params = """
Parameters
----------
energy_edges: `astropy.units.Quantity`
The edges of the energy bins in a 1D N+1 quantity.
temperature: `astropy.units.Quantity`
The temperature of the plasma.
Can be scalar or 1D of any length. If not scalar, the flux for each temperature
will be calculated. The first dimension of the output flux will correspond
to temperature.
emission_measure: `astropy.units.Quantity`
The emission measure of the plasma at each temperature.
Must be same length as temperature or scalar.
abundance_type: `str` (optional)
Abundance type to use. Options are:
1. cosmic
2. sun_coronal - default abundance
3. sun_coronal_ext
4. sun_hybrid
5. sun_hybrid_ext
6. sun_photospheric
7. mewe_cosmic
8. mewe_solar
The values for each abundance type is stored in the global
variable DEFAULT_ABUNDANCES which is generated by `setup_default_abundances`
function. To load different default values for each abundance type,
see the docstring of that function.
relative_abundances: `tuple` of `tuples` of (`int`, `float`) (optional)
The relative abundances of different elements as a fraction of their
default abundances defined by abundance_type.
Each tuple represents the values for a given element.
The first entry represents the atomic number of the element.
    The second entry represents the fraction by which the
element's default abundance should be scaled.
observer_distance: `astropy.units.Quantity` (Optional)
The distance between the source and the observer.
Default=1 AU.
Returns
-------
flux: `astropy.units.Quantity`
The photon flux as a function of temperature and energy.
"""
def setup_continuum_parameters(filename=None):
"""
Define continuum intensities as a function of temperature.
Intensities are set as global variables and used in
calculation of spectra by other functions in this module. They are in
units of per volume emission measure at source, i.e. they must be
divided by 4 * pi R**2 to be converted to physical values where
R**2 is observer distance.
Intensities are derived from output from the CHIANTI atomic physics database.
The default CHIANTI data used here are collected from
`https://hesperia.gsfc.nasa.gov/ssw/packages/xray/dbase/chianti/chianti_cont_1_250_v71.sav`.
This includes contributions from thermal bremsstrahlung and two-photon interactions.
To use a different file, provide the URL/file location via the filename kwarg,
e.g. to include only thermal bremsstrahlung, set the filename kwarg to
'https://hesperia.gsfc.nasa.gov/ssw/packages/xray/dbase/chianti/chianti_cont_1_250_v70_no2photon.sav'
Parameters
----------
filename: `str` (optional)
URL or file location of the CHIANTI IDL save file to be used.
"""
if filename:
with manager.override_file("chianti_continuum", uri=filename):
cont_info = load_chianti_continuum()
else:
cont_info = load_chianti_continuum()
continuum_grid = {}
continuum_grid["abundance index"] = cont_info.element_index.data
continuum_grid["sorted abundance index"] = np.sort(continuum_grid["abundance index"])
T_grid = (cont_info.temperature.data * cont_info.attrs["units"]["temperature"]).to(u.K)
continuum_grid["log10T"] = np.log10(T_grid.value)
continuum_grid["T_keV"] = T_grid.to_value(u.keV, equivalencies=u.temperature_energy())
wavelength = cont_info.wavelength.data * cont_info.attrs["units"]["wavelength"]
dwave_AA = (cont_info.attrs["wavelength_edges"][1:] -
cont_info.attrs["wavelength_edges"][:-1]).to_value(u.AA)
continuum_grid["E_keV"] = wavelength.to_value(u.keV, equivalencies=u.spectral())
continuum_grid["energy bin widths keV"] = (
continuum_grid["E_keV"] * dwave_AA / wavelength.to_value(u.AA))
continuum_grid["intensity"] = cont_info.data
continuum_grid["intensity unit"] = cont_info.attrs["units"]["data"]
continuum_grid["intensity description"] = (
"Intensity is stored as photons per keV per unit emission measure at the source. "
"It (and its unit) therefore must be multipled by emission measure and "
"divided by 4 * pi * observer_distance**2 to get observed values.")
continuum_grid["energy range keV"] = (continuum_grid["E_keV"].min(),
continuum_grid["E_keV"].max())
continuum_grid["temperature range K"] = (T_grid.value.min(), T_grid.value.max())
return continuum_grid
def setup_line_parameters(filename=None):
"""Define line intensities as a function of temperature for calculating line emission.
Line intensities are set as global variables and used in the
calculation of spectra by other functions in this module. They are in
units of per unit emission measure at source, i.e. they must be
divided by 4 pi R**2 (where R is the observer distance) and
multiplied by emission measure to be converted to physical values at the observer.
Line intensities are derived from output from the CHIANTI atomic
physics database. The default CHIANTI data used here is collected from
`https://hesperia.gsfc.nasa.gov/ssw/packages/xray/dbase/chianti/chianti_lines_1_10_v71.sav`.
To use a different file, provide the URL/file location via the filename kwarg.
Parameters
----------
filename: `str` (optional)
URL or file location of the CHIANTI IDL save file to be used.
"""
if filename:
with manager.override_file("chianti_lines", uri=filename):
line_info = load_chianti_lines_lite()
else:
line_info = load_chianti_lines_lite()
line_grid = {}
line_grid["intensity"] = np.array(line_info.data)
line_grid["intensity unit"] = line_info.attrs["units"]["data"]
line_grid["intensity description"] = (
"Intensity is stored as photons per unit emission measure at the source. "
"It (and its unit) therefore must be multipled by emission measure and "
"divided by 4 * pi * observer_distance**2 to get observed values.")
line_grid["line peaks keV"] = (
line_info.peak_energy.data * line_info.attrs["units"]["peak_energy"]).to_value(
u.keV, equivalencies=u.spectral())
line_grid["log10T"] = line_info.logT.data
line_grid["abundance index"] = line_info.attrs["element_index"]
line_grid["line atomic numbers"] = line_info.atomic_number.data
line_grid["energy range keV"] = (line_grid["line peaks keV"].min(),
line_grid["line peaks keV"].max())
T_grid = 10**line_grid["log10T"]
line_grid["temperature range K"] = (T_grid.min(), T_grid.max())
return line_grid
def setup_default_abundances(filename=None):
"""
Read default abundance values into global variable.
By default, data is read from the following file:
https://hesperia.gsfc.nasa.gov/ssw/packages/xray/dbase/chianti/xray_abun_file.genx
To load data from a different file, see Notes section.
Parameters
----------
filename: `str` (optional)
URL or file location of the .genx abundance file to be used.
"""
if filename:
with manager.override_file("xray_abundance", uri=filename):
return load_xray_abundances()
else:
return load_xray_abundances()
# Read line, continuum and abundance data into global variables.
CONTINUUM_GRID = setup_continuum_parameters()
LINE_GRID = setup_line_parameters()
DEFAULT_ABUNDANCES = setup_default_abundances()
DEFAULT_ABUNDANCE_TYPE = "sun_coronal_ext"
@u.quantity_input(energy_edges=u.keV,
temperature=u.K,
emission_measure=(u.cm**(-3), u.cm**(-5)),
observer_distance=u.cm)
def thermal_emission(energy_edges,
temperature,
emission_measure,
abundance_type=DEFAULT_ABUNDANCE_TYPE,
relative_abundances=None,
observer_distance=(1*u.AU).to(u.cm)):
f"""Calculate the thermal X-ray spectrum (lines + continuum) from the solar atmosphere.
The flux is calculated as a function of temperature and emission measure.
Which continuum mechanisms are included --- free-free, free-bound, or two-photon --- are
determined by the file from which the continuum parameters are loaded.
To change the file used, see the setup_continuum_parameters() function.
{doc_string_params}"""
# Convert inputs to known units and confirm they are within range.
energy_edges_keV, temperature_K = _sanitize_inputs(energy_edges, temperature)
energy_range = (min(CONTINUUM_GRID["energy range keV"][0], LINE_GRID["energy range keV"][0]),
max(CONTINUUM_GRID["energy range keV"][1], LINE_GRID["energy range keV"][1]))
_error_if_input_outside_valid_range(energy_edges_keV, energy_range, "energy", "keV")
temp_range = (min(CONTINUUM_GRID["temperature range K"][0], LINE_GRID["temperature range K"][0]),
max(CONTINUUM_GRID["temperature range K"][1], LINE_GRID["temperature range K"][1]))
_error_if_input_outside_valid_range(temperature_K, temp_range, "temperature", "K")
# Calculate abundances
abundances = _calculate_abundances(abundance_type, relative_abundances)
# Calculate fluxes.
continuum_flux = _continuum_emission(energy_edges_keV, temperature_K, abundances)
line_flux = _line_emission(energy_edges_keV, temperature_K, abundances)
flux = ((continuum_flux + line_flux) * emission_measure /
(4 * np.pi * observer_distance**2))
if temperature.isscalar and emission_measure.isscalar:
flux = flux[0]
return flux
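# Example call of thermal_emission (illustrative; the argument values are
# assumptions, and the units are enforced by the quantity_input decorator):
#   energy_edges = np.arange(3, 28.5, 0.1) * u.keV
#   flux = thermal_emission(energy_edges, 10 * u.MK, 1e44 * u.cm**-3)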
@u.quantity_input(energy_edges=u.keV,
temperature=u.K,
emission_measure=(u.cm**(-3), u.cm**(-5)),
observer_distance=u.cm)
def continuum_emission(energy_edges,
temperature,
emission_measure,
abundance_type=DEFAULT_ABUNDANCE_TYPE,
relative_abundances=None,
observer_distance=(1*u.AU).to(u.cm)):
f"""Calculate the thermal X-ray continuum emission from the solar atmosphere.
The emission is calculated as a function of temperature and emission measure.
Which continuum mechanisms are included --- free-free, free-bound, or two-photon --- are
    determined by the file from which the continuum parameters are loaded.
To change the file used, see the setup_continuum_parameters() function.
{doc_string_params}"""
# Convert inputs to known units and confirm they are within range.
energy_edges_keV, temperature_K = _sanitize_inputs(energy_edges, temperature)
_error_if_input_outside_valid_range(energy_edges_keV, CONTINUUM_GRID["energy range keV"],
"energy", "keV")
_error_if_input_outside_valid_range(temperature_K, CONTINUUM_GRID["temperature range K"],
"temperature", "K")
# Calculate abundances
abundances = _calculate_abundances(abundance_type, relative_abundances)
# Calculate flux.
flux = _continuum_emission(energy_edges_keV, temperature_K, abundances)
flux *= emission_measure / (4 * np.pi * observer_distance**2)
if temperature.isscalar and emission_measure.isscalar:
flux = flux[0]
return flux
@u.quantity_input(energy_edges=u.keV,
temperature=u.K,
emission_measure=(u.cm**(-3), u.cm**(-5)),
observer_distance=u.cm)
def line_emission(energy_edges,
temperature,
emission_measure,
abundance_type=DEFAULT_ABUNDANCE_TYPE,
relative_abundances=None,
observer_distance=(1*u.AU).to(u.cm)):
"""
Calculate thermal line emission from the solar corona.
{docstring_params}"""
# Convert inputs to known units and confirm they are within range.
energy_edges_keV, temperature_K = _sanitize_inputs(energy_edges, temperature)
_warn_if_input_outside_valid_range(energy_edges_keV, LINE_GRID["energy range keV"],
"energy", "keV")
_error_if_input_outside_valid_range(temperature_K, LINE_GRID["temperature range K"],
"temperature", "K")
# Calculate abundances
abundances = _calculate_abundances(abundance_type, relative_abundances)
flux = _line_emission(energy_edges_keV, temperature_K, abundances)
flux *= emission_measure / (4 * np.pi * observer_distance**2)
if temperature.isscalar and emission_measure.isscalar:
flux = flux[0]
return flux
def _continuum_emission(energy_edges_keV, temperature_K, abundances):
"""
Calculates emission-measure-normalized X-ray continuum spectrum at the source.
Output must be multiplied by emission measure and divided by 4*pi*observer_distance**2
to get physical values.
Which continuum mechanisms are included --- free-free, free-bound, or two-photon --- are
determined by the file from which the comtinuum parameters are loaded.
To change the file used, see the setup_continuum_parameters() function.
Parameters
----------
energy_edges_keV: 1-D array-like
Boundaries of contiguous spectral bins in units on keV.
temperature_K: 1-D array-like
The temperature(s) of the plasma in unit of K. Must not be a scalar.
abundances: 1-D `numpy.array` of same length a DEFAULT_ABUNDANCES.
The abundances for the all the elements.
"""
# Handle inputs and derive some useful parameters from them
log10T_in = np.log10(temperature_K)
T_in_keV = temperature_K / 11604518 # Convert temperature from K to keV.
# Get energy bins centers based on geometric mean.
energy_gmean_keV = stats.gmean(np.vstack((energy_edges_keV[:-1], energy_edges_keV[1:])))
# Mask Unwanted Abundances
abundance_mask = np.zeros(len(abundances))
abundance_mask[CONTINUUM_GRID["abundance index"]] = 1.
abundances *= abundance_mask
##### Calculate Continuum Intensity Summed Over All Elements
##### For Each Temperature as a function of Energy/Wavelength ######
# Before looping over temperatures, let's perform the calculations that are
# used over again in the for loop.
# 1. If many temperatures are input, convolve intensity grid with abundances for all
# temperatures here. If only a few temperatures are input, do this step only
# when looping over input temperatures. This minimizes computation.
n_tband = 3
n_t_grid = len(CONTINUUM_GRID["log10T"])
n_temperature_K = len(temperature_K)
n_thresh = n_temperature_K * n_tband
if n_thresh >= n_t_grid:
intensity_per_em_at_source_allT = np.zeros(CONTINUUM_GRID["intensity"].shape[1:])
for i in range(0, n_t_grid):
intensity_per_em_at_source_allT[i] = np.matmul(
abundances[CONTINUUM_GRID["sorted abundance index"]],
CONTINUUM_GRID["intensity"][:, i])
# 2. Add dummy axes to energy and temperature grid arrays for later vectorized operations.
repeat_E_grid = CONTINUUM_GRID["E_keV"][np.newaxis, :]
repeat_T_grid = CONTINUUM_GRID["T_keV"][:, np.newaxis]
dE_grid_keV = CONTINUUM_GRID["energy bin widths keV"][np.newaxis, :]
# 3. Identify the indices of the temperature bins containing each input temperature and
# the bins above and below them. For each input temperature, these three bins will
# act as a temperature band over which we'll interpolate the continuum emission.
selt = np.digitize(log10T_in, CONTINUUM_GRID["log10T"]) - 1
tband_idx = selt[:, np.newaxis] + np.arange(n_tband)[np.newaxis, :]
# Finally, loop over input temperatures and calculate continuum emission for each.
flux = np.zeros((n_temperature_K, len(energy_gmean_keV)))
for j, logt in enumerate(log10T_in):
# If not already done above, calculate continuum intensity summed over
# all elements as a function of energy/wavelength over the temperature band.
if n_thresh < n_t_grid:
element_intensities_per_em_at_source = CONTINUUM_GRID["intensity"][:, tband_idx[j]]
intensity_per_em_at_source = np.zeros(element_intensities_per_em_at_source.shape[1:])
for i in range(0, n_tband):
intensity_per_em_at_source[i] = np.matmul(
abundances[CONTINUUM_GRID["sorted abundance index"]],
element_intensities_per_em_at_source[:, i])
else:
intensity_per_em_at_source = intensity_per_em_at_source_allT[tband_idx[j]]
##### Calculate Continuum Intensity at Input Temperature ######
##### Do this by interpolating the normalized temperature component
##### of the intensity grid to input temperature(s) and then rescaling.
# Calculate normalized temperature component of the intensity grid.
exponent = (repeat_E_grid / repeat_T_grid[tband_idx[j]])
exponential = np.exp(np.clip(exponent, None, 80))
gaunt = intensity_per_em_at_source / dE_grid_keV * exponential
# Interpolate the normalized temperature component of the intensity grid the the
# input temperature.
flux[j] = _interpolate_continuum_intensities(
gaunt, CONTINUUM_GRID["log10T"][tband_idx[j]], CONTINUUM_GRID["E_keV"], energy_gmean_keV, logt)
# Rescale the interpolated intensity.
flux = flux * np.exp(-(energy_gmean_keV[np.newaxis, :] / T_in_keV[:, np.newaxis]))
# Put intensity into correct units.
return flux * CONTINUUM_GRID["intensity unit"]
def _line_emission(energy_edges_keV, temperature_K, abundances):
"""
Calculates emission-measure-normalized X-ray line spectrum at the source.
Output must be multiplied by emission measure and divided by 4*pi*observer_distance**2
to get physical values.
Parameters
----------
energy_edges_keV: 1-D array-like
Boundaries of contiguous spectral bins in units on keV.
temperature_K: 1-D array-like
The temperature(s) of the plasma in unit of K. Must not be a scalar.
abundances: 1-D `numpy.array` of same length a DEFAULT_ABUNDANCES.
The abundances for the all the elements.
"""
n_energy_bins = len(energy_edges_keV)-1
n_temperatures = len(temperature_K)
# Find indices of lines within user input energy range.
energy_roi_indices = np.logical_and(LINE_GRID["line peaks keV"] >= energy_edges_keV.min(),
LINE_GRID["line peaks keV"] <= energy_edges_keV.max())
n_energy_roi_indices = energy_roi_indices.sum()
# If there are emission lines within the energy range of interest, compile spectrum.
if n_energy_roi_indices > 0:
# Mask Unwanted Abundances
abundance_mask = np.zeros(len(abundances))
abundance_mask[LINE_GRID["abundance index"]] = 1.
abundances *= abundance_mask
# Extract only the lines within the energy range of interest.
line_abundances = abundances[LINE_GRID["line atomic numbers"][energy_roi_indices] - 2]
        # The magic number of -2 above is comprised of:
        # a -1 because a zero-based index is the atomic number minus 1, and
        # another -1 because the abundance index is offset from the element index by 1.
##### Calculate Line Intensities within the Input Energy Range #####
# Calculate abundance-normalized intensity of each line in energy range of
# interest as a function of energy and temperature.
line_intensity_grid = LINE_GRID["intensity"][energy_roi_indices]
line_intensities = _calculate_abundance_normalized_line_intensities(
np.log10(temperature_K), line_intensity_grid, LINE_GRID["log10T"])
# Scale line intensities by abundances to get true line intensities.
line_intensities *= line_abundances
##### Weight Line Emission So Peak Energies Maintained Within Input Energy Binning #####
# Split emission of each line between nearest neighboring spectral bins in
# proportion such that the line centroids appear at the correct energy
# when averaged over neighboring bins.
# This has the effect of appearing to double the number of lines as regards
# the dimensionality of the line_intensities array.
line_peaks_keV = LINE_GRID["line peaks keV"][energy_roi_indices]
split_line_intensities, line_spectrum_bins = _weight_emission_bins_to_line_centroid(
line_peaks_keV, energy_edges_keV, line_intensities)
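        # e.g. (illustrative): a 6.75 keV line whose nearest output-bin centres
        # are 6.7 and 6.9 keV is split 0.75 / 0.25 between them, so the
        # intensity-weighted mean energy of the two components stays at 6.75 keV.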
#### Calculate Flux #####
# Use binned_statistic to determine which spectral bins contain
# components of line emission and sum over those line components
# to get the total emission is each spectral bin.
flux = stats.binned_statistic(line_spectrum_bins, split_line_intensities,
"sum", n_energy_bins, (0, n_energy_bins-1)).statistic
else:
flux = np.zeros((n_temperatures, n_energy_bins))
# Scale flux by observer distance, emission measure and spectral bin width
# and put into correct units.
energy_bin_widths = (energy_edges_keV[1:] - energy_edges_keV[:-1]) * u.keV
flux = (flux * LINE_GRID["intensity unit"] / energy_bin_widths)
return flux
def _interpolate_continuum_intensities(data_grid, log10T_grid, energy_grid_keV, energy_keV, log10T):
# Determine valid range based on limits of intensity grid's spectral extent
# and the normalized temperature component of intensity.
n_tband = len(log10T_grid)
vrange, = np.where(data_grid[0] > 0)
for i in range(1, n_tband):
vrange_i, = np.where(data_grid[i] > 0)
if len(vrange) < len(vrange_i):
vrange = vrange_i
data_grid = data_grid[:, vrange]
energy_grid_keV = energy_grid_keV[vrange]
energy_idx, = np.where(energy_keV < energy_grid_keV.max())
# Interpolate temperature component of intensity and derive continuum intensity.
flux = np.zeros(energy_keV.shape)
if len(energy_idx) > 0:
energy_keV = energy_keV[energy_idx]
cont0 = interpolate.interp1d(energy_grid_keV, data_grid[0])(energy_keV)
cont1 = interpolate.interp1d(energy_grid_keV, data_grid[1])(energy_keV)
cont2 = interpolate.interp1d(energy_grid_keV, data_grid[2])(energy_keV)
# Calculate the continuum intensity as the weighted geometric mean
# of the interpolated values across the temperature band of the
# temperature component of intensity.
logelog10T = np.log(log10T)
x0, x1, x2 = np.log(log10T_grid)
flux[energy_idx] = np.exp(
np.log(cont0) * (logelog10T - x1) * (logelog10T - x2) / ((x0 - x1) * (x0 - x2)) +
np.log(cont1) * (logelog10T - x0) * (logelog10T - x2) / ((x1 - x0) * (x1 - x2)) +
np.log(cont2) * (logelog10T - x0) * (logelog10T - x1) / ((x2 - x0) * (x2 - x1)) )
return flux
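# The expression above is a three-point Lagrange interpolation carried out in log space,
# i.e. a weighted geometric mean of the three continuum grids. A minimal standalone sketch
# of that formula (hedged: toy inputs, x is assumed to lie within [x0, x2]):
def _demo_log_lagrange(y0, y1, y2, x0, x1, x2, x):
    w0 = (x - x1) * (x - x2) / ((x0 - x1) * (x0 - x2))
    w1 = (x - x0) * (x - x2) / ((x1 - x0) * (x1 - x2))
    w2 = (x - x0) * (x - x1) / ((x2 - x0) * (x2 - x1))
    # The weights sum to 1, so this equals y0**w0 * y1**w1 * y2**w2 in linear space.
    return np.exp(np.log(y0) * w0 + np.log(y1) * w1 + np.log(y2) * w2)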
def _calculate_abundance_normalized_line_intensities(logT, data_grid, line_logT_bins):
"""
Calculates normalized line intensities at a given temperature using interpolation.
Given a 2D array, say of line intensities, as a function of two parameters,
say energy and log10(temperature), and a log10(temperature) value,
interpolate the line intensities over the temperature axis and
extract the intensities as a function of energy at the input temperature.
Note that strictly speaking the code is agnostic to the physical properties
of the axes and values in the array. All that matters is that data_grid
is interpolated over the 2nd axis and the input value also corresponds to
somewhere along that same axis. That value does not have to exactly correspond to
the value of a column in the grid. This is accounted for by the interpolation.
Parameters
----------
logT: 1D `numpy.ndarray` of `float`.
The input value along the 2nd axis at which the line intensities are desired.
If multiple values given, the calculation is done for each and the
output array has an extra dimension.
data_grid: 2D `numpy.ndarray`
Some property, e.g. line intensity, as function two parameters,
e.g. energy (0th dimension) and log10(temperature in kelvin) (1st dimension).
line_logT_bins: 1D `numpy.ndarray`
The value along the 2nd axis at which the data are required,
say a value of log10(temperature in kelvin).
Returns
-------
interpolated_data: 1D or 2D `numpy.ndarray`
The line intensities as a function of energy (1st dimension) at
each of the input temperatures (0th dimension).
Note that unlike the input line intensity table, energy here is the 0th axis.
If there is only one input temperature, interpolated_data is 1D.
"""
# Ensure input temperatures are in an array for consistent manipulation.
n_temperatures = len(logT)
# Get bins in which input temperatures belong.
temperature_bins = np.digitize(logT, line_logT_bins)-1
# For each input "temperature", interpolate the grid over the 2nd axis
# using the bins corresponding to the input "temperature" and the two neighboring bins.
# This will result in a function giving the data as a function of the 1st axis,
# say energy, at the input temperature to sub-temperature bin resolution.
interpolated_data = np.zeros((n_temperatures, data_grid.shape[0]))
for i in range(n_temperatures):
# Identify the "temperature" bin to which the input "temperature"
# corresponds and its two nearest neighbors.
indx = temperature_bins[i]-1+np.arange(3)
# Interpolate the 2nd axis to produce a function that gives the data
# as a function of 1st axis, say energy, at a given value along the 2nd axis,
# say "temperature".
get_intensities_at_logT = interpolate.interp1d(line_logT_bins[indx], data_grid[:, indx], kind="quadratic")
# Use function to get interpolated_data as a function of the first axis at
# the input value along the 2nd axis,
# e.g. line intensities as a function of energy at a given temperature.
interpolated_data[i, :] = get_intensities_at_logT(logT[i]).squeeze()[:]
return interpolated_data
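# A minimal sketch of the bin-selection plus quadratic interpolation step above, using a toy
# 2x5 grid (2 "energies" x 5 temperature bins). Hedged: values are illustrative only and the
# module-level `np`/`interpolate` imports are assumed.
def _demo_quadratic_temperature_interp():
    logT_bins = np.array([6.0, 6.2, 6.4, 6.6, 6.8])
    grid = np.arange(10.0).reshape(2, 5)   # rows are "energies", columns are temperature bins
    logT = 6.5
    i = np.digitize(logT, logT_bins) - 1   # bin containing logT -> 2
    idx = i - 1 + np.arange(3)             # that bin and its two neighbours -> [1, 2, 3]
    f = interpolate.interp1d(logT_bins[idx], grid[:, idx], kind="quadratic")
    return f(logT)                         # -> [2.5, 7.5]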
def _weight_emission_bins_to_line_centroid(line_peaks_keV, energy_edges_keV, line_intensities):
"""
Split emission between neighboring energy bins such that averaged energy is the line peak.
Given the peak energies of the lines and a set of the energy bin edges:
1. Find the bins into which each of the lines belong.
2. Calculate the distance between the line peak energy and the
center of the bin to which it corresponds, as a fraction of the distance between
that bin center and the center of the next closest bin to the line peak energy.
3. Assign the above fraction of the line intensity to the neighboring bin and
the rest of the intensity to the original bin.
4. Add the neighboring bins to the array of bins containing positive emission.
Parameters
----------
line_peaks_keV: 1D `numpy.ndarray`
The energy of the line peaks in keV.
energy_edges_keV: 1D `numpy.ndarray`
The edges of adjacent energy bins.
Length must be n+1 where n is the number of energy bins.
These energy bins may be referred to as 'spectrum energy bins' in comments.
line_intensities: 2D `numpy.ndarray`
The amplitude of the line peaks.
The last dimension represents intensities of each line in line_peaks_keV while
the first dimension represents the intensities as a function of another parameter,
e.g. temperature.
These intensities are the ones divided between neighboring bins as described above.
Returns
-------
new_line_intensities: 2D `numpy.ndarray`
The line intensities split between neighboring bins for each line, weighted such that
the total emission is unchanged but the energy of each line, averaged over the
energy_edges_keV bins, equals the actual line energy.
new_iline: `numpy.ndarray`
Indices of the spectrum energy bins to which emission from each line corresponds.
This includes indices of the neighboring bin emission components.
"""
# Get widths and centers of the spectrum energy bins.
energy_bin_widths = energy_edges_keV[1:] - energy_edges_keV[:-1]
energy_centers = energy_edges_keV[:-1] + energy_bin_widths/2
energy_center_diffs = energy_centers[1:] - energy_centers[:-1]
# For each line, find the index of the spectrum energy bin to which it corresponds.
iline = np.digitize(line_peaks_keV, energy_edges_keV) - 1
# Get the difference between each line energy and
# the center of the spectrum energy bin to which it corresponds.
line_deviations_keV = line_peaks_keV - energy_centers[iline]
# Get the indices of the lines which are above and below their bin center.
neg_deviation_indices, = np.where(line_deviations_keV < 0)
pos_deviation_indices, = np.where(line_deviations_keV >= 0)
# Discard bin indices at the edge of the spectral range if they should
# be shared with a bin outside the energy range.
neg_deviation_indices = neg_deviation_indices[np.where(iline[neg_deviation_indices] > 0)[0]]
pos_deviation_indices = pos_deviation_indices[
np.where(iline[pos_deviation_indices] <= (len(energy_edges_keV)-2))[0]]
# Split line emission between the spectrum energy bin containing the line peak and
# the nearest neighboring bin based on the proximity of the line energy to
# the center of the spectrum bin.
# Treat lines which are above and below the bin center separately as slightly
# different indexing is required.
new_line_intensities = copy.deepcopy(line_intensities)
new_iline = copy.deepcopy(iline)
if len(neg_deviation_indices) > 0:
neg_line_intensities, neg_neighbor_intensities, neg_neighbor_iline = _weight_emission_bins(
line_deviations_keV, neg_deviation_indices,
energy_center_diffs, line_intensities, iline, negative_deviations=True)
# Combine new line and neighboring bin intensities and indices into common arrays.
new_line_intensities[:, neg_deviation_indices] = neg_line_intensities
new_line_intensities = np.concatenate((new_line_intensities, neg_neighbor_intensities), axis=-1)
new_iline = | np.concatenate((new_iline, neg_neighbor_iline)) | numpy.concatenate |
#! /usr/bin/env python
#################################################################################
# File Name : vpg_multi.py
# Created By : yang
# Creation Date : [2017-03-18 19:00]
# Last Modified : [2017-04-06 19:17]
# Description :
#################################################################################
from rllab.sampler.base import BaseSampler
import rllab.sampler.parallel_sampler as parallel_sampler
from rllab.algos.batch_polopt import BatchPolopt
from rllab.misc import special, tensor_utils, ext
from rllab.core.serializable import Serializable
from ..optimizer.optimizer import MyFirstOrderOptimizer
from ..samples.multi_sampler import BatchSampler_Multi
from rllab.algos import util
import rllab.misc.logger as logger
import theano
import theano.tensor as TT
import pickle
import numpy as np
import sys
class VPG_multi_Stein(BatchPolopt, Serializable):
def __init__(
self,
num_of_agents,
temp,
env,
policy,
baseline,
policy_list,
baseline_list,
anneal_temp_start = 500,
anneal_temp=False,
anneal_method = 'linear',
anneal_discount_epoch=1,
anneal_discount_factor=0.02,
temp_min = 1e-2,
optimizer=None,
optimizer_args=None,
learning_rate = 1e-3,
optimization_method = "adam",
adaptive_kernel = False,
policy_weight_decay = 0.0,
with_critic = True,
include_kernel = True,
evolution = False,
evolution_ratio = 0.25,
evolution_epsilon = 0.01,
evolution_update_steps = 20,
**kwargs):
Serializable.quick_init(self, locals())
if optimizer is None:
default_args = dict(
batch_size=None,
max_epochs=1,
)
if optimizer_args is None:
optimizer_args = default_args
else:
optimizer_args = dict(default_args, **optimizer_args)
optimizer = MyFirstOrderOptimizer(**optimizer_args)
self.optimizer = optimizer
self.opt_info = None
self.temp = temp
self.anneal_temp = anneal_temp
self.anneal_method = anneal_method
self.anneal_discount_epoch = anneal_discount_epoch
self.anneal_discount_factor = anneal_discount_factor
self.temp_min = temp_min
self.anneal_temp_start = anneal_temp_start
self.num_of_agents = num_of_agents
self.sampler_list = [BatchSampler_Multi(self, i, with_critic) for i in range(self.num_of_agents)]
self.optimizer_list = [pickle.loads(pickle.dumps(self.optimizer)) for _ in range(self.num_of_agents)]
super(VPG_multi_Stein, self).__init__(env=env, policy=policy, baseline=baseline, **kwargs)
self.policy_list = policy_list
self.baseline_list = baseline_list
self.stein_learning_rate = learning_rate
self.stein_optimization_method = optimization_method
self.adaptive_kernel = adaptive_kernel
self.search_space = np.linspace(0.1, 2.0, num=20)
self.policy_weight_decay = policy_weight_decay
self.include_kernel = include_kernel
self.evolution = evolution
self.evolution_ratio = evolution_ratio
self.evolution_epsilon = evolution_epsilon
self.evolution_update_steps = evolution_update_steps
# State for the Stein/Adam-style update used in update_policies().
# These were not initialised in the original snippet; the beta/epsilon values
# below are assumed standard defaults.
self.stein_m = None
self.stein_v = None
self.stein_t = 0.0
self.stein_beta1 = 0.9
self.stein_beta2 = 0.999
self.stein_epsilon = 1e-8
def start_worker(self):
for i in range(self.num_of_agents):
self.sampler_list[i].start_worker()
def shutdown_worker(self):
for i in range(self.num_of_agents):
self.sampler_list[i].shutdown_worker()
def train(self):
self.start_worker()
self.init_opt()
for itr in range(self.current_itr, self.n_itr):
if self.anneal_temp and (itr + 1) % self.anneal_discount_epoch == 0 and itr >= self.anneal_temp_start:
if self.anneal_method == 'loglinear':
self.temp *= self.anneal_discount_factor
elif self.anneal_method == 'linear':
self.temp -= self.anneal_discount_factor
if self.temp < self.temp_min:
self.temp = self.temp_min
logger.log("Current Temperature {:}".format(self.temp))
with logger.prefix('itr #%d | ' % itr):
average_return_list = []
gradient_list = []
for i in range(self.num_of_agents):
paths = self.sampler_list[i].obtain_samples(itr)
samples_data, average_return = self.sampler_list[i].process_samples(itr, paths)
average_return_list.append(average_return)
gradient = self.optimize_policy(itr, samples_data, i)
gradient_list.append(gradient)
logger.log("Update Policy {BEGIN}")
self.update_policies(gradient_list)
logger.log("Update Policy {END}")
logger.record_tabular('AverageReturn', np.max(average_return_list))
logger.log("saving snapshot...")
params = self.get_itr_snapshot(itr)
self.current_itr = itr + 1
params["algo"] = self
if self.store_paths:
pass
logger.save_itr_params(itr, params)
logger.log("saved")
logger.dump_tabular(with_prefix=False)
if self.evolution and (itr + 1) % self.evolution_update_steps == 0:
logger.log(">>>>>>>>>>>>>>>>>>>>>>> Evolution START <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
num_of_update = int(self.evolution_ratio * self.num_of_agents)
sorted_id = np.argsort(average_return_list)
deleted_id = sorted_id[:num_of_update]
sampled_id = sorted_id[num_of_update:]
for i in range(len(deleted_id)):
current_id = np.random.choice(sampled_id, 1)
current_params = self.policy_list[current_id].get_param_values()
current_epsilon = self.evolution_epsilon * (np.random.random(current_params.shape) - 0.5)
self.policy_list[deleted_id[i]].set_param_values(current_params + current_epsilon)
logger.log(">>>>>>>>>>>>>>>>>>>>>>> Evolution FINISH <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
self.shutdown_worker()
def get_itr_snapshot(self, itr):
return dict(
itr=itr,
policy_list=self.policy_list,
baseline_list=self.baseline_list,
env=self.env,
)
def optimize_policy(self, itr, samples_data, id):
logger.log("optimizing policy")
inputs = ext.extract(
samples_data,
"observations", "actions", "advantages"
)
agent_infos = samples_data["agent_infos"]
state_info_list = [agent_infos[k] for k in self.policy.state_info_keys]
inputs += tuple(state_info_list)
if self.policy.recurrent:
inputs += (samples_data["valids"],)
dist_info_list = [agent_infos[k] for k in self.policy.distribution.dist_info_keys]
loss_before = self.optimizer_list[id].loss(inputs)
self.optimizer_list[id].optimize(inputs)
loss_after = self.optimizer_list[id].loss(inputs)
logger.record_tabular("#{:} LossBefore".format(id), loss_before)
logger.record_tabular("#{:} LossAfter".format(id), loss_after)
mean_kl, max_kl = self.opt_info['f_kl_list'][id](*(list(inputs) + dist_info_list))
logger.record_tabular('#{:} MeanKL'.format(id), mean_kl)
logger.record_tabular('#{:} MaxKL'.format(id), max_kl)
return self.optimizer_list[id].gradient
def update_policies(self, gradient_list):
gradient = -np.array(gradient_list)
params = np.array([self.policy_list[i].get_param_values() for i in range(self.num_of_agents)])
## get distance matrix
distance_matrix = np.sum(np.square(params[None, :, :] - params[:, None, :]), axis=-1)
# get median
distance_vector = distance_matrix.flatten()
distance_vector.sort()
median = 0.5 * (
distance_vector[int(len(distance_vector) / 2)] + distance_vector[int(len(distance_vector) / 2) - 1])
h = median / (2 * np.log(self.num_of_agents + 1))
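# Note: this is the standard "median trick" for choosing the RBF kernel bandwidth in
# Stein variational gradient descent: h = median(squared pairwise distance) / (2*log(n+1)),
# chosen so that sum_j k(theta_i, theta_j) is of order 1 for every particle.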
if self.adaptive_kernel:
L_min = None
alpha_best = None
for alpha in self.search_space:
kernel_alpha = np.exp(distance_matrix * (-alpha / h))
mean_kernel = np.sum(kernel_alpha, axis = 1)
L = np.mean(np.square(mean_kernel - 2.0 * np.ones_like(mean_kernel)))
logger.log("Current Loss {:} and Alpha : {:}".format(L, alpha))
if L_min is None:
L_min = L
alpha_best = alpha
elif L_min > L:
L_min = L
alpha_best = alpha
logger.record_tabular('Best Alpha', alpha_best)
h = h / alpha_best
kernel = np.exp(distance_matrix[:, :] * (-1.0 / h))
kernel_gradient = kernel[:, :, None] * (2.0 / h) * (params[None, :, :] - params[:, None, :])
if self.include_kernel:
weights = (1.0 / self.temp) * kernel[:, :, None] * gradient[:, None, :] + kernel_gradient[:, :, :]
else:
weights = kernel[:, :, None] * gradient[:, None, :]
weights = -np.mean(weights[:, :, :], axis=0)
# adam update
if self.stein_optimization_method == 'adam':
if self.stein_m is None:
self.stein_m = np.zeros_like(params)
if self.stein_v is None:
self.stein_v = np.zeros_like(params)
self.stein_t += 1.0
self.stein_m = self.stein_beta1 * self.stein_m + (1.0 - self.stein_beta1) * weights
self.stein_v = self.stein_beta2 * self.stein_v + (1.0 - self.stein_beta2) * np.square(weights)
m_hat = self.stein_m / (1.0 - self.stein_beta1 ** self.stein_t)
v_hat = self.stein_v / (1.0 - self.stein_beta2 ** self.stein_t)
params = params - self.stein_learning_rate * (m_hat / (np.sqrt(v_hat) + self.stein_epsilon))
elif self.stein_optimization_method == 'adagrad':
if self.stein_m is None:
self.stein_m = np.zeros_like(params)
self.stein_m = self.stein_m + np.square(weights)
params = params - self.stein_learning_rate * (weights / ( | np.sqrt(self.stein_m + self.stein_epsilon) | numpy.sqrt |
"""
Code to build spatially-dependent network.
There's two sides to it:
- Use correct density of different cell types and place them in space
- Connect them according to spatially-dependent radial connectivity profiles
For the connectivity part we need to evaluate all-to-all-distances which is O(n^2).
Instead, take advantage of the profiles decaying with distance and use a space partitioning structure
so we only compute distances between reachable cell pairs.
"""
import logging
import numpy as np
import pandas as pd
from collections import namedtuple
import numba
from tctx.util.profiling import log_time
from tctx.util import parallel
##########################################################################################
# Density
def assign_area(layers_desc, target_cell_count):
return target_cell_count / layers_desc.density_per_mm2.sum()
def assign_cell_counts(density_per_mm2, size_mm2):
layers_cell_counts = (size_mm2 * density_per_mm2).round().astype(int)
return layers_cell_counts
def assign_cell_positions_cube(height_um, layers_cell_counts, side_um):
cells = pd.DataFrame({
'layer': np.repeat(height_um.index, layers_cell_counts)
}, dtype='category')
total_cell_count = layers_cell_counts.sum()
cells['x'] = np.random.rand(total_cell_count) * side_um
cells['y'] = np.random.rand(total_cell_count) * side_um
layer_z_offsets = np.concatenate([[0], height_um.cumsum().values[:-1]])
cells['z'] = np.concatenate([
np.random.rand(count) * height + offset
for count, height, offset in zip(layers_cell_counts, height_um, layer_z_offsets)
])
return cells
def define_space(layers_desc, target_cell_count) -> pd.Series:
"""
given statistics of the cortical layers and the total cell count,
generate the total volume of the circuit.
"""
size_mm2 = assign_area(layers_desc, target_cell_count)
mm_to_mu = 1000.
side_um = np.sqrt(size_mm2) * mm_to_mu
return pd.Series({'size_mm2': size_mm2, 'side_um': side_um, 'height_um': layers_desc.height_um.sum()})
##########################################################################################
# Spacially-dependent connectivity
def get_cell_count_in_area(densities, radius_um):
"""in mm2"""
assert isinstance(densities, pd.Series)
um_to_mm = 0.001
sampling_radius_mm = radius_um * um_to_mm
sampling_area_mm2 = np.pi * sampling_radius_mm ** 2
return densities * sampling_area_mm2
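# Worked example (hedged, toy numbers): a density of 1000 cells/mm^2 sampled within a
# 100 um radius covers pi * (0.1 mm)^2 ~= 0.0314 mm^2, i.e. roughly 31 cells:
#   get_cell_count_in_area(pd.Series({'exc': 1000.}), radius_um=100.)  ->  ~31.4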
@numba.njit
def wrapped_a2a_distance(p, q, m):
diff = np.expand_dims(p, axis=1) - np.expand_dims(q, axis=0)
diff = np.abs(diff)
return np.sqrt(np.sum(np.square(np.minimum(diff, m - diff)), axis=-1))
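# A minimal sanity sketch for the periodic (torus) distance above. On a domain of side
# m = 10, points at x = 1 and x = 9 are 2 apart through the wrapped boundary, not 8:
def _demo_wrapped_distance():
    p = np.array([[1.0, 0.0]])
    q = np.array([[9.0, 0.0]])
    return wrapped_a2a_distance(p, q, 10.0)   # expected: [[2.0]]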
BinPartitioning = namedtuple('BinPartitioning', 'bin_edges, bin_distances, total_bin_count, bin_dims')
CellPartitioning = namedtuple('CellPartitioning', 'cell_to_bin_idx, bin_cells_mask')
# @numba.jit
def build_partitioning_bins(side_um, bin_size_um):
bin_edges = np.arange(0, side_um + bin_size_um, bin_size_um)
bin_dims = (len(bin_edges) - 1, len(bin_edges) - 1)
total_bin_count = (len(bin_edges) - 1) ** 2
bin_coords = np.array(np.unravel_index(np.arange(total_bin_count), bin_dims)).T
bin_distances = wrapped_a2a_distance(bin_coords, bin_coords, len(bin_edges) - 1) * bin_size_um
return BinPartitioning(bin_edges, bin_distances, total_bin_count, bin_dims)
def partition_cells(sorted_cells, partitioning):
gids = sorted_cells.index.values
cell_to_bin_idx, bin_cells_mask = _partition_cells(
gids,
sorted_cells['x'].values,
sorted_cells['y'].values,
partitioning.bin_edges, partitioning.bin_dims, partitioning.total_bin_count
)
return CellPartitioning(
pd.Series(cell_to_bin_idx, index=gids),
pd.DataFrame(bin_cells_mask, columns=gids)
)
# @numba.jit
def _partition_cells(gids, xs, ys, bin_edges, bin_dims, total_bin_count):
x_bin_idx = np.digitize(xs, bin_edges) - 1
y_bin_idx = np.digitize(ys, bin_edges) - 1
cell_to_bin_idx = np.ravel_multi_index((x_bin_idx, y_bin_idx), bin_dims)
bin_cells_mask = np.zeros((total_bin_count, len(gids)), dtype=np.bool_)
bin_cells_mask[cell_to_bin_idx, np.arange(len(cell_to_bin_idx))] = True
return cell_to_bin_idx, bin_cells_mask
def wrap_around_dimension(values, full_side):
"""make sure values are wrapped around in a torus"""
offset = values.copy()
mask = offset < 0
offset[mask] = offset[mask] + full_side
mask = offset > full_side
offset[mask] = offset[mask] - full_side
return offset
def create_connections(cells, conn_fixed_counts, space_def, conn_profiles, bin_size_um, free_degree, proj_offsets):
"""
:param cells: DataFrame with columns: x, y, z, ei_type
:param conn_fixed_counts: a dict from string (e2e, e2i, i2e, i2i) to an integer
representing the number of incoming connections
:param space_def:
:param conn_profiles:
:param bin_size_um:
:param free_degree: flag on whether to use free degrees or fixed in degrees
:return:
"""
# TODO merge conn_fixed_counts and free_degree params
# we don't want to use distance in Z to determine connectivity
logging.debug('Using %s degrees', 'free' if free_degree else 'fixed')
logging.debug('proj offset\n%s', proj_offsets)
# Note: because we have a potential bias, the virtual position of the cell is different
# depending on whether it is a source/target and what kind of connection we are looking at.
c_types = ['e2e', 'e2i', 'i2e', 'i2i']
if proj_offsets is None:
proj_offsets = {c_type: np.array([0., 0.]) for c_type in c_types}
with log_time('partitioning space'):
partition = build_partitioning_bins(space_def['side_um'], bin_size_um)
source_position_by_c_type = {}
source_partition_by_c_type = {}
target_position_by_c_type = {}
target_partition_by_c_type = {}
for c_type in c_types:
sources = cells[cells.ei_type == c_type[0]].copy()
sources['x'] = wrap_around_dimension(sources['x'].values + proj_offsets[c_type][0], space_def['side_um'])
sources['y'] = wrap_around_dimension(sources['y'].values + proj_offsets[c_type][1], space_def['side_um'])
sorted_source_positions = sources.sort_values(['ei_type', 'x', 'y', 'z'])[['x', 'y']]
source_position_by_c_type[c_type] = sorted_source_positions
source_partition_by_c_type[c_type] = partition_cells(sorted_source_positions, partition)
targets = cells[cells.ei_type == c_type[-1]]
sorted_target_positions = targets.sort_values(['ei_type', 'x', 'y', 'z'])[['x', 'y']]
target_position_by_c_type[c_type] = sorted_target_positions
target_partition_by_c_type[c_type] = partition_cells(sorted_target_positions, partition)
c_type_per_task, s_type_per_task = [], []
all_params = []
c_type_codes = pd.Series(np.arange(4), index=['e2e', 'e2i', 'i2e', 'i2i'], dtype=np.uint)
s_type_codes = pd.Series( | np.arange(2) | numpy.arange |
import os
import numpy as np
import pytest
import fitsio
import piff
from .._se_image import _get_wcs_inverse, SEImageSlice
SE_DIMS_CUT = 512
def test_se_image_get_wcs_inverse_caches(se_image_data, coadd_image_data):
_get_wcs_inverse.cache_clear()
psf_mod = piff.PSF.read(se_image_data['source_info']['piff_path'])
se_im = SEImageSlice(
source_info=se_image_data['source_info'],
psf_model=psf_mod,
wcs=se_image_data['eu_wcs'],
wcs_position_offset=1,
wcs_color=0,
psf_kwargs={"GI_COLOR": 0.61},
noise_seeds=[10],
mask_tape_bumps=False,
)
se_im._im_shape = (512, 512)
_get_wcs_inverse(
coadd_image_data['eu_wcs'],
coadd_image_data['position_offset'],
se_im,
se_im._im_shape,
8,
)
assert _get_wcs_inverse.cache_info().hits == 0
_get_wcs_inverse(
coadd_image_data['eu_wcs'],
coadd_image_data['position_offset'],
se_im,
se_im._im_shape,
8,
)
assert _get_wcs_inverse.cache_info().hits == 1
se_im = SEImageSlice(
source_info=se_image_data['source_info'],
psf_model=psf_mod,
wcs=se_image_data['eu_wcs'],
wcs_position_offset=1,
wcs_color=0,
psf_kwargs={"GI_COLOR": 0.61},
noise_seeds=[10],
mask_tape_bumps=False,
)
se_im._im_shape = (512, 512)
_get_wcs_inverse(
coadd_image_data['eu_wcs'],
coadd_image_data['position_offset'],
se_im,
se_im._im_shape,
8,
)
assert _get_wcs_inverse.cache_info().hits == 2
@pytest.mark.skipif(
os.environ.get('TEST_DESDATA', None) is None,
reason=(
'SEImageSlice can only be tested if '
'test data is at TEST_DESDATA'))
def test_se_image_get_wcs_inverse_pixmappy(se_image_data, coadd_image_data):
coadd_wcs = coadd_image_data['eu_wcs']
se_wcs = piff.PSF.read(
se_image_data['source_info']['piff_path']
).wcs[se_image_data['source_info']['ccdnum']]
# this hack mocks up an esutil-like interface to the pixmappy WCS
def se_image2sky(x, y):
if np.ndim(x) == 0 and np.ndim(y) == 0:
is_scalar = True
else:
is_scalar = False
# the factor of +1 here converts from zero to one indexed
ra, dec = se_wcs._radec(
(np.atleast_1d(x) - se_wcs.x0 +
se_image_data['source_info']['position_offset']),
( | np.atleast_1d(y) | numpy.atleast_1d |
import math
import numpy as np
class Plane():
def __init__(self,atoms):
# Stores a plane equation in the format
# ax + bx + cz + d = 0
self.atoms = atoms
xs = [atom.coordinates[0] for atom in atoms]
ys = [atom.coordinates[1] for atom in atoms]
zs = [atom.coordinates[2] for atom in atoms]
# do fit
tmp_A = []
tmp_b = []
for i in range(len(xs)):
tmp_A.append([xs[i], ys[i], 1])
tmp_b.append(zs[i])
b = np.matrix(tmp_b).T
A = np.matrix(tmp_A)
fit = (A.T * A).I * A.T * b
self.errors = b - A * fit
fit = np.array(fit).reshape(3)
self.a, self.b, self.d = fit[0], fit[1], fit[2]
# the least-squares fit gives z = a*x + b*y + d, i.e. a*x + b*y - z + d = 0.
# Recover c by substituting the first point; for points lying exactly on the
# fitted plane this evaluates to -1.
self.c = - ((self.a*xs[0] + self.b*ys[0] + self.d) / zs[0])
def plane_angle(self, plane):
a1,b1,c1 = self.a,self.b, self.c
a2,b2,c2 = plane.a,plane.b, plane.c
d = ( a1 * a2 + b1 * b2 + c1 * c2 )
e1 = np.sqrt( a1 * a1 + b1 * b1 + c1 * c1)
e2 = np.sqrt( a2 * a2 + b2 * b2 + c2 * c2)
d = d / (e1 * e2)
A = np.degrees(np.arccos(d))
if A > 90:
A = 180 - A
return A
def point_distance(self,atom):
x1, y1, z1 = atom.coordinates[0], atom.coordinates[1], atom.coordinates[2]
d = np.abs((self.a * x1 + self.b * y1 + self.c * z1 + self.d))
e = (np.sqrt(self.a * self.a + self.b * self.b + self.c * self.c))
return d/e
def test_planarity(self, atoms=None):
    if atoms is None:
        atoms = self.atoms
    devs = [self.point_distance(atom) for atom in atoms]
    return len(np.where(np.array(devs) > 2)[0]) < 1
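# A minimal usage sketch of the Plane class. MockAtom is a hypothetical stand-in for
# whatever atom object supplies a `.coordinates` array in the real code base:
def _demo_plane_fit():
    from collections import namedtuple
    MockAtom = namedtuple('MockAtom', 'coordinates')
    atoms = [MockAtom(np.array([0.0, 0.0, 1.0])),
             MockAtom(np.array([1.0, 0.0, 1.0])),
             MockAtom(np.array([0.0, 1.0, 1.0])),
             MockAtom(np.array([1.0, 1.0, 1.0]))]
    plane = Plane(atoms)                        # fits z = a*x + b*y + d
    probe = MockAtom(np.array([0.5, 0.5, 3.0]))
    return plane.point_distance(probe)          # -> ~2.0 for this z = 1 plane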
def bond_angle(atom1,atom2,atom3):
a = atom1.coordinates
b = atom2.coordinates
c = atom3.coordinates
ba = a - b
bc = c - b
cosine_angle = np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc))
angle = np.arccos(cosine_angle)
return np.degrees(angle)
def torsional_angle(atom1,atom2,atom3,atom4):
# returns interplanar angle between planes defined by atom1, atom2, atom3, and atom2, atom3, atom4
pass
def vector(atom1,atom2, as_angstrom=False):
# returns the vector defined by the position between two atoms
pass
def calc_lstsq_displacement(disp,vectors):
A = vectors.T
xs = []
x, _, _, _ = np.linalg.lstsq(A,disp,rcond=-1)
xs.append(x)
return np.array(xs[0])
def vector_angle(v1,v2):
theta = np.arccos((v1.dot(v2))/(np.sqrt(v1.dot(v1))*np.sqrt(v2.dot(v2))))
return np.degrees(theta)
def vector_plane_angle(vector, plane):
# returns the angle made between a vector and a plane
pass
# https://stackoverflow.com/questions/14016898/port-matlab-bounding-ellipsoid-code-to-python
# Python implementation of the MATLAB function MinVolEllipse, based on the Khachiyan algorithm
# for both
# A is a matrix containing the information regarding the shape of the ellipsoid
# to get radii from A you have to do SVD on it, giving U Q and V
# 1 / sqrt(Q) gives the radii of the ellipsoid
# problems arise for planar motifs. add two extra points at centroid of +/- 0.00001*plane_normal to overcome
def mvee(atoms, tol = 0.00001):
"""
Find the minimum volume ellipse around a set of atom objects.
Return A, c where the equation for the ellipse given in "center form" is
(x-c).T * A * (x-c) = 1
[U Q V] = svd(A);
where r = 1/sqrt(Q)
V is rotation matrix
U is ???
"""
points_asarray = | np.array([atom.coordinates for atom in atoms]) | numpy.array |
''' Schemdraw base Element class '''
from collections import ChainMap
import numpy as np
from ..backends.mpl import Figure
from ..adddocs import adddocs
from ..segments import SegmentText, BBox
from ..transform import Transform, mirror_point, flip_point
gap = [np.nan, np.nan] # Put a gap in a path
def angle(a, b):
''' Compute angle from coordinate a to b '''
theta = np.degrees(np.arctan2(b[1] - a[1], (b[0] - a[0])))
return theta
def distance(a, b):
''' Compute distance from A to B '''
r = np.sqrt((a[0]-b[0])**2 + (a[1]-b[1])**2)
return r
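# Quick sanity examples for the helpers above (illustrative values only):
#   angle((0, 0), (1, 1))    -> 45.0 degrees
#   distance((0, 0), (3, 4)) -> 5.0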
class Element(object):
''' Parent class for a single circuit element.
Keyword Arguments
-------------------------
d : string
Drawing direction ['down', 'up', 'left', 'right'] or
abbreviated ['d', 'u', 'l', 'r']
at : float list [x, y]
Starting coordinate of element, defaults to current
drawing position. OR xy can be tuple of (Element, anchorname)
to be resolved after the Element has been placed (see Walrus
mode in documentation)
xy : float list [x, y]
Alias for at keyword
theta : float
Angle (degrees) of element. Overrides the `d` parameter.
flip : bool
Flip the element up/down
reverse : bool
Reverse the element (for example a DIODE)
zoom : float
Zoom/magnification factor for element. Default = 1.
anchor : string
Name of the "pin" in the element to place at `xy` in the
Drawing. Typically used for elements with more than two
terminals. For example, an OPAMP element has `in1`, `in2`,
and `out` anchors.
label, toplabel, botlabel, lftlabel, rgtlabel : string or list
Add a string to label the element on the given side.
Can be a string or list of strings that will be evenly-
spaced along the element (['-', 'V1', '+']). Use $
for latex formatting, for example `$R_1 = 100 \\Omega$`.
See also: `add_label` method.
lblofst : float
Offset between label and element
lblsize : float
Font size of labels, overrides Drawing.fontsize
for this element
lblrotate : bool
Rotate the label text to align with the element,
for example vertical text with an element having
`d="up"`.
lblloc : string
Location for drawing the label specified by `label`
parameter, either ['top', 'bot', 'lft', 'rgt', 'center']
or name of an anchor.
zorder : int
Z-order parameter for placing element in front or behind
others.
color : string
Color for the element
ls : string
Line style for the element '-', '--', ':', etc.
lw : float
Line width for the element
fill : string
Fill color for elements with closed paths or shapes
move_cur : bool
Move the Drawing cursor to the endpoint of the element
'''
def __init__(self, d=None, **kwargs):
self.userparams = kwargs
if d is not None: # Allow direction to be specified as first param without name
self.userparams['d'] = d
self.dwgparams = {} # Set by drawing in place() method
self.params = {} # Set by element defintion in setup() method
self.cparams = None # Combined (ChainMap) of above params
self.localshift = 0
self.anchors = {} # Untransformed anchors
self.absanchors = {} # Transformed, absolute anchors
self.segments = []
self.transform = Transform(0, [0, 0])
if 'xy' in self.userparams: # Allow legacy 'xy' parameter
self.userparams.setdefault('at', self.userparams.pop('xy'))
def buildparams(self):
''' Combine parameters from user, setup, and drawing '''
# Accommodate xy positions based on OTHER elements before they are fully set up.
if 'at' in self.userparams and isinstance(self.userparams['at'][1], str):
element, pos = self.userparams['at']
if pos in element.absanchors:
xy = element.absanchors[pos]
else:
raise KeyError('Unknown anchor name {}'.format(pos))
self.userparams['at'] = xy
# All subsequent actions get params from cparams
self.cparams = ChainMap(self.userparams, self.params, self.dwgparams)
self.flipreverse()
def flipreverse(self):
''' Flip and/or reverse segments if necessary '''
if self.userparams.get('flip', False):
[s.doflip() for s in self.segments]
for name, pt in self.anchors.items():
self.anchors[name] = flip_point(pt)
if self.userparams.get('reverse', False):
if 'center' in self.anchors:
centerx = self.anchors['center'][0]
else:
xmin, _, xmax, _ = self.get_bbox()
centerx = (xmin + xmax)/2
[s.doreverse(centerx) for s in self.segments]
for name, pt in self.anchors.items():
self.anchors[name] = mirror_point(pt, centerx)
def place(self, dwgxy, dwgtheta, **dwgparams):
''' Determine position within the drawing '''
self.dwgparams = dwgparams
if self.cparams is None:
self.buildparams()
anchor = self.cparams.get('anchor', None)
zoom = self.cparams.get('zoom', 1)
xy = np.asarray(self.cparams.get('at', dwgxy))
# Get bounds of element, used for positioning user labels
self.bbox = self.get_bbox()
if 'endpts' in self.cparams:
theta = dwgtheta
elif self.cparams.get('d') is not None:
theta = {'u': 90, 'r': 0, 'l': 180, 'd': 270}[self.cparams.get('d')[0].lower()]
else:
theta = self.cparams.get('theta', dwgtheta)
if anchor is not None:
self.localshift = -np.asarray(self.anchors[anchor])
self.transform = Transform(theta, xy, self.localshift, zoom)
# Add user-defined labels
# user-defined labels - allow element def to define label location
lblloc = self.cparams.get('lblloc', 'top')
lblsize = self.cparams.get('lblsize', self.cparams.get('fontsize', 14))
lblrotate = self.cparams.get('lblrotate', False)
lblcolor = self.cparams.get('color', 'black')
userlabels = {
'top': self.cparams.get('toplabel', None),
'bot': self.cparams.get('botlabel', None),
'lft': self.cparams.get('lftlabel', None),
'rgt': self.cparams.get('rgtlabel', None),
'center': self.cparams.get('clabel', None)
}
if 'label' in self.cparams:
userlabels[lblloc] = self.cparams.get('label')
for loc, label in userlabels.items():
if label is not None:
rotation = (theta if lblrotate else 0)
self.add_label(label, loc,# size=lblsize,
rotation=rotation, color=lblcolor)
# Add element-specific anchors
for name, pos in self.anchors.items():
self.absanchors[name] = self.transform.transform(np.array(pos))
self.absanchors['xy'] = self.transform.transform([0, 0])
# Set all anchors as attributes
for name, pos in self.absanchors.items():
if getattr(self, name, None) is not None:
# Don't clobber element parameter names!
name = 'anchor_' + name
setattr(self, name, pos)
drop = self.cparams.get('drop', None)
if drop is None or not self.cparams.get('move_cur', True):
return dwgxy, dwgtheta
elif self.params.get('theta', None) == 0:
# Element def specified theta = 0, don't change
return self.transform.transform(drop), dwgtheta
else:
return self.transform.transform(drop), theta
def get_bbox(self, transform=False):
''' Get element bounding box, including path and shapes.
Parameters
----------
transform : bool
Apply the element transform to the bbox
Returns
-------
xmin, ymin, xmax, ymax
Corners of the bounding box
'''
xmin = ymin = np.inf
xmax = ymax = -np.inf
for segment in self.segments:
if transform:
segment = segment.xform(self.transform)
segxmin, segymin, segxmax, segymax = segment.get_bbox()
xmin = min(xmin, segxmin)
xmax = max(xmax, segxmax)
ymin = min(ymin, segymin)
ymax = max(ymax, segymax)
return BBox(xmin, ymin, xmax, ymax)
def add_label(self, label, loc='top', ofst=None, align=None, rotation=0, **kwargs):
''' Add a label to the element
Parameters
----------
label : string or list
Text to add. If list, list items will be evenly spaced
along the element.
loc : string
Location for text relative to element, either
['top', 'bot', 'lft', 'rgt'] or name of an anchor
ofst : float or list
Offset between text and element. Defaults to Element.lblofst.
Can be list of [x, y] offets.
align : tuple
Tuple of (horizontal, vertical) alignment where horizontal
is ['center', 'left', 'right'] and vertical is ['center',
'top', 'bottom']
rotation : float
Rotation angle (degrees)
Keyword Arguments
-----------------
fontsize : float
Font size
font:
color: string
Label text color
'''
rotation = (rotation + 360) % 360
if rotation > 90 and rotation < 270:
rotation -= 180 # Keep the label from going upside down
# This ensures a 'top' label is always on top, regardless of rotation
theta = self.transform.theta
if (theta % 360) > 90 and (theta % 360) <= 270:
if loc == 'top':
loc = 'bot'
elif loc == 'bot':
loc = 'top'
elif loc == 'lft':
loc = 'rgt'
elif loc == 'rgt':
loc = 'lft'
if align is None: # Determine best alignment for label based on angle
th = theta - rotation
# Below alignment divisions work for label on top. Rotate angle for other sides.
if loc == 'lft':
th = th + 90
elif loc == 'bot':
th = th + 180
elif loc == 'rgt':
th = th + 270
th = (th+360) % 360 # Normalize angle so it's positive, clockwise
rotalign = [('center', 'bottom'), # label on top
('right', 'bottom'),
('right', 'center'), # label on right
('right', 'top'),
('center', 'top'), # label on bottom
('left', 'top'),
('left', 'center'), # label on left
('left', 'bottom')]
# Index into rotalign for a "top" label that's been rotated
rotalignidx = int(round((th/360)*8) % 8)
if loc in self.anchors:
x1, y1, x2, y2 = self.get_bbox()
if (np.isclose(self.anchors[loc][0], x1, atol=.15) or
np.isclose(self.anchors[loc][0], x2, atol=.15) or
np.isclose(self.anchors[loc][1], y1, atol=.15) or
np.isclose(self.anchors[loc][1], y2, atol=.15)):
# Anchor is on an edge
dofst = self.cparams.get('lblofst', .1)
if np.isclose(self.anchors[loc][0], x1, atol=.15):
alignH = 'right'
ofstx = -dofst
elif np.isclose(self.anchors[loc][0], x2, atol=.15):
alignH = 'left'
ofstx = dofst
else:
alignH = 'center'
ofstx = 0
if np.isclose(self.anchors[loc][1], y1, atol=.15):
alignV = 'top'
ofsty = -dofst
elif np.isclose(self.anchors[loc][1], y2, atol=.15):
alignV = 'bottom'
ofsty = dofst
else:
alignV = 'center'
ofsty = 0
align = (alignH, alignV)
rotalignidx = (rotalign.index(align) + round((th/360)*8)) % 8
if ofst is None and not isinstance(label, (tuple, list)):
ofst = [ofstx, ofsty]
if loc == 'center':
align = ('center', 'center')
else:
align = rotalign[rotalignidx]
xmax = self.bbox.xmax
xmin = self.bbox.xmin
ymax = self.bbox.ymax
ymin = self.bbox.ymin
if not np.isfinite(xmax+xmin+ymax+ymin):
xmax = xmin = ymax = ymin = .1
lblparams = dict(ChainMap(kwargs, self.cparams))
lblparams.pop('label', None) # Can't pop from nested chainmap, convert to flat dict first
lblparams.update({'align': align, 'rotation': rotation})
if ofst is None:
ofst = self.cparams.get('lblofst', .1)
if isinstance(label, (list, tuple)):
# Divide list along length
if loc == 'top':
for i, lbltxt in enumerate(label):
xdiv = (xmax-xmin)/(len(label)+1)
xy = [xmin+xdiv*(i+1), ymax]
ofst = [0, ofst] if not isinstance(ofst, (list, tuple)) else ofst
self.segments.append(SegmentText(np.asarray(xy)+np.asarray(ofst), lbltxt, **lblparams))
elif loc == 'bot':
for i, lbltxt in enumerate(label):
xdiv = (xmax-xmin)/(len(label)+1)
xy = [xmin+xdiv*(i+1), ymin]
ofst = [0, -ofst] if not isinstance(ofst, (list, tuple)) else ofst
self.segments.append(SegmentText(np.asarray(xy)+np.asarray(ofst), lbltxt, **lblparams))
elif loc == 'lft':
for i, lbltxt in enumerate(label):
ydiv = (ymax-ymin)/(len(label)+1)
xy = [xmin, ymin+ydiv*(i+1)]
ofst = [-ofst, 0] if not isinstance(ofst, (list, tuple)) else ofst
self.segments.append(SegmentText(np.asarray(xy)+np.asarray(ofst), lbltxt, **lblparams))
elif loc == 'rgt':
for i, lbltxt in enumerate(label):
ydiv = (ymax-ymin)/(len(label)+1)
xy = [xmax, ymin+ydiv*(i+1)]
ofst = [ofst, 0] if not isinstance(ofst, (list, tuple)) else ofst
self.segments.append(SegmentText( | np.asarray(xy) | numpy.asarray |
"""
Economic Model for VF
Created on 30 March 2020
Author: <NAME>
Contact: <EMAIL>
"""
# ========= IMPORT LIBRARIES ======= #
import numpy as np
import math
import matplotlib.pyplot as plt
import datetime
# ========== GLOBAL VARIABLES ====== #
#Time parameters
YEARLY_TO_MONTHLY_31 = 11.77
DAYS_IN_MONTH = 31
DAYS_IN_YEAR = 365
WEEKS_IN_YEAR = 52
DAYS_IN_WEEK = 7
DAYS_IN_QUARTER = 112
ROI_THRESHOLD = -5 # Below this Number indicates Bankruptcy
GROWING_AREA_RATIO_TO_TOTAL = 0.5
# =========== CREATION OF TIME SERIES
days = 3660 # Days of simulation
days_timeseries =[] # Creation of a timeseries for days
for i in range(days+1):
days_timeseries.append(i)
years = math.floor(days / 365) # Years of Simulation
years_series = [] # Creation of time series for years
for i in range(years + 1):
years_series.append(i)
# ====== USER INPUTS ======== #
yield_required = 9000 #Annual yield (kg)
harvest_weight = 0.1 # 100g of lettuce
land_area = 200
crop_price = 10 # £ per kg
crops_per_area = 20 # per sq-m of growbed
no_of_tiers = 15
# Capital Expenditure
def calc_capex(land_area):
'''
PP. 51 of Plant Factory
Initial cost including necessary facilities (15 tiers, 50cm distance between tiers)
$4000 USD per sq-m x 0.8 for £
'''
capex = 4000*0.8*land_area
return capex
# Annual Productivity
def calc_yield(land_area, GROWING_AREA_RATIO_TO_TOTAL, no_of_tiers, crops_per_area):
'''
PP. 51 of Plant Factory
3000 lettuce heads per sq-m per year (80-100g fresh weight)
20 plants per sq-m (culture bed) x 15 tiers x 0.9 ratio salable
x 0.5 effective floor ratio of tiers to total floor area
50% of floor area used for operations, walkway, seedlings, production
equipment.
12-15 days to harvest
20-22 days seed to seedling
'''
yield_potential = land_area * GROWING_AREA_RATIO_TO_TOTAL\
* crops_per_area * no_of_tiers * harvest_weight
return yield_potential
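# Worked example with the user inputs above (hedged: pure arithmetic, no cycles-per-year factor):
#   200 m^2 floor x 0.5 growing-area ratio x 20 plants/m^2 x 15 tiers x 0.1 kg/head = 3000 kg
#   i.e. calc_yield(200, GROWING_AREA_RATIO_TO_TOTAL, 15, 20) -> 3000.0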
# =========== OVERALL FIXED COSTS ========== #
# Depreciation of building and facilities
# Tax or Rent of Land/Building
# Insurance
# Basic Salaries
# Basic Charges for Electricity and Municipal Water
# ========== ACTIVITIES ====== #
'''
Cost Components from PP.52 Plant Factory
Labour : 25-30%
Electricity: 25-30%
Depreciation: 25-35%
Logistics: 9.8%
Consumables: 7.6%
Seeds: 2.1%
Other: 11%
'''
# --------- PURCHASING CONSUMABLES ------- #
# CLEANING SUPPLIES - FIXED COST
# SEEDS - VARIABLE COST
def calc_seeds(yield_potential):
'''
Seeds typically account for 2.1% production costs
PP. 51 of Plant Factory
3000 lettuce heads per sq-m per year (80-100g fresh weight)
20 plants per sq-m (culture bed) x 15 tiers x 0.9 ratio salable
x 0.5 effective floor ratio of tiers to total floor area
50% of floor area used for operations, walkway, seedlings, production
equipment.
12-15 days to harvest
20-22 days seed to seedling
'''
qty_of_seeds = yield_potential/harvest_weight # annual qty of seeds required
seeds_cost = qty_of_seeds * 0.01
return seeds_cost
# PACKAGING - VARIABLE COST
"""
Consumables typically account for 7.5% production costs
"""
# SUBSTRATE - VARIABLE COST
# NUTRIENTS - VARIABLE COST
# CO2 - VARIABLE COST
# PEST MANAGEMENT - VARIABLE COST
# --------- SOWING AND PROPAGATION ------- #
# CLEANING & SYSTEM MAINTENANCE - FIXED COST
# WATER & ENERGY - FIXED COST
# DEPRECIATION - FIXED COST
# LABOUR - VARIABLE COST
# --------- GROWING ------- #
# UTILITIES -
"""
Electricity typically accounts for 21% of Production costs PP.52 Plant Factory
"""
def calc_utilities(yield_potential): # Energy and Water
water_consumption = yield_potential*1
energy_consumption = yield_potential*1
utilities_annual = water_consumption*energy_consumption
return utilities_annual
# LABOUR
def calc_labour(yield_potential):
"""
Labour Costs Formaula
Notes
------
Direct farm labour cost = Number of staff working full-time x wages x 30 hours
Generalisation if statement on farm labour required if unknown
"""
farm_hours = yield_potential*0.2
labour_cost = farm_hours * 7 # wage
return labour_cost
# DEPRECIATION
'''
The economic life period for calculating the depreciation differs from country to country.
In Japan, it is 15 years for the PFAL building, 10 years for the facilities, and 5 years
for the LED lamps.
Typically accounts for 21% of Production costs
'''
# EXPECTED YIELDS
def calc_expected_yield(yield_potential):
yield_rate = 0.97 # Ratio of marketable plants produced by divided by no. of seeds transplanted
expected_yield = yield_potential * yield_rate
return expected_yield
# --------- HARVESTING AND PACKAGING ------- #
# LABOUR - Variable costs
# CLEANING & SYSTEM MAINTENANCE - Variable costs
# WASTE MANAGEMENT
# --------- PACKING AND DELIVERY ------- #
# DELIVERY LABOUR / OUTSOURCING FEES
"""
Packing and Delivery Typically 6-8% of production cost when near City
12% when outside city PP.52 of Plant Factory
"""
"""
Logistics typically accounts for 9.8% PP.52 of Plant Factory
"""
# VEHICLE MAINTENANCE AND INSURANCE
# COMPLIANCE
# --------- SALES & MARKETING ------- #
# MARKETING COSTS
# OFFICE EXPENSES
# ==================== FINANCES ================ #
# OpEx Time series
'''The component costs for electricity, labor,
depreciation, and others of the PFAL using fluorescent (FL) lamps
in Japan accounted for, on average, 25% e 30%, 25% e 30%, 25% e 35%,
and 20%, respectively.
'''
def calc_cogs(yield_potential):
'''
seeds_cost + nutrients_cost + co2_cost + (labour_cost * 50) + packaging costs + media costs
'''
cogs_annual = yield_potential*2
return cogs_annual
def calc_cogs_time_series(days, cogs_annual):
"""
Cost of Goods Sold Formula
Notes
-----
Can adjust for days/weekly/monthly/annually in the future - ASSUMED: CONSUMABLES PURCHASED MONTHLY
"""
cogs_time_series = []
for i in range(days):
if i % DAYS_IN_MONTH == 0:
cogs_time_series.append(cogs_annual / YEARLY_TO_MONTHLY_31)
else:
cogs_time_series.append(0)
return cogs_time_series
def calc_opex_time_series(days, labour_cost, utilities):
"""
Can adjust for days/weekly/monthly/annually in the future - ASSUMED: OPEX BILLED MONTHLY
Operations = Bill Growth Lights + Bill Environmental Control + Bill Misc Energy + Water Bill + Salary Cost + Maintenance Cost +
Distribution cost - Reduction from Renewable Energy
"""
opex_time_series = []
for i in range(days):
opex = 0
if i % DAYS_IN_MONTH == 0:
opex += (labour_cost / YEARLY_TO_MONTHLY_31) + (utilities / YEARLY_TO_MONTHLY_31)
if i % DAYS_IN_YEAR == 0:
opex += 0
opex_time_series.append(opex)
return opex_time_series
# Sales (ANNUALLY)
def calc_sales(expected_yield): # per year
sales = expected_yield * 15  # £15 per kg (hard-coded; note the module-level crop_price is £10)
return sales
def calc_revenue_time_series(days, sales):
revenue_time_series = []
for i in range(days):
revenue = 0
if i % DAYS_IN_WEEK == 0:
revenue += (sales/WEEKS_IN_YEAR) # sales across 365 days of the year
revenue_time_series.append(revenue)
return revenue_time_series
# Profit
def calc_profit_time_series(opex_time_series, cogs_time_series, revenue_time_series):
opex = np.asarray(opex_time_series)
cogs = np.asarray(cogs_time_series)
revenue = | np.asarray(revenue_time_series) | numpy.asarray |
import logging
from typing import List, Callable
import numpy as np
from pyquaternion import Quaternion
from pyrep import PyRep
from pyrep.errors import IKError
from pyrep.objects import Dummy, Object
from rlbench import utils
from rlbench.action_modes import ArmActionMode, ActionMode
from rlbench.backend.exceptions import BoundaryError, WaypointError
from rlbench.backend.observation import Observation
from rlbench.backend.robot import Robot
from rlbench.backend.scene import Scene
from rlbench.backend.task import Task
from rlbench.demo import Demo
from rlbench.observation_config import ObservationConfig
_TORQUE_MAX_VEL = 9999
_DT = 0.05
_MAX_RESET_ATTEMPTS = 40
_MAX_DEMO_ATTEMPTS = 10
class InvalidActionError(Exception):
pass
class TaskEnvironmentError(Exception):
pass
class TaskEnvironment(object):
def __init__(self, pyrep: PyRep, robot: Robot, scene: Scene, task: Task,
action_mode: ActionMode, dataset_root: str,
obs_config: ObservationConfig,
static_positions: bool = False,
attach_grasped_objects: bool = True):
self._pyrep = pyrep
self._robot = robot
self._scene = scene
self._task = task
self._variation_number = 0
self._action_mode = action_mode
self._dataset_root = dataset_root
self._obs_config = obs_config
self._static_positions = static_positions
self._attach_grasped_objects = attach_grasped_objects
self._reset_called = False
self._prev_ee_velocity = None
self._enable_path_observations = False
self._scene.load(self._task)
self._pyrep.start()
self._target_workspace_check = Dummy.create()
self._last_e = None
def get_name(self) -> str:
return self._task.get_name()
def sample_variation(self) -> int:
self._variation_number = np.random.randint(
0, self._task.variation_count())
return self._variation_number
def set_variation(self, v: int) -> None:
if v >= self.variation_count():
raise TaskEnvironmentError(
'Requested variation %d, but there are only %d variations.' % (
v, self.variation_count()))
self._variation_number = v
def variation_count(self) -> int:
return self._task.variation_count()
def reset(self) -> (List[str], Observation):
self._scene.reset()
try:
desc = self._scene.init_episode(
self._variation_number, max_attempts=_MAX_RESET_ATTEMPTS,
randomly_place=not self._static_positions)
except (BoundaryError, WaypointError) as e:
raise TaskEnvironmentError(
'Could not place the task %s in the scene. This should not '
'happen, please raise an issues on this task.'
% self._task.get_name()) from e
self._reset_called = True
# redundancy resolution
self._last_e = None
# Returns a list of descriptions and the first observation
return desc, self._scene.get_observation()
def get_observation(self) -> Observation:
return self._scene.get_observation()
def get_joint_upper_velocity_limits(self):
return self._robot.arm.get_joint_upper_velocity_limits()
def get_all_graspable_objects(self):
return self._task.get_graspable_objects()
def get_robot_visuals(self):
return self._robot.arm.get_visuals()
def get_all_graspable_object_positions(self, relative_to_cameras=False):
""" returns the positions of all graspable object relative to all enabled cameras """
objects = self._task.get_graspable_objects()
positions = []
for ob in objects:
if relative_to_cameras:
positions.append(self._scene.get_object_position_relative_to_cameras(ob))
else:
positions.append({"left_shoulder_camera": ob.get_position(),
"right_shoulder_camera": ob.get_position(),
"front_camera": ob.get_position(),
"wrist_camera": ob.get_position()})
return positions
def get_all_graspable_object_poses(self, relative_to_cameras=False):
""" returns the pose of all graspable object relative to all enabled cameras """
objects = self._task.get_graspable_objects()
poses = []
for ob in objects:
if relative_to_cameras:
poses.append(self._scene.get_object_pose_relative_to_cameras(ob))
else:
poses.append({"left_shoulder_camera": ob.get_pose(),
"right_shoulder_camera": ob.get_pose(),
"front_camera": ob.get_pose(),
"wrist_camera": ob.get_pose()})
return poses
def _assert_action_space(self, action, expected_shape):
if np.shape(action) != expected_shape:
raise RuntimeError(
'Expected the action shape to be: %s, but was shape: %s' % (
str(expected_shape), str(np.shape(action))))
def _assert_unit_quaternion(self, quat):
if not np.isclose(np.linalg.norm(quat), 1.0):
raise RuntimeError('Action contained non unit quaternion!')
def _torque_action(self, action):
self._robot.arm.set_joint_target_velocities(
[(_TORQUE_MAX_VEL if t < 0 else -_TORQUE_MAX_VEL)
for t in action])
self._robot.arm.set_joint_forces(np.abs(action))
def _ee_action(self, action, relative_to=None):
self._assert_unit_quaternion(action[3:])
try:
joint_positions = self._robot.arm.solve_ik(
action[:3], quaternion=action[3:], relative_to=relative_to)
self._robot.arm.set_joint_target_positions(joint_positions)
except IKError as e:
raise InvalidActionError('Could not find a path.') from e
done = False
prev_values = None
# Move until reached target joint positions or until we stop moving
# (e.g. when we collide wth something)
while not done:
self._scene.step()
cur_positions = self._robot.arm.get_joint_positions()
reached = np.allclose(cur_positions, joint_positions, atol=0.01)
not_moving = False
if prev_values is not None:
not_moving = np.allclose(
cur_positions, prev_values, atol=0.001)
prev_values = cur_positions
done = reached or not_moving
def _path_action(self, action, relative_to=None):
self._assert_unit_quaternion(action[3:])
try:
# Check if the target is in the workspace; if not, then quick reject
# Only checks position, not rotation
pos_to_check = action[:3]
if relative_to is not None:
self._target_workspace_check.set_position(
pos_to_check, relative_to)
pos_to_check = self._target_workspace_check.get_position()
valid = self._scene.check_target_in_workspace(pos_to_check)
if not valid:
raise InvalidActionError('Target is outside of workspace.')
path = self._robot.arm.get_path(
action[:3], quaternion=action[3:], ignore_collisions=True,
relative_to=relative_to)
done = False
observations = []
while not done:
done = path.step()
self._scene.step()
if self._enable_path_observations:
observations.append(self._scene.get_observation())
success, terminate = self._task.success()
# If the task succeeds while traversing path, then break early
if success:
break
observations.append(self._scene.get_observation())
return observations
except IKError as e:
raise InvalidActionError('Could not find a path.') from e
def step(self, action, camcorder=None) -> (Observation, int, bool):
# returns observation, reward, done, info
if not self._reset_called:
raise RuntimeError(
"Call 'reset' before calling 'step' on a task.")
# action should contain 1 extra value for gripper open close state
arm_action = np.array(action[:-1])
ee_action = action[-1]
if ee_action < 0.0 or ee_action > 1.0:
raise ValueError('Gripper action expected to be within 0 and 1.')
# Discretize the gripper action
current_ee = (1.0 if self._robot.gripper.get_open_amount()[0] > 0.9 else 0.0)
if ee_action > 0.5:
ee_action = 1.0
elif ee_action < 0.5:
ee_action = 0.0
if current_ee != ee_action:
arm_action = | np.array([0.0]*7) | numpy.array |
import numpy as np
from skimage.exposure import equalize_adapthist
import torch
from scipy.ndimage import gaussian_filter
import scipy
import random
import torch as th
from PIL import Image
from scipy.interpolate import RectBivariateSpline
class MyRandomImageContrastTransform(object):
def __init__(self, random_state=None, is_labelmap=[False, True], clip_limit_range=[0.01, 1], nbins=256,
enable=False):
"""
Perform Contrast Limited Adaptive Histogram Equalization (CLAHE)
. An algorithm for local contrast enhancement, that uses histograms computed over different tile regions of the
image. Local details can therefore be enhanced even in regions that are darker or lighter than most of the image.
Based on https://scikit-image.org/docs/dev/api/skimage.exposure.html?highlight=equalize_adapthist#skimage
.exposure.equalize_adapthist
Arguments
---------
"""
self.random_state = random_state
self.clip_limit_range = clip_limit_range # [0,1] The larger the value, the higher the contrast
self.nbins = nbins
self.is_label_map = is_labelmap
self.enable = enable
def __call__(self, *inputs):
if self.enable:
outputs = []
assert len(self.is_label_map) == len(
inputs), 'for each input, must clarify whether this is a label map or not.'
clip_limit = np.random.uniform(low=self.clip_limit_range[0], high=self.clip_limit_range[1])
for idx, _input in enumerate(inputs):
_input = _input.numpy()
flag = self.is_label_map[idx]
if flag:
result = _input
else:
print(_input.shape)
result = np.zeros(_input.shape, dtype=_input.dtype)
for i in range(_input.shape[0]):
temp = _input[i]
print('temp shape', temp.shape)
_input_min = temp.min()
_input_max = temp.max()
## clahe requires intensity to be Uint16
temp = intensity_normalise(temp, perc_threshold=(0., 100.0), min_val=0, max_val=255)
temp = np.int16(temp)
clahe_output = equalize_adapthist(temp, clip_limit=clip_limit, nbins=self.nbins)
## recover intensity range
result[i] = intensity_normalise(clahe_output, perc_threshold=(0., 100.0), min_val=_input_min,
max_val=_input_max)
tensorresult = torch.from_numpy(result).float()
outputs.append(tensorresult)
return outputs if idx >= 1 else outputs[0]
else:
outputs = inputs
return outputs
class RandomGamma(object):
'''
Perform Random Gamma Contrast Adjusting
support 2D and 3D
'''
def __init__(self, p_thresh=0.5, gamma_range=[0.8, 1.4], gamma_flag=True, preserve_range=True):
"""
Randomly do gamma to a torch tensor
Arguments
--------
:param gamma_flag: [bool] list of flags for gamma aug
"""
self.gamma_range = gamma_range
self.p_thresh = p_thresh
self.gamma_flag = gamma_flag
self.preserve_range = preserve_range ## if preserve the range to be in [min,max]
def __call__(self, *inputs):
outputs = []
if np.random.rand() < self.p_thresh:
gamma = random.random() * (self.gamma_range[1] - self.gamma_range[0]) + self.gamma_range[0] #
# print ('gamma: %f',gamma)
for idx, _input in enumerate(inputs):
assert inputs[0].size() == _input.size()
if (self.gamma_flag[idx]):
assert gamma > 0
if self.preserve_range:
self.c_min = _input.min()
self.c_max = _input.max()
_input = _input ** (1.0 / gamma)
if self.preserve_range:
_input[_input < self.c_min] = self.c_min
_input[_input > self.c_max] = self.c_max
outputs.append(_input)
else:
idx = len(inputs)
outputs = inputs
return outputs if idx >= 1 else outputs[0]
class RandomBrightnessFluctuation(object):
'''
Perform image contrast and brightness augmentation.
support 2D and 3D
'''
def __init__(self, p=0.5, contrast_range=[0.8, 1.2], brightness_range=[-0.1, 0.1], flag=True, preserve_range=True):
"""
Arguments
--------
:param flag: [bool] list of flags for aug
"""
self.contrast_range = contrast_range
self.brightness_range = brightness_range
self.p_thresh = p
self.flag = flag
self.preserve_range = preserve_range ## if preserve the range to be in [min,max]
def __call__(self, *inputs):
outputs = []
if np.random.rand() < self.p_thresh:
scale = random.random() * (self.contrast_range[1] - self.contrast_range[0]) + self.contrast_range[0] #
brightness = random.random() * (self.brightness_range[1] - self.brightness_range[0]) + \
self.brightness_range[0]
# print ('gamma: %f',gamma)
for idx, _input in enumerate(inputs):
assert inputs[0].size() == _input.size()
if (self.flag[idx]):
assert scale > 0
if self.preserve_range:
self.c_min = _input.min()
self.c_max = _input.max()
_input = _input * scale + brightness
if self.preserve_range:
_input[_input < self.c_min] = self.c_min
_input[_input > self.c_max] = self.c_max
outputs.append(_input)
else:
idx = len(inputs)
outputs = inputs
return outputs if idx >= 1 else outputs[0]
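# Hedged usage sketch (not part of the original module): the two photometric
# transforms above share the same calling convention -- set the probability
# to 1.0 to force the augmentation and mark the input as eligible via the
# flag list. The image shape is an assumption.
def _demo_photometric_augmentation():
    image = torch.rand(1, 64, 64)
    image = RandomGamma(p_thresh=1.0, gamma_flag=[True])(image)
    image = RandomBrightnessFluctuation(p=1.0, flag=[True])(image)
    return image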
def intensity_normalise(img_data, perc_threshold=(0., 99.0), min_val=0., max_val=1):
'''
intensity_normalise
Works by calculating :
a = (max'-min')/(max-min)
b = max' - a * max
new_value = a * value + b
img_data=3D matrix [N*H*W]
'''
if len(img_data.shape) == 3:
output = np.zeros_like(img_data)
assert img_data.shape[0] < img_data.shape[1], 'check data is formatted as N*H*W'
for idx in range(img_data.shape[0]): #
slice_data = img_data[idx]
a_min_val, a_max_val = np.percentile(slice_data, perc_threshold)
## restrict the intensity range
slice_data[slice_data <= a_min_val] = a_min_val
slice_data[slice_data >= a_max_val] = a_max_val
## perform normalisation
scale = (max_val - min_val) / (a_max_val - a_min_val)
bias = max_val - scale * a_max_val
output[idx] = slice_data * scale + bias
return output
elif len(img_data.shape) == 2:
a_min_val, a_max_val = np.percentile(img_data, perc_threshold)
## restrict the intensity range
img_data[img_data <= a_min_val] = a_min_val
img_data[img_data >= a_max_val] = a_max_val
## perform normalisation
scale = (max_val - min_val) / (a_max_val - a_min_val)
bias = max_val - scale * a_max_val
output = img_data * scale + bias
return output
else:
raise NotImplementedError
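# Hedged worked example (not part of the original module) of the linear
# rescaling used by intensity_normalise: with min'=0, max'=1 and an input
# whose percentile range is [10, 50], a = (1-0)/(50-10) = 0.025 and
# b = 1 - 0.025*50 = -0.25, so 10 maps to 0.0 and 50 maps to 1.0.
def _demo_intensity_normalise():
    slice_2d = np.linspace(10.0, 50.0, num=16).reshape(4, 4)
    return intensity_normalise(slice_2d, perc_threshold=(0.0, 100.0), min_val=0.0, max_val=1.0)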
def contrast_enhancement(img_data, clip_limit=0.01, nbins=256):
if len(img_data.shape) == 3:
output = np.zeros_like(img_data)
assert img_data.shape[0] < img_data.shape[1], 'check data is formatted as N*H*W'
for idx in range(img_data.shape[0]): #
slice_data = img_data[idx]
slice_data = equalize_adapthist(slice_data, clip_limit=clip_limit, nbins=nbins)
output[idx] = slice_data
return output
else:
raise NotImplementedError
class MyNormalizeMedicPercentile(object):
"""
Given min_val: float and max_val: float,
will normalize each channel of the th.*Tensor to
the provided min and max values.
Works by calculating :
a = (max'-min')/(max-min)
b = max' - a * max
new_value = a * value + b
where min' & max' are given values,
and min & max are observed min/max for each channel
"""
def __init__(self,
min_val=0.0,
max_val=1.0,
perc_threshold=(1.0, 95.0),
norm_flag=True):
"""
Normalize a tensor between a min and max value
:param min_val: (float) lower bound of normalized tensor
:param max_val: (float) upper bound of normalized tensor
:param perc_threshold: (float, float) percentile of image intensities used for scaling
:param norm_flag: [bool] list of flags for normalisation
"""
self.min_val = min_val
self.max_val = max_val
self.perc_threshold = perc_threshold
self.norm_flag = norm_flag
def __call__(self, *inputs):
# prepare the normalisation flag
if isinstance(self.norm_flag, bool):
norm_flag = [self.norm_flag] * len(inputs)
else:
norm_flag = self.norm_flag
outputs = []
eps = 1e-8
for idx, _input in enumerate(inputs):
if norm_flag[idx]:
# determine the percentiles and threshold the outliers
_min_val, _max_val = np.percentile(_input.numpy(), self.perc_threshold)
_input[th.le(_input, _min_val)] = _min_val
_input[th.ge(_input, _max_val)] = _max_val
# scale the intensity values
a = (self.max_val - self.min_val) / ((_max_val - _min_val) + eps)
b = self.max_val - a * _max_val
_input = _input.mul(a).add(b)
outputs.append(_input)
return outputs if idx >= 1 else outputs[0]
class MyRandomPurtarbation(object):
"""
"""
def __init__(self,
multi_control_points=[2,4,8],
max_sigma=16,
flag=True,
add_noise=True,
epsilon=0.01,
p=0.5,
magnitude=0.3
):
"""
Running random perturbation on images
:param multi_control_points: list of number of control points at each scale, by default, only use 4 control
points.
:param max_sigma: float, a parameter to control the scale of gaussian filter for smoothness
:param flag: whether to apply the perturbation to each input in the list
:param add_noise: boolean: adding random gaussian noise: default: True
:param epsilon: float, a scalar to control the level of noise, Default: 0.01
:param p: the probability of performing perturbation. Default: 0.5
"""
self.multi_control_points = multi_control_points
self.max_sigma = max_sigma
self.flag = flag
self.add_noise = add_noise
self.epsilon = epsilon
assert magnitude>=0 and magnitude<1,'magnitude must be in [0,1)'
self.magnitude=magnitude
self.p = p
def __call__(self, *inputs):
# prepare the perturbation flag
if isinstance(self.flag, bool):
flag = [self.flag] * len(inputs)
else:
flag = self.flag
if np.random.rand() >= self.p:
# do nothing
return inputs
else:
outputs = []
if isinstance(self.multi_control_points, list):
self.multi_control_points.sort()
else:
raise ValueError
for idx, input in enumerate(inputs):
if flag[idx]:
_input = input.numpy()
if np.abs(np.sum(_input) - 0) > 1e-6:
##random generate bias field
ch, h, w = _input.shape[0], _input.shape[1], _input.shape[2]
total_bias_field = np.zeros((h, w))
## from coarse grid to fine grid
for control_points in self.multi_control_points:
assert control_points <= np.min((h,
w)), 'num of control points at each scale must be ' \
'smaller or equal to the original image size'
control_points_field = np.float32(np.random.uniform(0, 1, (control_points, control_points)))
sigma = control_points * 2.0
if sigma > self.max_sigma: sigma = self.max_sigma
control_points_field = gaussian_filter(control_points_field, sigma)
interp = np.array(
Image.fromarray(control_points_field, mode='L').resize((h, w), resample=Image.BICUBIC),
dtype=np.float32)
interp = interp / (1.0 * interp.sum() * control_points + 1e-12)
total_bias_field += interp
total_bias_field = gaussian_filter(total_bias_field, self.max_sigma)
total_bias_field = (total_bias_field / (
1.0 * total_bias_field.sum() + 1e-12)) * h * w ## should be close to an identity
# restrict values to [1-magnitude, 1+magnitude]
total_bias_field=np.clip(total_bias_field,1-self.magnitude,1+self.magnitude)
## bias image
_input = np.repeat(total_bias_field[np.newaxis, :, :], repeats=ch, axis=0) * _input
_min_val = np.min(np.array(_input))
_max_val = np.max(np.array(_input))
_input = (_input - _min_val) / (_max_val - _min_val + 1e-8)
## add gaussian noise
if self.add_noise:
noise = np.random.randn(ch, h, w)
noise = noise * self.epsilon
_input = _input + noise
_input = np.clip(_input, 0, 1)
else:
print('ignore black images')
#
input = torch.from_numpy(_input).float()
# print (input.size())
outputs.append(input)
return outputs if idx >= 1 else outputs[0]
class MyRandomPurtarbationV2(object):
"""
"""
def __init__(self,
ms_control_point_spacing=[32],
magnitude=0.2,
flag=True,
add_noise=True,
epsilon=0.01,
p=0.5,
debug=False,
spline_dgree=3,
spline_smoothness=3,
):
"""
Running random perturbation on images, perturbation is smoothed using bspline interpolation
:param ms_control_point_spacing: list of control point spacing at each scale. Prefer to use 5x5
control points in the coarse grid (images are divided into 4x4).
:param magnitude: float, control the value range of knots vectors at the initialization stage
:param flag: whether to apply the perturbation to each input in the list
:param add_noise: boolean: adding random gaussian noise: default: True
:param epsilon: float, a scalar to control the level of noise, Default: 0.01
:param spline_dgree: int,degree of bivariate spline, default =3
:param p: the probability of performing perturbation. Default: 0.5
"""
assert len(ms_control_point_spacing) >= 1, 'must specify at least one spacing, but got {}'.format(
str(ms_control_point_spacing))
assert np.abs(magnitude)<1, 'must set magnitude x in a reasonable range, bias field value 1+/-magnitude can not be zero or negative'
self.ms_control_point_spacing = ms_control_point_spacing
self.magnitude = magnitude
self.flag = flag
self.add_noise = add_noise
self.epsilon = epsilon
self.spline_dgree = spline_dgree
self.spline_smoothness = spline_smoothness
self.p = p
self.debug = debug
def __call__(self, *inputs):
# prepare the perturbation flag
if isinstance(self.flag, bool):
flag = [self.flag] * len(inputs)
else:
flag = self.flag
if np.random.rand() >= self.p:
# do nothing
return inputs
else:
outputs = []
if isinstance(self.ms_control_point_spacing, list):
## from coarse to fine:
self.ms_control_point_spacing.sort(reverse=True)
if not self.ms_control_point_spacing[-1] == 1:
self.ms_control_point_spacing.append(1)
self.ms_control_point_spacing.sort(reverse=True)
else:
raise ValueError
for idx, input in enumerate(inputs):
if flag[idx]:
_input = input.numpy()
if np.abs(np.sum(_input) - 0) > 1e-6:
##random generate bias field
ch, orig_h, orig_w = _input.shape[0], _input.shape[1], _input.shape[2]
assert orig_h == orig_w, 'currently only support square images for simplicity, but found size ({},' \
'{})'.format(
orig_h, orig_w)
raw_image = _input.copy()
## extend the coordinates to be larger than the original
h = int(np.round(orig_h + self.ms_control_point_spacing[0] * 1.5))
w = int(np.round(orig_w + self.ms_control_point_spacing[0] * 1.5))
assert np.round(h /self.ms_control_point_spacing[0]) >= self.spline_dgree + 1 and np.round(w / self.ms_control_point_spacing[
0]) >= self.spline_dgree + 1, 'please decrease the spacing, the number of control ' \
'points in each dimension ' \
'should be at least kx+1, current bspline order k={}, ' \
'but found only :{} and {} along each axis'.format(
self.spline_dgree, h / self.ms_control_point_spacing[0], w / self.ms_control_point_spacing[0])
## initialize the coarsest grid:
xmax, ymax = w // 2, h // 2
if self.debug:
print (xmax,ymax)
print ('self.ms_control_point_spacing[0]',self.ms_control_point_spacing[0])
x = np.arange(-xmax, xmax + 1, self.ms_control_point_spacing[0])
y = np.arange(-ymax, ymax + 1, self.ms_control_point_spacing[0])
knots_matrix = 1 + \
np.float32(np.random.uniform(-np.abs(self.magnitude), np.abs(self.magnitude), (len(y), len(x)))) ## initialize values in [1-magnitude, 1+magnitude]
if self.debug: print('initialize {} points'.format(knots_matrix.shape))
y_init = x
x_init = y
z_init = knots_matrix
## from coarse grid to fine grid
for spacing in self.ms_control_point_spacing[1:]:
interp_spline = RectBivariateSpline(y_init, x_init, z_init, s=self.spline_smoothness,
kx=self.spline_dgree, ky=self.spline_dgree)
if spacing > 1:
x2 = np.arange(-xmax, xmax + 1, spacing)
y2 = np.arange(-xmax, xmax + 1, spacing)
else:
## the finest resolution
x2 = np.arange(-xmax, xmax, spacing)
y2 = np.arange(-xmax, xmax, spacing)
z2 = interp_spline(y2, x2)
z_init = z2
x_init = x2
y_init = y2
total_bias_field = (z_init / (
1.0 * z_init.sum() + 1e-12)) * h * w ## should be close to an identity
offset_h = int((h - orig_h) // 2)
offset_w = int((w - orig_w) // 2)
total_bias_field=total_bias_field[offset_h:h-offset_h,offset_w:w-offset_w]
total_bias_field= | np.clip(total_bias_field,1-self.magnitude,1+self.magnitude) | numpy.clip |
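# Hedged standalone sketch (not from the original source) of the core idea in
# MyRandomPurtarbationV2: draw a coarse grid of knots around 1.0, fit a
# bicubic RectBivariateSpline, and evaluate it on the pixel grid to get a
# smooth multiplicative bias field. Grid size, spacing and magnitude are
# assumptions; it reuses the numpy/RectBivariateSpline imports at the top of
# this module.
def _demo_smooth_bias_field(size=128, spacing=32, magnitude=0.2, spline_degree=3):
    half = size // 2
    coarse = np.arange(-half, half + 1, spacing)  # 5 knots per axis >= spline_degree + 1
    knots = 1.0 + np.random.uniform(-magnitude, magnitude, (len(coarse), len(coarse)))
    spline = RectBivariateSpline(coarse, coarse, knots, kx=spline_degree, ky=spline_degree, s=3)
    fine = np.arange(-half, half, 1)
    bias_field = spline(fine, fine)  # shape (size, size)
    return np.clip(bias_field, 1.0 - magnitude, 1.0 + magnitude)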
import numpy as np
def getClosestFactors(n):
i = int(n ** 0.5)
while (n % i != 0):
i -= 1
return (i, int(n/i))
def getBoundary(x, r, n):
"""returns in the form [lower, upper)"""
lower = x - r
upper = x + r + 1
if lower < 0:
lower = 0
if upper > n:
upper = n
return (lower, upper)
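# Hedged examples (not from the original source) of getBoundary's clamping:
# a radius-3 window around x=2 in an array of length 10 is [0, 6), and around
# x=8 it is [5, 10) -- both ends are clipped to the valid index range.
assert getBoundary(2, 3, 10) == (0, 6)
assert getBoundary(8, 3, 10) == (5, 10)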
def getRandomSample(array, n):
"""returns in the form (x, y, array[x, y])"""
if n > array.size:
raise ValueError("Sample size must be smaller than number of elements in array")
else:
flat_indices = np.random.choice(array.size, size=n, replace=False)
idx, idy = np.unravel_index(flat_indices, array.shape)
sample = array[idx, idy]
return list(zip(idx, idy, sample))
def getNeighbours(array, randomSample, radius):
"""Get the neighbours of randomSample[:, 2] within a radius.
Border cases include -1 for missing neighbours."""
maxNeighbours = (2*radius + 1)**2 - 1
sampleSize = len(randomSample)
neighbours = | np.full((sampleSize, maxNeighbours), -1) | numpy.full |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of the
# Pyedra Project (https://github.com/milicolazo/Pyedra/).
# Copyright (c) 2020, <NAME>
# License: MIT
# Full Text: https://github.com/milicolazo/Pyedra/blob/master/LICENSE
# ============================================================================
# DOCS
# ============================================================================
"""Implementation of phase function for asteroids."""
# =============================================================================
# IMPORTS
# =============================================================================
import attr
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
import pandas as pd
import scipy
import scipy.interpolate
import scipy.optimize as optimization
from . import core, datasets
# ============================================================================
# CLASSES
# ============================================================================
@attr.s(frozen=True)
class HG1G2Plot(core.BasePlot):
"""Plots for HG1G2 fit."""
default_plot_kind = "curvefit"
def curvefit(
self,
df,
idc="id",
alphac="alpha",
magc="v",
ax=None,
cmap=None,
fit_kwargs=None,
data_kwargs=None,
):
"""Plot the phase function using the HG1G2 model.
Parameters
----------
df: ``pandas.DataFrame``
The dataframe that contains the values.
idc : ``str``, optional (default=id)
Column with the mpc number of the asteroids.
alphac : ``str``, optional (default=alpha)
Column with the phase angle of the asteroids.
magc : ``str``, optional (default=v)
Column with the magnitude. The default 'v' value refers
to the reduced magnitude in Johnson's V filter.
ax : ``matplotlib.pyplot.Axis``, (optional)
Matplotlib axis
cmap : ``None``, ``str`` or callable (optional)
Name of the color map to be used
(https://matplotlib.org/users/colormaps.html).
If None, the default colors of the matplotlib.pyplot.plot
function are used; if cmap is a callable, it is used as a
colormap generator.
fit_kwargs: ``dict`` or ``None`` (optional)
The parameters to send to the fit curve plot.
Only ``label`` and ``color`` can't be provided.
data_kwargs: ``dict`` or ``None`` (optional)
The parameters to send to the data plot.
Only ``label`` and ``color`` can't be provided.
Return
------
``matplotlib.pyplot.Axis`` :
The axis where the method draws.
"""
def fit_y(d, e, f):
y = d - 2.5 * np.log10(e * fi1 + f * fi2 + (1 - e - f) * fi3)
return y
if ax is None:
ax = plt.gca()
fig = ax.get_figure()
fig.set_size_inches(self.DEFAULT_FIGURE_SIZE)
ax.invert_yaxis()
ax.set_title("HG1G2 - Phase curves")
ax.set_xlabel("Phase angle")
ax.set_ylabel(magc.upper())
fit_kwargs = {} if fit_kwargs is None else fit_kwargs
fit_kwargs.setdefault("ls", "--")
fit_kwargs.setdefault("alpha", 0.5)
data_kwargs = {} if data_kwargs is None else data_kwargs
data_kwargs.setdefault("marker", "o")
data_kwargs.setdefault("ls", "None")
model_size = len(self.pdf.model_df)
if cmap is None:
colors = [None] * model_size
elif callable(cmap):
colors = cmap(np.linspace(0, 1, model_size))
else:
cmap = cm.get_cmap(cmap)
colors = cmap(np.linspace(0, 1, model_size))
for idx, m_row in self.pdf.iterrows():
row_id = int(m_row.id)
data = df[df[idc] == m_row.id]
fi1 = np.array([])
fi2 = np.array([])
fi3 = np.array([])
for alpha_b in data[alphac]:
p1 = self.pdf.metadata.y_interp1(alpha_b)
fi1 = np.append(fi1, p1)
p2 = self.pdf.metadata.y_interp2(alpha_b)
fi2 = np.append(fi2, p2)
p3 = self.pdf.metadata.y_interp3(alpha_b)
fi3 = np.append(fi3, p3)
v_fit = fit_y(m_row.H12, m_row.G1, m_row.G2)
line = ax.plot(
data[alphac],
v_fit,
label=f"Fit #{row_id}",
color=colors[idx],
**fit_kwargs,
)
# data part
ax.plot(
data[alphac],
data[magc],
color=line[0].get_color(),
label=f"Data #{row_id}",
**data_kwargs,
)
# reorder legend for two columns
handles, labels = ax.get_legend_handles_labels()
labels, handles = zip(
*sorted(zip(labels, handles), key=lambda t: t[0])
)
ax.legend(handles, labels, ncol=2, loc="best")
return ax
# ============================================================================
# FUNCTIONS
# ============================================================================
def _HG1G2_model(X, a, b, c):
x, y, z = X
return a * x + b * y + c * z
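# Hedged sketch (not part of the original module): _HG1G2_model is linear in
# (a, b, c), so it can be fitted with scipy.optimize.curve_fit by packing the
# three basis-function samples into a tuple. The synthetic data below is an
# assumption used only for illustration.
def _demo_hg1g2_curve_fit():
    rng = np.random.default_rng(0)
    x, y, z = rng.random(50), rng.random(50), rng.random(50)
    observed = _HG1G2_model((x, y, z), 7.0, 0.3, 0.2) + rng.normal(0.0, 0.01, 50)
    params, _cov = optimization.curve_fit(_HG1G2_model, (x, y, z), observed)
    return params  # should recover roughly (7.0, 0.3, 0.2)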
def HG1G2_fit(df, idc="id", alphac="alpha", magc="v"):
"""Fit (H-G1-G2) system to data from table.
HG1G2_fit calculates the H,G1 and G2 parameters of the phase
function following the procedure described in [5]_ .
Parameters
----------
df: ``pandas.DataFrame``
The dataframe that contains the values.
idc : ``str``, optional (default=id)
Column with the mpc number of the asteroids.
alphac : ``str``, optional (default=alpha)
Column with the phase angle of the asteroids.
magc : ``str``, optional (default=v)
Column with the magnitude. The default 'v' value refers
to the reduced magnitude in Johnson's V filter.
Returns
-------
``PyedraFitDataFrame``
The output contains eight columns: id (mpc number of
the asteroid), H (absolute magnitude returned by the fit),
H error (fit H parameter error), G1 (G1 parameter returned by
the fit), G1 error (fit G1 parameter error), G2 (G2 parameter
returned by the fit), G2 error (fit G2 parameter error), and R
(fit determination coefficient).
References
----------
.. [5] <NAME>., <NAME>., <NAME>., <NAME>.,
<NAME>.,<NAME>., <NAME>., 2010,
Icarus, 209, 542.
"""
lt = core.obs_counter(df, 3, idc, alphac)
if len(lt):
lt_str = " - ".join(str(idx) for idx in lt)
raise ValueError(
f"Some asteroids has less than 3 observations: {lt_str}"
)
noob = df.drop_duplicates(subset=idc, keep="first", inplace=False)
size = len(noob)
id_column = np.empty(size, dtype=int)
H_1_2_column = np.empty(size)
error_H_1_2_column = np.empty(size)
G_1_column = np.empty(size)
error_G_1_column = np.empty(size)
G_2_column = np.empty(size)
error_G_2_column = np.empty(size)
R_column = | np.empty(size) | numpy.empty |
import pytest
import pandas as pd
import numpy as np
from numpy import pi, sqrt
import matplotlib.pyplot as plt
import os
from numpy.testing import assert_almost_equal, assert_allclose
from rolldecayestimators.ikeda import Ikeda, IkedaR
from rolldecayestimators import lambdas
import rolldecayestimators
import pyscores2.test
import pyscores2.indata
import pyscores2.output
@pytest.fixture
def ikeda():
N=10
data = np.zeros(N)
w_hat=np.linspace(0.1,1, N)
B_W0_hat = pd.Series(data=data, index=w_hat)
fi_a = np.deg2rad(10)
beam=10
lpp=100
kg = 1
Cb=0.7
draught = 10
volume = Cb*lpp*draught*beam
V = 5
w = 0.2
A0 = 0.95
N_sections = 21
x_s = np.linspace(0, lpp, N_sections)
data = {
'B_s' : beam*np.ones(N_sections),
'T_s' : draught*np.ones(N_sections),
'C_s' : A0*np.ones(N_sections),
}
sections=pd.DataFrame(data=data, index=x_s)
i= Ikeda(V=V, w=w, B_W0_hat=B_W0_hat, fi_a=fi_a, beam=beam, lpp=lpp, kg=kg, volume=volume,
sections=sections, BKL=0, BKB=0)
i.R=2.0 # Set bilge radius manually
yield i
@pytest.fixture
def ikeda_faust():
# this is indata from Carl-Johans matlab example for ship: Faust.
ScaleF = 1 # %/29.565; # Scale Factor [-]
visc = 1.15 * 10 ** -6; # [m2/s], kinematic viscosity
Cb = 0.61; # Block coeff
L = 220 * ScaleF; # Length
vcg = 14.4 * ScaleF; # roll axis (vertical centre of gravity) [m]
vcg = 14.9 * ScaleF; # roll axis (vertical centre of gravity) [m]
B = 32.26 * ScaleF; # Breadth of hull [m]
d = 9.5 * ScaleF; # Draught of hull [m]
A = 0.93 * B * d; # Area of cross section of hull [m2]
bBK = 0.4 * ScaleF; # breadth of Bilge keel [m] !!(=height???)
R = 5 * ScaleF; # Bilge Radius
g = 9.81;
C_mid = 0.93;
OG = -1 * (vcg - d) # *0.8; # distance from roll axis to still water level
Ho = B / (2 * d); # half breadth to draft ratio
ra = 1025; # density of water
# locals
LBK = L / 4; # Approx
disp = L * B * d * Cb; # Displacement
# variables!!
T = 27.6 * sqrt(ScaleF);
wE = 2 * pi * 1 / T; # circular frequency
fi_a = 10 * pi / 180; # roll amplitude !!rad??
V = 0; # Speed
data_path_faust = os.path.join(rolldecayestimators.path, 'Bw0_faust.csv')
data_faust = pd.read_csv(data_path_faust, sep=';')
data_faust['w_hat'] = lambdas.omega_hat(b=B, g=g, omega0=data_faust['w_vec'])
data_faust['B_W0_hat'] = lambdas.B_hat_lambda(B=data_faust['b44_vec'], Disp=disp, b=B, g=g, rho=ra)
data_faust.set_index('w_hat', inplace=True)
B_W0_hat = data_faust['B_W0_hat']
N_sections = 21
x_s = np.linspace(0, L, N_sections)
data = {
'B_s': B * np.ones(N_sections),
'T_s': d * np.ones(N_sections),
'C_s': C_mid*np.ones(N_sections),
}
sections = pd.DataFrame(data=data, index=x_s) # Fake sections (not testing the eddy)
i = Ikeda(V=V, w=wE, B_W0_hat=B_W0_hat, fi_a=fi_a, beam=B, lpp=L, kg=vcg, volume=disp,
sections=sections, BKB=bBK, BKL=LBK)
i.R = R # Set bilge radius manually
yield i
def test_R(ikeda):
assert ikeda.R==2.0
def test_calculate_Ikeda(ikeda):
B_44=ikeda.calculate_B44()
def test_calculate_Ikeda_faust(ikeda_faust):
B_44=ikeda_faust.calculate_B44()
def test_Bw0(ikeda_faust):
Bw0=ikeda_faust.calculate_B_W0()
assert_allclose(Bw0, 5.541101e-05, rtol=0.001)
def test_bw44_V0(ikeda_faust):
ikeda_faust.V = 0 ## Ship speed
bw44 = ikeda_faust.calculate_B_W()
assert_allclose(bw44, 5.541101e-05, rtol=0.01)
def test_bilge_keel(ikeda_faust):
ikeda_faust.V = 0 ## Ship speed
T = 27.6
ikeda_faust.w = 2 * pi * 1 / T; # circular frequency
ikeda_faust.fi_a = 10 * pi / 180; # roll amplitude !!rad??
B_BK = ikeda_faust.calculate_B_BK()
assert_allclose(B_BK, ikeda_faust.B_hat(75841485), rtol=0.01)
@pytest.mark.skip('This one does not work yet')
def test_friction(ikeda_faust):
ikeda_faust.V = 0 ## Ship speed
T = 27.6
ikeda_faust.w = 2 * pi * 1 / T; # circular frequency
ikeda_faust.fi_a = 10 * pi / 180; # roll amplitude !!rad??
B44F = ikeda_faust.calculate_B_F()
assert_allclose(B44F, ikeda_faust.B_hat(5652721), rtol=0.001)
def test_hull_lift(ikeda_faust):
ikeda_faust.V = 10 ## Ship speed
B44L = ikeda_faust.calculate_B_L()
assert_allclose(B44L, ikeda_faust.B_hat(1.692159e+08), rtol=0.001)
@pytest.fixture
def indata():
indata=pyscores2.indata.Indata()
indata.open(indataPath=pyscores2.test.indata_path)
yield indata
@pytest.fixture
def output():
output=pyscores2.output.OutputFile(filePath=pyscores2.test.outdata_path)
yield output
def test_load_scoresII(indata, output):
V = 5
w = 0.2
fi_a = np.deg2rad(10)
ikeda = Ikeda.load_scoresII(indata=indata, output_file=output, V=V, w=w, fi_a=fi_a, BKB=0, BKL=0, kg=0)
ikeda.R = 2.0 # Set bilge radius manually
B_44_hat = ikeda.calculate_B44()
def test_calculate_R_b(indata, output):
V = 5
w = 0.2
fi_a = np.deg2rad(10)
ikeda = Ikeda.load_scoresII(indata=indata, output_file=output, V=V, w=w, fi_a=fi_a, BKB=0, BKL=0, kg=0)
R_b = ikeda.calculate_R_b()
def test_load_scoresII_scale(indata, output):
V = 5
w = 0.2
fi_a = np.deg2rad(10)
R=2.0
ikeda = Ikeda.load_scoresII(indata=indata, output_file=output, V=V, w=w, fi_a=fi_a, BKB=0, BKL=0, kg=0)
ikeda.R = R # Set bilge radius manually
scale_factor=50
V_m=V/np.sqrt(scale_factor)
w_m=w*np.sqrt(scale_factor)
ikeda_model = Ikeda.load_scoresII(indata=indata, output_file=output, V=V_m, w=w_m, fi_a=fi_a, BKB=0, BKL=0,
scale_factor=scale_factor, kg=0)
ikeda_model.R = R/scale_factor # Set bilge radius manually
assert_almost_equal(ikeda.calculate_B_W(), ikeda_model.calculate_B_W() )
assert_almost_equal(ikeda.calculate_B_BK(), ikeda_model.calculate_B_BK())
assert_almost_equal(ikeda.calculate_B_E(), ikeda_model.calculate_B_E() )
assert_almost_equal(ikeda.calculate_B_L(), ikeda_model.calculate_B_L() )
def test_load_scoresII_scale_V_variation(indata, output):
scale_factor = 68
N = 200
V = np.linspace(0, 15.5, N) * 1.852 / 3.6 / np.sqrt(scale_factor)
kg = 0.2735294117647059
w = 2.4755750032144674
## Load ScoresII results
phi_a_deg = 10
phi_a = | np.deg2rad(phi_a_deg) | numpy.deg2rad |
#!/usr/bin/python3
import sys
import os
import argparse
import numpy as np
def get_candidates(bedgraph, cutoff=10, p=False):
result=[]
with open(bedgraph) as f:
stack=[]
for l in f.readlines():
pos = l.strip().split('\t')
if float(pos[2]) >= cutoff:
stack.append(pos)
else:
if len(stack)>0:
if sum([float(x[2]) for x in stack])/len(stack) >= cutoff*2 and len(stack)>=40 and len(stack)<=500:
r=[stack[0][0], str(int(stack[0][1])+1), str(int(stack[-1][1])+1)]
if p:
print('\t'.join(r))
else:
result.append(r)
stack=[]
if not p:
return result
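# Hedged example (not from the original script): get_candidates expects a
# tab-separated (chrom, position, coverage) file and reports stretches of
# 40-500 consecutive positions whose mean coverage is at least twice the
# cutoff. The temporary file below is an assumption used only to illustrate
# the expected input format.
def _demo_get_candidates():
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.bedgraph', delete=False) as tmp:
        for pos in range(1, 51):
            tmp.write('chr1\t{}\t25\n'.format(pos))  # 50 positions with coverage 25
        tmp.write('chr1\t51\t0\n')  # a low-coverage position closes the stretch
        path = tmp.name
    return get_candidates(path, cutoff=10)  # [['chr1', '2', '51']]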
def get_candidates_from_file(candidate_file):
with open(candidate_file) as f:
candidates = [x.strip().split('\t') for x in f.readlines()]
return [[x[0],int(x[1]),int(x[2])] for x in candidates]
def find_srna(input1,input2,gff,cutoff=10):
candidates1=get_candidates(input1, cutoff=cutoff)
print('Finding {} candidates in + strand...'.format(len(candidates1)))
candidates2=get_candidates(input2, cutoff=cutoff)
print('Finding {} candidates in - strand...'.format(len(candidates2)))
g1=[]
g2=[]
with open(gff) as f:
for l in f.readlines():
if l.startswith('#'):
continue
items=l.strip().split('\t')
if items[6]=='+':
g1.append(items)
elif items[6]=='-':
g2.append(items)
def _intersect(g, candidates):
result=[]
for c in candidates:
for i in range(len(g)):
if i==0:
continue
left=int(g[i-1][4])+100 if g[i-1][2] in('gene','CDS') else int(g[i-1][4])
right=int(g[i][3])-60 if g[i][2] in('gene','CDS') else int(g[i][3])
if int(c[1]) >= left and int(c[2]) <= right:
result.append(c)
return result
r1 = _intersect(g1, candidates1)
print('Get {} sRNA in + strand...'.format(len(r1)))
r2 = _intersect(g2, candidates2)
print('Get {} sRNA in - strand...'.format(len(r2)))
return r1, r2
def find(args):
def _save(r, output_file):
rna = []
for i in r[0]:
i.append('+')
rna.append(i)
for i in r[1]:
i.append('-')
rna.append(i)
rna = sorted(rna,key=lambda x:x[1])
with open(output_file,'w') as f:
f.writelines(['\t'.join(x)+'\n' for x in rna])
if args.input_and_output_files is None:
r=find_srna(args.input1, args.input2, args.gff, int(args.cutoff))
_save(r, args.output)
else:
with open(args.input_and_output_files) as f:
for l in f.readlines():
if l.startswith('#') or l.strip()=='':
continue
k = l.strip()
input_1 = '{}.1.scale.bedgraph'.format(k)
input_2 = '{}.2.scale.bedgraph'.format(k)
output = '{}.o.scale.bedgraph'.format(k)
print('parsing input1: {}, input2: {}'.format(input_1, input_2))
r=find_srna(os.path.join(args.input_file_dir, input_1),
os.path.join(args.input_file_dir, input_2),
args.gff,
int(args.cutoff))
_save(r, os.path.join(args.output_file_dir, output))
def scale_and_merge(files_and_score, input_dir):
m=None
for fs in files_and_score:
a_path=os.path.join(input_dir, fs[0])
print(' reading {}'.format(a_path))
a=np.genfromtxt(a_path)[:,2]*1000000/int(fs[1])
if m is not None:
m=np.vstack((m,a))
else:
m=a
if m.ndim==1:
return m.tolist()
else:
return | np.mean(m,axis=0) | numpy.mean |
#!/usr/bin/env python
#
# Copyright (c) 2017-2018 Via Technology Ltd. All Rights Reserved.
# Consult your license regarding permissions and restrictions.
import unittest
import numpy as np
from os import environ as env
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from pru.trajectory_functions import *
class TestTrajectoryFunctions(unittest.TestCase):
def test_rad2nm(self):
assert_almost_equal(rad2nm(np.pi), 180.0 * 60.0)
assert_almost_equal(rad2nm(np.pi / 180.0), 60.0)
def test_calculate_delta_time(self):
start_time = np.datetime64('2017-08-01 11:15')
finish_time = | np.datetime64('2017-08-01 11:16') | numpy.datetime64 |
from __future__ import print_function, division, absolute_import, unicode_literals
from builtins import bytes, dict, object, range, map, input, str
from future.utils import itervalues, viewitems, iteritems, listvalues, listitems
import os.path
import numpy as np
from astropy import time
import pwkit.environments.casa.util as casautil
from rfpipe import fileLock
import pickle
import logging
logger = logging.getLogger(__name__)
try:
import vysmaw_reader
except ImportError:
pass
qa = casautil.tools.quanta()
def data_prep(st, segment, data, flagversion="latest", returnsoltime=False):
""" Applies calibration, flags, and subtracts time mean for data.
flagversion can be "latest" or "rtpipe".
Optionally prepares data with antenna flags, fixing out of order data,
calibration, downsampling, OTF rephasing...
"""
from rfpipe import calibration, flagging, util
if not np.any(data):
return data
# take pols of interest
takepol = [st.metadata.pols_orig.index(pol) for pol in st.pols]
logger.debug('Selecting pols {0} and chans {1}'.format(st.pols, st.chans))
# TODO: check on reusing 'data' to save memory
datap = np.nan_to_num(np.require(data, requirements='W').take(takepol, axis=3).take(st.chans, axis=2))
datap = prep_standard(st, segment, datap)
if not np.any(datap):
logger.info("All data zeros after prep_standard")
return datap
if st.gainfile is not None:
logger.info("Applying calibration with {0}".format(st.gainfile))
ret = calibration.apply_telcal(st, datap, savesols=st.prefs.savesols,
returnsoltime=returnsoltime)
if returnsoltime:
datap, soltime = ret
else:
datap = ret
if not np.any(datap):
logger.info("All data zeros after apply_telcal")
return datap
else:
logger.info("No gainfile found, so not applying calibration.")
# support backwards compatibility for reproducible flagging
logger.info("Flagging with version: {0}".format(flagversion))
if flagversion == "latest":
datap = flagging.flag_data(st, datap)
elif flagversion == "rtpipe":
datap = flagging.flag_data_rtpipe(st, datap)
zerofrac = 1-np.count_nonzero(datap)/datap.size
if zerofrac > 0.8:
logger.warning('Flagged {0:.1f}% of data. Zeroing all if greater than 80%.'.format(zerofrac*100))
return np.array([])
if st.prefs.timesub == 'mean':
logger.info('Subtracting mean visibility in time.')
datap = util.meantsub(datap, parallel=st.prefs.nthread > 1)
else:
logger.info('No visibility subtraction done.')
if (st.prefs.apply_chweights or st.prefs.apply_blweights) and st.readints > 3:
if st.prefs.apply_chweights:
# TODO: find better estimator. Currently loses sensitivity to FRB 121102 bursts.
chvar = np.std(np.abs(datap).mean(axis=1), axis=0)
chvar_norm = np.mean(1/chvar**2, axis=0)
if st.prefs.apply_blweights:
blvar = np.std(np.abs(datap).mean(axis=2), axis=0)
blvar_norm = np.mean(1/blvar**2, axis=0)
if st.prefs.apply_chweights:
logger.info('Reweighting data by channel variances')
datap = (datap/chvar[None, None, :, :])/chvar_norm[None, None, None, :]
if st.prefs.apply_blweights:
logger.info('Reweighting data by baseline variances')
datap = (datap/blvar[None, :, None, :])/blvar_norm[None, None, None, :]
if st.prefs.savenoise:
save_noise(st, segment, datap)
if returnsoltime:
return datap, soltime
else:
return datap
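# Hedged standalone sketch (not rfpipe API) of the channel-weighting step in
# data_prep above: estimate per-channel variability over time and divide the
# data by it, normalising by the mean inverse variance so the overall scale
# is preserved. The (ints, bl, chan, pol) shape follows the convention used
# in this module; the toy data is an assumption.
def _demo_channel_weighting(datap=None):
    if datap is None:
        datap = np.random.randn(16, 4, 8, 2) + 1j*np.random.randn(16, 4, 8, 2)
    chvar = np.std(np.abs(datap).mean(axis=1), axis=0)  # (chan, pol)
    chvar_norm = np.mean(1/chvar**2, axis=0)            # (pol,)
    return (datap/chvar[None, None, :, :])/chvar_norm[None, None, None, :]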
def read_segment(st, segment, cfile=None, timeout=10):
""" Read a segment of data.
cfile and timeout are specific to vys data.
cfile used as proxy for real-time environment when simulating data.
Returns data as defined in metadata (no downselection yet)
default timeout is multiple of read time in seconds to wait.
"""
# assumed read shape (st.readints, st.nbl, st.metadata.nchan_orig, st.npol)
logger.info("Reading segment {0} of datasetId {1}"
.format(segment, st.metadata.datasetId))
if st.metadata.datasource == 'sdm':
data_read = read_bdf_segment(st, segment)
elif st.metadata.datasource == 'vys':
data_read = read_vys_segment(st, segment, cfile=cfile, timeout=timeout)
elif st.metadata.datasource == 'sim':
simseg = segment if cfile else None
data_read = simulate_segment(st, segment=simseg)
elif st.metadata.datasource == 'vyssim':
data_read = read_vys_segment(st, segment, cfile=cfile, timeout=timeout,
returnsim=True)
else:
logger.error('Datasource {0} not recognized.'
.format(st.metadata.datasource))
# report bad values
if np.any(np.isnan(data_read)):
logger.warning("Read data has some NaNs")
if np.any(np.isinf(data_read)):
logger.warning("Read data has some Infs")
if np.any(np.abs(data_read) > 1e20):
logger.warning("Read data has values larger than 1e20")
if not np.any(data_read):
logger.info('Read data are all zeros for segment {0}.'.format(segment))
return np.array([])
else:
logger.info('Read data with zero fraction of {0:.3f} for segment {1}'
.format(1- | np.count_nonzero(data_read) | numpy.count_nonzero |
import scipy.integrate as integrate
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling import fitting, models
from scipy.interpolate import interp1d
import copy
##Catalog Functions:
def reduced_catalog(catalog, seed, p_0, del_catalog = False):
'''
Reduce the number of rows in a catalog by randomly choosing rows.
Args:
catalog (array): the catalog to be reduced.
seed (int): seed for the random number generator.
p_0 (float): probability of dropping each row (roughly 1 - p_0 of the rows are kept).
'''
sq1 = np.random.SeedSequence(seed)
rng = np.random.default_rng(sq1)
random_indexs = rng.choice(2, size=len(catalog), p=[p_0,1-p_0]).astype('bool')
catalog_reduced = catalog[random_indexs]
if del_catalog:
del catalog
return catalog_reduced
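# Hedged example (not from the original source): reduced_catalog keeps each
# row with probability (1 - p_0), so p_0 is effectively the drop fraction.
# The toy catalog is an assumption used only for illustration.
def _demo_reduced_catalog():
    toy_catalog = np.arange(1000)
    kept = reduced_catalog(toy_catalog, seed=42, p_0=0.3)
    return len(kept) / len(toy_catalog)  # roughly 0.7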
## Astrophysics Functions:
def look_dist(z,size,option, sigma_m = 0.308, sigma_k = 0.0, sigma_l = 0.692, H_0 = 67.8, c = 299792.458):
def definite_integral(f,lim_inf,lim_sup):
return integrate.quad(lambda x: f(x), lim_inf, lim_sup)[0]
def inv_E(z):
return (sigma_m*((1+z)**3.0) + sigma_k*((1+z)**2.0) + sigma_l)**(-0.5)
def com_dist_lof(z):
return (c/H_0) * definite_integral(inv_E, 0, z)
def com_dist_trans(z):
if sigma_k == 0.0:
return com_dist_lof(z)
elif sigma_k > 0.0:
return (c/H_0)*(sigma_k**(-0.5))*np.sinh((sigma_k**0.5)*com_dist_lof(z)/(c/H_0))
elif sigma_k < 0.0:
return (c/H_0)*(np.abs(sigma_k)**(-0.5))*np.sin((np.abs(sigma_k)**0.5)*com_dist_lof(z)/(c/H_0))
def ang_diam_dist(z):
return com_dist_trans(z)/(1+z)
D_A = ang_diam_dist(z)
if option == 'degree':
return np.degrees(size/D_A)
elif option == 'arcmin':
return 60*np.degrees(size/D_A)
elif option == 'arcsec':
return 3600* | np.degrees(size/D_A) | numpy.degrees |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# #########################################################################
# Copyright (c) 2016, UChicago Argonne, LLC. All rights reserved. #
# #
# Copyright 2016. UChicago Argonne, LLC. This software was produced #
# under U.S. Government contract DE-AC02-06CH11357 for Argonne National #
# Laboratory (ANL), which is operated by UChicago Argonne, LLC for the #
# U.S. Department of Energy. The U.S. Government has rights to use, #
# reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR #
# UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR #
# ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is #
# modified to produce derivative works, such modified software should #
# be clearly marked, so as not to confuse it with the version available #
# from ANL. #
# #
# Additionally, redistribution and use in source and binary forms, with #
# or without modification, are permitted provided that the following #
# conditions are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of UChicago Argonne, LLC, Argonne National #
# Laboratory, ANL, the U.S. Government, nor the names of its #
# contributors may be used to endorse or promote products derived #
# from this software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago #
# Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, #
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, #
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT #
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
# #########################################################################
"""Defines an object for simulating X-ray phantoms.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright (c) 2016, UChicago Argonne, LLC."
__docformat__ = 'restructuredtext en'
__all__ = [
'Phantom',
'save_phantom',
'load_phantom',
'pickle_phantom',
'unpickle_phantom',
]
from copy import deepcopy
import itertools
import logging
import pickle
import warnings
import numpy as np
from scipy.spatial import Delaunay
from xdesign.constants import PI
from xdesign.geometry import *
from xdesign.material import *
logger = logging.getLogger(__name__)
def save_phantom(phantom, filename):
"""Save phantom to file as a python repr."""
f = open(filename, 'w')
f.write("{}".format(repr(phantom)))
f.close()
logger.info('Save Phantom to {}'.format(filename))
def load_phantom(filename):
"""Load phantom from file containing a python repr."""
f = open(filename, 'r')
raw_phantom = f.read()
f.close()
logger.info('Load Phantom from {}'.format(filename))
return eval(raw_phantom)
def pickle_phantom(phantom, filename):
"""Save phantom to file as a python pickle."""
f = open(filename, 'wb')
pickle.dump(phantom, f)
def unpickle_phantom(filename):
"""Load phantom from file as a python pickle."""
f = open(filename, 'rb')
return pickle.load(f)
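# Hedged round-trip sketch (not part of the xdesign API): the repr-based and
# pickle-based helpers above can be exercised with an empty container
# Phantom. The file names and the use of the current directory are
# assumptions.
def _demo_phantom_io(directory='.'):
    import os.path
    phantom = Phantom()  # Phantom is defined below in this module
    repr_path = os.path.join(directory, 'phantom.txt')
    pickle_path = os.path.join(directory, 'phantom.p')
    save_phantom(phantom, repr_path)
    pickle_phantom(phantom, pickle_path)
    return load_phantom(repr_path), unpickle_phantom(pickle_path)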
class Phantom(object):
"""An object for the purpose of evaluating X-ray imaging methods.
Phantoms may be hierarchical structures with children that are contained
within and/or a parent which contains them. They have two parts: a geometry
and properties. The geometry defines the spatial extent over which the
properties are valid. Properties are parameters which a :class:`.Probe`
uses to measure the Phantom.
All Phantoms must fit within the geometry of their ancestors. Phantoms
whose geometry is None act as containers.
Attributes
----------
geometry : :class:`.Entity`
The spatial boundary of the Phantom; may be None.
children :
A list of Phantoms contained in this Phantom.
parent :
The Phantom containing this Phantom.
material :
The mass_attenuation of the phantom.
population :
The number of descendants of this phantom.
"""
# OPERATOR OVERLOADS
def __init__(self, geometry=None, children=[], material=None):
self._geometry = geometry
self.population = 0
self.parent = None
self.material = material
self.children = list()
for child in children:
self.append(child)
def __add__(self, other):
"""Combine two Phantoms."""
parent = Phantom()
parent.append(self)
parent.append(other)
return parent
def __str__(self):
return "{}()".format(type(self).__name__)
def __repr__(self):
return "Phantom(geometry={}, children={}, material={})".format(
repr(self.geometry), repr(self.children), repr(self.material)
)
# PROPERTIES
@property
def is_leaf(self):
"""Return whether the Phantom is a leaf node."""
return not self.children
@property
def geometry(self):
"""Return the geometry of the Phantom."""
return self._geometry
@property
def center(self):
"""Return the centroid of the Phantom."""
if self.geometry is None:
return None
return self.geometry.center
@property
def radius(self):
"""Return the radius of the smallest boundary sphere."""
if self.geometry is None:
return None
return self.geometry.radius
@property
def volume(self):
"""Return the volume of the Phantom"""
if self.geometry is None:
return None
if hasattr(self.geometry, 'volume'):
return self.geometry.volume
else:
return self.geometry.area
@property
def density(self):
'''Return the geometric density of the Phantom.'''
if self.geometry is None:
return None
child_volume = 0
for child in self.children:
child_volume += child.volume
return child_volume / self.volume
# GEOMETRIC TRANSFORMATIONS
def translate(self, vector):
"""Translate the Phantom."""
for child in self.children:
child.translate(vector)
if self._geometry is not None:
self._geometry.translate(vector)
def rotate(self, theta, point=Point([0.5, 0.5]), axis=None):
"""Rotate around an axis that passes through the given point."""
for child in self.children:
child.rotate(theta, point, axis)
if self._geometry is not None:
self.geometry.rotate(theta, point, axis)
# TREE MANIPULATION
def append(self, child):
"""Add a child to the Phantom.
Only add the child if it is contained within the geometry of its
ancestors.
"""
boundary = self.geometry
parent = self.parent
while boundary is None and parent is not None:
boundary = parent.geometry
parent = parent.parent
def contains_children(boundary, child):
for grandchild in child.children:
if (
grandchild.geometry is None
and not contains_children(boundary, grandchild)
):
return False
if not boundary.contains(grandchild.geometry):
return False
return True
if (
boundary is None
or (child.geometry is None and contains_children(boundary, child))
or boundary.contains(child.geometry)
):
child.parent = self
self.children.append(child)
self.population += child.population + 1
return True
else:
warnings.warn(
"{} not appended; it is not a subset.".format(repr(child)),
RuntimeWarning
)
return False
def pop(self, i=-1):
"""Pop the i-th child from the Phantom."""
self.children[i].parent = None
self.population -= self.children[i].population + 1
return self.children.pop(i)
def sprinkle(
self,
counts,
radius,
gap=0,
region=None,
material=None,
max_density=1,
shape=Circle
):
"""Sprinkle a number of :class:`.Circle` shaped Phantoms around the
Phantom. Uses various termination criteria to determine when to stop
trying to add circles.
Parameters
----------
counts : int
The number of circles to be added.
radius : scalar or list
The radius of the circles to be added.
gap : float, optional
The minimum distance between circle boundaries.
A negative value allows overlapping edges.
region : :class:`.Entity`, optional
The new circles are confined to this shape. None if the circles are
allowed anywhere.
max_density : scalar, optional
Stops adding circles when the geometric density of the phantom
reaches this ratio.
material : scalar, optional
A mass attenuation parameter passed to the circles.
Returns
----------
counts : scalar
The number of circles successfully added.
"""
if counts < 0:
raise ValueError('Cannot add negative number of circles.')
if not isinstance(radius, list):
radius = [radius, radius]
if len(radius) != 2 or radius[0] < radius[1] or radius[1] <= 0:
raise ValueError(
'Radius range must be larger than zero and largest' +
'radius must be listed first.'
)
if gap < 0:
# Support for partially overlapping phantoms is not yet supported
# in the aquisition module
raise NotImplementedError
if max_density < 0:
raise ValueError("Cannot stop at negative density.")
collision = False
if radius[0] + gap < 0: # prevents circles with negative radius
collision = True
kTERM_CRIT = 500 # tries to append a new circle before quitting
n_tries = 0 # attempts to append a new circle
n_added = 0 # circles successfully added
if region is None:
if self.geometry is None:
return 0
region = self.geometry
while (
n_tries < kTERM_CRIT and n_added < counts
and self.density < max_density
):
center = _random_point(region, margin=radius[0])
if collision:
self.append(
Phantom(
geometry=Circle(center, radius[0]), material=material
)
)
n_added += 1
continue
circle = shape(center, radius=radius[0] + gap)
overlap = _collision(self, circle)
if np.max(overlap) <= radius[0] - radius[1]:
candidate = shape(center, radius=radius[0] - | np.max(overlap) | numpy.max |