prompt (string, lengths 15–655k) | completion (string, lengths 3–32.4k) | api (string, lengths 8–52)
---|---|---|
"""Preprocess the data for model."""
import os
import inspect
import csv
import numpy as np
from PIL import Image
import skvideo.io
import scipy
import tensorflow as tf
import pandas as pd
from .vgg16 import Vgg16
from .c3d import c3d
class VideoVGGExtractor(object):
"""Select uniformly distributed frames and extract its VGG feature."""
def __init__(self, frame_num, sess):
"""Load VGG model.
Args:
frame_num: number of frames per video.
sess: tf.Session()
"""
self.frame_num = frame_num
self.inputs = tf.placeholder(tf.float32, [self.frame_num, 224, 224, 3])
self.vgg16 = Vgg16()
self.vgg16.build(self.inputs)
self.sess = sess
def _select_frames(self, path):
"""Select representative frames for video.
Ignore some frames both at begin and end of video.
Args:
path: Path of video.
Returns:
frames: list of frames.
"""
frames = list()
# video_info = skvideo.io.ffprobe(path)
video_data = skvideo.io.vread(path)
total_frames = video_data.shape[0]
# Ignore some frames at the beginning and end.
for i in np.linspace(0, total_frames, self.frame_num + 2)[1:self.frame_num + 1]:
frame_data = video_data[int(i)]
img = Image.fromarray(frame_data)
img = img.resize((224, 224), Image.BILINEAR)
frame_data =
|
np.array(img)
|
numpy.array
|
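The prompt above stops inside `_select_frames`, just before the completion shown in the next cell. A minimal standalone sketch of the same uniform-sampling idea, assuming only NumPy and Pillow (the dummy clip and helper name are illustrative, not part of the original module):

```python
import numpy as np
from PIL import Image

def select_frames(video_data, frame_num):
    """Pick frame_num uniformly spaced frames, skipping the very start and end of the clip."""
    total_frames = video_data.shape[0]
    picks = np.linspace(0, total_frames, frame_num + 2)[1:frame_num + 1]
    frames = []
    for i in picks:
        img = Image.fromarray(video_data[int(i)])
        img = img.resize((224, 224), Image.BILINEAR)
        frames.append(np.array(img))
    return np.stack(frames)  # shape (frame_num, 224, 224, 3), ready for a VGG-style input placeholder

# toy clip: 100 random RGB frames of size 64x64
dummy = (np.random.rand(100, 64, 64, 3) * 255).astype(np.uint8)
print(select_frames(dummy, 8).shape)  # (8, 224, 224, 3)
```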
# In[1]:
# !pwd
import numpy as np
import time
import matplotlib.pyplot as plt
from IPython.display import display, Markdown
import cat_utils
def forward_progagate_2_layers(X, Y, W1, b1, W2, b2):
"""
X: [n_x, m] train data set
Y: [n_y, m] label data
W1:[n_1, n_x] weight for the first layer
b1:[n_1, 1] bias for the first layer
W2:[n_y, n_1] weight for the second layer
b2:[n_y, 1] bias for the second layer
returns: A1, A2
"""
Z1 = np.dot(W1, X) + b1 # [n_1, m] <= [n_1, n_x] . [n_x, m]
A1 = np.tanh(Z1) # [n_1, m]
Z2 = np.dot(W2, A1) + b2 # [n_y, m] <= [n_y, n_1] . [n_1, m]
A2 = 1.0 / (1.0 + np.exp(-Z2)) # [n_y, m]
return A1, A2
def backward_propagate_2_layers(X, Y, A1, A2, W1, b1, W2, b2):
"""
X: [n_x, m] train data set
Y: [n_y, m] label data
A1: [n_1, m] first layer output
A2: [1, m] second layer output
W1: [n_1, n_x] weight for the first layer
b1: [n_1, 1] bias for the first layer
W2: [n_y, n_1] weight for the second layer
b2: [n_y, 1] bias for the second layer
returns: dW1, db1, dW2, db2
"""
n_x, m = X.shape
dZ2 = A2 - Y # [n_y, m]
dW2 = np.dot(dZ2, A1.T) / m # [n_y, n_1] <= [n_y, m] . [n_1, m].T
db2 = np.sum(dZ2, axis=1, keepdims=True) / m # [n_y, 1] <= [n_y, m]
dgZ1 = 1 - np.power(A1, 2) # [n_1, m]
dZ1 = np.multiply(np.dot(W2.T, dZ2), dgZ1) # [n_1, m] <= [n_y, n_1].T . [n_y, m]
dW1 = np.dot(dZ1, X.T) / m # [n_1, n_x] <= [n_1, m] . [n_x, m].T
db1 = np.sum(dZ1, axis=1, keepdims=True) / m # [n1, 1] <= [n_1, m]
return dW1, db1, dW2, db2
def neural_network_2_layers(X, Y, n_1, num_iterations=10, learning_rate=0.01,
early_stop_cost=0., msg_interval=1, print_interval=100):
"""
X: [n_x, m] train data set
Y: [n_y, m] n_y=1 in this case
n_1: first hidden layer dimension
num_iterations: number iterations
learning_rate: learning rate alpha
early_stop_cost: early stop cost; if the cost is smaller than this number, training stops
returns: W1, b1, W2, b2, A2, msgs, train_predict, train_accuracy, test_predict, test_accuracy (A2 is the output)
"""
n_x, m = X.shape
n_y = 1
W1 = np.random.randn(n_1, n_x) * 0.01
b1 = np.zeros([n_1, 1])
W2 = np.random.randn(n_y, n_1) * 0.01
b2 = np.zeros([n_y, 1])
print("init weights", W1[0][0], W2[0][0])
msgs = []
start = time.time()
for i in range(num_iterations):
A1, A2 = forward_progagate_2_layers(X, Y, W1, b1, W2, b2)
dW1, db1, dW2, db2 = backward_propagate_2_layers(X, Y, A1, A2, W1, b1, W2, b2)
W1 = W1 - learning_rate * dW1
b1 = b1 - learning_rate * db1
W2 = W2 - learning_rate * dW2
b2 = b2 - learning_rate * db2
cost = np.sum(-(np.dot(Y, np.log(A2.T)) + np.dot(1-Y, np.log(1-A2.T)))) / m
cost = np.squeeze(cost)
if cost < early_stop_cost:
break
if i % msg_interval == 0:
train_predict, train_accuracy, test_predict, test_accuracy = cat_utils.accuracy_2_layers(
W1, b1, W2, b2, test_set_x, test_set_y, A2, train_set_y)
msg = {
"iterations": i,
"dimensions": n_1,
"learning_rate": learning_rate,
"cost": cost,
"train_accuracy": train_accuracy,
"test_accuracy": test_accuracy,
"training_time": time.time() - start}
msgs.append(msg)
if i % print_interval == 0:
print(i, msg)
if 'debug' in globals() and debug:
print(i, " derivitive", dW1[0][0], dW2[0][0], db1[0][0], db2[0][0])
print(i, " weights", W1[0][0], W2[0][0])
print(i, msgs[-1])
return W1, b1, W2, b2, A2, msgs, train_predict, train_accuracy, test_predict, test_accuracy
train_set_x, train_set_x_orig, train_set_y, test_set_x, test_set_x_orig, test_set_y, classes = cat_utils.load_normalized_dataset()
print(train_set_x.shape)
def train(params):
n_1 = params["n_1"]
num_iterations = params["num_iterations"]
learning_rate = params["learning_rate"]
early_stop_cost = params["early_stop_cost"]
np.random.seed(1) # set seed, so that the result is comparable
W1, b1, W2, b2, A2, msgs, train_predict, train_accuracy, test_predict, test_accuracy = neural_network_2_layers(
train_set_x, train_set_y, n_1, num_iterations, learning_rate, early_stop_cost)
return W1, b1, W2, b2, msgs, train_predict, train_accuracy, test_predict, test_accuracy
markdown_rows = ["|iterations|n_1|learning_rate|stop_cost|train_accuracy|test_accuracy|training_time|",
"|:--|:------------|:--------|:---------|:------------|:-------------|:------------|"]
debug=False
param = {"n_1": 100, "num_iterations": 2001, "learning_rate": 0.005, "early_stop_cost": 0.05}
W1, b1, W2, b2, msgs, train_predict, train_accuracy, test_predict, test_accuracy = train(param)
costs = [msg["cost"] for msg in msgs]
plt.ioff()
plt.plot(
|
np.squeeze(costs)
|
numpy.squeeze
|
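As a quick sanity check of the two-layer forward pass and the cross-entropy cost used in `neural_network_2_layers`, here is a self-contained sketch on random data (shapes follow the docstrings above; the numbers are made up):

```python
import numpy as np

np.random.seed(0)
n_x, n_1, n_y, m = 4, 3, 1, 5                 # input dim, hidden dim, output dim, examples
X = np.random.randn(n_x, m)
Y = (np.random.rand(n_y, m) > 0.5).astype(float)

W1 = np.random.randn(n_1, n_x) * 0.01
b1 = np.zeros((n_1, 1))
W2 = np.random.randn(n_y, n_1) * 0.01
b2 = np.zeros((n_y, 1))

A1 = np.tanh(W1 @ X + b1)                     # [n_1, m] hidden activations
A2 = 1.0 / (1.0 + np.exp(-(W2 @ A1 + b2)))    # [n_y, m] sigmoid output

# binary cross-entropy, the same formula as in neural_network_2_layers
cost = -np.sum(Y * np.log(A2) + (1 - Y) * np.log(1 - A2)) / m
print(round(float(cost), 4))
```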
import logging
import random
import time
import matplotlib.pyplot as plt
import numpy as np
from shapely import geometry
logger = logging.getLogger("polygone_2d_helper")
# logger.setLevel("DEBUG")
# logger.setLevel("INFO")
if __name__ == "__main__":
logging.basicConfig()
np.set_printoptions(precision=6, suppress=True)
# print(logger.getEffectiveLevel())
class Fcalculator:
def __init__(self, points, epsilon=np.array(0.0001)):
"""points is list of tupel with x,y like [(x1,y1), (x2,y2), (x3,y3),...]"""
self.epsilon = epsilon
self.points = points
def q_of_phi(self, phi):
a_ = np.cos(phi, dtype=np.float128)
b_ = np.sin(phi, dtype=np.float128) - 1.0
q = np.array([a_, b_])
logger.debug("q^2: {}".format(np.abs(q[0] ** 2 + q[1] ** 2)))
return q
def F_of_qs(self, q, p0_, p1_, c=0.0):
p0 = np.array(p0_)
p1 = np.array(p1_)
c = np.array(c)
q_cross = np.array([-q[1], q[0]])
p0p1 = p1 - p0
scale = 1.0 / np.abs(np.abs(q[0] ** 2 + q[1] ** 2))
if scale >= 1000.0 / self.epsilon:
logger.debug("Scale == NONE")
polygon = geometry.Polygon(self.points)
area = np.array(polygon.area, dtype=np.complex)
logger.debug("area: {}".format(area))
s_value = area / len(self.points)
elif np.abs(np.dot(p0p1, q)) >= 0.0001:
f_p0 = -1.0 * np.exp(1.0j * (np.dot(p0, q) + c))
f_p1 = -1.0 * np.exp(1.0j * (np.dot(p1, q) + c))
s_value = scale * np.dot(p0p1, q_cross) * (f_p1 - f_p0) / np.dot(p0p1, q)
else:
logger.debug("np.dot(p0p1, q) > epsilon")
s_value = scale * np.dot(p0p1, q_cross) * -1.0j * np.exp(1.0j * (np.dot(p0, q) + c))
logger.debug("s_value: {:1.6f}".format(s_value))
return s_value
def F_of_qs_arr(self, q, p0_, p1_, c=0.0):
p0 = np.array(p0_)
p1 = np.array(p1_)
c = np.array(c)
q_cross = np.array([-q[1], q[0]])
p0p1 = p1 - p0
# scale = 1.0 / np.abs(np.dot(q, q))
scale = 1.0 / np.abs(q[0] ** 2 + q[1] ** 2)
f_p0 = -1.0 * np.exp(1.0j * (np.dot(p0, q) + c))
f_p1 = -1.0 * np.exp(1.0j * (np.dot(p1, q) + c))
case1_array = scale * np.dot(p0p1, q_cross) * (f_p1 - f_p0) / np.dot(p0p1, q)
case2_array = scale * np.dot(p0p1, q_cross) * -1.0j * np.exp(1.0j * (np.dot(p0, q) + c))
# print("case1_array.shape", case1_array.shape)
res_array = np.where(np.abs(np.dot(p0p1, q)) >= 0.0001, case1_array, case2_array)
if np.max(scale) >= 1000.0 / self.epsilon:
logger.debug("Scale == NONE")
polygon = geometry.Polygon(self.points)
area = np.array(polygon.area, dtype=np.complex)
logger.debug("area: {}".format(area))
s_value = area / len(self.points)
case3_array = np.ones_like(q[0]) * s_value
res_array = np.where(scale >= 1000.0 / self.epsilon, case3_array, res_array)
return res_array
def F_of_phi(self, phi, c=0.0):
logger.debug("###########################################")
logger.info("phi: {}".format(phi))
sum_res = np.zeros_like(phi, dtype=np.complex256)
q = self.q_of_phi(phi)
for index in range(len(self.points)):
logger.debug("index: {}".format(index))
p0 = self.points[index - 1]
p1 = self.points[index]
logger.debug("p0: {}; p1: {}".format(p0, p1))
sum_res += self.F_of_qs_arr(q, p0, p1, c=c)
logger.debug("sum_res {}".format(sum_res))
final_res = sum_res
logger.debug("sum_res.dtype: {}".format(sum_res.dtype))
logger.info("final value: {}".format(final_res))
return final_res
def tuples_to_array(t):
"""converts a list of point-tuple into np.ndarray with shape (?,2)"""
assert type(t) is list
assert len(t) != 0
length = len(t)
a = np.empty((length, 2))
for i_, tuple_ in enumerate(t):
a[i_] = np.array([tuple_[0], tuple_[1]])
return a
def array_to_tuples(a):
"""converts a numpy array (shape (?,2) )into a list where each element is a point-tuple"""
assert type(a) == np.ndarray
t = []
for i_ in range(a.shape[0]):
t.append(tuple((a[i_, 0], a[i_, 1])))
return t
def polygon_to_tuples(polygon):
"""point coordinates as tuple-list of a shapley.geometry.Polygon"""
return [x for x in geometry.mapping(polygon)["coordinates"][0]]
def get_spin(point_list):
"""sums all angles of a point_list/array (simple linear ring).
If positive the direction is counter-clockwise and mathematical positive"""
if type(point_list) == list:
arr = tuples_to_array(point_list)
else:
arr = point_list
direction = 0.0
# print(point_list)
for index in range(len(point_list)):
p0 = np.array(list(point_list[index - 2]))
p1 = np.array(list(point_list[index - 1]))
p2 = np.array(list(point_list[index]))
s1 = p1 - p0
s1_norm = s1 / np.sqrt(np.dot(s1, s1))
s2 = p2 - p1
s2_norm = s2 / np.sqrt(np.dot(s2, s2))
s1_bar = (-s1_norm[1], s1_norm[0])
direction += np.dot(s1_bar, s2_norm)
# print(direction)
return direction
def angle_between(p1, p2):
ang1 = -np.arctan2(*p1[::-1])
ang2 =
|
np.arctan2(*p2[::-1])
|
numpy.arctan2
|
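`get_spin` decides orientation by summing the cross products of successive edge directions. A small standalone sketch that reproduces the rule on a unit square and compares it with shapely's `is_ccw` (it does not use the module above):

```python
import numpy as np
from shapely import geometry

square_ccw = [(0, 0), (1, 0), (1, 1), (0, 1)]
square_cw = square_ccw[::-1]

def spin(points):
    """Sum of cross products of successive edge directions; > 0 means counter-clockwise."""
    total = 0.0
    for i in range(len(points)):
        p0 = np.array(points[i - 2])
        p1 = np.array(points[i - 1])
        p2 = np.array(points[i])
        s1 = (p1 - p0) / np.linalg.norm(p1 - p0)
        s2 = (p2 - p1) / np.linalg.norm(p2 - p1)
        total += -s1[1] * s2[0] + s1[0] * s2[1]   # dot((-s1y, s1x), s2), as in get_spin
    return total

print(spin(square_ccw) > 0, geometry.LinearRing(square_ccw).is_ccw)   # True True
print(spin(square_cw) > 0, geometry.LinearRing(square_cw).is_ccw)     # False False
```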
"""
Item-based k-NN collaborative filtering.
"""
import pathlib
import logging
import pandas as pd
import numpy as np
import scipy.sparse as sps
import scipy.sparse.linalg as spla
from numba import njit, prange
from lenskit import util, matrix
from . import Predictor
_logger = logging.getLogger(__name__)
@njit(nogil=True)
def _predict_weighted_average(model, nitems, nrange, ratings, targets):
min_nbrs, max_nbrs = nrange
scores = np.full(nitems, np.nan, dtype=np.float_)
for i in prange(targets.shape[0]):
iidx = targets[i]
rptr = model.rowptrs[iidx]
rend = model.rowptrs[iidx + 1]
num = 0
denom = 0
nnbrs = 0
for j in range(rptr, rend):
nidx = model.colinds[j]
if np.isnan(ratings[nidx]):
continue
nnbrs = nnbrs + 1
num = num + ratings[nidx] * model.values[j]
denom = denom + np.abs(model.values[j])
if max_nbrs > 0 and nnbrs >= max_nbrs:
break
if nnbrs < min_nbrs:
continue
scores[iidx] = num / denom
return scores
@njit(nogil=True)
def _predict_sum(model, nitems, nrange, ratings, targets):
min_nbrs, max_nbrs = nrange
scores = np.full(nitems, np.nan, dtype=np.float_)
for i in prange(targets.shape[0]):
iidx = targets[i]
rptr = model.rowptrs[iidx]
rend = model.rowptrs[iidx + 1]
score = 0
nnbrs = 0
for j in range(rptr, rend):
nidx = model.colinds[j]
if np.isnan(ratings[nidx]):
continue
nnbrs = nnbrs + 1
score = score + model.values[j]
if max_nbrs > 0 and nnbrs >= max_nbrs:
break
if nnbrs < min_nbrs:
continue
scores[iidx] = score
return scores
_predictors = {
'weighted-average': _predict_weighted_average,
'sum': _predict_sum
}
class ItemItem(Predictor):
"""
Item-item nearest-neighbor collaborative filtering with ratings. This item-item implementation
is not terribly configurable; it hard-codes design decisions found to work well in the previous
Java-based LensKit code.
Attributes:
item_index_(pandas.Index): the index of item IDs.
item_means_(numpy.ndarray): the mean rating for each known item.
item_counts_(numpy.ndarray): the number of saved neighbors for each item.
sim_matrix_(matrix.CSR): the similarity matrix.
user_index_(pandas.Index): the index of known user IDs for the rating matrix.
rating_matrix_(matrix.CSR): the user-item rating matrix for looking up users' ratings.
"""
def __init__(self, nnbrs, min_nbrs=1, min_sim=1.0e-6, save_nbrs=None,
center=True, aggregate='weighted-average'):
"""
Args:
nnbrs(int):
the maximum number of neighbors for scoring each item (``None`` for unlimited)
min_nbrs(int): the minimum number of neighbors for scoring each item
min_sim(double): minimum similarity threshold for considering a neighbor
save_nbrs(double):
the number of neighbors to save per item in the trained model
(``None`` for unlimited)
center(bool):
whether to normalize (mean-center) rating vectors. Turn this off when working
with unary data and other data types that don't respond well to centering.
aggregate:
the type of aggregation to do. Can be ``weighted-average`` or ``sum``.
"""
self.nnbrs = nnbrs
if self.nnbrs is not None and self.nnbrs < 1:
self.nnbrs = -1
self.min_nbrs = min_nbrs
if self.min_nbrs is not None and self.min_nbrs < 1:
self.min_nbrs = 1
self.min_sim = min_sim
self.save_nbrs = save_nbrs
self.center = center
self.aggregate = aggregate
try:
self._predict_agg = _predictors[aggregate]
except KeyError:
raise ValueError('unknown aggregator {}'.format(aggregate))
def fit(self, ratings):
"""
Train a model.
The model-training process depends on ``save_nbrs`` and ``min_sim``, but *not* on other
algorithm parameters.
Args:
ratings(pandas.DataFrame):
(user,item,rating) data for computing item similarities.
"""
# Training proceeds in 2 steps:
# 1. Normalize item vectors to be mean-centered and unit-normalized
# 2. Compute similarities with pairwise dot products
self._timer = util.Stopwatch()
init_rmat, users, items = matrix.sparse_ratings(ratings)
n_items = len(items)
_logger.info('[%s] made sparse matrix for %d items (%d ratings from %d users)',
self._timer, len(items), init_rmat.nnz, len(users))
rmat, item_means = self._mean_center(ratings, init_rmat, items)
rmat = self._normalize(rmat)
_logger.info('[%s] computing similarity matrix', self._timer)
smat = self._compute_similarities(rmat)
_logger.info('[%s] got neighborhoods for %d of %d items',
self._timer, np.sum(np.diff(smat.rowptrs) > 0), n_items)
_logger.info('[%s] computed %d neighbor pairs', self._timer, smat.nnz)
self.item_index_ = items
self.item_means_ = item_means
self.item_counts_ = np.diff(smat.rowptrs)
self.sim_matrix_ = smat
self.user_index_ = users
self.rating_matrix_ = init_rmat
return self
def _mean_center(self, ratings, rmat, items):
if not self.center:
return rmat, None
item_means = ratings.groupby('item').rating.mean()
item_means = item_means.reindex(items).values
mcvals = rmat.values - item_means[rmat.colinds]
nmat = matrix.CSR(rmat.nrows, rmat.ncols, rmat.nnz,
rmat.rowptrs.copy(), rmat.colinds.copy(), mcvals)
_logger.info('[%s] computed means for %d items', self._timer, len(item_means))
return nmat, item_means
def _normalize(self, rmat):
rmat = matrix.csr_to_scipy(rmat)
# compute column norms
norms = spla.norm(rmat, 2, axis=0)
# and multiply by a diagonal to normalize columns
recip_norms = norms.copy()
is_nz = recip_norms > 0
recip_norms[is_nz] = np.reciprocal(recip_norms[is_nz])
norm_mat = rmat @ sps.diags(recip_norms)
assert norm_mat.shape[1] == rmat.shape[1]
# and reset NaN
norm_mat.data[np.isnan(norm_mat.data)] = 0
_logger.info('[%s] normalized rating matrix columns', self._timer)
return matrix.csr_from_scipy(norm_mat, False)
def _compute_similarities(self, rmat):
mkl = matrix.mkl_ops()
if mkl is None:
return self._scipy_similarities(rmat)
else:
return self._mkl_similarities(mkl, rmat)
def _scipy_similarities(self, rmat):
nitems = rmat.ncols
sp_rmat = matrix.csr_to_scipy(rmat)
_logger.info('[%s] multiplying matrix with scipy', self._timer)
smat = sp_rmat.T @ sp_rmat
smat = smat.tocoo()
rows, cols, vals = smat.row, smat.col, smat.data
rows = rows[:smat.nnz]
cols = cols[:smat.nnz]
vals = vals[:smat.nnz]
rows, cols, vals = self._filter_similarities(rows, cols, vals)
csr = self._select_similarities(nitems, rows, cols, vals)
return csr
def _mkl_similarities(self, mkl, rmat):
nitems = rmat.ncols
assert rmat.values is not None
_logger.info('[%s] multiplying matrix with MKL', self._timer)
smat = mkl.csr_syrk(rmat)
rows = matrix.csr_rowinds(smat)
cols = smat.colinds
vals = smat.values
rows, cols, vals = self._filter_similarities(rows, cols, vals)
del smat
nnz = len(rows)
_logger.info('[%s] making matrix symmetric (%d nnz)', self._timer, nnz)
rows = np.resize(rows, nnz * 2)
cols = np.resize(cols, nnz * 2)
vals = np.resize(vals, nnz * 2)
rows[nnz:] = cols[:nnz]
cols[nnz:] = rows[:nnz]
vals[nnz:] = vals[:nnz]
csr = self._select_similarities(nitems, rows, cols, vals)
return csr
def _filter_similarities(self, rows, cols, vals):
"Threshold similarites & remove self-similarities."
_logger.info('[%s] filtering %d similarities', self._timer, len(rows))
# remove self-similarity
mask = rows != cols
# remove too-small similarities
if self.min_sim is not None:
mask = np.logical_and(mask, vals >= self.min_sim)
_logger.info('[%s] filter keeps %d of %d entries', self._timer, np.sum(mask), len(rows))
return rows[mask], cols[mask], vals[mask]
def _select_similarities(self, nitems, rows, cols, vals):
_logger.info('[%s] ordering similarities', self._timer)
csr = matrix.csr_from_coo(rows, cols, vals, shape=(nitems, nitems))
csr.sort_values()
if self.save_nbrs is None or self.save_nbrs <= 0:
return csr
_logger.info('[%s] picking %d top similarities', self._timer, self.save_nbrs)
counts = csr.row_nnzs()
_logger.debug('have %d rows in size range [%d,%d]',
len(counts), np.min(counts), np.max(counts))
ncounts = np.fmin(counts, self.save_nbrs)
_logger.debug('will have %d rows in size range [%d,%d]',
len(ncounts), np.min(ncounts), np.max(ncounts))
assert np.all(ncounts <= self.save_nbrs)
assert np.all(ncounts >= 0)
nnz = np.sum(ncounts)
rp2 = np.zeros_like(csr.rowptrs)
rp2[1:] = np.cumsum(ncounts)
ci2 = np.zeros(nnz, np.int32)
vs2 = np.zeros(nnz)
for i in range(nitems):
sp1 = csr.rowptrs[i]
sp2 = rp2[i]
ep1 = sp1 + ncounts[i]
ep2 = sp2 + ncounts[i]
assert ep1 - sp1 == ep2 - sp2
ci2[sp2:ep2] = csr.colinds[sp1:ep1]
vs2[sp2:ep2] = csr.values[sp1:ep1]
return matrix.CSR(csr.nrows, csr.ncols, nnz, rp2, ci2, vs2)
def predict_for_user(self, user, items, ratings=None):
_logger.debug('predicting %d items for user %s', len(items), user)
if ratings is None:
if user not in self.user_index_:
_logger.debug('user %s missing, returning empty predictions', user)
return pd.Series(np.nan, index=items)
upos = self.user_index_.get_loc(user)
ratings = pd.Series(self.rating_matrix_.row_vs(upos),
index=pd.Index(self.item_index_[self.rating_matrix_.row_cs(upos)]))
# set up rating array
# get rated item positions & limit to in-model items
ri_pos = self.item_index_.get_indexer(ratings.index)
m_rates = ratings[ri_pos >= 0]
ri_pos = ri_pos[ri_pos >= 0]
rate_v = np.full(len(self.item_index_), np.nan, dtype=np.float_)
# mean-center the rating array
if self.center:
rate_v[ri_pos] = m_rates.values - self.item_means_[ri_pos]
else:
rate_v[ri_pos] = m_rates.values
_logger.debug('user %s: %d of %d rated items in model', user, len(ri_pos), len(ratings))
assert np.sum(np.logical_not(np.isnan(rate_v))) == len(ri_pos)
# set up item result vector
# ipos will be an array of item indices
i_pos = self.item_index_.get_indexer(items)
i_pos = i_pos[i_pos >= 0]
_logger.debug('user %s: %d of %d requested items in model', user, len(i_pos), len(items))
# scratch result array
iscore = np.full(len(self.item_index_), np.nan, dtype=np.float_)
# now compute the predictions
iscore = self._predict_agg(self.sim_matrix_,
len(self.item_index_),
(self.min_nbrs, self.nnbrs),
rate_v, i_pos)
nscored = np.sum(np.logical_not(np.isnan(iscore)))
if self.center:
iscore += self.item_means_
assert np.sum(np.logical_not(np.isnan(iscore))) == nscored
results = pd.Series(iscore, index=self.item_index_)
results = results[results.notna()]
results = results.reindex(items, fill_value=np.nan)
assert results.notna().sum() == nscored
_logger.debug('user %s: predicted for %d of %d items',
user, results.notna().sum(), len(items))
return results
def save(self, path):
path = pathlib.Path(path)
_logger.info('saving I-I model to %s', path)
data = dict(items=self.item_index_.values, users=self.user_index_.values,
means=self.item_means_)
data.update(matrix.csr_save(self.sim_matrix_, 's_'))
data.update(matrix.csr_save(self.rating_matrix_, 'r_'))
|
np.savez_compressed(path, **data)
|
numpy.savez_compressed
|
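The numba kernel `_predict_weighted_average` is easier to read in a dense toy form: a target item's score is the similarity-weighted average of the user's ratings over its neighbors. A sketch of just that aggregation rule (invented numbers, not LensKit's sparse CSR machinery):

```python
import numpy as np

# similarities from the target item to 4 candidate neighbor items
sims = np.array([0.9, 0.5, 0.2, 0.1])
# the user's (mean-centered) ratings of those items; NaN = unrated
ratings = np.array([1.0, np.nan, -0.5, 0.5])

mask = ~np.isnan(ratings)                       # skip unrated neighbors, like the kernel does
num = np.sum(ratings[mask] * sims[mask])
denom = np.sum(np.abs(sims[mask]))
score = num / denom if denom > 0 else np.nan
print(score)  # (0.9*1.0 + 0.2*-0.5 + 0.1*0.5) / (0.9 + 0.2 + 0.1) ≈ 0.7083
```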
"""
Filter
------
"""
from typing import List
from typing import Optional
from typing import Tuple
import numpy as np
from anndata import AnnData # type: ignore
import metacells.utilities as ut
from metacells.tools.mask import combine_masks
__all__ = [
"filter_data",
]
@ut.logged()
@ut.timed_call()
def filter_data( # pylint: disable=dangerous-default-value
adata: AnnData,
obs_masks: List[str] = [],
var_masks: List[str] = [],
*,
mask_obs: Optional[str] = None,
mask_var: Optional[str] = None,
invert_obs: bool = False,
invert_var: bool = False,
track_obs: Optional[str] = None,
track_var: Optional[str] = None,
name: Optional[str] = None,
top_level: bool = True,
) -> Optional[Tuple[AnnData, ut.PandasSeries, ut.PandasSeries]]:
"""
Filter (slice) the data based on previously-computed masks.
For example, it is useful to discard cell-cycle genes, cells which have too few UMIs for
meaningful analysis, etc. In general, the "best" filter depends on the data set.
This function makes it easy to combine different pre-computed per-observation (cell) and
per-variable (gene) boolean mask annotations into a final overall inclusion mask, and slice the
data accordingly, while tracking the base index of the cells and genes in the filtered data.
**Input**
Annotated ``adata``, where the observations are cells and the variables are genes.
**Returns**
An annotated data containing a subset of the observations (cells) and variables (genes).
If no observations and/or no variables were selected by the filter, returns ``None``.
If ``name`` is not specified, the returned data will be unnamed. Otherwise, if the name starts
with a ``.``, it will be appended to the current name (if any). Otherwise, ``name`` is the new
name.
If ``mask_obs`` and/or ``mask_var`` are specified, store the mask of the selected data as a
per-observation and/or per-variable annotation of the full ``adata``.
If ``track_obs`` and/or ``track_var`` are specified, store the original indices of the selected
data as a per-observation and/or per-variable annotation of the result data.
**Computation Parameters**
1. Combine the masks in ``obs_masks`` and/or ``var_masks`` using
:py:func:`metacells.tools.mask.combine_masks` passing it ``invert_obs`` and ``invert_var``,
and ``mask_obs`` and ``mask_var`` as the ``to`` parameter. If either list of masks is empty,
use the full mask.
2. If the obtained masks for either the observations or variables is empty, return ``None``.
Otherwise, return a slice of the full data containing just the observations and variables
specified by the final masks.
"""
if len(obs_masks) == 0:
obs_mask = np.full(adata.n_obs, True, dtype="bool")
if mask_obs is not None:
ut.set_o_data(adata, mask_obs, obs_mask)
else:
mask = combine_masks(adata, obs_masks, invert=invert_obs, to=mask_obs)
if mask is None:
assert mask_obs is not None
obs_mask = ut.get_o_numpy(adata, mask_obs, formatter=ut.mask_description) > 0
else:
obs_mask = ut.to_numpy_vector(mask, only_extract=True) > 0
if len(var_masks) == 0:
var_mask =
|
np.full(adata.n_vars, True, dtype="bool")
|
numpy.full
|
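At its core, `filter_data` combines boolean observation/variable masks and slices the AnnData, returning ``None`` when a mask selects nothing. A minimal sketch of that behaviour (toy matrix and masks; not the metacells implementation):

```python
import numpy as np
from anndata import AnnData

adata = AnnData(np.arange(12, dtype=float).reshape(4, 3))  # 4 cells x 3 genes
obs_mask = np.array([True, True, False, True])             # keep 3 cells
var_mask = np.array([True, False, True])                   # keep 2 genes

if obs_mask.any() and var_mask.any():
    fdata = adata[obs_mask, :][:, var_mask].copy()         # slice observations, then variables
else:
    fdata = None                                           # nothing selected: mirror filter_data's None return
print(None if fdata is None else fdata.shape)              # (3, 2)
```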
import os, sys
from collections import defaultdict
import numpy as np
import gzip
from dateutil import parser
MEAN_DIFF_THRESHOLD = 5.0
# expected format
# Sep 04 20:55:50.139 INFO Ping response, time: [rtt], local: 0.0.0.0:[srcport], from: ...
def parse_udping(fname):
if not os.path.isfile(fname):
print(f"error: missing {fname}")
return
port_pings = defaultdict(list)
min_time = None
with gzip.open(fname) as f:
for l in f:
l = l.decode('utf-8')
try:
if 'Ping' in l:
sp = l.strip().split(" ")
time = parser.parse(' '.join(sp[:3])).timestamp()
if min_time is None or time < min_time:
min_time = time
rtt = float(sp[7].replace(",",""))
_, srcport = sp[9].replace(",","").split(":")
port_pings[srcport].append((time, rtt))
except Exception as e:
continue
for port in port_pings:
port_pings[port] = [(t - min_time, r) for t, r in port_pings[port]]
return(port_pings)
# expected format:
# [iface] [rxrate_bytes]
def parse_bmon(fname):
if not os.path.isfile(fname):
print(f"error: missing {fname}")
return
rates = []
with gzip.open(fname) as f:
try:
for l in f:
_, rxrate = l.strip().split()
rxrate = float(rxrate) * 8
rates.append(rxrate)
except Exception as e:
print(f"error: failed to parse bmon for {fname}: {e}")
return rates
if __name__ == "__main__":
if len(sys.argv) != 2:
print("usage: python parse.py [path/to/results_dir]")
raise Exception()
results_dir = sys.argv[1]
ping = open('udping_results.out', 'w')
ping.write("src dst time port Latency rtt\n")
bmon = open('bmon_results.out', 'w')
bmon.write("src dst Throughput bw\n")
base_rtts = open('minrtts.out', 'w')
base_rtts.write("src dst port Latency\n")
paths = os.listdir(results_dir)
for path in paths:
sp = path.split('-')
if not os.path.isdir(path) or len(sp) != 2 or 'ssh' in path:
continue
src, dst = sp
control_pings = parse_udping(os.path.join(results_dir, path, 'control', 'udping.log.gz'))
iperf_pings = parse_udping(os.path.join(results_dir, path, 'iperf', 'udping.log.gz'))
bundler_pings = parse_udping(os.path.join(results_dir, path, 'bundler', 'udping.log.gz'))
iperf_rates = parse_bmon(os.path.join(results_dir, path, 'iperf', 'bmon.log.gz'))
bundler_rates = parse_bmon(os.path.join(results_dir, path, 'bundler', 'bmon.log.gz'))
if control_pings is None:
continue
for srcport in control_pings.keys():
try:
control = control_pings[srcport]
for time, r in control:
ping.write(f"{src} {dst} {time} {srcport} control {r}\n")
except:
continue
if np.mean(control_pings[srcport]) > 50:
base_rtts.write(f"{src} {dst} {srcport} {
|
np.mean(control_pings[srcport])
|
numpy.mean
|
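The field indices used in `parse_udping` can be checked against a single line in the documented format (the sample line below is made up to match the `# expected format` comment, it is not real log output):

```python
from dateutil import parser

line = "Sep 04 20:55:50.139 INFO Ping response, time: 12.3, local: 0.0.0.0:4242, from: 10.0.0.1"
sp = line.strip().split(" ")
time = parser.parse(' '.join(sp[:3])).timestamp()    # fields 0-2 form the timestamp
rtt = float(sp[7].replace(",", ""))                  # field 7 is the rtt value
_, srcport = sp[9].replace(",", "").split(":")       # field 9 is local ip:port
print(rtt, srcport)  # 12.3 4242
```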
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 3 12:07:34 2020
@author: konrad
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from scipy.interpolate import griddata
import PhysicalCalculations as PC
import VortexLine as VL
# %%
AoA = (0, 5, 10)
weights = (.5, .5)
Thr = .3
x_full, y_full, u_full, v_full, vort_full, u_std, v_std, Cont_full, Mom_full = \
PC.Read_Data(AoA)
x, y, u, v, vort = PC.make_square(x_full, y_full, u_full, v_full, vort_full, 1000, step=1)
x_OT = np.genfromtxt("../Data/OT_Results/{:.0f}_{:.0f}_{:.0f}_{:.2f}_{:.2f}_x.csv"
.format(AoA[0], AoA[1], AoA[2], weights[0], weights[1]), delimiter=",")
y_OT = np.genfromtxt("../Data/OT_Results/{:.0f}_{:.0f}_{:.0f}_{:.2f}_{:.2f}_y.csv"
.format(AoA[0], AoA[1], AoA[2], weights[0], weights[1]), delimiter=",")
vort_OT_pos = np.genfromtxt("../Data/OT_Results/{:.0f}_{:.0f}_{:.0f}_{:.2f}_{:.2f}_pos.csv"
.format(AoA[0], AoA[1], AoA[2], weights[0], weights[1]), delimiter=",")
vort_OT_neg = np.genfromtxt("../Data/OT_Results/{:.0f}_{:.0f}_{:.0f}_{:.2f}_{:.2f}_neg.csv"
.format(AoA[0], AoA[1], AoA[2], weights[0], weights[1]), delimiter=",")
sums = np.genfromtxt("../Data/OT_Results/{:.0f}_{:.0f}_{:.0f}_{:.2f}_{:.2f}_sums.csv"
.format(AoA[0], AoA[1], AoA[2], weights[0], weights[1]), delimiter=",")
vort_OT = vort_OT_pos*np.sum(weights*sums[0])\
- vort_OT_neg*np.sum(weights*sums[1])
# %%
mask_OT = abs(vort_OT) > Thr * np.max(abs(vort_OT))
u_OT, v_OT = PC.u_omega(x_OT, y_OT, x_OT[mask_OT],
y_OT[mask_OT], vort_OT[mask_OT], h=1)
u_OT += 1
Mom_OT = PC.Momentum(vort_OT, u_OT, v_OT,
np.gradient(x_OT[0, :]), np.gradient(y_OT[:, 0]))
# %%
x_arc, y_arc = PC.Gen_Arc(AoA[1])
Arc = VL.VortexLine(x_arc, y_arc)
exvelo = lambda xt, yt: (griddata(np.vstack((x.flatten(), y.flatten())).transpose(),
u_OT.flatten(),
np.vstack((xt.flatten(), yt.flatten())).transpose()),
griddata(np.vstack((x.flatten(), y.flatten())).transpose(),
v_OT.flatten(),
np.vstack((xt.flatten(), yt.flatten())).transpose()))
gamma = Arc.solve_gamma(exvelo)
unorm, vnorm = exvelo(Arc.xc, Arc.yc)
u_gam, v_gam = Arc.velocity(gamma, Arc.xc, Arc.yc)
u_indu, v_indu = Arc.velocity_ext(gamma, x, y)
upw_uni = -unorm * Arc.sy + vnorm * Arc.sx
upw_gamma = -(unorm - u_gam) * Arc.sy + (vnorm - v_gam) * Arc.sx
# %% Motivation
skip = 50
plt.figure()
plt.contourf(x_full, y_full, np.sqrt(u_full[2]**2 + v_full[2]**2))
plt.quiver(x_full[::skip, ::skip], y_full[::skip, ::skip],
u_full[2][::skip, ::skip], v_full[2][::skip, ::skip])
plt.xticks(())
plt.yticks(())
# %% Method Flowchart
c_m = cm.RdBu_r
ylim = .1
levs = np.linspace(-ylim, ylim, 51)
plt.figure()
plt.contourf(x, y, vort[0], cmap=c_m, extend='both', levels=levs)
plt.colorbar()
plt.xticks(())
plt.yticks(())
plt.axis('equal')
plt.figure()
plt.contourf(x, y, vort[2], cmap=c_m, extend='both', levels=levs)
plt.colorbar()
plt.xticks(())
plt.yticks(())
plt.axis('equal')
plt.figure()
plt.contourf(x_OT, y_OT, vort_OT, cmap=c_m, extend='both', levels=levs)
plt.colorbar()
plt.xticks(())
plt.yticks(())
plt.axis('equal')
plt.figure()
plt.contourf(x_OT, y_OT, Mom_OT, extend='both', levels=np.linspace(0, 1e-4, 51))
plt.colorbar()
plt.xticks(())
plt.yticks(())
plt.axis('equal')
# %% Interpolation Method
plt.figure()
plt.contourf(x, y,
|
np.ones_like(x)
|
numpy.ones_like
|
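The `Thr`-based mask above keeps only grid cells whose vorticity magnitude exceeds a fraction of the field maximum before the induced-velocity computation. A tiny standalone example of that thresholding step (dummy field, same criterion):

```python
import numpy as np

Thr = 0.3
vort = np.array([[0.0, 0.1, -0.5],
                 [0.2, 1.0, -0.05],
                 [-0.8, 0.05, 0.3]])
mask = np.abs(vort) > Thr * np.max(np.abs(vort))   # keep cells above 30% of the peak |vorticity|
print(mask.sum(), vort[mask])                      # 3 [-0.5  1.  -0.8]
```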
__author__ = '<NAME>'
import numpy as np
import statsmodels.api as sm
from scipy.spatial import distance
from collections import Counter
from simulation_parameters import(
TRANSMIT_POWER,
CARRIER_FREQUENCY
)
from pathloss import (
compute_pathloss_fspl,
compute_pathloss_nyc_28_los
)
def convert_km_to_m(km_distance):
"""Function that converts distance in km to m!"""
_ = km_distance * 10**3
return _
def compute_distance_matrix(users, basestations):
"""Distances between all users and basestations is calculated.
Args:
users: (obj) list of users!
basestations: (obj) list of basestations!
Returns:
(list of) numpy arrays containing the distance between a user and all basestations in km!.
"""
coords_list_ue = [getattr(ele, 'coordinates') for ele in users]
coords_list_bs = [getattr(ele, 'coordinates') for ele in basestations]
distance_matrix = []
count = 0
for _ in coords_list_ue:
element = [coords_list_ue[count]]
coords = element + coords_list_bs
dist = distance.cdist(coords, coords, 'euclidean')
new_dist = np.delete(dist[0], 0)
distance_matrix.append(new_dist)
count += 1
return np.array(distance_matrix)
def compute_distance_matrix_m(distance_matrix_km):
"""Distance matrix is converted from km to m!
Args:
distance_matrix_km: (list of) numpy arrays containing the distance between a UE and all BSs in km!
Returns:
(list of) numpy arrays containing the distance between a user and all basestations in m!.
"""
distance_matrix_m = [convert_km_to_m(_) for _ in distance_matrix_km]
return distance_matrix_m
def compute_pathloss_matrix_fspl(_dist_matrix_m):
"""Pathlosses for all users is calculated.
Args:
_dist_matrix_m: (obj) list of users!
Returns:
(list of) numpy arrays containing the distance between a user and all basestations in km!.
"""
pl_matrix = [compute_pathloss_fspl(distance=_, frequency=CARRIER_FREQUENCY) for _ in _dist_matrix_m]
return pl_matrix
def compute_pathloss_matrix_nyc(_dist_matrix_m):
"""Pathlosses for all users is calculated.
Args:
_dist_matrix_m: (obj) list of users!
Returns:
(list of) numpy arrays containing the distance between a user and all basestations in km!.
"""
pl_matrix = [compute_pathloss_nyc_28_los(_) for _ in _dist_matrix_m]
return pl_matrix
def cell_associate(pathloss_matrix, users, basestations):
"""Associate a user with a basestation that provides the minimum pathloss.
Args:
pathloss_matrix: (list) of numpy arrays
users: (obj) list of users!
basestations: (obj) list of basestations!
Returns:
(list of) tuples containing the UE object and the BS it is associated to.
"""
index_list_min_pl = [] # List that contains tuple of (index(min(pathloss)), pathloss) for each UE!
list_bs = [] # List of basestation objects associated with each UE in order!
for _ in pathloss_matrix:
index_list_min_pl.append(min(enumerate(_), key=(lambda x: x[1])))
for _ in index_list_min_pl:
index = _[0]
list_bs.append(basestations[index])
cell_associate_list = list(zip(users, list_bs)) # List that contains tuple: (ue_object, bs_associated)!
return cell_associate_list
def compute_count_for_bs(pathloss_matrix, basestations):
"""Computes the number of UEs associated with a BS object for BS.
Args:
pathloss_matrix: (list) of numpy arrays
basestations: (obj) list of basestations!
Returns:
(list of) tuples containing the BS object and the number of UEs it is associated to.
"""
index_list_min_pl = [] # List that contains tuple of (index(min(pathloss)), pathloss) for each UE!
list_bs = [] # List of basestation objects associated with each UE in order!
for _ in pathloss_matrix:
index_list_min_pl.append(min(enumerate(_), key=(lambda x: x[1])))
for _ in index_list_min_pl:
index = _[0]
list_bs.append(basestations[index])
bs_cnt = Counter(list_bs)
bs_count = list(zip(bs_cnt.keys(),
bs_cnt.values())) # List of tuples that contain BS objects and the number of UEs they are associated to!
return bs_count
def compute_total_ue_for_bs(_cell_associate, _bs_count):
"""Computes the number of UEs associated with a BS object for UE.
Args:
_cell_associate: (list) that contains tuple (ue_object, bs_associated)!
_bs_count: (list) that contains tuple of BS objects and the number of UEs they are associated to!
Returns:
(list of) ints that correspond to the count of total number of UEs associated with a BS.
"""
other_ue_count_bs = [] # List of the total number of UEs associated with a BS for each UE!
for x in _cell_associate:
for y in _bs_count:
if x[1] == y[0]:
other_ue_count_bs.append(y[1])
return other_ue_count_bs
def compute_distance_ue_bs(_cell_associate):
"""Computes the distance between the user and the basestation it is associated to.
Args:
_cell_associate: (list) that contains tuple (ue_object, bs_associated)!
Returns:
(list of) distances in metres between each UE and BS.
"""
distance_in_km = [distance.euclidean(_[0].coordinates, _[1].coordinates) for _ in _cell_associate]
distance_in_m = [convert_km_to_m(_) for _ in distance_in_km]
return distance_in_m
def compute_rx_power(P_tx, G_tx, G_rx, PL):
""" Link Budget!
Args:
P_tx: transmit output power (dBm)
G_tx: transmitter antenna gain (dBi)
G_rx: receiver antenna gain (dBi)
PL: pathloss in (dB)!
Returns:
P_rx: received power (dB)!!!
"""
p_rx = P_tx + G_tx + G_rx - PL - 30
return p_rx # returns P_rx in dB!
def calculate_snr(rx_power, noise_power):
""" Function that calculates SNR in dB!
Args:
rx_power: (numpy array) received power in dB!
noise_power: noise power in dB!
Returns:
snr: (numpy array) Signal-to-Noise ratio in dB!
"""
snr = rx_power - noise_power # rx_power and noise_power should be in dB!
return snr # This gives SNR in dB!
def calculate_log_2_factor(value):
""" Function that calculates the logarithmic term!"""
result = np.log2(1 + value)
return result
def calculate_prelog_term(bandwidth, number):
""" Function that calculates the prelog term!
Args:
bandwidth: (int) bandwidth allocated to the basestation, which can be exclusive or pooled!
number: (numpy array) N, the total number of users associated with the basestation that
the UE of interest is associated with!
Returns:
prelog_term: (float) prelog_term is the ratio of the bandwidth and number!
"""
prelog_term = bandwidth / number
return prelog_term
def create_ecdf(metric_values):
"""Function to create ecdf (Empirical Cumulative Distribution Function!).
Args:
metric_values: (numpy array) of values that needs ecdf!
Returns:
x: (list of or numpy array) x values for ecdf!
y: (list of or numpy array) y values for ecdf!
"""
ecdf = sm.distributions.ECDF(metric_values)
x = np.linspace(min(metric_values), max(metric_values))
y = ecdf(x)
return x, y
def convert_dB_to_W(dB_value):
""" Function that converts dB values into Watts!"""
_ = 10 ** (dB_value / 10)
return _
def convert_W_to_dB(W_value):
""" Function that converts power values in Watts into dB values!"""
_ = 10 * np.log10(W_value)
return _
def calculate_sinr_exclusive(pathloss_matrix, rx_power, noise_power):
"""Function to calculate SINR in dB!
Args:
pathloss_matrix: (list of) numpy arrays!
rx_power: (numpy array) received power in dB for each UE!
noise_power: (constant term) noise power in dB!
Returns:
sinr: (numpy array) Signal-to-Interference-plus-Noise ratio in dB!
"""
pathloss_matrix_ = np.array(pathloss_matrix)
with np.nditer(pathloss_matrix_, op_flags=['readwrite']) as it:
for x in it:
x[...] = compute_rx_power(P_tx=TRANSMIT_POWER, G_tx=0, G_rx=0, PL=x)
rxpower_matrix = pathloss_matrix_
with np.nditer(rxpower_matrix, op_flags=['readwrite']) as it:
for x in it:
x[...] = convert_dB_to_W(x)
rx_power_matrix_W = rxpower_matrix
all_rx_power_W = np.sum(rx_power_matrix_W, axis=1)
rx_power_W = np.fromiter((convert_dB_to_W(_) for _ in rx_power), rx_power.dtype)
inter_cell_interference = all_rx_power_W - rx_power_W
sum_factor = inter_cell_interference + convert_dB_to_W(noise_power)
sinr_watts = rx_power_W / sum_factor
sinr_dB = np.fromiter((convert_W_to_dB(_) for _ in sinr_watts), sinr_watts.dtype)
return sinr_dB
def calculate_sinr_pooled_ii(pathloss_matrix_A, pathloss_matrix_B, rx_power, noise_power):
"""Function to calculate SINR in dB!
Args:
pathloss_matrix_A: (list of) numpy arrays!
pathloss_matrix_B: (list of) numpy arrays!
rx_power: (numpy array) received power in dB for each UE!
noise_power: (constant term) noise power in dB!
Returns:
sinr: (numpy array) Signal-to-Interference-plus-Noise ratio in dB!
"""
pathloss_matrix_A = np.array(pathloss_matrix_A)
with np.nditer(pathloss_matrix_A, op_flags=['readwrite']) as it:
for x in it:
x[...] = compute_rx_power(P_tx=TRANSMIT_POWER, G_tx=0, G_rx=0, PL=x)
rxpower_matrix_A = pathloss_matrix_A
pathloss_matrix_B = np.array(pathloss_matrix_B)
with np.nditer(pathloss_matrix_B, op_flags=['readwrite']) as it:
for x in it:
x[...] = compute_rx_power(P_tx=TRANSMIT_POWER, G_tx=0, G_rx=0, PL=x)
rxpower_matrix_B = pathloss_matrix_B
with np.nditer(rxpower_matrix_A, op_flags=['readwrite']) as it:
for x in it:
x[...] = convert_dB_to_W(x)
rx_power_matrix_A_W = rxpower_matrix_A
with np.nditer(rxpower_matrix_B, op_flags=['readwrite']) as it:
for x in it:
x[...] = convert_dB_to_W(x)
rx_power_matrix_B_W = rxpower_matrix_B
all_rx_power_A_W = np.sum(rx_power_matrix_A_W, axis=1)
all_rx_power_B_W = np.sum(rx_power_matrix_B_W, axis=1)
rx_power_W = np.fromiter((convert_dB_to_W(_) for _ in rx_power), rx_power.dtype)
inter_cell_interference = all_rx_power_A_W + all_rx_power_B_W - rx_power_W
sum_factor = inter_cell_interference + convert_dB_to_W(noise_power)
sinr_watts = rx_power_W / sum_factor
sinr_dB = np.fromiter((convert_W_to_dB(_) for _ in sinr_watts), sinr_watts.dtype)
return sinr_dB
def calculate_sinr_pooled_iii(pathloss_matrix_A,
pathloss_matrix_B,
pathloss_matrix_C,
rx_power,
noise_power):
"""Function to calculate SINR in dB!
Args:
pathloss_matrix_A: (list of) numpy arrays!
pathloss_matrix_B: (list of) numpy arrays!
pathloss_matrix_C: (list of) numpy arrays!
rx_power: (numpy array) received power in dB for each UE!
noise_power: (constant term) noise power in dB!
Returns:
sinr: (numpy array) Signal-to-Interference-plus-Noise ratio in dB!
"""
pathloss_matrix_A = np.array(pathloss_matrix_A)
with np.nditer(pathloss_matrix_A, op_flags=['readwrite']) as it:
for x in it:
x[...] = compute_rx_power(P_tx=TRANSMIT_POWER, G_tx=0, G_rx=0, PL=x)
rxpower_matrix_A = pathloss_matrix_A
pathloss_matrix_B = np.array(pathloss_matrix_B)
with np.nditer(pathloss_matrix_B, op_flags=['readwrite']) as it:
for x in it:
x[...] = compute_rx_power(P_tx=TRANSMIT_POWER, G_tx=0, G_rx=0, PL=x)
rxpower_matrix_B = pathloss_matrix_B
pathloss_matrix_C = np.array(pathloss_matrix_C)
with np.nditer(pathloss_matrix_C, op_flags=['readwrite']) as it:
for x in it:
x[...] = compute_rx_power(P_tx=TRANSMIT_POWER, G_tx=0, G_rx=0, PL=x)
rxpower_matrix_C = pathloss_matrix_C
with np.nditer(rxpower_matrix_A, op_flags=['readwrite']) as it:
for x in it:
x[...] = convert_dB_to_W(x)
rx_power_matrix_A_W = rxpower_matrix_A
with np.nditer(rxpower_matrix_B, op_flags=['readwrite']) as it:
for x in it:
x[...] = convert_dB_to_W(x)
rx_power_matrix_B_W = rxpower_matrix_B
with np.nditer(rxpower_matrix_C, op_flags=['readwrite']) as it:
for x in it:
x[...] = convert_dB_to_W(x)
rx_power_matrix_C_W = rxpower_matrix_C
all_rx_power_A_W = np.sum(rx_power_matrix_A_W, axis=1)
all_rx_power_B_W = np.sum(rx_power_matrix_B_W, axis=1)
all_rx_power_C_W =
|
np.sum(rx_power_matrix_C_W, axis=1)
|
numpy.sum
|
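A worked toy run of the link-budget and dB/Watt helpers above: received powers are computed in dB, converted to watts so the interference can be summed, then converted back, which is what the SINR functions do element-wise. The transmit power, pathlosses and noise power below are invented:

```python
import numpy as np

TX_POWER_DBM = 30.0      # illustrative transmit power
NOISE_POWER_DB = -120.0  # illustrative noise power in dB

def rx_power_db(p_tx_dbm, pl_db, g_tx=0.0, g_rx=0.0):
    # same link budget as compute_rx_power; the trailing -30 shifts dBm to dBW
    return p_tx_dbm + g_tx + g_rx - pl_db - 30.0

def db_to_w(db):
    return 10 ** (db / 10)

# one serving link (100 dB pathloss) and two interferers (110 and 115 dB)
p_serving = rx_power_db(TX_POWER_DBM, 100.0)
p_interf = np.array([rx_power_db(TX_POWER_DBM, 110.0), rx_power_db(TX_POWER_DBM, 115.0)])

interference_w = db_to_w(p_interf).sum()
sinr_w = db_to_w(p_serving) / (interference_w + db_to_w(NOISE_POWER_DB))
print(round(10 * np.log10(sinr_w), 2))  # SINR back in dB, about 8.49 for these numbers
```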
"""Converts dynamic object annotations to KITTI format."""
import argparse
import glob
import json
import os
from datetime import datetime
from os.path import join, basename
from typing import List, Callable
import numpy as np
from pyquaternion import Quaternion
from tqdm import tqdm
from calibration import (
load_calib_from_json,
get_3d_transform_camera_lidar,
rigid_transform_3d,
transform_rotation,
)
from constants import TIME_FORMAT, SIZE, LOCATION, ROTATION
from plot_objects_on_image import ObjectAnnotationHandler
IMAGE_DIMS = np.array([3848, 2168]) # width, height
OCCLUSION_MAP = {
"None": 0,
"Light": 1,
"Medium": 1,
"Heavy": 2,
"VeryHeavy": 2,
"Undefined": 2, # If undefined we assume the worst
}
def _parse_class(obj_properties):
obj_cls = obj_properties["class"]
if obj_cls not in ("VulnerableVehicle", "Vehicle", "Pedestrian"):
# Remove Animals, Debris, Movers and any other unwanted classes
return None
elif obj_properties["unclear"] or obj_properties["object_type"] == "Inconclusive":
# Ignore unclear and inconclusive objects
obj_cls = "DontCare"
elif obj_cls == "VulnerableVehicle":
# Rename the VulnerableVehicle class to Cyclist to match KITTI
obj_cls = "Cyclist"
# Remove stuff without rider
if obj_properties.get("with_rider", "True") == "False":
return None
# Ignore everything that's not a bicyclist or motorbicyclist
elif obj_properties["object_type"] not in ("Bicycle", "Motorcycle"):
obj_cls = "DontCare"
elif obj_cls == "Vehicle":
# Ignore more exotic vehicle classes (HeavyEquip, TramTrain, Other)
if obj_properties["object_type"] not in ("Car", "Van", "Truck", "Trailer", "Bus"):
obj_cls = "DontCare"
elif obj_cls == "Pedestrian":
# No special treatment for pedestrians
pass
return obj_cls
def _convert_to_kitti(
objects: List[ObjectAnnotationHandler], yaw_func: Callable[[Quaternion], float]
) -> List[str]:
kitti_annotation_lines = []
for obj in objects:
class_name = _parse_class(obj.properties)
if class_name is None:
continue # discard object
truncation, xmax, xmin, ymax, ymin = _parse_bbox_2d(obj.outer_points)
if obj.marking3d is None:
size, location, yaw, alpha = [0, 0, 0], [0, 0, 0], 0, 0
else:
size = obj.marking3d[SIZE][::-1] # H,W,L not L,W,H
location = obj.marking3d[LOCATION] # x,y,z
yaw = yaw_func(obj.marking3d[ROTATION])
alpha = 0 # TODO: calculate this!
if class_name != "DontCare" and "occlusion_ratio" not in obj.properties:
print("Missing occlusion for obj: ", obj)
kitti_obj = " ".join(
map(
str,
[
class_name,
truncation,
OCCLUSION_MAP[obj.properties.get("occlusion_ratio", "Undefined")],
alpha,
xmin,
ymin,
xmax,
ymax,
*size,
*location,
yaw,
],
)
)
kitti_annotation_lines.append(kitti_obj)
return kitti_annotation_lines
def _parse_bbox_2d(outer_points):
xmin_nonclip, ymin_nonclip = np.min(outer_points, axis=0)
xmax_nonclip, ymax_nonclip = np.max(outer_points, axis=0)
xmin, ymin =
|
np.clip([xmin_nonclip, ymin_nonclip], a_min=0, a_max=IMAGE_DIMS)
|
numpy.clip
|
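`_parse_bbox_2d` is cut off right after the clip to the image bounds. One plausible way to finish it is to report the clipped-away area fraction as the KITTI truncation value; the sketch below follows that assumption and is not the original implementation:

```python
import numpy as np

IMAGE_DIMS = np.array([3848, 2168])  # width, height

def parse_bbox_2d(outer_points):
    xmin_nc, ymin_nc = np.min(outer_points, axis=0)
    xmax_nc, ymax_nc = np.max(outer_points, axis=0)
    xmin, ymin = np.clip([xmin_nc, ymin_nc], a_min=0, a_max=IMAGE_DIMS)
    xmax, ymax = np.clip([xmax_nc, ymax_nc], a_min=0, a_max=IMAGE_DIMS)
    full_area = (xmax_nc - xmin_nc) * (ymax_nc - ymin_nc)
    clipped_area = (xmax - xmin) * (ymax - ymin)
    # truncation = fraction of the raw box lost when clipping to the image (assumption, not the original)
    truncation = 0.0 if full_area <= 0 else 1.0 - clipped_area / full_area
    return truncation, xmax, xmin, ymax, ymin

# box partly outside the image on the left
pts = np.array([[-100.0, 50.0], [300.0, 400.0]])
print(parse_bbox_2d(pts))  # truncation = 0.25, box clipped to x >= 0
```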
import pytest
import torch
import numpy as np
import librosa
import audtorch.transforms.functional as F
xfail = pytest.mark.xfail
a11 = np.array([1, 2, 3, 4], dtype=float)
a12 = np.array([5, 6, 7, 8], dtype=float)
a21 = np.array([9, 10, 11, 12], dtype=float)
a22 = np.array([13, 14, 15, 16], dtype=float)
ones = np.ones(4)
zeros = np.zeros(4)
A = np.array([[a11, a12], [a21, a22]]) # Tensor of shape (2, 2, 4)
# Ratio in dB to add two inputs to yield 1.5 magnitude
_half_ratio = -10 * np.log10(0.5 ** 2)
def _mean(input, axis):
"""Return mean along axis and preserve number of dimensions."""
return np.expand_dims(np.mean(input, axis=axis), axis=axis)
def _pad(vector, padding, value):
"""Add padding to a vector using np.pad."""
return np.pad(vector, padding, 'constant', constant_values=value)
@pytest.mark.parametrize('input,idx,axis,expected_output', [
(A, (0, 2), -1, np.array([[a11[:2], a12[:2]], [a21[:2], a22[:2]]])),
(A, (1, 2), -2,
|
np.array([[a12], [a22]])
|
numpy.array
|
# -*- coding: utf-8 -*-
"""
Package: iads
File: Classifiers.py
Year: semester 2 - 2018-2019, Sorbonne Université
"""
# Import external packages
import numpy as np
import pandas as pd
from iads import LabeledSet as ls
import random
import math
# ---------------------------
class Classifier:
""" Classe pour représenter un classifieur
Attention: cette classe est une classe abstraite, elle ne peut pas être
instanciée.
"""
# TODO: to be completed
def __init__(self, input_dimension):
""" Constructeur de Classifier
Argument:
- intput_dimension (int) : dimension d'entrée des exemples
Hypothèse : input_dimension > 0
"""
raise NotImplementedError("Please Implement this method")
def predict(self, x):
""" rend la prediction sur x (-1 ou +1)
"""
raise NotImplementedError("Please Implement this method")
def train(self, labeledSet):
""" Permet d'entrainer le modele sur l'ensemble donné
"""
raise NotImplementedError("Please Implement this method")
def accuracy(self, dataset):
""" Permet de calculer la qualité du système
"""
per=0
for i in range(dataset.size()):
if self.predict(dataset.getX(i)) == dataset.getY(i):
per+=1
return float(per)/dataset.size()
# ---------------------------
class ClassifierLineaireRandom(Classifier):
""" Classe pour représenter un classifieur linéaire aléatoire
Cette classe hérite de la classe Classifier
"""
# TODO: to be completed
def __init__(self, input_dimension):
""" Constructeur de Classifier
Argument:
- intput_dimension (int) : dimension d'entrée des exemples
Hypothèse : input_dimension > 0
"""
self.input_dimension = input_dimension
self.w = 2*np.random.rand(input_dimension)-1
def predict(self, x):
""" rend la prediction sur x (-1 ou +1)
"""
s = 0
for i in range(len(x)):
s += x[i]*self.w[i]
return (1 if s>=0 else -1)
def train(self, labeledSet):
""" Permet d'entrainer le modele sur l'ensemble donné
"""
print(" no trainning for this model")
# ---------------------------
class ClassifierKNN(Classifier):
""" Classe pour représenter un classifieur par K plus proches voisins.
Cette classe hérite de la classe Classifier
"""
# TODO: to be completed
def __init__(self, input_dimension,output_dimension, k):
""" Constructeur de Classifier
Argument:
- intput_dimension (int) : dimension d'entrée des exemples
- k (int) : nombre de voisins à considérer
Hypothèse : input_dimension > 0
"""
self.input_dimension = input_dimension
self.output_dimension = output_dimension
self.k= k
def predict(self, x):
""" rend la prediction sur x (-1 ou +1)
"""
#construit la matrice des distance de x
tab = []
for i in range(self.trainingSet.size()):
#calcul de la distance entre x et tainningSet[i]
tab.append(np.linalg.norm(x - self.trainingSet.getX(i)))
tab = np.argsort(tab)
s = 0
r = [0 for i in range(self.output_dimension)]
for i in range(len(r)):
zero, un = 0,0
for j in range(self.k):
if self.trainingSet.getY(tab[j])[i] == 1:
un += 1
else:
zero+=1
r[i] = 0 if zero > un else 1
return r
def train(self, labeledSet):
""" Permet d'entrainer le modele sur l'ensemble donné
"""
self.trainingSet = labeledSet
def accuracy(self, Set):
somme = 0
for j in range(Set.size()):
baelor = 1
for i in range(len(Set.getY(j))):
if Set.getY(j)[i] != self.predict(Set.getX(j))[i]:
baelor = 0
somme += 1 if (baelor == 0) else 0
return (float(somme)/Set.size() )
# ---------------------------
class ClassifierPerceptronRandom(Classifier):
def __init__(self, input_dimension):
""" Argument:
- input_dimension (int): input dimension of the examples
Assumption: input_dimension > 0
"""
v = np.random.rand(input_dimension) # random vector with input_dimension dimensions
self.w = (2* v - 1) / np.linalg.norm(v) # normalize by the norm of v
def predict(self, x):
""" rend la prediction sur x (-1 ou +1)
"""
z = np.dot(x, self.w)
return z
def train(self,labeledSet):
""" Permet d'entrainer le modele sur l'ensemble donné
"""
print("No training needed")
class ClassifierPerceptron(Classifier):
""" Perceptron de Rosenblatt
"""
def __init__(self,input_dimension,learning_rate):
""" Argument:
- input_dimension (int): input dimension of the examples
- learning_rate: learning rate
Assumption: input_dimension > 0
"""
##TODO
self.input_dimension = input_dimension
self.learning_rate = learning_rate
# w_ij == weight between input node i and neuron j
self.w = (2* np.random.rand(self.input_dimension))-1
def predict(self,x):
""" rend la prediction sur x (-1 ou +1)
"""
##TODO
return 1 if np.dot(self.w, x)>0 else -1
def train(self,labeledSet):
""" Permet d'entrainer le modele sur l'ensemble donné
"""
##TODO
self.out = 0
self.trainingSet = labeledSet
r = list(range(self.trainingSet.size()))
np.random.shuffle(r)
for i in r:
out = self.predict(self.trainingSet.getX(i))
if out * self.trainingSet.getY(i) <0:
self.w = self.w + self.learning_rate *self.trainingSet.getY(i) *self.trainingSet.getX(i)
class KernelBias:
def transform(self,x):
y=np.asarray([x[0],x[1],1])
return y
class ClassifierPerceptronKernel(Classifier):
def __init__(self,dimension_kernel,learning_rate,kernel):
self.input_dimension = dimension_kernel
self.learning_rate = learning_rate
self.w = (2* np.random.rand(dimension_kernel))-1
self.k = kernel
def predict(self,x):
return 1 if np.dot(self.w, self.k.transform(x))>0 else -1
def train(self,labeledSet):
self.out = 0
self.trainingSet = labeledSet
for i in range(self.trainingSet.size()):
out = self.predict(self.trainingSet.getX(i))
if out * self.trainingSet.getY(i) <0:
self.w = self.w + self.learning_rate *self.trainingSet.getY(i) *self.k.transform(self.trainingSet.getX(i))
class KernelPoly:
def transform(self,x):
##TODO
y=np.asarray([1, x[0],x[1],x[0]*x[0], x[1]*x[1], x[0]*x[1]])
return y
def classe_majoritaire(the_set):
a = [0,0]
for i in range(the_set.size()):
if the_set.getY(i) == 1:
a[0]+=1
else:
a[1]+=1
p = np.argmax(a)
return 1 if p== 0 else -1
def entropie(L):
# compute the probability distributions
a =
|
np.zeros(2)
|
numpy.zeros
|
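The perceptron's training rule above is `w += learning_rate * y * x` whenever the prediction and the label disagree. A self-contained numpy sketch of that update on a tiny linearly separable set (it avoids the `LabeledSet` class, which is not shown here):

```python
import numpy as np

np.random.seed(0)
X = np.array([[2.0, 1.0], [1.5, 2.0], [-1.0, -2.0], [-2.0, -0.5]])
Y = np.array([1, 1, -1, -1])
w = 2 * np.random.rand(2) - 1
lr = 0.1

for _ in range(10):                      # a few passes over the data
    for x, y in zip(X, Y):
        if np.dot(w, x) * y <= 0:        # misclassified: apply the Rosenblatt update
            w = w + lr * y * x

pred = np.where(X @ w > 0, 1, -1)
print(pred, (pred == Y).mean())          # all four points correct -> accuracy 1.0
```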
import numpy as np
import matplotlib.pyplot as plt
from pre_processing import pre_processing,re_sampling,moving_avg
from rejectBlink_PCA import rejectBlink_PCA
import json
import os
from pixel_size import pixel_size,pixel2angle
import warnings
import matplotlib.patches as patches
import pandas as pd
from makeEyemetrics import makeMicroSaccade,draw_heatmap
from zeroInterp import zeroInterp
from tqdm import tqdm
# from scipy.stats import gamma
# import scipy.stats as stats
warnings.simplefilter('ignore')
#%% ### initial settings ###################
cfg={
'SAMPLING_RATE':500,
'center':[1920, 1080],
'DOT_PITCH':0.369,
'VISUAL_DISTANCE':80,
'acceptMSRange':2.81,
# 'acceptMSRange':3,
'windowL':10,
'TIME_START':-1,
'TIME_END':4,
'WID_ANALYSIS':4,
'WID_BASELINE':np.array([-0.2,0]),
'WID_FILTER':np.array([]),
'METHOD':1, #subtraction
'FLAG_LOWPASS':False,
'THRES_DIFF':0.04
}
saveFileLocs = './data/'
f = open(os.path.join(str('./data/data_original.json')))
dat = json.load(f)
f.close()
mmName = list(dat.keys())
# v = np.diff(np.array(dat['PDR'])).reshape(-1)
# plt.hist(v,bins=100)
# sigma = np.nanstd(v)
# ramda = 6
# upsilon = ramda*sigma
y,rejectNum = pre_processing(np.array(dat['PDR']),cfg)
y = np.delete(y,rejectNum,axis=0)
for mm in mmName:
dat[mm] = [d for i,d in enumerate(dat[mm]) if not i in rejectNum]
#%% ### figure plot ##########################
x = np.linspace(cfg['TIME_START'],cfg['TIME_END'],y.shape[1])
plt.figure()
x = np.linspace(cfg['TIME_START'],cfg['TIME_END'],y.shape[1]-1)
plt.subplot(1,2,1)
plt.plot(x,np.diff(y).T)
plt.xlim([cfg['TIME_START'],cfg['TIME_END']])
plt.ylim([-cfg['THRES_DIFF'] ,cfg['THRES_DIFF'] ])
plt.xlabel('Time from response cue')
x = np.linspace(cfg['TIME_START'],cfg['TIME_END'],y.shape[1])
plt.subplot(1,2,2)
plt.plot(x,y.T)
plt.xlim([cfg['TIME_START'],cfg['TIME_END']])
plt.xlabel('Time from response cue')
plt.ylabel('Changes in pupil size')
#%% ### PCA ##########################
pc,rejectNumPCA = rejectBlink_PCA(y)
y = np.delete(y,rejectNumPCA,axis=0)
for mm in mmName:
dat[mm] = [d for i,d in enumerate(dat[mm]) if not i in rejectNumPCA]
#%% ### rejection of outlier(interplation failed trial) #########
# max_val = [max(abs(y[i,])) for i in np.arange(y.shape[0])]
# fx = np.diff(y)
# rejectOutlier = []
# for i in np.arange(len(y)):
# if len(np.unique(np.round(fx[i,],5))) < 20:
# rejectOutlier.append(i)
# y = np.delete(y,rejectOutlier,axis=0)
# for mm in mmName:
# dat[mm] = [d for i,d in enumerate(dat[mm]) if not i in rejectOutlier]
#%% ### reject gaze position ###
gazeX = np.array(dat['gazeX'])
gazeY = np.array(dat['gazeY'])
gazeX = zeroInterp(gazeX.copy(),cfg['SAMPLING_RATE'],5)
gazeX = gazeX['pupilData'][:,cfg['SAMPLING_RATE']:]
gazeY = zeroInterp(gazeY.copy(),cfg['SAMPLING_RATE'],5)
gazeY = gazeY['pupilData'][:,cfg['SAMPLING_RATE']:]
gv = [list(np.gradient(g)*cfg['SAMPLING_RATE']) for g in gazeX.tolist()]
gacc = [list(np.gradient(np.gradient(g))*(cfg['SAMPLING_RATE']**2)) for g in gazeX.tolist()]
thFix = pixel_size(cfg['DOT_PITCH'],30,cfg['VISUAL_DISTANCE']) # changes in velocity of x > 30°/sec
thAccFix = thFix*400 # changes in acceleration of x > 1200°/sec
endFix = []
for iTrial,d in enumerate(tqdm(dat['endFix'])):
# if len(d)==0:
sigma = np.std(gv[iTrial])
sigma = sigma*3
sigma_acc = np.std(gacc[iTrial])
sigma_acc = sigma_acc*3
ind = np.argwhere(abs(np.array(gv[iTrial])) > sigma).reshape(-1)
ind_acc = np.argwhere(abs(np.array(gacc[iTrial])) > sigma_acc).reshape(-1)
# ind_acc = ind_acc[np.argwhere(np.diff(np.r_[0, ind_acc]) > 10)].reshape(-1)
# ind = np.unique(np.r_[ind,ind_acc])
if len(ind) > 0:
if np.max(np.diff(np.r_[0, ind])) > 1:
eFixTime = ind[np.argwhere(np.diff(np.r_[0, ind]) > 10)].reshape(-1)
if len(eFixTime) == 0:
eFixTime = ind[0]
eFixTime = np.r_[eFixTime,len(gv[iTrial])]
sFixTime = ind[np.r_[np.argwhere(np.diff(np.r_[0, ind]) > 10)[1:].reshape(-1)-1,len(ind)-1]]
sFixTime = np.r_[0,sFixTime]
tmp_endFix = []
for iFix in np.arange(len(sFixTime)):
tmp_endFix.append([0,0,0,
gazeX[iTrial,np.arange(sFixTime[iFix],eFixTime[iFix])].mean(),
gazeY[iTrial,np.arange(sFixTime[iFix],eFixTime[iFix])].mean(),
0])
else:
sFixTime = ind[0].tolist()
eFixTime = ind[-1].tolist()
tmp_endFix.append([0,0,0,
gazeX[iTrial,np.arange(sFixTime,eFixTime)].mean(),
gazeY[iTrial,np.arange(sFixTime,eFixTime)].mean(),
0])
dat['endFix'][iTrial] = tmp_endFix
else:
dat['endFix'][iTrial] = []
# endFix.append(tmp_endFix)
rangeWin = pixel_size(cfg['DOT_PITCH'],cfg['acceptMSRange'],cfg['VISUAL_DISTANCE'])
center = np.array(cfg['center'])/2
gazeX = []
gazeY = []
for iTrial,fixTrial in enumerate(dat['endFix']):
tmp_gx=[]
tmp_gy=[]
if len(fixTrial)>0:
for gx in fixTrial:
tmp_gx.append(float(gx[3]))
tmp_gy.append(float(gx[4]))
gazeX.append(np.mean(tmp_gx))
gazeY.append(np.mean(tmp_gy))
else:
gazeX.append(0)
gazeY.append(0)
gazeX = np.array(gazeX)-np.array(dat['gazeX']).mean(axis=1)
gazeY = np.array(gazeY)-np.array(dat['gazeY']).mean(axis=1)
# gazeX_p = np.array(gazeX)-center[0]
# gazeY_p = np.array(gazeY)-center[1]
# gazeX_p=pixel2angle(cfg['DOT_PITCH'],gazeX_p.tolist(),cfg['VISUAL_DISTANCE'])
# gazeY_p=pixel2angle(cfg['DOT_PITCH'],gazeY_p.tolist(),cfg['VISUAL_DISTANCE'])
# gazeX = np.mean(gazeX-center[0],axis=1)
# gazeY = np.mean(gazeY-center[1],axis=1)
a = rangeWin**2
b = rangeWin**2
tmp_x = gazeX**2
tmp_y = gazeY**2
P = (tmp_x/a)+(tmp_y/b)-1
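# P > 0 means the mean fixation position lies outside the acceptance ellipse
# x^2/a + y^2/b = 1; since a == b == rangeWin**2 here, this is simply a circle
# of radius rangeWin (in pixels) around the screen centre.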
rejectGaze = np.argwhere(P > 0).reshape(-1)
fig = plt.figure()
ax = plt.axes()
e = patches.Ellipse(xy=(0,0), width=rangeWin*2, height=rangeWin*2, fill=False, ec='r')
ax.add_patch(e)
plt.plot(gazeX,gazeY,'.')
plt.plot(gazeX[rejectGaze],gazeY[rejectGaze],'r.')
plt.axis('equal')
plt.xlim([-rangeWin-20,rangeWin+20])
plt.ylim([-rangeWin-20,rangeWin+20])
rejectGaze2 = np.argwhere(np.isnan(gazeX)).reshape(-1)
rejectGaze = np.unique(np.r_[rejectGaze,rejectGaze2])
y = np.delete(y,rejectGaze,axis=0)
for mm in mmName:
dat[mm] = [d for i,d in enumerate(dat[mm]) if not i in rejectGaze]
dat['PDR'] = y.tolist()
#%% ### participants reject ###
reject=[]
NUM_TRIAL = 150
numOftrials = []
rejectedTrial = []
for iSub in np.arange(1,int(max(dat['sub']))+1):
ind = [i for i, sub in enumerate(dat['sub']) if sub == iSub]
numOftrials.append(len(ind))
rejectedTrial.append(NUM_TRIAL - len(ind))
if numOftrials[iSub-1] < NUM_TRIAL * 0.5:
reject.append(iSub)
print('# of trials = ' + str(numOftrials))
print('Averaged # of trials = ' + str(np.round(np.mean(numOftrials),2)))
print('SD # of trials = ' + str(np.round(np.std(numOftrials),2)))
n, bins, patches = plt.hist(np.array(rejectedTrial))
# a_hat, loc_hat, scale_hat = gamma.fit(n)
# ps_hat = stats.gamma.pdf(bins, a_hat, loc=loc_hat, scale=scale_hat)
# plt.figure()
# plt.plot(ps_hat)
th = np.round(np.std(rejectedTrial),2)
# np.round(np.median(rejectedTrial),2)
# for iSub in np.arange(1,int(max(dat['sub']))+1):
# if rejectedTrial[iSub-1] > th:
# reject.append(iSub)
rejectSub = [i for i,d in enumerate(dat['sub']) if d in reject]
print('rejected subject = ' + str(reject))
y = np.delete(y,rejectSub,axis=0)
for mm in mmName:
dat[mm] = [d for i,d in enumerate(dat[mm]) if not i in rejectSub]
rejectedTrial = [d for i,d in enumerate(rejectedTrial) if not i+1 in reject]
ave = np.array(rejectedTrial)/NUM_TRIAL
print('rejected num ave = ' + str(round(np.mean(ave),3)) + ', sd = ' + str(round(np.std(ave),3)))
#%% PC events -------------------------------------------
events = {'sub':[],
'condition':[],
'PDR':[],
'min':[]
}
for iSub in np.unique(dat['sub']):
for iCond in np.unique(dat['condition']):
        ind = np.argwhere((np.array(dat['sub']) == iSub) &
                          (np.array(dat['condition']) == np.int64(iCond))).reshape(-1)
tmp_y = y[ind,:]
tmp_y = moving_avg(tmp_y, 10)
tmp_y = tmp_y.mean(axis=0)
events['sub'].append(iSub)
events['condition'].append(iCond)
events['PDR'].append(tmp_y.tolist())
plt.figure()
time_min = 0.3
time_max = 4
dat['min'] = []
dat['events'] = []
dat['events_p'] = []
x_t = x[np.argwhere((x>time_min) & (x<time_max))]
for iSub in np.unique(dat['sub']):
    ind = np.argwhere(np.array(events['sub']) == iSub).reshape(-1)
tmp_p =
# completion: np.array(events['PDR']) | api: numpy.array
import unittest
import numpy as np
import fastpli.simulation
import fastpli.analysis
import fastpli.tools
import fastpli.objects
# TODO: test rofl, not simpli.apply_rofl
class MainTest(unittest.TestCase):
def test_simple_rofl(self):
simpli = fastpli.simulation.Simpli()
simpli.omp_num_threads = 1
simpli.voxel_size = 60 # in micro meter
simpli.dim = [1, 1, 1]
# single voxel
tissue = np.ones((1, 1, 1), dtype=np.int32)
optical_axis = np.zeros((1, 1, 1, 3), dtype=np.float32)
optical_axis[:] = [np.cos(np.deg2rad(45)), 0, np.sin(np.deg2rad(45))]
tissue_properties = np.array([[0, 0], [-0.001, 0]])
# Simulate PLI Measurement ###
simpli.filter_rotations = np.deg2rad([0, 30, 60, 90, 120, 150])
simpli.light_intensity = 26000 # a.u.
simpli.interpolate = 'Slerp'
simpli.untilt_sensor_view = False
simpli.wavelength = 525 # in nm
simpli.step_size = 1 # in voxel_size
simpli.tilts = np.deg2rad([(0, 0), (5.5, 0), (5.5, 90), (5.5, 180),
(5.5, 270)])
tilting_stack = [None] * 5
for t, (theta, phi) in enumerate(simpli.tilts):
simpli.step_size = 1 / np.cos(theta)
images = simpli.run_simulation(tissue, optical_axis,
tissue_properties, theta, phi)
# calculate modalities
tilting_stack[t] = images
simpli.noise_model = lambda x: np.random.negative_binomial(
x / (3 - 1), 1 / 3)
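        # Reader note: np.random.negative_binomial(n, p) has mean n*(1-p)/p, so with
        # n = x/2 and p = 1/3 this noise model keeps the mean at ~x while inflating
        # the variance to ~3x (over-dispersed, Poisson-like detector noise).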
rofl_direction, rofl_incl, rofl_t_rel, _ = simpli.apply_rofl(
tilting_stack, grad_mode=False)
t_rel = 4 * simpli.voxel_size * abs(
tissue_properties[1][0]) / (simpli.wavelength / 1e3)
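        # Reader note (assumed convention): t_rel is the retardation 2*pi*d*dn/lambda expressed
        # relative to a quarter wave (pi/2), i.e. t_rel = 4*d*dn/lambda, with the voxel size d
        # in um and the wavelength converted from nm to um via the /1e3 factor.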
self.assertTrue(
abs(rofl_direction.flatten()[0] - np.deg2rad(0)) < 1e-10 or
abs(rofl_direction.flatten()[0] - np.deg2rad(180)) < 1e-10)
self.assertTrue(
abs(rofl_incl.flatten()[0] -
# completion: np.deg2rad(45) | api: numpy.deg2rad
from causal_world.envs.robot.action import TriFingerAction
from causal_world.envs.robot.observations import TriFingerObservations
from causal_world.utils.env_utils import clip
import numpy as np
import pybullet
from causal_world.configs.world_constants import WorldConstants
class TriFingerRobot(object):
def __init__(self,
action_mode,
observation_mode,
skip_frame,
normalize_actions,
normalize_observations,
simulation_time,
pybullet_client_full_id,
pybullet_client_w_goal_id,
pybullet_client_w_o_goal_id,
revolute_joint_ids,
finger_tip_ids,
cameras=None,
camera_indicies=np.array([0, 1, 2])):
"""
This class provides the functionalities of the robot itself
:param action_mode: (str) defines the action mode of the robot whether
its joint_positions, end_effector_positions
or joint_torques.
:param observation_mode: (str) defines the observation mode of the robot
if cameras or structured.
:param skip_frame: (int) the low level controller is running @250Hz
which corresponds to skip frame of 1, a skip frame
of 250 corresponds to frequency of 1Hz
:param normalize_actions: (bool) this is a boolean which specifies
whether the actions passed to the step
function are normalized or not.
:param normalize_observations: (bool) this is a boolean which specifies
whether the observations returned
should be normalized or not.
:param simulation_time: (float) the time for one action step in the pybullet
simulation.
:param pybullet_client_full_id: (int) pybullet client full mode id
:param pybullet_client_w_goal_id: (int) pybullet client with goal mode id
:param pybullet_client_w_o_goal_id: (int) pybullet client without goal mode id
:param revolute_joint_ids: (list) joint ids in the urdf
:param finger_tip_ids: (list) finger tip ids in the urdf
:param cameras: (list) Camera objects list
:param camera_indicies: (list) maximum of 3 elements where each element
is from 0 to 2, specifies which cameras
to return in the observations and the
order as well.
"""
self._pybullet_client_full_id = pybullet_client_full_id
self._pybullet_client_w_goal_id = pybullet_client_w_goal_id
self._pybullet_client_w_o_goal_id = pybullet_client_w_o_goal_id
self._revolute_joint_ids = revolute_joint_ids
self._finger_tip_ids = finger_tip_ids
self._normalize_actions = normalize_actions
self._normalize_observations = normalize_observations
self._action_mode = action_mode
self._observation_mode = observation_mode
self._skip_frame = skip_frame
self._simulation_time = simulation_time
self._dt = self._simulation_time * self._skip_frame
#TODO: for some reason this is needed
self._control_index = -1
self._position_gains = np.array([10.0, 10.0, 10.0] * 3)
self._velocity_gains = np.array([0.1, 0.3, 0.001] * 3)
self._safety_kd = np.array([0.08, 0.08, 0.04] * 3)
self._max_motor_torque = 0.36
self._robot_actions = TriFingerAction(action_mode, normalize_actions)
if self._pybullet_client_w_goal_id is not None:
self._set_finger_state_in_goal_image()
self._tool_cameras = cameras
self._camera_indicies = camera_indicies
self._robot_observations = TriFingerObservations(
observation_mode,
normalize_observations,
cameras=self._tool_cameras,
camera_indicies=self._camera_indicies)
self._last_action = None
self._last_clipped_action = None
if action_mode != "joint_torques":
self._last_applied_joint_positions = None
self._latest_full_state = None
self._state_size = 18
self._disable_velocity_control()
return
def get_link_names(self):
"""
:return: (list) returns the link names in the urdf
"""
return WorldConstants.LINK_IDS
def get_control_index(self):
"""
:return: (int) returns the current control index
"""
return self._control_index
def get_full_env_state(self):
"""
:return: returns the current state variables and their values in the
environment wrt to the robot.
"""
return self.get_current_variable_values()
def set_full_env_state(self, env_state):
"""
This function is used to set the env state through interventions on
the environment itself
:param env_state: (dict) specifies the state variables and its values
to intervene on.
:return: None
"""
self.apply_interventions(env_state)
return
def update_latest_full_state(self):
"""
Updates the latest full state in terms of joint positions, velocities,
torques..etc
:return: None
"""
if self._pybullet_client_full_id is not None:
current_joint_states = pybullet.\
getJointStates(
WorldConstants.ROBOT_ID, self._revolute_joint_ids,
physicsClientId=self._pybullet_client_full_id
)
else:
current_joint_states = pybullet.\
getJointStates(
WorldConstants.ROBOT_ID, self._revolute_joint_ids,
physicsClientId=self._pybullet_client_w_o_goal_id
)
current_position = np.array(
[joint[0] for joint in current_joint_states])
current_velocity = np.array(
[joint[1] for joint in current_joint_states])
current_torques =
# completion: np.array([joint[3] for joint in current_joint_states]) | api: numpy.array
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import operator
import random
import math
import json
import threading
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
import h5py
import util
from util import *
import time
from tqdm import tqdm
class PronounCorefKGModel(object):
def __init__(self, config, model='Train'):
self.config = config
self.context_embeddings = util.EmbeddingDictionary(config["context_embeddings"])
self.head_embeddings = util.EmbeddingDictionary(config["head_embeddings"], maybe_cache=self.context_embeddings)
self.char_embedding_size = config["char_embedding_size"]
self.char_dict = util.load_char_dict(config["char_vocab_path"])
self.max_span_width = config["max_span_width"]
self.genres = {g: i for i, g in enumerate(config["genres"])}
self.softmax_threshold = config['softmax_threshold']
if config["lm_path"]:
self.lm_file = h5py.File(self.config["lm_path"], "r")
else:
self.lm_file = None
self.kg_lm_file = h5py.File(self.config["kg_lm_path"], "r")
self.lm_layers = self.config["lm_layers"]
self.lm_size = self.config["lm_size"]
self.eval_data = None # Load eval data lazily.
print('Start to load the eval data')
st = time.time()
# self.load_kg('final_kg.json')
self.kg_embedding_size = 300
self.load_simple_kg('final_kg.json')
self.load_eval_data()
self.load_test_data()
print("Finished in {:.2f}".format(time.time() - st))
input_props = []
input_props.append((tf.string, [None, None])) # Tokens.
input_props.append((tf.float32, [None, None, self.context_embeddings.size])) # Context embeddings.
input_props.append((tf.float32, [None, None, self.head_embeddings.size])) # Head embeddings.
input_props.append((tf.float32, [None, None, self.lm_size, self.lm_layers])) # LM embeddings.
input_props.append((tf.int32, [None, None, None])) # Character indices.
input_props.append((tf.int32, [None])) # Text lengths.
input_props.append((tf.int32, [None])) # Speaker IDs.
input_props.append((tf.int32, [])) # Genre.
input_props.append((tf.bool, [])) # Is training.
input_props.append((tf.int32, [None])) # gold_starts.
input_props.append((tf.int32, [None])) # gold_ends.
input_props.append((tf.float32, [None, None, None])) # related kg embeddings.
input_props.append((tf.int32, [None, None])) # candidate_positions.
input_props.append((tf.int32, [None, None])) # pronoun_positions.
input_props.append((tf.bool, [None, None])) # labels
input_props.append((tf.float32, [None, None])) # candidate_masks
self.queue_input_tensors = [tf.placeholder(dtype, shape) for dtype, shape in input_props]
dtypes, shapes = zip(*input_props)
queue = tf.PaddingFIFOQueue(capacity=10, dtypes=dtypes, shapes=shapes)
self.enqueue_op = queue.enqueue(self.queue_input_tensors)
self.input_tensors = queue.dequeue()
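        # A background thread (see start_enqueue_thread) feeds tensorized examples into the
        # PaddingFIFOQueue above; dequeue() then supplies one padded example per step to the
        # graph built in get_predictions_and_loss.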
self.predictions, self.loss = self.get_predictions_and_loss(*self.input_tensors)
self.global_step = tf.Variable(0, name="global_step", trainable=False)
self.reset_global_step = tf.assign(self.global_step, 0)
learning_rate = tf.train.exponential_decay(self.config["learning_rate"], self.global_step,
self.config["decay_frequency"], self.config["decay_rate"],
staircase=True)
trainable_params = tf.trainable_variables()
gradients = tf.gradients(self.loss, trainable_params)
gradients, _ = tf.clip_by_global_norm(gradients, self.config["max_gradient_norm"])
optimizers = {
"adam": tf.train.AdamOptimizer,
"sgd": tf.train.GradientDescentOptimizer
}
optimizer = optimizers[self.config["optimizer"]](learning_rate)
self.train_op = optimizer.apply_gradients(zip(gradients, trainable_params), global_step=self.global_step)
def start_enqueue_thread(self, session):
with open(self.config["train_path"]) as f:
train_examples = [json.loads(jsonline) for jsonline in f.readlines()]
def _enqueue_loop():
while True:
random.shuffle(train_examples)
for example in train_examples:
tensorized_example = self.tensorize_pronoun_example(example, is_training=True)
feed_dict = dict(zip(self.queue_input_tensors, tensorized_example))
session.run(self.enqueue_op, feed_dict=feed_dict)
enqueue_thread = threading.Thread(target=_enqueue_loop)
enqueue_thread.daemon = True
enqueue_thread.start()
def restore(self, session, log_path=None):
# Don't try to restore unused variables from the TF-Hub ELMo module.
vars_to_restore = [v for v in tf.global_variables() if "module/" not in v.name]
saver = tf.train.Saver(vars_to_restore)
if log_path:
checkpoint_path = os.path.join(log_path, "model.max.ckpt")
else:
checkpoint_path = os.path.join(self.config["log_dir"], "model.max.ckpt")
print("Restoring from {}".format(checkpoint_path))
session.run(tf.global_variables_initializer())
saver.restore(session, checkpoint_path)
def load_lm_embeddings(self, doc_key):
if self.lm_file is None:
return
# completion: np.zeros([0, 0, self.lm_size, self.lm_layers]) | api: numpy.zeros
import io
import centrosome.propagate
import numpy
import scipy.stats
import cellprofiler_core.image
import cellprofiler_core.measurement
import cellprofiler_core.module
from cellprofiler_core.constants.measurement import COLTYPE_FLOAT
import cellprofiler.modules.measureobjectintensitydistribution
import cellprofiler_core.object
import cellprofiler_core.pipeline
import cellprofiler_core.preferences
import cellprofiler_core.setting
import cellprofiler_core.workspace
import tests.modules
cellprofiler_core.preferences.set_headless()
OBJECT_NAME = "objectname"
CENTER_NAME = "centername"
IMAGE_NAME = "imagename"
HEAT_MAP_NAME = "heatmapname"
def feature_frac_at_d(bin, bin_count, image_name=IMAGE_NAME):
if bin == bin_count + 1:
return "_".join(
[
cellprofiler.modules.measureobjectintensitydistribution.M_CATEGORY,
cellprofiler.modules.measureobjectintensitydistribution.F_FRAC_AT_D,
image_name,
cellprofiler.modules.measureobjectintensitydistribution.FF_OVERFLOW,
]
)
return (
cellprofiler.modules.measureobjectintensitydistribution.M_CATEGORY
+ "_"
+ cellprofiler.modules.measureobjectintensitydistribution.FF_FRAC_AT_D
% (image_name, bin, bin_count)
)
def feature_mean_frac(bin, bin_count, image_name=IMAGE_NAME):
if bin == bin_count + 1:
return "_".join(
[
cellprofiler.modules.measureobjectintensitydistribution.M_CATEGORY,
cellprofiler.modules.measureobjectintensitydistribution.F_MEAN_FRAC,
image_name,
cellprofiler.modules.measureobjectintensitydistribution.FF_OVERFLOW,
]
)
return (
cellprofiler.modules.measureobjectintensitydistribution.M_CATEGORY
+ "_"
+ cellprofiler.modules.measureobjectintensitydistribution.FF_MEAN_FRAC
% (image_name, bin, bin_count)
)
def feature_radial_cv(bin, bin_count, image_name=IMAGE_NAME):
if bin == bin_count + 1:
return "_".join(
[
cellprofiler.modules.measureobjectintensitydistribution.M_CATEGORY,
cellprofiler.modules.measureobjectintensitydistribution.F_RADIAL_CV,
image_name,
cellprofiler.modules.measureobjectintensitydistribution.FF_OVERFLOW,
]
)
return (
cellprofiler.modules.measureobjectintensitydistribution.M_CATEGORY
+ "_"
+ cellprofiler.modules.measureobjectintensitydistribution.FF_RADIAL_CV
% (image_name, bin, bin_count)
)
def test_please_implement_a_test_of_the_new_version():
assert (
cellprofiler.modules.measureobjectintensitydistribution.MeasureObjectIntensityDistribution.variable_revision_number
== 6
)
def test_load_v2():
data = """CellProfiler Pipeline: http://www.cellprofiler.org
Version:2
DateRevision:20120126174947
MeasureObjectIntensityDistribution:[module_num:8|svn_version:\'Unknown\'|variable_revision_number:2|show_window:True|notes:\x5B\x5D|batch_state:array(\x5B\x5D, dtype=uint8)]
Hidden:2
Hidden:2
Hidden:2
Select an image to measure:EnhancedGreen
Select an image to measure:OrigBlue
Select objects to measure:Nuclei
Object to use as center?:These objects
Select objects to use as centers:Cells
Select objects to measure:Nuclei
Object to use as center?:Other objects
Select objects to use as centers:Cells
Scale bins?:No
Number of bins:4
Maximum radius:200
Scale bins?:Yes
Number of bins:5
Maximum radius:50
"""
pipeline = cellprofiler_core.pipeline.Pipeline()
def callback(caller, event):
assert not isinstance(event, cellprofiler_core.pipeline.event.LoadException)
pipeline.add_listener(callback)
pipeline.load(io.StringIO(data))
assert len(pipeline.modules()) == 1
module = pipeline.modules()[0]
assert isinstance(
module,
cellprofiler.modules.measureobjectintensitydistribution.MeasureObjectIntensityDistribution,
)
assert module.object_count.value == 2
assert module.bin_counts_count.value == 2
assert {"OrigBlue", "EnhancedGreen"}.issubset(module.images_list.value)
assert module.objects[0].object_name == "Nuclei"
assert (
module.objects[0].center_choice
== cellprofiler.modules.measureobjectintensitydistribution.C_SELF
)
assert module.objects[0].center_object_name == "Cells"
assert (
module.objects[1].center_choice
== cellprofiler.modules.measureobjectintensitydistribution.C_CENTERS_OF_OTHER
)
assert module.objects[1].center_object_name == "Cells"
assert module.bin_counts[0].bin_count == 4
assert not module.bin_counts[0].wants_scaled
assert module.bin_counts[0].maximum_radius == 200
assert module.bin_counts[1].bin_count == 5
assert module.bin_counts[1].wants_scaled
assert module.bin_counts[1].maximum_radius == 50
def test_load_v3():
file = tests.modules.get_test_resources_directory(
"measureobjectintensitydistribution/v3.pipeline"
)
with open(file, "r") as fd:
data = fd.read()
pipeline = cellprofiler_core.pipeline.Pipeline()
def callback(caller, event):
assert not isinstance(event, cellprofiler_core.pipeline.event.LoadException)
pipeline.add_listener(callback)
pipeline.load(io.StringIO(data))
assert len(pipeline.modules()) == 1
module = pipeline.modules()[0]
assert isinstance(
module,
cellprofiler.modules.measureobjectintensitydistribution.MeasureObjectIntensityDistribution,
)
assert module.object_count.value == 3
assert module.bin_counts_count.value == 2
assert {"OrigBlue", "EnhancedGreen"}.issubset(module.images_list.value)
assert module.objects[0].object_name == "Nuclei"
assert (
module.objects[0].center_choice
== cellprofiler.modules.measureobjectintensitydistribution.C_SELF
)
assert module.objects[0].center_object_name == "Cells"
assert (
module.objects[1].center_choice
== cellprofiler.modules.measureobjectintensitydistribution.C_CENTERS_OF_OTHER
)
assert module.objects[1].center_object_name == "Cells"
assert (
module.objects[2].center_choice
== cellprofiler.modules.measureobjectintensitydistribution.C_EDGES_OF_OTHER
)
assert module.objects[2].center_object_name == "Cells"
assert module.bin_counts[0].bin_count == 4
assert not module.bin_counts[0].wants_scaled
assert module.bin_counts[0].maximum_radius == 200
assert module.bin_counts[1].bin_count == 5
assert module.bin_counts[1].wants_scaled
assert module.bin_counts[1].maximum_radius == 50
assert len(module.heatmaps) == 0
def test_load_v4():
file = tests.modules.get_test_resources_directory(
"measureobjectintensitydistribution/v4.pipeline"
)
with open(file, "r") as fd:
data = fd.read()
pipeline = cellprofiler_core.pipeline.Pipeline()
def callback(caller, event):
assert not isinstance(event, cellprofiler_core.pipeline.event.LoadException)
pipeline.add_listener(callback)
pipeline.load(io.StringIO(data))
assert len(pipeline.modules()) == 1
module = pipeline.modules()[0]
assert isinstance(
module,
cellprofiler.modules.measureobjectintensitydistribution.MeasureObjectIntensityDistribution,
)
assert (
module.wants_zernikes
== cellprofiler.modules.measureobjectintensitydistribution.Z_NONE
)
assert module.zernike_degree == 9
assert len(module.images_list.value) == 2
assert {"CropGreen", "CropRed"}.issubset(module.images_list.value)
assert len(module.objects) == 2
for group, (object_name, center_choice, center_object_name) in zip(
module.objects,
(
(
"Nuclei",
cellprofiler.modules.measureobjectintensitydistribution.C_SELF,
"Ichthyosaurs",
),
(
"Cells",
cellprofiler.modules.measureobjectintensitydistribution.C_EDGES_OF_OTHER,
"Nuclei",
),
),
):
assert group.object_name.value == object_name
assert group.center_choice.value == center_choice
assert group.center_object_name == center_object_name
assert len(module.bin_counts) == 2
for group, (bin_count, scale, max_radius) in zip(
module.bin_counts, ((5, True, 100), (4, False, 100))
):
assert group.wants_scaled == scale
assert group.bin_count == bin_count
assert group.maximum_radius == max_radius
for (
group,
(
image_name,
object_name,
bin_count,
measurement,
colormap,
wants_to_save,
output_image_name,
),
) in zip(
module.heatmaps,
(
(
"CropRed",
"Cells",
5,
cellprofiler.modules.measureobjectintensitydistribution.A_FRAC_AT_D,
"Default",
True,
"Heat",
),
(
"CropGreen",
"Nuclei",
4,
cellprofiler.modules.measureobjectintensitydistribution.A_MEAN_FRAC,
"Spectral",
False,
"A",
),
(
"CropRed",
"Nuclei",
5,
cellprofiler.modules.measureobjectintensitydistribution.A_RADIAL_CV,
"Default",
False,
"B",
),
),
):
assert group.image_name.value == image_name
assert group.object_name.value == object_name
assert int(group.bin_count.value) == bin_count
assert group.measurement == measurement
assert group.colormap == colormap
assert group.wants_to_save_display == wants_to_save
assert group.display_name == output_image_name
def test_load_v5():
file = tests.modules.get_test_resources_directory(
"measureobjectintensitydistribution/v5.pipeline"
)
with open(file, "r") as fd:
data = fd.read()
pipeline = cellprofiler_core.pipeline.Pipeline()
def callback(caller, event):
assert not isinstance(event, cellprofiler_core.pipeline.event.LoadException)
pipeline.add_listener(callback)
pipeline.load(io.StringIO(data))
assert len(pipeline.modules()) == 2
module = pipeline.modules()[0]
assert isinstance(
module,
cellprofiler.modules.measureobjectintensitydistribution.MeasureObjectIntensityDistribution,
)
assert (
module.wants_zernikes
== cellprofiler.modules.measureobjectintensitydistribution.Z_MAGNITUDES
)
assert module.zernike_degree == 7
assert len(module.images_list.value) == 2
assert {"CropGreen", "CropRed"}.issubset(module.images_list.value)
assert len(module.objects) == 2
for group, (object_name, center_choice, center_object_name) in zip(
module.objects,
(
(
"Nuclei",
cellprofiler.modules.measureobjectintensitydistribution.C_SELF,
"Ichthyosaurs",
),
(
"Cells",
cellprofiler.modules.measureobjectintensitydistribution.C_EDGES_OF_OTHER,
"Nuclei",
),
),
):
assert group.object_name.value == object_name
assert group.center_choice.value == center_choice
assert group.center_object_name == center_object_name
assert len(module.bin_counts) == 2
for group, (bin_count, scale, max_radius) in zip(
module.bin_counts, ((5, True, 100), (4, False, 100))
):
assert group.wants_scaled == scale
assert group.bin_count == bin_count
assert group.maximum_radius == max_radius
for (
group,
(
image_name,
object_name,
bin_count,
measurement,
colormap,
wants_to_save,
output_image_name,
),
) in zip(
module.heatmaps,
(
(
"CropRed",
"Cells",
5,
cellprofiler.modules.measureobjectintensitydistribution.A_FRAC_AT_D,
"Default",
True,
"Heat",
),
(
"CropGreen",
"Nuclei",
4,
cellprofiler.modules.measureobjectintensitydistribution.A_MEAN_FRAC,
"Spectral",
False,
"A",
),
(
"CropRed",
"Nuclei",
5,
cellprofiler.modules.measureobjectintensitydistribution.A_RADIAL_CV,
"Default",
False,
"B",
),
),
):
assert group.image_name.value == image_name
assert group.object_name.value == object_name
assert int(group.bin_count.value) == bin_count
assert group.measurement == measurement
assert group.colormap == colormap
assert group.wants_to_save_display == wants_to_save
assert group.display_name == output_image_name
module = pipeline.modules()[1]
assert (
module.wants_zernikes
== cellprofiler.modules.measureobjectintensitydistribution.Z_MAGNITUDES_AND_PHASE
)
def test_01_get_measurement_columns():
module = (
cellprofiler.modules.measureobjectintensitydistribution.MeasureObjectIntensityDistribution()
)
module.images_list.value = "DNA, Cytoplasm, Actin"
for i, object_name, center_name in (
(0, "Nucleii", None),
(1, "Cells", "Nucleii"),
(2, "Cytoplasm", "Nucleii"),
):
if i:
module.add_object()
module.objects[i].object_name.value = object_name
if center_name is None:
module.objects[
i
].center_choice.value = (
cellprofiler.modules.measureobjectintensitydistribution.C_SELF
)
else:
module.objects[
i
].center_choice.value = (
cellprofiler.modules.measureobjectintensitydistribution.C_CENTERS_OF_OTHER
)
module.objects[i].center_object_name.value = center_name
for i, bin_count in enumerate((4, 5, 6)):
if i:
module.add_bin_count()
module.bin_counts[i].bin_count.value = bin_count
module.bin_counts[2].wants_scaled.value = False
columns = module.get_measurement_columns(None)
column_dictionary = {}
for object_name, feature, coltype in columns:
key = (object_name, feature)
assert not (key in column_dictionary)
assert coltype == COLTYPE_FLOAT
column_dictionary[key] = (object_name, feature, coltype)
for object_name in [x.object_name.value for x in module.objects]:
for image_name in module.images_list.value:
for bin_count, wants_scaled in [
(x.bin_count.value, x.wants_scaled.value) for x in module.bin_counts
]:
for bin in range(1, bin_count + (1 if wants_scaled else 2)):
for feature_fn in (
feature_frac_at_d,
feature_mean_frac,
feature_radial_cv,
):
measurement = feature_fn(bin, bin_count, image_name)
key = (object_name, measurement)
assert key in column_dictionary
del column_dictionary[key]
assert len(column_dictionary) == 0
def test_02_get_zernike_columns():
module = (
cellprofiler.modules.measureobjectintensitydistribution.MeasureObjectIntensityDistribution()
)
for wants_zernikes, ftrs in (
(
cellprofiler.modules.measureobjectintensitydistribution.Z_MAGNITUDES,
(
cellprofiler.modules.measureobjectintensitydistribution.FF_ZERNIKE_MAGNITUDE,
),
),
(
cellprofiler.modules.measureobjectintensitydistribution.Z_MAGNITUDES_AND_PHASE,
(
cellprofiler.modules.measureobjectintensitydistribution.FF_ZERNIKE_MAGNITUDE,
cellprofiler.modules.measureobjectintensitydistribution.FF_ZERNIKE_PHASE,
),
),
):
module.wants_zernikes.value = wants_zernikes
module.zernike_degree.value = 2
module.images_list.value = "DNA, Cytoplasm, Actin"
for i, object_name, center_name in (
(0, "Nucleii", None),
(1, "Cells", "Nucleii"),
(2, "Cytoplasm", "Nucleii"),
):
if i:
module.add_object()
module.objects[i].object_name.value = object_name
columns = module.get_measurement_columns(None)
for image_name in "DNA", "Cytoplasm", "Actin":
for object_name in "Nucleii", "Cells", "Cytoplasm":
for n, m in ((0, 0), (1, 1), (2, 0), (2, 2)):
for ftr in ftrs:
name = "_".join(
(
cellprofiler.modules.measureobjectintensitydistribution.M_CATEGORY,
ftr,
image_name,
str(n),
str(m),
)
)
col = (
object_name,
name,
COLTYPE_FLOAT,
)
assert col in columns
def test_01_get_measurements():
module = (
cellprofiler.modules.measureobjectintensitydistribution.MeasureObjectIntensityDistribution()
)
module.images_list.value = "DNA, Cytoplasm, Actin"
for i, object_name, center_name in (
(0, "Nucleii", None),
(1, "Cells", "Nucleii"),
(2, "Cytoplasm", "Nucleii"),
):
if i:
module.add_object()
module.objects[i].object_name.value = object_name
if center_name is None:
module.objects[
i
].center_choice.value = (
cellprofiler.modules.measureobjectintensitydistribution.C_SELF
)
else:
module.objects[
i
].center_choice.value = (
cellprofiler.modules.measureobjectintensitydistribution.C_CENTERS_OF_OTHER
)
module.objects[i].center_object_name.value = center_name
for i, bin_count in ((0, 4), (0, 5), (0, 6)):
if i:
module.add_bin_count()
module.bin_counts[i].bin_count.value = bin_count
for object_name in [x.object_name.value for x in module.objects]:
assert tuple(module.get_categories(None, object_name)) == (
cellprofiler.modules.measureobjectintensitydistribution.M_CATEGORY,
)
for feature in cellprofiler.modules.measureobjectintensitydistribution.F_ALL:
assert feature in module.get_measurements(
None,
object_name,
cellprofiler.modules.measureobjectintensitydistribution.M_CATEGORY,
)
for image_name in module.images_list.value:
for (
feature
) in cellprofiler.modules.measureobjectintensitydistribution.F_ALL:
assert image_name in module.get_measurement_images(
None,
object_name,
cellprofiler.modules.measureobjectintensitydistribution.M_CATEGORY,
feature,
)
for bin_count in [x.bin_count.value for x in module.bin_counts]:
for bin in range(1, bin_count + 1):
for (
feature
) in cellprofiler.modules.measureobjectintensitydistribution.F_ALL:
assert "%dof%d" % (
bin,
bin_count,
) in module.get_measurement_scales(
None,
object_name,
cellprofiler.modules.measureobjectintensitydistribution.M_CATEGORY,
feature,
image_name,
)
def test_02_get_zernike_measurements():
module = (
cellprofiler.modules.measureobjectintensitydistribution.MeasureObjectIntensityDistribution()
)
for wants_zernikes, ftrs in (
(
cellprofiler.modules.measureobjectintensitydistribution.Z_MAGNITUDES,
(
cellprofiler.modules.measureobjectintensitydistribution.FF_ZERNIKE_MAGNITUDE,
),
),
(
cellprofiler.modules.measureobjectintensitydistribution.Z_MAGNITUDES_AND_PHASE,
(
cellprofiler.modules.measureobjectintensitydistribution.FF_ZERNIKE_MAGNITUDE,
cellprofiler.modules.measureobjectintensitydistribution.FF_ZERNIKE_PHASE,
),
),
):
module.wants_zernikes.value = wants_zernikes
module.zernike_degree.value = 2
module.images_list.value = "DNA, Cytoplasm, Actin"
for i, object_name, center_name in (
(0, "Nucleii", None),
(1, "Cells", "Nucleii"),
(2, "Cytoplasm", "Nucleii"),
):
if i:
module.add_object()
module.objects[i].object_name.value = object_name
if center_name is None:
module.objects[
i
].center_choice.value = (
cellprofiler.modules.measureobjectintensitydistribution.C_SELF
)
else:
module.objects[
i
].center_choice.value = (
cellprofiler.modules.measureobjectintensitydistribution.C_CENTERS_OF_OTHER
)
module.objects[i].center_object_name.value = center_name
for object_name in "Nucleii", "Cells", "Cytoplasm":
result = module.get_measurements(
None,
object_name,
cellprofiler.modules.measureobjectintensitydistribution.M_CATEGORY,
)
for ftr in ftrs:
assert ftr in result
iresult = module.get_measurement_images(
None,
object_name,
cellprofiler.modules.measureobjectintensitydistribution.M_CATEGORY,
ftr,
)
for image in "DNA", "Cytoplasm", "Actin":
assert image in iresult
sresult = module.get_measurement_scales(
None,
object_name,
cellprofiler.modules.measureobjectintensitydistribution.M_CATEGORY,
ftr,
image,
)
for n, m in ((0, 0), (1, 1), (2, 0), (2, 2)):
assert "%d_%d" % (n, m) in sresult
def test_default_heatmap_values():
module = (
cellprofiler.modules.measureobjectintensitydistribution.MeasureObjectIntensityDistribution()
)
module.add_heatmap()
module.heatmaps[0].image_name.value = IMAGE_NAME
module.heatmaps[0].object_name.value = OBJECT_NAME
module.heatmaps[0].bin_count.value = 10
module.images_list.value = "Bar"
module.objects[0].object_name.value = "Foo"
module.bin_counts[0].bin_count.value = 2
assert module.heatmaps[0].image_name.get_image_name() == "Bar"
assert not module.heatmaps[0].image_name.is_visible()
assert module.heatmaps[0].object_name.get_objects_name() == "Foo"
assert not module.heatmaps[0].object_name.is_visible()
assert module.heatmaps[0].get_number_of_bins() == 2
module.images_list.value = "Bar, MoreBar"
assert module.heatmaps[0].image_name.is_visible()
assert module.heatmaps[0].image_name.get_image_name() == IMAGE_NAME
module.add_object()
assert module.heatmaps[0].object_name.is_visible()
assert module.heatmaps[0].object_name.get_objects_name() == OBJECT_NAME
module.add_bin_count()
assert module.heatmaps[0].get_number_of_bins() == 10
def run_module(
image,
labels,
center_labels=None,
center_choice=cellprofiler.modules.measureobjectintensitydistribution.C_CENTERS_OF_OTHER,
bin_count=4,
maximum_radius=100,
wants_scaled=True,
wants_workspace=False,
wants_zernikes=cellprofiler.modules.measureobjectintensitydistribution.Z_NONE,
zernike_degree=2,
):
"""Run the module, returning the measurements
image - matrix representing the image to be analyzed
labels - labels matrix of objects to be analyzed
center_labels - labels matrix of alternate centers or None for self
centers
bin_count - # of radial bins
"""
module = (
cellprofiler.modules.measureobjectintensitydistribution.MeasureObjectIntensityDistribution()
)
module.wants_zernikes.value = wants_zernikes
module.zernike_degree.value = zernike_degree
module.images_list.value = IMAGE_NAME
module.objects[0].object_name.value = OBJECT_NAME
object_set = cellprofiler_core.object.ObjectSet()
main_objects = cellprofiler_core.object.Objects()
main_objects.segmented = labels
object_set.add_objects(main_objects, OBJECT_NAME)
if center_labels is None:
module.objects[
0
].center_choice.value = (
cellprofiler.modules.measureobjectintensitydistribution.C_SELF
)
else:
module.objects[0].center_choice.value = center_choice
module.objects[0].center_object_name.value = CENTER_NAME
center_objects = cellprofiler_core.object.Objects()
center_objects.segmented = center_labels
object_set.add_objects(center_objects, CENTER_NAME)
module.bin_counts[0].bin_count.value = bin_count
module.bin_counts[0].wants_scaled.value = wants_scaled
module.bin_counts[0].maximum_radius.value = maximum_radius
module.add_heatmap()
module.add_heatmap()
module.add_heatmap()
for i, (a, f) in enumerate(
(
(
cellprofiler.modules.measureobjectintensitydistribution.A_FRAC_AT_D,
cellprofiler.modules.measureobjectintensitydistribution.F_FRAC_AT_D,
),
(
cellprofiler.modules.measureobjectintensitydistribution.A_MEAN_FRAC,
cellprofiler.modules.measureobjectintensitydistribution.F_MEAN_FRAC,
),
(
cellprofiler.modules.measureobjectintensitydistribution.A_RADIAL_CV,
cellprofiler.modules.measureobjectintensitydistribution.F_RADIAL_CV,
),
)
):
module.heatmaps[i].image_name.value = IMAGE_NAME
module.heatmaps[i].object_name.value = OBJECT_NAME
module.heatmaps[i].bin_count.value = str(bin_count)
module.heatmaps[i].wants_to_save_display.value = True
display_name = HEAT_MAP_NAME + f
module.heatmaps[i].display_name.value = display_name
module.heatmaps[i].colormap.value = "gray"
module.heatmaps[i].measurement.value = a
pipeline = cellprofiler_core.pipeline.Pipeline()
measurements = cellprofiler_core.measurement.Measurements()
image_set_list = cellprofiler_core.image.ImageSetList()
image_set = measurements
img = cellprofiler_core.image.Image(image)
image_set.add(IMAGE_NAME, img)
workspace = cellprofiler_core.workspace.Workspace(
pipeline, module, image_set, object_set, measurements, image_set_list
)
module.run(workspace)
if wants_workspace:
return measurements, workspace
return measurements
def test_zeros_self():
"""Test the module on an empty labels matrix, self-labeled"""
m = run_module(
numpy.zeros((10, 10)),
numpy.zeros((10, 10), int),
wants_zernikes=cellprofiler.modules.measureobjectintensitydistribution.Z_MAGNITUDES_AND_PHASE,
zernike_degree=2,
)
for bin in range(1, 5):
for feature in (
feature_frac_at_d(bin, 4),
feature_mean_frac(bin, 4),
feature_radial_cv(bin, 4),
):
assert feature in m.get_feature_names(OBJECT_NAME)
data = m.get_current_measurement(OBJECT_NAME, feature)
assert len(data) == 0
for ftr in (
cellprofiler.modules.measureobjectintensitydistribution.FF_ZERNIKE_MAGNITUDE,
cellprofiler.modules.measureobjectintensitydistribution.FF_ZERNIKE_PHASE,
):
for n_, m_ in ((0, 0), (1, 1), (2, 0), (2, 2)):
feature = "_".join(
(
cellprofiler.modules.measureobjectintensitydistribution.M_CATEGORY,
ftr,
IMAGE_NAME,
str(n_),
str(m_),
)
)
assert feature in m.get_feature_names(OBJECT_NAME)
assert len(m[OBJECT_NAME, feature]) == 0
def test_circle():
"""Test the module on a uniform circle"""
i, j = numpy.mgrid[-50:51, -50:51]
labels = (numpy.sqrt(i * i + j * j) <= 40).astype(int)
m, workspace = run_module(
numpy.ones(labels.shape),
labels,
wants_workspace=True,
wants_zernikes=True,
zernike_degree=2,
)
assert isinstance(workspace, cellprofiler_core.workspace.Workspace)
bins = labels * (1 + (numpy.sqrt(i * i + j * j) / 10).astype(int))
for bin in range(1, 5):
data = m.get_current_measurement(OBJECT_NAME, feature_frac_at_d(bin, 4))
assert len(data) == 1
area = (float(bin) * 2.0 - 1.0) / 16.0
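        # For a uniform disc split into 4 equal-width rings, the fraction of its area in
        # ring `bin` is (bin^2 - (bin - 1)^2) / 4^2 = (2*bin - 1) / 16, which is what `area` holds.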
assert data[0] > area - 0.1
assert data[0] < area + 0.1
heatmap = workspace.image_set.get_image(
HEAT_MAP_NAME
+ cellprofiler.modules.measureobjectintensitydistribution.F_FRAC_AT_D
).pixel_data
data = data.astype(heatmap.dtype)
assert scipy.stats.mode(heatmap[bins == bin])[0][0] == data[0]
data = m.get_current_measurement(OBJECT_NAME, feature_mean_frac(bin, 4))
assert len(data) == 1
assert round(abs(data[0] - 1), 2) == 0
heatmap = workspace.image_set.get_image(
HEAT_MAP_NAME
+ cellprofiler.modules.measureobjectintensitydistribution.F_MEAN_FRAC
).pixel_data
data = data.astype(heatmap.dtype)
assert scipy.stats.mode(heatmap[bins == bin])[0][0] == data[0]
data = m.get_current_measurement(OBJECT_NAME, feature_radial_cv(bin, 4))
assert len(data) == 1
assert round(abs(data[0] - 0), 2) == 0
heatmap = workspace.image_set.get_image(
HEAT_MAP_NAME
+ cellprofiler.modules.measureobjectintensitydistribution.F_RADIAL_CV
).pixel_data
data = data.astype(heatmap.dtype)
assert scipy.stats.mode(heatmap[bins == bin])[0][0] == data[0]
module = workspace.module
assert isinstance(
module,
cellprofiler.modules.measureobjectintensitydistribution.MeasureObjectIntensityDistribution,
)
data = m[OBJECT_NAME, module.get_zernike_magnitude_name(IMAGE_NAME, 0, 0)]
assert len(data) == 1
assert abs(data[0] - 1) < 0.001
for n_, m_ in ((1, 1), (2, 0), (2, 2)):
data = m[OBJECT_NAME, module.get_zernike_magnitude_name(IMAGE_NAME, n_, m_)]
assert abs(data[0] - 0) < 0.001
def test_01_half_circle():
"""Test the module on a circle and an image that's 1/2 zeros
    The measurements here are considerably off because
the propagate function uses a Manhattan distance with jaywalking
allowed instead of the Euclidean distance.
"""
i, j = numpy.mgrid[-50:51, -50:51]
labels = (numpy.sqrt(i * i + j * j) <= 40).astype(int)
image = numpy.zeros(labels.shape)
image[i > 0] = (numpy.sqrt(i * i + j * j) / 100)[i > 0]
image[j == 0] = 0
image[i == j] = 0
image[i == -j] = 0
# 1/2 of the octants should be pretty much all zero and 1/2
# should be all one
x = [0, 0, 0, 0, 1, 1, 1, 1]
expected_cv = numpy.std(x) / numpy.mean(x)
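    # For x = [0, 0, 0, 0, 1, 1, 1, 1]: mean = 0.5 and (population) std = 0.5,
    # so the expected radial coefficient of variation is 1.0.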
m = run_module(image, labels)
bin_labels = (numpy.sqrt(i * i + j * j) * 4 / 40.001).astype(int)
mask = i * i + j * j <= 40 * 40
total_intensity = numpy.sum(image[mask])
for bin in range(1, 5):
data = m.get_current_measurement(OBJECT_NAME, feature_frac_at_d(bin, 4))
assert len(data) == 1
bin_count = numpy.sum(bin_labels[mask] == bin - 1)
frac_in_bin = float(bin_count) / numpy.sum(mask)
bin_intensity = numpy.sum(image[mask & (bin_labels == bin - 1)])
expected = bin_intensity / total_intensity
assert numpy.abs(expected - data[0]) < 0.2 * expected
data = m.get_current_measurement(OBJECT_NAME, feature_mean_frac(bin, 4))
assert len(data) == 1
expected = expected / frac_in_bin
assert numpy.abs(data[0] - expected) < 0.2 * expected
data = m.get_current_measurement(OBJECT_NAME, feature_radial_cv(bin, 4))
assert len(data) == 1
assert numpy.abs(data[0] - expected_cv) < 0.2 * expected_cv
def test_02_half_circle_zernike():
i, j = numpy.mgrid[-50:50, -50:50]
ii, jj = [_.astype(float) + 0.5 for _ in (i, j)]
labels = (numpy.sqrt(ii * ii + jj * jj) <= 40).astype(int)
image = numpy.zeros(labels.shape)
image[ii > 0] = 1
m = run_module(
image,
labels,
wants_zernikes=cellprofiler.modules.measureobjectintensitydistribution.Z_MAGNITUDES_AND_PHASE,
zernike_degree=2,
)
for n_, m_, expected, delta in (
(0, 0, 0.5, 0.001),
(1, 1, 0.225, 0.1),
(2, 0, 0, 0.01),
(2, 2, 0, 0.01),
):
ftr = "_".join(
(
cellprofiler.modules.measureobjectintensitydistribution.M_CATEGORY,
cellprofiler.modules.measureobjectintensitydistribution.FF_ZERNIKE_MAGNITUDE,
IMAGE_NAME,
str(n_),
str(m_),
)
)
assert abs(m[OBJECT_NAME, ftr][0] - expected) < delta
ftr = "_".join(
(
cellprofiler.modules.measureobjectintensitydistribution.M_CATEGORY,
cellprofiler.modules.measureobjectintensitydistribution.FF_ZERNIKE_PHASE,
IMAGE_NAME,
"1",
"1",
)
)
phase_i_1_1 = m[OBJECT_NAME, ftr][0]
image = numpy.zeros(labels.shape)
image[jj > 0] = 1
m = run_module(
image,
labels,
wants_zernikes=cellprofiler.modules.measureobjectintensitydistribution.Z_MAGNITUDES_AND_PHASE,
zernike_degree=1,
)
phase_j_1_1 = m[OBJECT_NAME, ftr][0]
assert numpy.abs(numpy.abs(phase_i_1_1 - phase_j_1_1) - numpy.pi / 2) == 0
def test_line():
"""Test the alternate centers with a line"""
labels = numpy.array(
[
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
)
centers = numpy.zeros(labels.shape, int)
centers[2, 1] = 1
distance_to_center = numpy.array(
[
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1.4, 2.4, 3.4, 4.4, 5.4, 6.4, 7.4, 0],
[0, 0, 1, 2, 3, 4, 5, 6, 7, 0],
[0, 1, 1.4, 2.4, 3.4, 4.4, 5.4, 6.4, 7.4, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
)
distance_to_edge = numpy.array(
[
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 2, 2, 2, 2, 2, 2, 2, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
)
numpy.random.seed(0)
image = numpy.random.uniform(size=labels.shape)
m = run_module(image, labels, centers)
total_intensity = numpy.sum(image[labels == 1])
normalized_distance = distance_to_center / (
distance_to_center + distance_to_edge + 0.001
)
bin_labels = (normalized_distance * 4).astype(int)
for bin in range(1, 5):
data = m.get_current_measurement(OBJECT_NAME, feature_frac_at_d(bin, 4))
assert len(data) == 1
bin_intensity = numpy.sum(image[(labels == 1) & (bin_labels == bin - 1)])
expected = bin_intensity / total_intensity
assert numpy.abs(expected - data[0]) < 0.1 * expected
data = m.get_current_measurement(OBJECT_NAME, feature_mean_frac(bin, 4))
expected = (
expected
* numpy.sum(labels == 1)
/ numpy.sum((labels == 1) & (bin_labels == bin - 1))
)
assert numpy.abs(data[0] - expected) < 0.1 * expected
data = m.get_current_measurement(OBJECT_NAME, feature_radial_cv(bin, 4))
assert len(data) == 1
def test_no_scaling():
i, j = numpy.mgrid[-40:40, -40:40]
#
# I'll try to calculate the distance the same way as propagate
# jaywalk min(i,j) times and go straight abs(i - j) times
#
jaywalks = numpy.minimum(numpy.abs(i), numpy.abs(j))
straights = numpy.abs(numpy.abs(i) - numpy.abs(j))
distance = jaywalks * numpy.sqrt(2) + straights
labels = (distance <= 35).astype(int)
r = numpy.random.RandomState()
r.seed(35)
image = r.uniform(size=i.shape)
total_intensity = numpy.sum(image[labels == 1])
bin_labels = (distance / 5).astype(int)
bin_labels[bin_labels > 4] = 4
m = run_module(image, labels, bin_count=4, maximum_radius=20, wants_scaled=False)
for bin in range(1, 6):
data = m.get_current_measurement(OBJECT_NAME, feature_frac_at_d(bin, 4))
assert len(data) == 1
bin_intensity = numpy.sum(image[(labels == 1) & (bin_labels == bin - 1)])
expected = bin_intensity / total_intensity
assert round(abs(expected - data[0]), 4) == 0
data = m.get_current_measurement(OBJECT_NAME, feature_mean_frac(bin, 4))
expected = (
expected
* numpy.sum(labels == 1)
/ numpy.sum((labels == 1) & (bin_labels == bin - 1))
)
assert round(abs(data[0] - expected), 4) == 0
data = m.get_current_measurement(OBJECT_NAME, feature_radial_cv(bin, 4))
assert len(data) == 1
def test_edges_of_objects():
r = numpy.random.RandomState()
r.seed(36)
i, j = numpy.mgrid[-20:21, -20:21]
labels = ((i > -19) & (i < 19) & (j > -19) & (j < 19)).astype(int)
centers = numpy.zeros(labels.shape, int)
centers[(i > -5) * (i < 5) & (j > -5) & (j < 5)] = 1
image = r.uniform(size=labels.shape)
m = run_module(
image,
labels,
center_labels=centers,
center_choice=cellprofiler.modules.measureobjectintensitydistribution.C_EDGES_OF_OTHER,
bin_count=4,
maximum_radius=8,
wants_scaled=False,
)
_, d_from_center = centrosome.propagate.propagate(
numpy.zeros(labels.shape), centers, (labels > 0), 1
)
good_mask = (labels > 0) & (centers == 0)
d_from_center = d_from_center[good_mask]
bins = (d_from_center / 2).astype(int)
bins[bins > 4] = 4
bin_counts = numpy.bincount(bins)
image_sums = numpy.bincount(bins, image[good_mask])
frac_at_d = image_sums / numpy.sum(image_sums)
for i in range(1, 6):
data = m.get_current_measurement(OBJECT_NAME, feature_frac_at_d(i, 4))
assert len(data) == 1
assert round(abs(data[0] - frac_at_d[i - 1]), 7) == 0
def test_two_circles():
i, j = numpy.mgrid[-50:51, -50:51]
i, j = [numpy.hstack((x, x)) for x in (i, j)]
d = numpy.sqrt(i * i + j * j)
labels = (d <= 40).astype(int)
labels[:, (j.shape[1] // 2) :] *= 2
img = numpy.zeros(labels.shape)
img[labels == 1] = 1
img[labels == 2] = d[labels == 2] / 40
m, workspace = run_module(img, labels, wants_workspace=True)
assert isinstance(workspace, cellprofiler_core.workspace.Workspace)
bins = (labels != 0) * (1 + (
# completion: numpy.sqrt(i * i + j * j) | api: numpy.sqrt
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 25 18:13:25 2020
@author: rdamseh
"""
import numpy as np
from tqdm import tqdm
from VirtualMRI.MRI.Sequence import Sequence
from VirtualMRI.MRI.DiffusionGradient import DiffusionGradient
import scipy.io as sio
from time import time
class DiffusionSignal:
def __init__(self,
binary_image,
delta_B,
T2_image, # ms
vx_image, # mm/s
vy_image, # mm/s
vz_image, # mm/s
grad_image=None, # this is to model the propagation
vel_image=None, # mm/s
n_protons=int(1e6),
n_protons_all=1,
T1on = False,
T2on = True,
TR = 1000.0,
compute_phase = 0.0,
compute_A = 0.0,
dt = 0.2, # ms
T1 = 1590.0, # ms
TE = 5.0, # ms
echo_spacing = 1.0,
delta_big=2.0,
delta_small=1.0,
b_value=10.0,
phi=45,
theta=45,
gamma=2.675e5, # rad/Tesla/msec (gyromagenatic ratio)
diff_coeff=0.8,
apply_spin_labeling=False,
apply_diffusion=True,
exp_name='MRIexp',
                 savepath=''):  # diff_coeff: proton (water) diffusion coefficient (um^2/msec), assumed isotropic (Le Bihan 2013)
self.binary_image = binary_image
self.delta_B = delta_B
# should be in sec
self.T2_image = T2_image
self.vx_image = vx_image
self.vy_image = vy_image
self.vz_image = vz_image
self.vel_image = vel_image
self.n_protons = n_protons
self.n_protons_init = n_protons
self.n_protons_all = n_protons_all
self.T1on = T1on
self.T2on = T2on
self.TR = TR
self.compute_phase = compute_phase
self.compute_A = compute_A
self.dt = dt
self.T1 = T1
self.TE = TE
self.echo_spacing = echo_spacing
self.delta_big = delta_big
self.delta_small = delta_small
self.b_value = b_value
self.phi = phi
self.theta = theta
self.gamma = gamma
self.diff_coeff = diff_coeff
self.apply_spin_labeling = apply_spin_labeling
self.apply_diffusion=apply_diffusion
self.diff_sigma=np.sqrt(2*self.diff_coeff*self.dt)
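        # Per-axis RMS displacement of free diffusion over one time step (Einstein relation):
        # sigma = sqrt(2 * D * dt), with D = diff_coeff (um^2/ms) and dt in ms.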
self.map_size=self.T2_image.shape
#### timing and sequence
self.sequence=Sequence(Type='DiffSE',
TE=self.TE,
echo_spacing=self.echo_spacing,
delta_big=self.delta_big,
delta_small=self.delta_small,
dt=self.dt).Sequence
self.sequence_toplot=Sequence(Type='DiffSE',
TE=self.TE,
echo_spacing=self.echo_spacing,
delta_big=self.delta_big,
delta_small=self.delta_small,
dt=.001).Sequence
self.signals=[]
self.b_values=[]
self.phi_values=[]
self.theta_values=[]
self.mean_phase=[]
self.name=exp_name
self.shape=np.shape(self.binary_image)
self.center=np.array(self.shape)/2.0
self.labels=[]
# get boundary and flow gradients
if grad_image is not None:
grad_image_boundary=self.get_gradimage(self.binary_image)
grad_image_vessel=self.get_gradimage(grad_image)
# normalized boundary gradients
self.boundary=self.get_norm(grad_image_boundary)>0
self.grad_image_boundary=self.get_normgrad(grad_image_boundary)
# normalized vessel gradients
grad_image_vessel=self.get_normgrad(grad_image_vessel)
grad_image=[self.grad_image_boundary[i]+grad_image_vessel[i] for i in [0,1,2]]
self.grad_image=self.get_normgrad(grad_image)
def get_gradimage(self, x):
'''
compute image gradients
'''
return(np.gradient(x))
def get_norm(self, gr):
'''
        compute the norm of the gradients
'''
return(np.sqrt(np.sum(np.array([i**2 for i in gr]), axis=0)))
def get_normgrad(self, gr, boundary=False):
'''
compute normal gradients at the boundary of vessels
        to be applied for reflecting moving spins when they hit the vessel wall
'''
normal=self.get_norm(gr)
if boundary:
b=normal>0
normal[normal==0]=1
ret=[i/normal for i in gr]
if boundary:
return ret, b
else:
return ret
def InitiateSpins(self):
if self.apply_spin_labeling:
self.InitiateSpinsSL()
else:
np.random.seed(999)
self.pos_protons=np.random.rand(int(self.n_protons_init), 3)*np.array(self.map_size) # protons positions
self.n_protons=len(self.pos_protons)
print('Number of protons: ', self.n_protons)
def InitiateSpinsSL(self):
self.pos_protons=np.empty((0, 3))
idx=0
while len(self.pos_protons)<self.n_protons_init:
np.random.seed(999+10*idx)
idx+=1
pos_protons=np.random.rand(int(1e7), 3)*np.array(self.map_size) # protons positions
ind=tuple([pos_protons[:,i].astype(int) for i in [0,1,2]])
valid_pos_ind=(self.binary_image[ind]>0)
pos_protons=pos_protons[valid_pos_ind]
left=int(self.n_protons_init)-len(self.pos_protons)
if left>1e7:
left=int(1e7)
self.pos_protons=np.vstack((self.pos_protons, pos_protons[0:left]))
self.pos_protons_copy=self.pos_protons.copy()
def InitiateGradient(self, b_value=None, phi=None, theta=None):
if b_value is not None:
self.b_value=b_value
if phi is not None:
self.phi=phi
if theta is not None:
self.theta=theta
#### Diffusion gradient
self.DiffusionGradient=DiffusionGradient(shape=self.map_size,
b=self.b_value,
delta_big=self.delta_big, #ms
delta_small=self.delta_small, #ms
gamma=self.gamma)
self.gradient=self.DiffusionGradient.GetGradient(phi=self.phi, theta=self.theta)
print('max delta_B: ', self.delta_B.max())
print('max gradient: ', self.gradient.max())
def __UpdateSpinsPos(self, vx, vy, vz, ret=False):
        # dt is in ms; the 1e-3 factor below converts it to seconds to match the mm/s velocities
self.shift=np.array([vx, vy, vz]).T*self.dt*1e-3 # shift vectors
valid_pos=np.less_equal(self.pos_protons+self.shift, np.array(self.map_size)) # check if new pos do not exceed space borders
valid_pos=valid_pos[:,0]*valid_pos[:,1]*valid_pos[:,2]
self.pos_protons+=self.shift*valid_pos[:,None] # update positions
if ret:
return self.shift
def __UpdateSpinsPosWithReplacement(self, vx, vy, vz, ret=False):
# def randpos_invessel(im, n):
#
# shape=np.array(im.shape)
# cont =1
# pos=np.zeros((n,3))
# ind1=0
# while cont:
# p=np.random.rand(n, 3)*shape[None,:]
# p=p.astype(int)
# ind=(p[:,0],p[:,1],p[:,2])
# check=im[ind].ravel()
# p=p[check>0]
# l=len(p)
# if l+ind1>n:
# l=n-ind1
# pos[ind1:ind1+l]=p[:l]
# ind1=ind1+l
# if ind1==n:
# cont=0
        # dt is in ms; the 1e-3 factor below converts it to seconds to match the mm/s velocities
self.shift=np.array([vx, vy, vz]).T*self.dt*1e-3 # shift vectors
valid_pos=np.less_equal(self.pos_protons+self.shift, np.array(self.map_size)) # check if new pos do not exceed space borders
valid_pos=valid_pos[:,0]*valid_pos[:,1]*valid_pos[:,2]
self.pos_protons+=self.shift*valid_pos[:, None] # update positions
novalid_pos=np.bitwise_not(valid_pos)
np.random.shuffle(self.pos_protons_copy)
self.pos_protons[novalid_pos>0]= self.pos_protons_copy[:sum(novalid_pos>0)]# update positions
if ret:
return self.shift
def __UpdateSpinsPosNew(self, imageind, ret=False):
        # dt is in ms; the 1e-3 factor below converts it to seconds to match the mm/s velocities
self.shift=np.array([self.grad_image[0][imageind],
self.grad_image[1][imageind],
self.grad_image[2][imageind]]).T*self.vel_image[imageind][:, None]*self.dt*1e-3 # shift vectors
valid_pos=np.less_equal(self.pos_protons+self.shift, np.array(self.map_size)) # check if new pos do not exceed space borders
valid_pos=valid_pos[:,0]*valid_pos[:,1]*valid_pos[:,2]
self.pos_protons+=self.shift*valid_pos[:,None] # update positions
if ret:
return self.shift
def __UpdateSpinsPosOneReflection(self, vx, vy, vz, ret=False):
def Update(shift):
# valid if wihtin image domain
newpos=self.pos_protons+shift
valid1=np.less_equal(newpos, np.array(self.map_size))
valid1=valid1[:,0]*valid1[:,1]*valid1[:,2]
# valid if within vessles boundary
ind=tuple([newpos[valid1][:, i].astype(int) for i in [0,1,2]])
valid2=self.binary_image[ind]>0
valid=valid1.copy()
valid[valid]=valid2
# protons that need reflectance
toreflect=valid1.copy()
toreflect[toreflect]=
# completion: np.bitwise_not(valid2) | api: numpy.bitwise_not
# -*- coding: utf-8 -*-
"""
gpd_lite_toolboox
@author: mthh
"""
import shapely.ops
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
from shapely.geometry import Point, Polygon, MultiPolygon
from geopandas import GeoDataFrame
from sklearn.metrics.pairwise import pairwise_distances
from .utils import (
db_connect, Borderiz, dbl_range, ftouches_byid, l_shared_border,
make_index, nrepeat, mparams, dorling_radius, dorling_radius2,
)
__all__ = ('get_borders', 'find_borders', 'transform_cartogram', 'dissolve',
'intersects_byid', 'multi_to_single', 'dumb_multi_to_single',
'snap_to_nearest', 'read_spatialite', 'match_lines',
'mean_coordinates', 'non_contiguous_cartogram', 'make_grid',
'gridify_data', 'random_pts_on_surface', 'access_isocrone')
def match_lines(gdf1, gdf2, method='cheap_hausdorff', limit=None):
"""
Return a pandas.Series (with the length of *gdf1*) with each row containing
the id of the matching feature in *gdf2* (i.e the closest based on the
computation of a "hausdorff-distance-like" between the two lines or
the most similar based on some geometry properties) or nothing if nothing
is found according to the *limit* argument.
    If a *limit* is given, features situated farther than this distance
will not be taken into account (in order to avoid retrieving the id of
too far located segments, even if the closest when no one seems to
be matching).
Parameters
----------
gdf1: GeoDataFrame of LineStrings (the reference dataset).
gdf2: GeoDataFrame of LineStrings (the dataset to match).
limit: Integer
        The maximum distance beyond which two segments are
        considered not to match.
Returns
-------
match_table: pandas.Series containing the matching table (with index
based on *gdf1*)
"""
if 'cheap_hausdorff' in method:
if limit:
return (gdf1.geometry.apply(
lambda x: [fh_dist_lines(x, gdf2.geometry[i]) for i in range(len(gdf2))]
)).apply(lambda x: [nb for nb, i in enumerate(x) if i == min(x) and i < limit])
else:
return (gdf1.geometry.apply(
lambda x: [fh_dist_lines(x, gdf2.geometry[i]) for i in range(len(gdf2))]
)).apply(lambda x: [nb for nb, i in enumerate(x) if i == min(x)])
elif 'cluster' in method:
return match_line_cluster(gdf1, gdf2)
else:
raise ValueError('Incorrect matching method\nMethod should '
'be \'cheap_hausdorff\' or \'cluster\'.')
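# Minimal usage sketch (hypothetical file names; assumes two GeoDataFrames of LineStrings):
#
#     import geopandas as gpd
#     ref_roads = gpd.read_file('reference_roads.shp')
#     osm_roads = gpd.read_file('osm_roads.shp')
#     matches = match_lines(ref_roads, osm_roads, method='cheap_hausdorff', limit=50)
#     # matches[i] lists the index(es) in osm_roads matching ref_roads.iloc[i]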
def match_line_cluster(gdf1, gdf2):
"""
Try to match two layers of linestrings with KMeans cluster analysis based
on a triplet of descriptive attributes :
(centroid coords., rounded length, approximate bearing)
Parameters
----------
gdf1: GeoDataFrame
The reference dataset.
gdf2: GeoDataFrame
The collection of LineStrings to match.
Returns
-------
matching_table: pandas.Series
A table (index-based on *gdf1*) containing the id of the matching
feature found in *gdf2*.
"""
param1, param2 = list(map(mparams, [gdf1, gdf2]))
k_means = KMeans(init='k-means++', n_clusters=len(gdf1),
n_init=10, max_iter=1000)
k_means.fit(np.array((param1+param2)))
    df1 = pd.Series(k_means.labels_[:len(gdf1)])  # cluster labels of gdf1 features
    df2 = pd.Series(k_means.labels_[len(gdf1):])  # cluster labels of gdf2 features
# gdf1['fid_layer2'] = \
# df1.apply(lambda x: df2.where(gdf2['key'] == x).notnull().nonzero()[0][0])
return pd.DataFrame(
index=list(range(len(gdf1))),
data=df1.apply(
lambda x: df2.where(df2 == x).notnull().nonzero())
)
def fh_dist_lines(li1, li2):
"""
Compute a cheap distance (based on hausdorff-distance) between
*li1* and *li2*, two LineString.
Parameters
----------
li1: shapely.geometry.LineString
li2: shapely.geometry.LineString
Returns
-------
max_dist: Float of the distance between li1 and li2.
"""
coord_li1 = np.array([i for i in zip(li1.coords.xy[0], li1.coords.xy[1])])
coord_li2 = np.array([i for i in zip(li2.coords.xy[0], li2.coords.xy[1])])
if len(coord_li1) > len(coord_li2):
coord_li1, coord_li2 = coord_li2, coord_li1
dist_mat = pairwise_distances(
coord_li1, coord_li2, metric='euclidean', n_jobs=2
)
chkl = round(len(coord_li1)/len(coord_li2))
return max(
[dist_mat[i, j] for i, j in zip(
list(range(len(coord_li1))),
list(nrepeat(range(len(coord_li2)), chkl))[:len(coord_li1)])]
)
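# Hedged usage sketch (not part of the original module): calling the "cheap
# hausdorff" helper above on two made-up LineStrings. Coordinates are
# illustrative only; the result is in the units of the input coordinates.
def _demo_fh_dist_lines():
    from shapely.geometry import LineString
    line_a = LineString([(0, 0), (1, 1), (2, 2)])
    line_b = LineString([(0, 0.5), (1, 1.5), (2, 2.5)])
    # Every vertex of line_a is 0.5 units away from the matching vertex of
    # line_b, so the returned value here is 0.5.
    return fh_dist_lines(line_a, line_b)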
def get_borders(gdf, tol=1, col_name='id'):
"""
Get the lines corresponding to the border between each
polygon from the dataset, each line containing the *col_name* of the
two polygons around (quicker computation than :py:func:`find_borders`).
Likely a minimalist python port of cartography::getBorders R function from
https://github.com/Groupe-ElementR/cartography/blob/master/R/getBorders.R
Parameters
----------
gdf: :py:class: `geopandas.GeoDataFrame`
Input collection of polygons.
tol: int, default=1
The tolerance (in units of :py:obj:`gdf`).
col_name: str, default='id'
The field name of the polygon to yield.
Returns
-------
borders: GeoDataFrame
A GeoDataFrame of linestrings corresponding to the border between each
polygon from the dataset, each line containing the *col_name* of the
two polygon around.
"""
buff = gdf.geometry.buffer(tol)
intersect_table = intersects_byid(buff, buff)
attr, new_geoms = [], []
for i in range(len(gdf)):
tmp1 = gdf.iloc[i]
buff_geom1 = buff[i]
for j in intersect_table[i]:
if not i == j:
tmp2 = gdf.iloc[j]
buff_geom2 = buff[j]
new_geoms.append(
(buff_geom1.intersection(buff_geom2)).boundary
)
attr.append(tmp1[col_name] + '-' + tmp2[col_name])
return GeoDataFrame(attr, geometry=new_geoms, columns=[col_name])
def find_borders(gdf, tol=1, col_name='id'):
"""
Parameters
----------
gdf: :py:class::`geopandas.GeoDataFrame`
Input collection of polygons.
tol: int, default=1
The tolerance (in units of :py:obj:`gdf`).
col_name: str, default='id'
The field name of the polygon to yield.
Returns
-------
borders: GeoDataFrame
Return lines corresponding to the border between each polygon of the
dataset, each line containing the id of the two polygon around it.
This function is slower/more costly than :py:func:`get_borders`.
"""
if col_name not in gdf.columns:
raise ValueError("Column name error : can't find {}".format(col_name))
bor = Borderiz(gdf)
return bor.run(tol, col_name)
def transform_cartogram(gdf, field_name, iterations=5, inplace=False):
"""
Make a continuous cartogram on a geopandas.GeoDataFrame collection
of Polygon/MultiPolygon (wrapper to call the core functions
written in cython).
Based on the transformation of Dougenik et al. (1985).
Parameters
----------
gdf: geopandas.GeoDataFrame
The GeoDataFrame containing the geometry and a field to use for the
transformation.
field_name: String
The label of the field containing the value to use.
iterations: Integer, default 5
The number of iteration to make.
inplace: Boolean, default False
Apply in place if True. Otherwise return a new :py:obj:`GeoDataFrame`
with transformed geometry.
Returns
-------
GeoDataFrame: A new GeoDataFrame (or None if inplace=True)
References
----------
``<NAME>, <NAME>, and <NAME>. 1985.
"An algorithm to construct continuous cartograms."
Professional Geographer 37:75-81``
"""
from gpd_lite_toolbox.cycartogram import make_cartogram
return make_cartogram(gdf, field_name, iterations, inplace)
def intersects_byid(geoms1, geoms2):
"""
Return a table with a row for each features of *geoms1*, containing the id
of each *geoms2* intersecting features (almost like an intersecting matrix).
Parameters
----------
geoms1: GeoSeries or GeoDataFrame
Collection on which the intersecting table will be based.
geoms2: GeoSeries or GeoDataFrame
Collection to test on intersects.
Returns
-------
intersect_table: pandas.Series
A Series with the same index id as geoms1, each row containing the ids of
the features of geoms2 intersecting it.
"""
return geoms1.geometry.apply(
lambda x: [i for i in range(len(geoms2.geometry))
if x.intersects(geoms2.geometry[i])]
)
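# Hedged usage sketch (illustrative geometries only): build the per-feature
# intersection table for two tiny polygon collections.
def _demo_intersects_byid():
    squares = GeoDataFrame(geometry=[
        Polygon([(0, 0), (1, 0), (1, 1), (0, 1)]),
        Polygon([(2, 0), (3, 0), (3, 1), (2, 1)]),
    ])
    others = GeoDataFrame(geometry=[
        Polygon([(0.5, 0.5), (1.5, 0.5), (1.5, 1.5), (0.5, 1.5)]),
    ])
    # Row 0 is [0] (the first square overlaps the only feature of `others`),
    # row 1 is [] (the second square is disjoint from it).
    return intersects_byid(squares, others)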
def dissolve(gdf, colname, inplace=False):
"""
Parameters
----------
gdf: GeoDataFrame
The geodataframe to dissolve
colname: String
The label of the column containing the common values used to dissolve
the collection.
Returns
-------
Return a new :py:obj:`geodataframe` with
dissolved features around the selected columns.
"""
if not inplace:
gdf = gdf.copy()
df2 = gdf.groupby(colname)
gdf.set_index(colname, inplace=True)
gdf['geometry'] = df2.geometry.apply(shapely.ops.unary_union)
gdf.reset_index(inplace=True)
gdf.drop_duplicates(colname, inplace=True)
gdf.set_index(pd.Int64Index([i for i in range(len(gdf))]),
inplace=True)
if not inplace:
return gdf
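# Hedged usage sketch: dissolve three squares on a common attribute. The
# attribute values and coordinates are invented for illustration.
def _demo_dissolve():
    gdf = GeoDataFrame(
        {'region': ['a', 'a', 'b']},
        geometry=[
            Polygon([(0, 0), (1, 0), (1, 1), (0, 1)]),
            Polygon([(1, 0), (2, 0), (2, 1), (1, 1)]),
            Polygon([(5, 0), (6, 0), (6, 1), (5, 1)]),
        ])
    # The two touching 'a' squares are merged into one 2x1 rectangle while
    # the lone 'b' square is returned unchanged.
    return dissolve(gdf, 'region')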
def multi_to_single(gdf):
"""
Return a new geodataframe with exploded geometries (where each feature
has a single-part geometry).
Parameters
----------
gdf: GeoDataFrame
The input GeoDataFrame to explode to single part geometries.
Returns
-------
gdf: GeoDataFrame
The exploded result.
See-also
--------
The method GeoDataFrame.explode() in recent versions of **geopandas**.
"""
values = gdf[[i for i in gdf.columns if i != 'geometry']]
geom = gdf.geometry
geoms, attrs = [], []
for i in range(len(gdf)):
try:
for single_geom in geom.iloc[i]:
geoms.append(single_geom)
attrs.append(values.iloc[i])
except:
geoms.append(geom.iloc[i])
attrs.append(values.iloc[i])
return GeoDataFrame(attrs, index=[i for i in range(len(geoms))],
geometry=geoms,
columns=[i for i in gdf.columns if i != 'geometry'])
def snap_to_nearest(pts_ref, target_layer, inplace=False,
searchframe=50, max_searchframe=500):
"""
Snap each point from :py:obj:`pts_ref` on the nearest
line-segment/polygon-vertex of :py:obj:`target_layer` according to a
*searchframe* defined in units of both two input layers.
Append inplace or return a new object.
(A larger search frame can be set in *max_searchframe*: the search frame
will be progressively increased from *searchframe* to *max_searchframe* in
order to snap the maximum number of points without using a large original
search frame.)
Parameters
----------
pts_ref: GeoDataFrame
The collection of points to snap on *target_layer*.
target_layer: GeoDataFrame
The collection of LineString or Polygon on which *pts_ref* will be
snapped, according to the *max_searchframe*.
inplace: Boolean, default=False
Append inplace or return a new GeoDataFrame containing moved points.
searchframe: Integer or float, default=50
The original searchframe (in unit of the two inputs GeoDataFrame),
which will be raised to *max_searchframe* if there is no objects to
snap on.
max_searchframe: Integer or float, default=500
The maximum searchframe around each features of *pts_ref* to search in.
Returns
-------
snapped_pts: GeoDataFrame
The snapped collection of points (or None if inplace=True, where
points are moved in the original geodataframe).
"""
new_geoms = pts_ref.geometry.values.copy()
target_geoms = target_layer.geometry.values
start_buff = searchframe
index = make_index([i.bounds for i in target_geoms])
for id_pts_ref in range(len(new_geoms)):
while True:
try:
tmp = {
(new_geoms[id_pts_ref].distance(target_geoms[fid])): fid
for fid in list(index.intersection(
new_geoms[id_pts_ref].buffer(searchframe).bounds,
objects='raw'))
}
road_ref = tmp[min(tmp.keys())]
break
except ValueError as err:
searchframe += (max_searchframe-start_buff)/3
if searchframe > max_searchframe:
break
try:
res = {new_geoms[id_pts_ref].distance(Point(x, y)): Point(x, y)
for x, y in zip(*target_geoms[road_ref].coords.xy)}
new_geoms[id_pts_ref] = res[min(res.keys())]
except NameError as err:
print(err, 'No value for {}'.format(id_pts_ref))
if inplace:
pts_ref.set_geometry(new_geoms, drop=True, inplace=True)
else:
result = pts_ref.copy()
result.set_geometry(new_geoms, drop=True, inplace=True)
return result
def dumb_multi_to_single(gdf):
"""
A "dumb" (but sometimes useful) multi-to-single function, returning a
GeoDataFrame with the first single geometry of each multi-part geometry
(and also return single geometry features untouched), so the returned
GeoDataFrame will have the same number of features.
Parameters
----------
gdf: GeoDataFrame
The input collection of features.
Returns
-------
gdf: GeoDataFrame
The exploded result.
"""
values = gdf[[i for i in gdf.columns if i != 'geometry']]
geom = gdf.geometry
geoms, attrs = [], []
for i in range(len(gdf)):
try:
for single_geom in geom.iloc[i]:
geoms.append(single_geom)
attrs.append(values.iloc[i])
break
except:
geoms.append(geom.iloc[i])
attrs.append(values.iloc[i])
return GeoDataFrame(attrs, index=[i for i in range(len(geoms))],
geometry=geoms,
columns=[i for i in gdf.columns if i != 'geometry'])
def read_spatialite(sql, conn, geom_col='geometry', crs=None,
index_col=None, coerce_float=True, params=None,
db_path=None):
"""
Wrap :py:func:`geopandas.read_postgis()` and allow to read from spatialite.
Returns
-------
gdf: GeoDataframe
Example
-------
>>> # With a connection object (conn) already instantiated:
>>> gdf = read_spatialite("SELECT PK_UID, pop_t, gdp FROM countries", conn,
geom_col="GEOM")
>>> # Without being already connected to the database :
>>> gdf = read_spatialite("SELECT PK_UID, pop_t, gdp FROM countries", None,
geom_col="GEOM",
db_path='/home/mthh/tmp/db.sqlite')
"""
from geopandas import read_postgis
if '*' in sql:
raise ValueError('Column names have to be specified')
if not conn and db_path:
conn = db_connect(db_path)
elif not conn:
raise ValueError(
'A connection object or a path to the DB have to be provided')
if sql.lower().find('select') == 0 and sql.find(' ') == 6:
sql = sql[:7] \
+ "HEX(ST_AsBinary({0})) as {0}, ".format(geom_col) + sql[7:]
else:
raise ValueError(
'Unable to understand the query')
return read_postgis(
sql, conn, geom_col=geom_col, crs=crs, index_col=index_col,
coerce_float=coerce_float, params=params
)
def mean_coordinates(gdf, id_field=None, weight_field=None):
"""
Compute the (weighted) mean coordinate(s) of a set of points. If provided,
the point(s) will be located according to *weight_field* (numerical field).
If an *id_field* is given, a mean coordinate point will be calculated for each
subset of points differentiated by this *id_field*.
Parameters
----------
gdf: GeoDataFrame
The input collection of Points.
id_field: String, optional
The label of the field which differentiates features of *gdf* into subsets
in order to get multiple mean points returned.
weight_field: String, optional
The label of the field containing a value to weight each point.
Returns
-------
mean_points: GeoDataFrame
A new GeoDataFrame with the location of the computed point(s).
"""
assert 'Multi' not in gdf.geometry.geom_type, \
"Multipart geometries aren't allowed"
fields = ['geometry']
if id_field:
assert id_field in gdf.columns
fields.append(id_field)
if weight_field:
assert weight_field in gdf.columns
fields.append(weight_field)
else:
weight_field = 'count'
tmp = gdf[fields].copy()
tmp['x'] = tmp.geometry.apply(lambda x: x.coords.xy[0][0])
tmp['y'] = tmp.geometry.apply(lambda x: x.coords.xy[1][0])
tmp.x = tmp.x * tmp[weight_field]
tmp.y = tmp.y * tmp[weight_field]
tmp['count'] = 1
if id_field:
tmp = tmp.groupby(id_field).sum()
else:
tmp = tmp.sum()
tmp = tmp.T
tmp.x = tmp.x / tmp[weight_field]
tmp.y = tmp.y / tmp[weight_field]
tmp['geometry'] = [Point(i[0], i[1]) for i in tmp[['x', 'y']].values]
return GeoDataFrame(tmp[weight_field], geometry=tmp['geometry'],
index=tmp.index).reset_index()
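# Hedged usage sketch: population-weighted mean point per city, with made-up
# coordinates and weights.
def _demo_mean_coordinates():
    pts = GeoDataFrame(
        {'city': ['a', 'a', 'b'], 'pop': [100, 300, 50]},
        geometry=[Point(0, 0), Point(4, 0), Point(10, 10)])
    # City 'a' gets a mean point at x = 3 (pulled towards the heavier point),
    # city 'b' keeps its single location.
    return mean_coordinates(pts, id_field='city', weight_field='pop')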
def random_pts_on_surface(gdf, coef=1, nb_field=None):
"""
For each polygon, return a point (or a set of points, according to
*nb_field* and *coef*), lying on the polygon surface.
Parameters
----------
gdf: GeoDataFrame
A collection of polygons on which generate points.
coef: Integer, default 1
The multiplier applied to each feature of *gdf*.
If *nb_field* is used, its values are also multiplied by this coefficient.
nb_field: String, optional
The name of the field to read, containing an integer
which will be used as the number of points to create.
Returns
-------
rand_points: GeoDataFrame
A collection of points, located on *gdf*, accordingly to *coef* and
values contained in *nb_field* if used.
"""
nb_ft = len(gdf)
if nb_field:
nb_pts = gdf[nb_field].values * coef
else:
nb_pts = np.array([coef for i in range(nb_ft)])
res = []
for i in range(nb_ft):
pts_to_create = round(nb_pts[i])
(minx, miny, maxx, maxy) = gdf.geometry[i].bounds
while True:
xpt = \
(maxx-minx) * np.random.random_sample((pts_to_create,)) + minx
ypt = \
(maxy-miny) * np.random.random_sample((pts_to_create,)) + miny
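# The rest of this loop is not reproduced above. A plausible completion
# (an assumption, not the original code) would keep only the candidate
# points that actually fall on the polygon and retry until enough were
# collected, e.g.:
#     candidates = [Point(x, y) for x, y in zip(xpt, ypt)
#                   if Point(x, y).intersects(gdf.geometry[i])]
#     if len(candidates) >= pts_to_create:
#         res.extend(candidates[:pts_to_create])
#         break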
import numpy as np
from scipy.integrate import odeint
from scipy.special import legendre, chebyt
import sys
sys.path.append('../src')
from sindy_utils import library_size
from data_manage import DataStruct
import pdb
class Lorenz:
def __init__(self,
option='delay',
coefficients=[10, 8/3, 28.],
noise=0.0,
input_dim=128,
normalization=[1/40, 1/40, 1/40],
linear=False,
poly_order=3):
self.option = option
self.sigma = coefficients[0]
self.beta = coefficients[1]
self.rho = coefficients[2]
self.noise = noise
self.input_dim = input_dim
self.normalization = np.array(normalization) if normalization is not None else np.array([1, 1, 1])
self.linear = linear
self.poly_order = poly_order
def get_solution(self, n_ics, tend, dt, ic_means=[0, 0, 25], ic_widths=[36, 48, 41], tau=None):
"""
Generate a set of Lorenz training data for multiple random initial conditions.
Arguments:
n_ics - Integer specifying the number of initial conditions to use.
tend, dt - Length and step of the simulated time window.
ic_means, ic_widths - Centre and spread of the uniform distribution the random initial conditions are drawn from.
tau - Optional time delay used when building the delay embedding.
Return:
data - DataStruct containing elements of the dataset. See generate_data()
doc string for list of contents.
"""
t = np.arange(0, tend, dt)
n_steps = len(t) - self.input_dim
if tau is not None:
n_steps = len(t) - self.input_dim * int(tau/dt)
ic_means = np.array(ic_means)
ic_widths = 2 * np.array(ic_widths)
# training data
ics = ic_widths*(np.random.rand(n_ics, 3)-.5) + ic_means
data = self.generate_data(ics, t, tau=tau)
data.x = data.x.reshape((n_steps*n_ics, self.input_dim)) \
+ self.noise * np.random.randn(n_steps * n_ics, self.input_dim)
data.dx = data.dx.reshape((n_steps*n_ics, self.input_dim)) \
+ self.noise * np.random.randn(n_steps * n_ics, self.input_dim)
data.ddx = data.ddx.reshape((n_steps*n_ics, self.input_dim)) \
+ self.noise * np.random.randn(n_steps * n_ics, self.input_dim)
full_z = data.z[0, :-self.input_dim, :]
full_dz = data.dz[0, :-self.input_dim, :]
full_ddz = data.ddz[0, :-self.input_dim, :]
for i in range(1, data.z.shape[0]):
full_z = np.concatenate((full_z, data.z[i, :-self.input_dim, :]), axis=0)
full_dz = np.concatenate((full_dz, data.dz[i, :-self.input_dim, :]), axis=0)
full_ddz = np.concatenate((full_ddz, data.ddz[i, :-self.input_dim, :]), axis=0)
data.z = full_z
data.dz = full_dz
data.ddz = full_ddz
return data
def simulate_lorenz(self, z0, t):
"""
Simulate the Lorenz dynamics.
Arguments:
z0 - Initial condition in the form of a 3-value list or array.
t - Array of time points at which to simulate.
sigma, beta, rho - Lorenz parameters
Returns:
z, dz, ddz - Arrays of the trajectory values and their 1st and 2nd derivatives.
"""
f = lambda z,t : [self.sigma*(z[1] - z[0]), z[0]*(self.rho - z[2]) - z[1], z[0]*z[1] - self.beta*z[2]]
df = lambda z,dz,t : [self.sigma*(dz[1] - dz[0]),
dz[0]*(self.rho - z[2]) + z[0]*(-dz[2]) - dz[1],
dz[0]*z[1] + z[0]*dz[1] - self.beta*dz[2]]
z = odeint(f, z0, t)
dt = t[1] - t[0]
dz = np.zeros(z.shape)
ddz = np.zeros(z.shape)
for i in range(t.size):
dz[i] = f(z[i],dt*i)
ddz[i] = df(z[i], dz[i], dt*i)
return z, dz, ddz
def generate_data(self, ics, t, tau=None):
"""
Generate high-dimensional Lorenz data set.
Arguments:
ics - Nx3 array of N initial conditions
t - array of time points over which to simulate
tau - optional time delay between the coordinates of the delay embedding
Note: the size of the high-dimensional dataset (input_dim), the linear flag
(if True the dataset is a linear combination of the Lorenz dynamics, otherwise
cubic modes are also included), the normalization of the 3 Lorenz variables and
the Lorenz parameters sigma, beta, rho are taken from the instance attributes.
Returns:
data - Dictionary containing elements of the dataset. This includes the time points (t),
spatial mapping (y_spatial), high-dimensional modes used to generate the full dataset
(modes), low-dimensional Lorenz dynamics (z, along with 1st and 2nd derivatives dz and
ddz), high-dimensional dataset (x, along with 1st and 2nd derivatives dx and ddx), and
the true Lorenz coefficient matrix for SINDy.
"""
n_ics = ics.shape[0]
n_steps = t.size - self.input_dim # careful consistency
dt = t[1]-t[0]
d = 3
z = np.zeros((n_ics, t.size, d))
dz = np.zeros(z.shape)
#!/usr/bin/env python
"""
This contains some of what is common to all of the peak finding algorithms.
Note on handling RQE differences:
1) The images are corrected at loading for RQE differences, which is done
by dividing the image be the measured camera RQE.
2) The peak finding classes use this corrected image. I think this is not
quite correct from a statistical perspective, but is simple and works well
for the small RQE differences (~5%) that are usually encountered. "Works
well" means that the finder is noticeably biased against localizations in
areas with lower than average RQE.
3) The peak fitting (C library) undoes the RQE correction, it multiplies
the image it receives by the RQE correction. This way the fitting is correct,
at least in a statistical sense.
Hazen 06/19
"""
import numpy
import os
import tifffile
import storm_analysis.sa_library.i3dtype as i3dtype
import storm_analysis.sa_library.ia_utilities_c as iaUtilsC
import storm_analysis.sa_library.matched_filter_c as matchedFilterC
import storm_analysis.sa_library.parameters as params
import storm_analysis.sa_library.sa_h5py as saH5Py
import storm_analysis.simulator.draw_gaussians_c as dg
#
# Functions.
#
def gaussianPSF(shape, sigma):
"""
Return a normalized 2D Gaussian, usually used for creating MatchedFilter objects.
"""
psf = dg.drawGaussiansXY(shape,
numpy.array([0.5*shape[0]]),
numpy.array([0.5*shape[1]]),
sigma = sigma)
return psf/numpy.sum(psf)
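# Hedged usage sketch: build a small normalized Gaussian PSF. Shape and sigma
# are arbitrary illustration values.
def _demoGaussianPSF():
    psf = gaussianPSF([64, 64], 1.5)
    # The PSF is centred in the array and sums to 1.0 by construction.
    assert abs(numpy.sum(psf) - 1.0) < 1.0e-6
    return psf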
def getPeakLocations(peak_filename, margin, pixel_size, sigma):
"""
This is for if you already know where your want fitting to happen, as
for example in a bead calibration movie and you just want to use the
approximate locations as inputs for fitting.
There are two choices for peak_locations file format:
1. A text file with the peak x, y, height and background values as
white spaced columns (x and y positions are in pixels as determined
using visualizer).
1.0 2.0 1000.0 100.0
10.0 5.0 2000.0 200.0
...
2. An HDF5 format localization file. This is treated in a similar
fashion to the text file in that all of the locations are loaded.
If the fields 'xsigma' or 'ysigma' exist they will be used for
the initial X/Y sigma values of the localization.
"""
if os.path.exists(peak_filename):
print("Using peak starting locations specified in", peak_filename)
elif os.path.exists(os.path.basename(peak_filename)):
peak_filename = os.path.basename(peak_filename)
print("Using peak starting locations specified in", peak_filename)
# Check if the file is a storm-analysis HDF5 file.
#
if saH5Py.isSAHDF5(peak_filename):
peak_locations_type = "hdf5"
peak_locations = saH5Py.loadLocalizations(peak_filename)
if not "ysigma" in peak_locations:
if not "xsigma" in peak_locations:
peak_locations["xsigma"] = numpy.ones(peak_locations["x"].size) * sigma
peak_locations["ysigma"] = peak_locations["xsigma"].copy()
else:
peak_locations_type = "text"
# Load peak x,y locations.
peak_locs = numpy.loadtxt(peak_filename, ndmin = 2)
# Create peak dictionary.
peak_locations = {"background" : peak_locs[:,3],
"height" : peak_locs[:,2],
"x" : peak_locs[:,0],
"y" : peak_locs[:,1]}
peak_locations["xsigma"] = numpy.ones(peak_locations["x"].size) * sigma
peak_locations["ysigma"] = numpy.ones(peak_locations["x"].size) * sigma
peak_locations["z"] = numpy.zeros(peak_locations["x"].size)
# Adjust positions for finding/fitting margin.
peak_locations["x"] += margin
peak_locations["y"] += margin
print("Loaded", peak_locations["x"].size, "peak locations")
#
# We return peak_locations_type as the caller might want to do different
# things if the file is text, like initialize the Z value.
#
return [peak_locations, peak_locations_type]
def padArray(ori_array, pad_size):
"""
Pads out an array to a large size.
ori_array - A 2D numpy array.
pad_size - The number of elements to add to each of the "sides" of the array.
The padded 2D numpy array.
"""
if (pad_size > 0):
[x_size, y_size] = ori_array.shape
lg_array = numpy.ones((x_size+2*pad_size,y_size+2*pad_size))
lg_array[pad_size:(x_size+pad_size),pad_size:(y_size+pad_size)] = ori_array.astype(numpy.float64)
lg_array[0:pad_size,:] = numpy.flipud(lg_array[pad_size:2*pad_size,:])
lg_array[(x_size+pad_size):(x_size+2*pad_size),:] = numpy.flipud(lg_array[x_size:(x_size+pad_size),:])
lg_array[:,0:pad_size] = numpy.fliplr(lg_array[:,pad_size:2*pad_size])
lg_array[:,(y_size+pad_size):(y_size+2*pad_size)] = numpy.fliplr(lg_array[:,y_size:(y_size+pad_size)])
return lg_array
else:
return ori_array
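# Hedged usage sketch: pad a small array by mirroring its edges (illustrative
# values only).
def _demoPadArray():
    small = numpy.arange(16).reshape(4, 4).astype(numpy.float64)
    padded = padArray(small, 2)
    # The result is (4 + 2*2) x (4 + 2*2) with the original block in the centre
    # and mirrored copies along the borders.
    assert padded.shape == (8, 8)
    assert numpy.allclose(padded[2:6, 2:6], small)
    return padded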
def peakMask(shape, parameters, margin):
"""
Return the array that is used to mask the image to reduce the ROI
where peaks can be found.
"""
peak_mask = numpy.ones(shape)
# Check for circular AOI.
if parameters.hasAttr("x_center"):
assert parameters.hasAttr("y_center"), "Y center must be specified."
assert parameters.hasAttr("aoi_radius"), "AOI radius must be specified."
rr = parameters.getAttr("aoi_radius")
xc = parameters.getAttr("x_center") + margin
yc = parameters.getAttr("y_center") + margin
rr = rr*rr
xr = numpy.arange(peak_mask.shape[0]) - xc
yr = numpy.arange(peak_mask.shape[0]) - yc
xv, yv = numpy.meshgrid(xr, yr)
peak_mask[((xv*xv + yv*yv) > rr)] = 0
# This catches 'y_center' without 'x_center'.
elif parameters.hasAttr("y_center"):
assert parameters.hasAttr("x_center"), "X center must be specified."
# This catches 'aoi_radius' without 'x_center'.
elif parameters.hasAttr("aoi_radius"):
assert parameters.hasAttr("x_center"), "X center must be specified."
# Check for square AOI
else:
if parameters.hasAttr("x_start"):
peak_mask[:,0:parameters.getAttr("x_start")+margin] = 0.0
if parameters.hasAttr("x_stop"):
peak_mask[:,parameters.getAttr("x_stop")+margin:-1] = 0.0
if parameters.hasAttr("y_start"):
peak_mask[0:parameters.getAttr("y_start")+margin,:] = 0.0
if parameters.hasAttr("y_stop"):
peak_mask[parameters.getAttr("y_stop")+margin:-1,:] = 0.0
return peak_mask
#
# Classes.
#
class FittingException(Exception):
pass
class PeakFinder(object):
"""
Base class for peak finding. This handles identification of peaks in an image.
If you want to modify this with a custom peak finder or an alternative
way to estimate the background, the recommended approach is to sub-class this
class and then modify backgroundEstimator(), newImage() and peakFinder().
"""
def __init__(self, parameters = None, **kwds):
"""
This is called once at the start of analysis to initialize the
parameters that will be used for peak fitting.
parameters - A parameters object.
"""
super(PeakFinder, self).__init__(**kwds)
# Initialized from parameters.
self.find_max_radius = parameters.getAttr("find_max_radius") # Radius (in pixels) over which the maxima is maximal.
self.iterations = parameters.getAttr("iterations") # Maximum number of cycles of peak finding, fitting and subtraction to perform.
self.sigma = parameters.getAttr("sigma") # Peak sigma (in pixels).
self.threshold = parameters.getAttr("threshold") # Peak minimum threshold in units of sigma (as in "3 sigma effect").
self.check_mode = parameters.getAttr("check_mode")
# Other member variables.
self.background = None # Current estimate of the image background.
self.bg_filter = None # Background MatchedFilter object.
self.camera_variance = None # Camera variance, only relevant for a sCMOS camera.
#self.check_mode = True #False # Run in diagnostic mode. Only useful for debugging.
self.image = None # The original image.
self.margin = None # Size of the unanalyzed "edge" around the image.
self.mfinder = None # The maxima finder.
self.parameters = parameters # Keep access to the parameters object.
self.peak_locations = None # Initial peak locations, as explained below.
self.peak_locations_type = None # Initial peak locations type.
self.peak_mask = None # Mask for limiting peak identification to a particular AOI.
self.pf_iterations = 0 # Keep track of the total number of iterations that were performed.
# Print warning about check mode
if self.check_mode:
print("Warning! Running in check mode!")
# Only do one cycle of peak finding as we'll always return the same locations.
if parameters.hasAttr("peak_locations"):
if (self.iterations != 1):
print("WARNING: setting number of iterations to 1!")
self.iterations = 1
def backgroundEstimator(self, image):
"""
This method does the actual background estimation. It is just a simple
low pass filter.
Override this if you want to change how the background is estimated.
FIXME: Convolution should be weighted by the camera variance if it is
not uniform?
"""
return self.bg_filter.convolve(image)
def cleanUp(self):
print(" ", self.pf_iterations, "peak finding iterations.")
print()
if self.bg_filter is not None:
self.bg_filter.cleanup()
def estimateBackground(self, fit_peaks_image, bg_estimate):
"""
Estimate the background for the image.
fit_peaks_image - The current best fit image.
bg_estimate - An estimate of the background.
Returns the current background estimate.
"""
# If we are provided with an estimate of the background then just use it.
if bg_estimate is not None:
self.background = bg_estimate
# Otherwise make our own estimate.
else:
image = self.image - fit_peaks_image
self.background = self.backgroundEstimator(image)
if self.check_mode:
with tifffile.TiffWriter("bg_estimate.tif") as tf:
tf.save(self.background.astype(numpy.float32))
return self.background
def findPeaks(self, fit_peaks_image):
"""
Finds the peaks in the image.
fit_peaks_image - The current fit image.
Return [new peaks, new peak type, done]
"""
self.pf_iterations += 1
# Use pre-specified peak locations if available, e.g. bead calibration.
if self.peak_locations is not None:
return [self.peak_locations, self.peak_locations_type, True]
# Otherwise, identify local maxima in the image.
new_peaks = self.peakFinder(fit_peaks_image)
# Update new peak identification threshold (if necessary).
# Also, while threshold is greater than min_threshold we
# are automatically not done.
if (self.cur_threshold > self.threshold):
self.cur_threshold -= 1.0
return [new_peaks, "finder", False]
# If we did not find any new peaks then we may be done.
if (new_peaks["x"].size == 0):
return [new_peaks, "finder", True]
else:
return [new_peaks, "finder", False]
def newImage(self, new_image):
"""
This is called once at the start of the analysis of a new image.
new_image - A 2D numpy array.
"""
# Make a copy of the starting image.
#
# FIXME: Is this copy necessary? We're not doing this in multiplane.
#
self.image = numpy.copy(new_image)
# Initialize new peak minimum threshold. If we are doing more
# than one iteration we start a bit higher and come down to
# the specified threshold.
if(self.iterations>4):
self.cur_threshold = self.threshold + 4.0
else:
self.cur_threshold = self.threshold + float(self.iterations)
# Create mask to limit peak finding to a user defined sub-region of the image.
if self.peak_mask is None:
self.peak_mask = peakMask(new_image.shape, self.parameters, self.margin)
# Create filter objects if necessary.
if self.bg_filter is None:
# Create matched filter for background.
bg_psf = gaussianPSF(new_image.shape, self.parameters.getAttr("background_sigma"))
self.bg_filter = matchedFilterC.MatchedFilter(bg_psf,
fftw_estimate = self.parameters.getAttr("fftw_estimate"),
memoize = True,
max_diff = 1.0e-3)
# Reset maxima finder.
self.mfinder.resetTaken()
def padArray(self, np_array):
"""
Return a version of array padded to the correct size.
"""
return padArray(np_array, self.margin)
def peakFinder(self, fit_peaks_image):
"""
Sub-classes must provide this method.
"""
raise FittingException("Finder had no peakFinder() method.")
def setVariance(self, camera_variance):
"""
Set the camera variance, usually used in sCMOS analysis.
"""
self.camera_variance = self.padArray(camera_variance)
return self.camera_variance
class PeakFinderGaussian(PeakFinder):
"""
This is the peak finder for 3D-DAOSTORM and sCMOS, it handles Gaussian shaped peaks.
"""
def __init__(self, parameters = None, **kwds):
"""
This is called once at the start of analysis to initialize the
parameters that will be used for peak fitting.
parameters - A parameters object.
"""
kwds["parameters"] = parameters
super(PeakFinderGaussian, self).__init__(**kwds)
# Figure out what margin and ROI to use.
if (self.parameters.getAttr("roi_size", -1) != -1):
self.roi_size = parameters.getAttr("roi_size")
else:
# Calculate roi size based on sigma.
self.roi_size = int(8.0 * self.sigma)
# Make it even larger for variable width fitters.
if(parameters.getAttr("model") != "2dfixed"):
self.roi_size = int(1.5 * self.roi_size)
self.margin = int(self.roi_size/2 + 2)
# Initialized from parameters.
self.z_value = self.parameters.getAttr("z_value", 0.0) # The starting z value to use for peak fitting.
# Other member variables.
self.fg_mfilter = None # Foreground MatchedFilter object (may be None).
self.fg_vfilter = None # Foreground variance MatchedFilter object, will
# be none if self.fg_mfilter is None.
# Configure maxima finder.
#
self.mfinder = iaUtilsC.MaximaFinder(margin = self.margin,
radius = self.find_max_radius,
threshold = self.threshold,
z_values = [self.z_value])
# Load peak locations if specified.
#
# FIXME: The starting z value is always 0.0. Not sure why we don't use
# self.z_value for this. Though I guess it would only really be
# relevant for the 'Z' fitting model.
#
if parameters.hasAttr("peak_locations"):
[self.peak_locations, self.peak_locations_type] = getPeakLocations(parameters.getAttr("peak_locations"),
self.margin,
parameters.getAttr("pixel_size"),
self.sigma)
def cleanUp(self):
super(PeakFinderGaussian, self).cleanUp()
if self.fg_mfilter is not None:
self.fg_mfilter.cleanup()
self.fg_vfilter.cleanup()
def getROISize(self):
return self.roi_size
def newImage(self, new_image):
super(PeakFinderGaussian, self).newImage(new_image)
#
# Create matched filter for foreground as well as a matched filter
# for calculating the expected variance of the background if it was
# smoothed on the same scale as the foreground.
#
if (self.fg_mfilter is None) and self.parameters.hasAttr("foreground_sigma"):
if (self.parameters.getAttr("foreground_sigma") > 0.0):
fg_psf = gaussianPSF(new_image.shape, self.parameters.getAttr("foreground_sigma"))
self.fg_mfilter = matchedFilterC.MatchedFilter(fg_psf,
fftw_estimate = self.parameters.getAttr("fftw_estimate"),
memoize = True,
max_diff = 1.0e-3)
self.fg_vfilter = matchedFilterC.MatchedFilter(fg_psf * fg_psf,
fftw_estimate = self.parameters.getAttr("fftw_estimate"),
memoize = True,
max_diff = 1.0e-3)
def peakFinder(self, fit_peaks_image):
"""
This method does the actual peak finding.
"""
# Calculate background variance.
#
# Note the assumption here that we are working in units of photo-electrons
# so Poisson statistics applies, variance = mean.
#
bg_var = self.background + fit_peaks_image
# Add camera variance if set.
if self.camera_variance is not None:
bg_var += self.camera_variance
# Calculate weighted variance if the image is being smoothed.
if self.fg_vfilter is not None:
bg_var = self.fg_vfilter.convolve(bg_var)
if self.check_mode:
with tifffile.TiffWriter("variances.tif") as tf:
tf.save(bg_var.astype(numpy.float32))
# Remove problematic values.
#
mask = (bg_var <= 0.1)
if (numpy.sum(mask) > 0):
if self.check_mode:
print("Warning! small and/or negative values detected in background variance!")
bg_var[mask] = 0.1
# Convert to standard deviation.
bg_std = numpy.sqrt(bg_var)
# Calculate foreground.
foreground = self.image - self.background - fit_peaks_image
# Calculate smoothed image if we have a foreground filter.
if self.fg_mfilter is not None:
foreground = self.fg_mfilter.convolve(foreground)
if self.check_mode:
with tifffile.TiffWriter("foreground.tif") as tf:
tf.save(foreground.astype(numpy.float32))
# Calculate foreground in units of signal to noise.
foreground = foreground/bg_std
if self.check_mode:
with tifffile.TiffWriter("fg_bg_ratio.tif") as tf:
tf.save(foreground.astype(numpy.float32))
# Mask the image so that peaks are only found in the AOI.
masked_image = foreground * self.peak_mask
# Identify local maxima in the masked image.
[x, y, z] = self.mfinder.findMaxima([masked_image])
return {"x" : x, "y" : y, "z" : z, "sigma" : numpy.ones(x.size)}
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.axes import Axes
def exponential(a: float, b: float):
def wrapper(xs):
return a * np.exp(b * xs)
return wrapper
def exponential_sketch(ax, a, b, xlabel, ylabel):
ax.plot(xs, exponential(a, b)(xs))
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.tick_params(
bottom=False, labelbottom=False, right=False, left=False, labelleft=False
)
def interaction(ax: Axes):
return exponential_sketch(ax, 10, 0.7, "distance", "Number of interactions")
def probability(ax: Axes):
return exponential_sketch(
ax, 0.088, -1.5, "distance", "Infection chance per interaction"
)
xs = np.arange(0, 3, 0.1)
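# Hedged usage sketch (the layout is an assumption, not from the original
# script): draw the two exponential sketches side by side.
def _demo_sketches():
    fig, (ax_left, ax_right) = plt.subplots(1, 2, figsize=(8, 3))
    interaction(ax_left)
    probability(ax_right)
    fig.tight_layout()
    return fig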
import numpy as np
from ektelo import matrix
from scipy import sparse
from collections import OrderedDict
from functools import reduce
from mbi import Factor, FactoredInference
import argparse
import benchmarks
import time
"""
Script for running MWEM and MWEM+PGM on adult dataset. Note that pure MWEM fails after a few iterations, requiring too much memory to proceed, while MWEM+PGM is able to run until completion.
<NAME>, <NAME>, and <NAME>. "A simple and practical algorithm for differentially private data release." In Advances in Neural Information Processing Systems, pp. 2339-2347. 2012.
Note that we run the ordinary version of MWEM that selects a single query at each iteration. An enhanced version of MWEM can select an entire marginal to measure at once (because the queries in the marginal compose in parallel), but this version doesn't apply directly because the workload consists of range-marginals.
"""
class ProductDist:
""" factored representation of data from MWEM paper """
def __init__(self, factors, domain, total):
"""
:param factors: a list of contingency tables,
defined over disjoint subsets of attributes
:param domain: the domain object
:param total: known or estimated total
"""
self.factors = factors
self.domain = domain
self.total = total
for a in domain:
if not any(a in f.domain for f in factors):
sub = domain.project([a])
x = np.ones(domain[a]) / domain[a]
factors.append(Factor(sub, x))
def project(self, cols):
domain = self.domain.project(cols)
factors = []
for factor in self.factors:
pcol = [c for c in cols if c in factor.domain]
if pcol != []:
factors.append(factor.project(pcol))
return ProductDist(factors, domain, self.total)
def datavector(self):
ans = reduce(lambda x,y: x*y, self.factors, 1.0)
ans = ans.transpose(self.domain.attrs)
return ans.values.flatten() * self.total
class FactoredMultiplicativeWeights:
def __init__(self, domain, iters = 100):
self.domain = domain
self.iters = iters
def infer(self, measurements, total):
self.multWeightsFast(measurements, total)
return self.model
def multWeightsFast(self, measurements, total):
domain = self.domain
groups, projections = _cluster(measurements)
factors = []
for group, proj in zip(groups, projections):
dom = self.domain.project(proj)
fact = Factor.uniform(dom)
for i in range(self.iters):
update = Factor.zeros(dom)
for Q, y, noise_scale, p in group:
dom2 = dom.project(p)
hatx = fact.project(p).values.flatten()*total
error = y - Q.dot(hatx)
update += Factor(dom2, Q.T.dot(error).reshape(dom2.shape))
fact *= np.exp(update / (2*total))
fact /= fact.sum()
factors.append(fact)
self.model = ProductDist(factors, self.domain, total)
def _cluster(measurement_cache):
"""
Cluster the measurements into disjoint subsets by finding the connected
components of the graph implied by the measurement projections
"""
# create the adjacency matrix
k = len(measurement_cache)
G = sparse.dok_matrix((k,k))
for i, (_, _, _, p) in enumerate(measurement_cache):
for j, (_, _, _, q) in enumerate(measurement_cache):
if len(set(p) & set(q)) >= 1:
G[i,j] = 1
# find the connected components and group measurements
ncomps, labels = sparse.csgraph.connected_components(G)
groups = [ [] for _ in range(ncomps) ]
projections = [ set() for _ in range(ncomps) ]
for i, group in enumerate(labels):
groups[group].append(measurement_cache[i])
projections[group] |= set(measurement_cache[i][3])
projections = [tuple(p) for p in projections]
return groups, projections
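# Hedged usage sketch: measurements sharing an attribute land in the same
# cluster. The (Q, y, noise) entries do not matter for the grouping itself,
# so None placeholders are used here.
def _demo_cluster():
    cache = [(None, None, None, ('age', 'income')),
             (None, None, None, ('income', 'zip')),
             (None, None, None, ('marital',))]
    groups, projections = _cluster(cache)
    # Two clusters are returned: one covering {age, income, zip} (linked
    # through 'income') and one covering {marital}.
    return groups, projections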
def average_error(workload, data, est):
errors = []
for ax, W in workload:
x = data.project(ax).datavector()
xest = est.project(ax).datavector()
ans = W.dot(x - xest)
err = np.linalg.norm(W.dot(x-xest), 1) / np.linalg.norm(W.dot(x), 1)
errors.append(err)
return np.mean(errors)
import gym
from gym import error, spaces, utils
from gym.utils import seeding
import numpy as np
import pybullet as p
import pybullet_data
import time
from sklearn.preprocessing import normalize
MAX_EPISODE_LEN = 500 # Number of steps for one training episode
REWARD_FACTOR = 1000
REWARD_WEIGHT_1 = 1.0
REWARD_WEIGHT_2 = 1.0
BOUND_ANGLE = 40
STEP_ANGLE = 15 # Maximum angle delta per step
class OpenCatGymEnv(gym.Env):
""" Gym environment (stable baselines 3) for OpenCat robots.
"""
metadata = {'render.modes': ['human']}
def __init__(self):
self.step_counter = 0
self.state_robot_history = np.array([])
self.jointAngles_history = np.array([])
self.boundAngles = np.deg2rad(BOUND_ANGLE)
p.connect(p.GUI) #, options="--width=960 --height=540 --mp4=\"training.mp4\" --mp4fps=60") # uncomment to create a video
p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0)
p.resetDebugVisualizerCamera(cameraDistance=0.5, cameraYaw=-10, cameraPitch=-40, cameraTargetPosition=[0.4,0,0])
# The action space is made up of the 8 joint angles
self.action_space = spaces.Box(np.array([-1]*8), np.array([1]*8))
# The observation space consists of the torso roll and pitch, the joint angles, and a history of the last 20 joint angles
self.observation_space = spaces.Box(np.array([-1]*166), np.array([1]*166))
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
Name: NetPanelAnalysis
Function: compute the puncture force, puncture displacement and energy-dissipation capacity of a ring-net panel
Note: SI units
Version: 1.0.3
Author: <NAME>
Date: 2021/4/7
Naming convention: quantities parallel to the x and y directions are distinguished by the corresponding suffix
Remark: details that influence the computed results:
(1) the angular difference between the straight load-transfer fibres and the load path on the deformed net surface, 1.5F;
(2) the three-ring chain tension test represents the most unfavourable loading case of a net ring; in an actual panel the axial stress developed in the rings along the load path may exceed the chain-test value, +0.18
(3) the strength of the clamping clips must be ensured; once a clip is too weak, the wires slip and fail, which lowers the bearing capacity of the panel.
(4) this version fixes an error: in the ring puncture tests, R3/2.2/300 is actually R4/2.2/300, and R4/2.2/300 is actually R4/3.0/300.
(5) because of (3), the test result of panel test RN3 was discarded and only the R4/3.0/300 result is kept
(6) because of (3), the panel type R4/2.2/300 made of 2.2 mm wire was computed separately
(7) Bug: the fitted gamma_N2 can be inconsistent with the fitted FN2
(8) due to the bilinear stiffness of the spring-fibre element, F_2 and E_2 are corrected element by element so that each element follows the actual stiffness of its loading stage
(9) swinging of the steel posts, residual height and the curtain effect
'''
import numpy as np
from userfunc_NPA import *
# 参数输入----------------------------------------------------------------------------------- #
if __name__ == '__main__':
# MULTIPLE FACTORS INPUT
nw = 5 # number of turns of wire per net ring
d = func_return_d(nw) # diameter of the steel wire used to wind the rings
D = 0.3 # diameter of a single net ring
Rp = 1.0/2 # horizontal projected radius of the loading head; for a polygonal head, taken as inscribed in a circle of radius Rp
w = 3.0
# ns = 12
# w = (np.sqrt(2)*(ns-1)+1)*0.3+0.2-0.0455844122715714 # short-side length of a rectangular panel
# print('w=',w)
kappa = 1 # panel aspect ratio: a constant greater than 1
ls0_PQ = 0.05 # initial spring length
ls0_CD = 0.05 # initial spring length
ex = 0.0 # eccentricity of the loading position
ey = 0.0 # eccentricity of the loading position
sigma_y = 1770e6 # yield strength of the steel wire
blockShape = 'round' # blockShape must be 'Round' or 'Polygon'!
curtain = False # whether the curtain effect is considered: True = considered, False = ignored!
boundary = 'Flexible' # boundary must be 'Rigid' or 'Flexible'!
A = nw * np.pi*d**2/4 # cross-sectional area of a single strand
a = np.pi*D/(2*(1+kappa)) # short-side length of a deformed net ring
mPQ, mCD = func_m(blockShape,Rp,kappa,a) # number of force vectors in the x (PQ) and y (CD) directions of the coordinate system
print('mPQ, mCD=',mPQ, mCD)
Rope1770Steel = {'3':5.29,'4':9.40,'5':14.7,'6':21.2,'7':28.8,'8':37.6,'9':47.6,'10':58.8,'11':71.1,'12':84.6,'13':99.3,'14':115,'16':150,'18':190,'20':235,'22':284,'24':338}
sigma_rope = 1770e6
E_rope = 94.5e9
d_ropePQ = 18
F_ropePQ = 1000*Rope1770Steel[str(d_ropePQ)] # factor 1000 converts to SI units
l0_ropePQ = kappa*w # initial length of the wire rope
sigma_ropePQ = sigma_rope
E_ropePQ = E_rope # elastic modulus of the wire rope
d_ropeCD = 18
F_ropeCD = 1000*Rope1770Steel[str(d_ropeCD)] # factor 1000 converts to SI units; breaking force of the wire rope
l0_ropeCD = w # initial length of the wire rope
sigma_ropeCD = sigma_rope # stress strength of the wire rope
E_ropeCD = E_rope # elastic modulus of the wire rope
lb_onePQ = 0.8 # maximum stroke of a single energy dissipator, 800 mm
b_numPQ = 0 # number of dissipators connected in series at both ends of the boundary rope (connection type: in series!!)
lb_maxPQ = b_numPQ * lb_onePQ # total elongation of the dissipators in series at both ends of the boundary rope
lb_oneCD = 0.8 # maximum stroke of a single energy dissipator, 800 mm
b_numCD = 0 # number of dissipators connected in series at one end of the boundary rope (connection type: in series!!)
lb_maxCD = b_numCD * lb_oneCD # total elongation of the dissipators in series at both ends of the boundary rope
# Ring-chain tension test -------------------------------------------------------------------- #
# Convert the three-ring chain test force-displacement data into the bilinear (E1, E2) stress-strain law of the fibre in the spring-fibre element
FN1, FN2, lN0, lN1, lN2, gamma_N1, gamma_N2 = func_ringChianDataFit(nw, sigma_y, d)
E1 = FN1*lN0/(2*A*(lN1 - lN0))
E2 = (FN2-FN1)*lN0 / (2*A*(lN2 - lN1))
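# Interpretation (inferred from the two formulas above, not stated in the
# original): E1 and E2 are the secant moduli of the bilinear fibre law,
# E1 = (FN1/(2*A)) / ((lN1-lN0)/lN0) for the initial branch and
# E2 = ((FN2-FN1)/(2*A)) / ((lN2-lN1)/lN0) for the hardening branch, with the
# factor 2*A accounting for the two load-carrying strands of each ring.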
gamma_ave = gamma_N2/4 # average distributed load intensity acting on the wire rope under a flexible boundary
dictRigid = {'ks':1e20}
dictRopePQ = {'l0_rope':l0_ropePQ,'m':mPQ,'F_rope':F_ropePQ,'sigma_rope':sigma_ropePQ,'E_rope':E_ropePQ}
dictRopeCD = {'l0_rope':l0_ropeCD,'m':mCD,'F_rope':F_ropeCD,'sigma_rope':sigma_ropeCD,'E_rope':E_ropeCD}
dictBrakerPQ = {'lb_one':lb_maxPQ,'b_num':b_numPQ,'lb_max':lb_maxPQ}
dictBrakerCD = {'lb_one':lb_maxCD,'b_num':b_numCD,'lb_max':lb_maxCD}
dictFiber = {'gamma_ave':gamma_ave,'sigma_y':sigma_y,'gamma_N2':gamma_N2,'A':A}
dictBoundaryPQ = {**dictRigid,**dictRopePQ,**dictBrakerPQ,**dictFiber} # parameter dictionary of the wire rope connected along PQ
dictBoundaryCD = {**dictRigid,**dictRopeCD,**dictBrakerCD,**dictFiber} # parameter dictionary of the wire rope connected along CD
ks_PQ = func_ks(boundary,**dictBoundaryPQ) # spring stiffness, representing either the rigid or the flexible boundary
ks_CD = func_ks(boundary,**dictBoundaryCD) # spring stiffness, representing either the rigid or the flexible boundary
#ks11 = 50000000
#ks_PQ = ks11
#ks_CD = ks11
print('ks_PQ=',ks_PQ)
print('ks_CD=',ks_CD)
func_inputCheck(nw,d,D,Rp,w,kappa,ks_PQ,ks_CD,ls0_PQ,ls0_CD,ex,ey) # check the input parameters for errors
# Initial length of each fibre-spring element
L0_PQxy , L0_CDxy = func_xyz(blockShape, curtain, '+x+y', w, kappa, Rp, a, ex, ey, 0)
L0_PQ_xy , L0_CD_xy = func_xyz(blockShape, curtain, '-x+y', w, kappa, Rp, a, ex, ey, 0)
L0_PQx_y , L0_CDx_y = func_xyz(blockShape, curtain, '+x-y', w, kappa, Rp, a, ex, ey, 0)
L0_PQ_x_y, L0_CD_x_y= func_xyz(blockShape, curtain, '-x-y', w, kappa, Rp, a, ex, ey, 0)
# Initial fibre length within each fibre-spring element
lf0_PQxy , lf0_CDxy = L0_PQxy -ls0_PQ, L0_CDxy -ls0_CD
lf0_PQ_xy , lf0_CD_xy = L0_PQ_xy -ls0_PQ, L0_CD_xy -ls0_CD
lf0_PQx_y , lf0_CDx_y = L0_PQx_y -ls0_PQ, L0_CDx_y -ls0_CD
lf0_PQ_x_y, lf0_CD_x_y= L0_PQ_x_y-ls0_PQ, L0_CD_x_y-ls0_CD
# Stage-one stiffness of each fibre-spring element
K1_PQxy , K1_CDxy = 1/(lf0_PQxy /(E1*A)+1/ks_PQ), 1/(lf0_CDxy /(E1*A)+1/ks_CD)
K1_PQ_xy , K1_CD_xy = 1/(lf0_PQ_xy /(E1*A)+1/ks_PQ), 1/(lf0_CD_xy /(E1*A)+1/ks_CD)
K1_PQx_y , K1_CDx_y = 1/(lf0_PQx_y /(E1*A)+1/ks_PQ), 1/(lf0_CDx_y /(E1*A)+1/ks_CD)
K1_PQ_x_y, K1_CD_x_y= 1/(lf0_PQ_x_y/(E1*A)+1/ks_PQ), 1/(lf0_CD_x_y/(E1*A)+1/ks_CD)
# Stage-two stiffness of each fibre-spring element
K2_PQxy , K2_CDxy = 1/(lf0_PQxy /(E2*A)+1/ks_PQ), 1/(lf0_CDxy /(E2*A)+1/ks_CD)
K2_PQ_xy , K2_CD_xy = 1/(lf0_PQ_xy /(E2*A)+1/ks_PQ), 1/(lf0_CD_xy /(E2*A)+1/ks_CD)
K2_PQx_y , K2_CDx_y = 1/(lf0_PQx_y /(E2*A)+1/ks_PQ), 1/(lf0_CDx_y /(E2*A)+1/ks_CD)
K2_PQ_x_y, K2_CD_x_y= 1/(lf0_PQ_x_y/(E2*A)+1/ks_PQ), 1/(lf0_CD_x_y/(E2*A)+1/ks_CD)
# Length and position of the shortest (weakest) fibre-spring element in each direction, plus its two-stage stiffnesses, used to compute the puncture height
L0minPQ,idPQ,K1minPQ,K2minPQ = func_minElement(L0_PQxy, L0_PQ_xy, L0_PQx_y, L0_PQ_x_y,K1_PQxy,K2_PQxy)
L0minCD,idCD,K1minCD,K2minCD = func_minElement(L0_CDxy, L0_CD_xy, L0_CDx_y, L0_CD_x_y,K1_CDxy,K2_CDxy)
#print('L0minPQ,idPQ,K1minPQ,K2minPQ=',L0minPQ,idPQ,K1minPQ,K2minPQ)
#print('L0minCD,idCD,K1minCD,K2minCD=',L0minCD,idCD,K1minCD,K2minCD)
# Two-stage heights determined by the elements of the two directions
z1PQ, z2PQ = func_compute_z1z2(L0minPQ,K1minPQ,K2minPQ,gamma_N1,gamma_N2,sigma_y,A)
z1CD, z2CD = func_compute_z1z2(L0minCD,K1minCD,K2minCD,gamma_N1,gamma_N2,sigma_y,A)
# Find the shortest fibre-spring element among all elements of the model
L0min = np.min([L0minPQ,L0minCD])
#print('z1PQ,z1CD,z2PQ,z2CD=',z1PQ,z1CD,z2PQ,z2CD)
z1, z2 = func_Checkz1z2(z1PQ,z1CD,z2PQ,z2CD)
maxTheta1 = np.arctan(z1/L0min) # maximum stage-one angle of the fibre-spring element
maxTheta2 = np.arctan(z2/L0min) # maximum stage-two angle of the fibre-spring element
# Stage-one length of each fibre-spring element
L1_PQxy , L1_CDxy = func_xyz(blockShape, curtain, '+x+y', w, kappa, Rp, a, ex, ey, z1)
L1_PQ_xy , L1_CD_xy = func_xyz(blockShape, curtain, '-x+y', w, kappa, Rp, a, ex, ey, z1)
L1_PQx_y , L1_CDx_y = func_xyz(blockShape, curtain, '+x-y', w, kappa, Rp, a, ex, ey, z1)
L1_PQ_x_y, L1_CD_x_y= func_xyz(blockShape, curtain, '-x-y', w, kappa, Rp, a, ex, ey, z1)
# Stage-two length of each fibre-spring element
L2_PQxy , L2_CDxy = func_xyz(blockShape, curtain, '+x+y', w, kappa, Rp, a, ex, ey, z2)
L2_PQ_xy , L2_CD_xy = func_xyz(blockShape, curtain, '-x+y', w, kappa, Rp, a, ex, ey, z2)
L2_PQx_y , L2_CDx_y = func_xyz(blockShape, curtain, '+x-y', w, kappa, Rp, a, ex, ey, z2)
L2_PQ_x_y, L2_CD_x_y= func_xyz(blockShape, curtain, '-x-y', w, kappa, Rp, a, ex, ey, z2)
# Stage-one angle between each fibre-spring element and the loading direction
Ang1_PQxy , Ang1_CDxy = np.arccos(z1/L1_PQxy) , np.arccos(z1/L1_CDxy)
Ang1_PQ_xy , Ang1_CD_xy = np.arccos(z1/L1_PQ_xy) , np.arccos(z1/L1_CD_xy)
Ang1_PQx_y , Ang1_CDx_y = np.arccos(z1/L1_PQx_y) , np.arccos(z1/L1_CDx_y)
Ang1_PQ_x_y, Ang1_CD_x_y = np.arccos(z1/L1_PQ_x_y), np.arccos(z1/L1_CD_x_y)
# Stage-two angle between each fibre-spring element and the loading direction
Ang2_PQxy , Ang2_CDxy = np.arccos(z2/L2_PQxy) , np.arccos(z2/L2_CDxy)
Ang2_PQ_xy , Ang2_CD_xy = np.arccos(z2/L2_PQ_xy) , np.arccos(z2/L2_CD_xy)
Ang2_PQx_y , Ang2_CDx_y = np.arccos(z2/L2_PQx_y) , np.arccos(z2/L2_CDx_y)
Ang2_PQ_x_y, Ang2_CD_x_y = np.arccos(z2/L2_PQ_x_y), np.arccos(z2/L2_CD_x_y)
chi_ang = 0.65 # chi is a correction factor: after actual puncture, the angle between the wire fibres of the rings around the loading area and the vertical direction is smaller than the model angle
# Stage-one internal forces of each fibre-spring element
F1_PQxy , F2_PQxy , E1_PQxy ,E2_PQxy = func_vectorFiEi(L0_PQxy ,L1_PQxy ,L2_PQxy ,K1_PQxy ,K2_PQxy ,gamma_N1,sigma_y,A)
F1_PQ_xy , F2_PQ_xy , E1_PQ_xy ,E2_PQ_xy = func_vectorFiEi(L0_PQ_xy ,L1_PQ_xy ,L2_PQ_xy ,K1_PQ_xy ,K2_PQ_xy ,gamma_N1,sigma_y,A)
F1_PQx_y , F2_PQx_y , E1_PQx_y ,E2_PQx_y = func_vectorFiEi(L0_PQx_y ,L1_PQx_y ,L2_PQx_y ,K1_PQx_y ,K2_PQx_y ,gamma_N1,sigma_y,A)
F1_PQ_x_y, F2_PQ_x_y, E1_PQ_x_y,E2_PQ_x_y = func_vectorFiEi(L0_PQ_x_y,L1_PQ_x_y,L2_PQ_x_y,K1_PQ_x_y,K2_PQ_x_y,gamma_N1,sigma_y,A)
F1_CDxy , F2_CDxy , E1_CDxy ,E2_CDxy = func_vectorFiEi(L0_CDxy ,L1_CDxy ,L2_CDxy ,K1_CDxy ,K2_CDxy ,gamma_N1,sigma_y,A)
F1_CD_xy , F2_CD_xy , E1_CD_xy ,E2_CD_xy = func_vectorFiEi(L0_CD_xy ,L1_CD_xy ,L2_CD_xy ,K1_CD_xy ,K2_CD_xy ,gamma_N1,sigma_y,A)
F1_CDx_y , F2_CDx_y , E1_CDx_y ,E2_CDx_y = func_vectorFiEi(L0_CDx_y ,L1_CDx_y ,L2_CDx_y ,K1_CDx_y ,K2_CDx_y ,gamma_N1,sigma_y,A)
F1_CD_x_y, F2_CD_x_y, E1_CD_x_y,E2_CD_x_y = func_vectorFiEi(L0_CD_x_y,L1_CD_x_y,L2_CD_x_y,K1_CD_x_y,K2_CD_x_y,gamma_N1,sigma_y,A)
# Fibre and spring lengths of each fibre-spring element in stages one and two
ls1_PQxy ,ls2_PQxy ,lf1_PQxy ,lf2_PQxy = func_lslf(F1_PQxy ,F2_PQxy ,L1_PQxy ,L2_PQxy ,ls0_PQ,lf0_PQxy ,ks_PQ,E1,E2,gamma_N1,sigma_y,A)
ls1_PQ_xy ,ls2_PQ_xy ,lf1_PQ_xy ,lf2_PQ_xy = func_lslf(F1_PQ_xy ,F2_PQ_xy ,L1_PQ_xy ,L2_PQ_xy ,ls0_PQ,lf0_PQ_xy ,ks_PQ,E1,E2,gamma_N1,sigma_y,A)
ls1_PQx_y ,ls2_PQx_y ,lf1_PQx_y ,lf2_PQx_y = func_lslf(F1_PQx_y ,F2_PQx_y ,L1_PQx_y ,L2_PQx_y ,ls0_PQ,lf0_PQx_y ,ks_PQ,E1,E2,gamma_N1,sigma_y,A)
ls1_PQ_x_y,ls2_PQ_x_y,lf1_PQ_x_y,lf2_PQ_x_y = func_lslf(F1_PQ_x_y,F2_PQ_x_y,L1_PQ_x_y,L2_PQ_x_y,ls0_PQ,lf0_PQ_x_y,ks_PQ,E1,E2,gamma_N1,sigma_y,A)
ls1_CDxy ,ls2_CDxy ,lf1_CDxy ,lf2_CDxy = func_lslf(F1_CDxy ,F2_CDxy ,L1_CDxy ,L2_CDxy ,ls0_CD,lf0_CDxy ,ks_CD,E1,E2,gamma_N1,sigma_y,A)
ls1_CD_xy ,ls2_CD_xy ,lf1_CD_xy ,lf2_CD_xy = func_lslf(F1_CD_xy ,F2_CD_xy ,L1_CD_xy ,L2_CD_xy ,ls0_CD,lf0_CD_xy ,ks_CD,E1,E2,gamma_N1,sigma_y,A)
ls1_CDx_y ,ls2_CDx_y ,lf1_CDx_y ,lf2_CDx_y = func_lslf(F1_CDx_y ,F2_CDx_y ,L1_CDx_y ,L2_CDx_y ,ls0_CD,lf0_CDx_y ,ks_CD,E1,E2,gamma_N1,sigma_y,A)
ls1_CD_x_y,ls2_CD_x_y,lf1_CD_x_y,lf2_CD_x_y = func_lslf(F1_CD_x_y,F2_CD_x_y,L1_CD_x_y,L2_CD_x_y,ls0_PQ,lf0_CD_x_y,ks_PQ,E1,E2,gamma_N1,sigma_y,A)
H_net = z2 # puncture height
# Components of the element internal forces along the loading direction at the moment of puncture
F_PQx = np.sum(F2_PQxy *np.cos(chi_ang*Ang2_PQxy)) + np.sum(F2_PQxy *np.cos(chi_ang*Ang2_PQx_y))
F_PQ_x= np.sum(F2_PQ_xy*np.cos(chi_ang*Ang2_PQ_xy))+ np.sum(F2_PQ_xy*np.cos(chi_ang*Ang2_PQ_x_y))
F_CDy = np.sum(F2_CDxy *np.cos(chi_ang*Ang2_CDxy)) + np.sum(F2_CDxy *np.cos(chi_ang*Ang2_CD_xy))
F_CD_y= np.sum(F2_CDx_y*np.cos(chi_ang*Ang2_CDx_y))+ np.sum(F2_CDx_y*np.cos(chi_ang*Ang2_CD_x_y))
F_net = F_PQx + F_PQ_x + F_CDy + F_CD_y # puncture force
# Energy dissipated by each element at the moment of puncture
Exy = np.sum(E2_PQxy) + np.sum(E2_CDxy)
E_xy = np.sum(E2_PQ_xy) + np.sum(E2_CD_xy)
Ex_y = np.sum(E2_PQx_y) + np.sum(E2_CDx_y)
import tensor.libtensor as _ts
import pytest
import numpy as np
def test_matrix_f_init():
tensor = _ts.MatrixF(2, 2)
assert tensor.shape() == [2, 2]
assert tensor.data_size() == 4
def test_matrix_f_getitem():
tensor = _ts.MatrixF(2, 2)
assert tensor[0, 0] == 0.
assert tensor[0, 1] == 0.
assert tensor[1, 0] == 0.
assert tensor[1, 1] == 0.
def test_matrix_f_setitem():
tensor = _ts.MatrixF(2, 2)
assert tensor[1, 1] == 0.
tensor[1, 1] = 1337.
assert tensor[1, 1] == 1337.
def test_tensor_from_numpy():
with pytest.raises(RuntimeError) as e:
_ts.MatrixF(np.array([1, 2, 3])) # trying to assign a int array
assert str(e.value) == "Incompatible buffer format!"
array = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
tensor = _ts.MatrixF(array)
for i in range(tensor.shape()[0]):
for j in range(tensor.shape()[1]):
assert tensor[i, j] == array[i, j]
def test_numpy_from_tensor():
tensor = _ts.MatrixF(5, 4)
assert memoryview(tensor).shape == (5, 4)
assert tensor[2, 3] == 0.
tensor[2, 3] = 11.
tensor[3, 2] = 7.
assert tensor[2, 3] == 11.
assert tensor[3, 2] == 7.
array = np.array(tensor)
assert array.shape == (5, 4)
assert array[2, 3] == 11.
assert array[3, 2] == 7.
assert abs(array).sum() == 11 + 7
array[2, 3] = 5.
assert array[2, 3] == 5.
def test_cross_entropy_loss():
loss = _ts.CrossEntropyLoss()
logits = _ts.MatrixF(np.random.randn(300, 3).astype(np.float32))
labels = _ts.VectorI(np.random.randint(0, 2, (300,)))
import numpy as np
import pandas as pd
import torch
from pathlib import Path
from PIL import Image
from wilds.datasets.wilds_dataset import WILDSDataset
from wilds.common.grouper import CombinatorialGrouper
from wilds.common.metrics.all_metrics import DetectionAccuracy
SESSIONS = [
'Arvalis_1',
'Arvalis_2',
'Arvalis_3',
'Arvalis_4',
'Arvalis_5',
'Arvalis_6',
'Arvalis_7',
'Arvalis_8',
'Arvalis_9',
'Arvalis_10',
'Arvalis_11',
'Arvalis_12',
'ETHZ_1',
'Inrae_1',
'NMBU_1',
'NMBU_2',
'Rres_1',
'ULiège-GxABT_1',
'Utokyo_1',
'Utokyo_2',
'Utokyo_3',
'Ukyoto_1',
'NAU_1',
'NAU_2',
'NAU_3',
'ARC_1',
'UQ_1',
'UQ_2',
'UQ_3',
'UQ_4',
'UQ_5',
'UQ_6',
'UQ_7',
'UQ_8',
'UQ_9',
'UQ_10',
'UQ_11',
'Terraref_1',
'Terraref_2',
'KSU_1',
'KSU_2',
'KSU_3',
'KSU_4',
'CIMMYT_1',
'CIMMYT_2',
'CIMMYT_3',
'Usask_1'
]
COUNTRIES = [
'Switzerland',
'UK',
'Belgium',
'Norway',
'France',
'Canada',
'US',
'Mexico',
'Japan',
'China',
'Australia',
'Sudan',
]
LOCATIONS = [
'Baima',
'Brookstead',
'Ciudad Obregon',
'Gatton',
'Gembloux',
'Gréoux',
'KSU',
'Kyoto',
'Maricopa, AZ',
'McAllister',
'Mons',
'NARO-Hokkaido',
'NARO-Tsukuba',
'NMBU',
'Rothamsted',
'Saskatchewan',
'Toulouse',
'Usask',
'VLB',
'VSC',
'Wad Medani',
'Eschikon'
]
STAGES = [
'Filling',
'Filling-Ripening',
'multiple',
'Post-flowering',
'Ripening',
]
class GlobalWheatDataset(WILDSDataset):
"""
The GlobalWheat-WILDS wheat head localization dataset.
This is a modified version of the original Global Wheat Head Dataset 2021.
Supported `split_scheme`:
- 'official'
- 'official_with_subsampled_test'
- 'test-to-test'
- 'mixed-to-test'
Input (x):
1024 x 1024 RGB images of wheat field canopy starting from anthesis (flowering) to ripening.
Output (y):
y is a n x 4-dimensional vector where each line represents a box coordinate (x_min, y_min, x_max, y_max)
Metadata:
Each image is annotated with the ID of the domain (session) it came from (integer from 0 to 46).
Website:
http://www.global-wheat.com/
Original publication:
@article{david_global_2020,
title = {Global {Wheat} {Head} {Detection} ({GWHD}) {Dataset}: {A} {Large} and {Diverse} {Dataset} of {High}-{Resolution} {RGB}-{Labelled} {Images} to {Develop} and {Benchmark} {Wheat} {Head} {Detection} {Methods}},
volume = {2020},
url = {https://doi.org/10.34133/2020/3521852},
doi = {10.34133/2020/3521852},
journal = {Plant Phenomics},
author = {<NAME> and Madec, Simon and Sadeghi-Tehran, Pouria and Aasen, Helge and Zheng, Bangyou and Liu, Shouyang and Kirchgessner, Norbert and Ishikawa, Goro and Nagasawa, Koichi and Badhon, <NAME>. and Pozniak, Curtis and <NAME>, Benoit and Hund, Andreas and Chapman, <NAME>. and Baret, Frédéric and Stavness, Ian and <NAME>},
month = Aug,
year = {2020},
note = {Publisher: AAAS},
pages = {3521852},
}
@misc{david2021global,
title={Global Wheat Head Dataset 2021: more diversity to improve the benchmarking of wheat head localization methods},
author={<NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME>},
year={2021},
eprint={2105.07660},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
License:
This dataset is distributed under the MIT license.
"""
_dataset_name = 'globalwheat'
_versions_dict = {
'1.0': {
'download_url': 'https://worksheets.codalab.org/rest/bundles/0x443fbcb18eeb4f80b5ea4a9f77795168/contents/blob/',
'compressed_size': 10_286_120_960},
'1.1': {
'download_url': 'https://worksheets.codalab.org/rest/bundles/0x36e16907b7254571b708b725f8beae52/contents/blob/',
'compressed_size': 10_284_949_504},
}
def __init__(self, version=None, root_dir='data', download=False, split_scheme='official'):
self._version = version
self._data_dir = self.initialize_data_dir(root_dir, download)
self._original_resolution = (1024, 1024)
self.root = Path(self.data_dir)
self._is_detection = True
self._is_classification = False
self._y_size = None
self._n_classes = 1
self._split_scheme = split_scheme
self._split_dict = {
'train': 0,
'val': 1,
'test': 2,
}
self._split_names = {
'train': 'Train',
'val': 'Validation (OOD)',
'test':'Test (OOD)',
}
data_dfs = {}
if split_scheme == "official":
data_dfs['train'] = pd.read_csv(self.root / f'official_train.csv')
data_dfs['val'] = pd.read_csv(self.root / f'official_val.csv')
data_dfs['test'] = pd.read_csv(self.root / f'official_test.csv')
data_dfs['id_val'] = pd.read_csv(self.root / f'fixed_train_val.csv')
data_dfs['id_test'] = pd.read_csv(self.root / f'fixed_train_test.csv')
self._split_dict = {
'train': 0,
'val': 1,
'test': 2,
'id_val': 3,
'id_test': 4,
}
self._split_names = {
'train': 'Train',
'val': 'Validation (OOD)',
'test':'Test (OOD)',
'id_val': 'Validation (ID)',
'id_test': 'Test (ID)'
}
elif split_scheme == "official_with_subsampled_test":
data_dfs['train'] = pd.read_csv(self.root / f'official_train.csv')
data_dfs['val'] = pd.read_csv(self.root / f'official_val.csv')
data_dfs['test'] = pd.read_csv(self.root / f'fixed_test_test.csv')
elif split_scheme == "test-to-test":
data_dfs['train'] = pd.read_csv(self.root / f'fixed_test_train.csv')
data_dfs['val'] = pd.read_csv(self.root / f'official_val.csv')
data_dfs['test'] = pd.read_csv(self.root / f'fixed_test_test.csv')
elif split_scheme == "mixed-to-test":
data_dfs['train'] = pd.read_csv(self.root / f'mixed_train_train.csv')
data_dfs['val'] = pd.read_csv(self.root / f'official_val.csv')
data_dfs['test'] = pd.read_csv(self.root / f'mixed_train_test.csv')
else:
raise ValueError(f'Split scheme {self.split_scheme} not recognized')
self._image_array = []
self._split_array, self._y_array, self._metadata_array = [], [], []
for split_name, split_idx in self._split_dict.items():
df = data_dfs[split_name]
self._image_array.extend(list(df['image_name'].values))
boxes_string = list(df['BoxesString'].values)
all_boxes = [GlobalWheatDataset._decode_string(box_string) for box_string in boxes_string]
self._split_array.extend([split_idx] * len(all_boxes))
labels = [{
"boxes": torch.stack([
torch.tensor(box)
for box in boxes
]),
"labels": torch.tensor([1]*len(boxes)).long()
} if len(boxes) > 0 else {
"boxes": torch.empty(0,4),
"labels": torch.empty(0,dtype=torch.long)
} for boxes in all_boxes]
self._y_array.extend(labels)
self._metadata_array.extend([int(item) for item in df['domain'].values])
self._split_array = np.array(self._split_array)
self._metadata_array = torch.tensor(self._metadata_array,
dtype=torch.long).unsqueeze(1)
self._metadata_array = torch.cat(
(self._metadata_array,
torch.zeros(
(len(self._metadata_array), 3),
dtype=torch.long)),
dim=1)
domain_df = pd.read_csv(self.root / 'metadata_domain.csv', sep=';')
for session_idx, session_name in enumerate(SESSIONS):
idx = pd.Index(domain_df['name']).get_loc(session_name)
country = domain_df.loc[idx, 'country']
location = domain_df.loc[idx, 'location']
stage = domain_df.loc[idx, 'development_stage']
session_mask = (self._metadata_array[:, 0] == session_idx)
self._metadata_array[session_mask, 1] = COUNTRIES.index(country)
self._metadata_array[session_mask, 2] = LOCATIONS.index(location)
self._metadata_array[session_mask, 3] = STAGES.index(stage)
self._metadata_fields = ['session', 'country', 'location', 'stage']
self._metadata_map = {
'session': SESSIONS,
'country': COUNTRIES,
'location': LOCATIONS,
'stage': STAGES,
}
self._eval_grouper = CombinatorialGrouper(
dataset=self,
groupby_fields=['session'])
self._metric = DetectionAccuracy()
self._collate = GlobalWheatDataset._collate_fn
super().__init__(root_dir, download, split_scheme)
def get_input(self, idx):
"""
Returns x for a given idx.
"""
img_filename = self.root / "images" / self._image_array[idx]
x = Image.open(img_filename)
return x
def eval(self, y_pred, y_true, metadata):
"""
The main evaluation metric, detection_acc_avg_dom,
measures the simple average of the detection accuracies
of each domain.
"""
results, results_str = self.standard_group_eval(
self._metric,
self._eval_grouper,
y_pred, y_true, metadata)
detection_accs = []
for k, v in results.items():
if k.startswith('detection_acc_session:'):
d = k.split(':')[1]
count = results[f'count_session:{d}']
if count > 0:
detection_accs.append(v)
detection_acc_avg_dom = np.array(detection_accs).mean()
results['detection_acc_avg_dom'] = detection_acc_avg_dom
results_str = f'Average detection_acc across session: {detection_acc_avg_dom:.3f}\n' + results_str
return results, results_str
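    # A small, purely hypothetical illustration of the averaging above (not real
    # benchmark numbers): if three sessions with nonzero counts score detection
    # accuracies of 0.60, 0.70 and 0.80, then
    # detection_acc_avg_dom = (0.60 + 0.70 + 0.80) / 3 = 0.70, so every session
    # contributes equally regardless of how many images it contains.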
@staticmethod
def _decode_string(box_string):
"""
Helper method to decode each box_string in the BoxesString field of the data CSVs
"""
if box_string == "no_box":
return
|
np.zeros((0,4))
|
numpy.zeros
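# A minimal, hedged sketch of the row above: how a BoxesString such as
# "0 0 50 60;10 20 30 40" could be decoded into an (N, 4) array and wrapped into
# the detection label dict built in __init__. The semicolon/space separators and
# the all-ones class labels are assumptions read off the surrounding code, not a
# documented format.
import numpy as np
import torch

def decode_boxes_string_sketch(box_string):
    if box_string == "no_box":
        boxes = np.zeros((0, 4))
    else:
        boxes = np.array([np.array(b.split(), dtype=np.float32)
                          for b in box_string.split(";")])
    if len(boxes) == 0:
        return {"boxes": torch.empty(0, 4), "labels": torch.empty(0, dtype=torch.long)}
    return {"boxes": torch.tensor(boxes, dtype=torch.float32),
            "labels": torch.ones(len(boxes), dtype=torch.long)}

print(decode_boxes_string_sketch("0 0 50 60;10 20 30 40"))  # two boxes, labels [1, 1]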
|
import argparse
import time
import numpy as np
import tensorflow as tf
import socket
import importlib
import os
import sys
baseDir = os.path.dirname(os.path.abspath(__file__))
rootDir = os.path.dirname(baseDir)
sys.path.append(baseDir)
sys.path.append(os.path.join(rootDir, 'models'))
sys.path.append(os.path.join(rootDir, 'utils'))
import data_util
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--model', default='SPH3D_shapenet_onehot', help='Model name [default: SPH3D_shapenet_onehot]')
parser.add_argument('--config', default='shapenet_config', help='Model name [default: shapenet_config]')
parser.add_argument('--log_dir', default='log_shapenet_onehot', help='Log dir [default: log_shapenet_onehot]')
parser.add_argument('--max_epoch', type=int, default=501, help='Epoch to run [default: 501]')
parser.add_argument('--batch_size', type=int, default=32, help='Batch Size during training [default: 32]')
parser.add_argument('--model_name', default='model.ckpt', help='model checkpoint file path [default: model.ckpt]')
FLAGS = parser.parse_args()
BATCH_SIZE = FLAGS.batch_size
MAX_EPOCH = FLAGS.max_epoch
GPU_INDEX = FLAGS.gpu
MODEL_FILE = os.path.join(rootDir, 'models', FLAGS.model+'.py')
LOG_DIR = os.path.join(rootDir,FLAGS.log_dir)
LOG_FOUT = open(os.path.join(LOG_DIR,'log_evaluate.txt'), 'a+')
LOG_FOUT.write(str(FLAGS)+'\n')
HOSTNAME = socket.gethostname()
# import network module
spec = importlib.util.spec_from_file_location('',os.path.join(LOG_DIR,FLAGS.model+'.py'))
MODEL = importlib.util.module_from_spec(spec)
spec.loader.exec_module(MODEL)
# import network config
spec = importlib.util.spec_from_file_location('',os.path.join(LOG_DIR,FLAGS.config+'.py'))
net_config = importlib.util.module_from_spec(spec)
spec.loader.exec_module(net_config)
resultDir = 'shapenet_seg/results'
if not os.path.exists(resultDir):
os.mkdir(resultDir)
NUM_POINT = net_config.num_input
NUM_CLASSES = 50
INPUT_DIM = 3
dataDir = os.path.join(rootDir, 'data/shapenet_onehot')
seg_info = [int(line.rstrip().split('\t')[-1])
for line in open(os.path.join(dataDir, 'class_info_all.txt'))]
seg_info.append(NUM_CLASSES)
shape_names = [line.rstrip().split('\t')[0]
for line in open(os.path.join(dataDir, 'class_info_all.txt'))]
trainlist = [line.rstrip() for line in open(os.path.join(dataDir, 'train_files.txt'))]
testlist = [line.rstrip() for line in open(os.path.join(dataDir, 'test_files.txt'))]
def log_string(out_str):
LOG_FOUT.write(out_str+'\n')
LOG_FOUT.flush()
print(out_str)
def placeholder_inputs(batch_size, num_point, normal=False):
xyz_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point, 3))
label_pl = tf.placeholder(tf.int32, shape=(batch_size, num_point))
cls_label_pl = tf.placeholder(tf.int32, shape=(batch_size))
return xyz_pl, label_pl, cls_label_pl
def augment_fn1(batch_xyz):
augment_xyz = batch_xyz
augment_xyz = data_util.rotate_point_cloud(augment_xyz)
augment_xyz = data_util.rotate_perturbation_point_cloud(augment_xyz)
augment_xyz = data_util.random_scale_point_cloud(augment_xyz)
augment_xyz = data_util.shift_point_cloud(augment_xyz)
augment_xyz = data_util.jitter_point_cloud(augment_xyz)
batch_xyz = augment_xyz
return batch_xyz
def augment_fn2(batch_xyz):
augment_xyz = batch_xyz
augment_xyz = data_util.rotate_perturbation_point_cloud(augment_xyz)
augment_xyz = data_util.random_scale_point_cloud(augment_xyz)
augment_xyz = data_util.shift_point_cloud(augment_xyz)
augment_xyz = data_util.jitter_point_cloud(augment_xyz)
batch_xyz = augment_xyz
return batch_xyz
def parse_fn(item):
features = tf.parse_single_example(
item,
features={
'xyz_raw': tf.FixedLenFeature([], dtype=tf.string),
'seg_label':tf.FixedLenFeature([], dtype=tf.string),
'cls_label': tf.FixedLenFeature([], dtype=tf.int64)})
xyz = tf.decode_raw(features['xyz_raw'], tf.float32)
seg_label = tf.decode_raw(features['seg_label'], tf.int32)
cls_label = tf.cast(features['cls_label'], tf.int32)
xyz = tf.reshape(xyz,[-1,3])
seg_label = tf.reshape(seg_label, [-1, 1])
print(cls_label)
cls_label = tf.cast([cls_label,cls_label,cls_label,cls_label], tf.float32)
cls_label = tf.reshape(cls_label, [1, -1])
all_in_one = tf.concat((xyz, tf.cast(seg_label, tf.float32)), axis=-1)
all_in_one = tf.concat((all_in_one, cls_label), axis=0)
return all_in_one
def input_fn(filelist, batch_size=16, buffer_size=10000):
dataset = tf.data.TFRecordDataset(filelist)
dataset = dataset.map(parse_fn, num_parallel_calls=4)
dataset = dataset.padded_batch(batch_size, padded_shapes=(None, INPUT_DIM+1),
padding_values=-1.0, drop_remainder=False)
return dataset
def evaluate():
# ===============================Prepare the Dataset===============================
testset = input_fn(testlist, BATCH_SIZE, 10000)
test_iterator = testset.make_initializable_iterator()
next_test_element = test_iterator.get_next()
# =====================================The End=====================================
with tf.device('/gpu:0'):
# =================================Define the Graph================================
xyz_pl, label_pl, cls_label_pl = placeholder_inputs(BATCH_SIZE, NUM_POINT)
training_pl = tf.placeholder(tf.bool, shape=())
# Get model and loss
pred, end_points = MODEL.get_model(xyz_pl, cls_label_pl, NUM_CLASSES, training_pl, config=net_config)
MODEL.get_loss(pred, label_pl, end_points)
if net_config.weight_decay is not None:
reg_loss = tf.multiply(tf.losses.get_regularization_loss(), net_config.weight_decay, name='reg_loss')
tf.add_to_collection('losses', reg_loss)
losses = tf.get_collection('losses')
total_loss = tf.add_n(losses, name='total_loss')
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
# =====================================The End=====================================
n = len([n.name for n in tf.get_default_graph().as_graph_def().node])
print("*****************The Graph has %d nodes*****************"%(n))
# =================================Start a Session================================
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
config.log_device_placement = False
with tf.Session(config=config) as sess:
ops = {'xyz_pl': xyz_pl,
'label_pl': label_pl,
'cls_label_pl': cls_label_pl,
'training_pl': training_pl,
'pred': pred,
'loss': total_loss}
saver.restore(sess, os.path.join(LOG_DIR, FLAGS.model_name))
sess.run(test_iterator.initializer)
eval_one_epoch(sess, ops, next_test_element)
# =====================================The End=====================================
def eval_one_epoch(sess, ops, next_test_element):
""" ops: dict mapping from string to tf ops """
is_training = False
# Make sure batch data is of same size
cur_batch_xyz = np.zeros((BATCH_SIZE, NUM_POINT, 3))
cur_batch_label = np.zeros((BATCH_SIZE, NUM_POINT), dtype=np.int32)
cur_batch_cls_label = np.zeros((BATCH_SIZE), dtype=np.int32)
total_correct = 0
total_seen = 0
loss_sum = 0
batch_idx = 0
total_seen_class = [0 for _ in range(NUM_CLASSES)]
total_correct_class = [0 for _ in range(NUM_CLASSES)]
shape_ious = {cat:[] for cat in shape_names}
test_time = 0.0
while True:
try:
padded_all = sess.run(next_test_element)
bsize = padded_all.shape[0]
batch_xyz = np.zeros((bsize, NUM_POINT, INPUT_DIM))
batch_label = np.zeros((bsize, NUM_POINT), dtype=np.int32)
batch_cls_label = np.zeros((bsize), dtype=np.int32)
batch_gt_label = []
batch_pred_sum = []
batch_sample_count = []
batch_sample_index = []
batch_point_size = np.zeros((bsize,), np.int32)
batch_point_covered = np.zeros((bsize,), np.int32)
for b in range(bsize):
loc = np.where(padded_all[b, :, -1]<0)
if len(loc[0])==0:
num = padded_all.shape[1]
else:
num = loc[0][0]
if num==0:
print(loc, padded_all[b, 0:10, :])
print('problem of eval')
exit()
num = num-1
batch_cls_label[b] = padded_all[b, num, 0]
# print(padded_all[b, num-1:num+1, :])
batch_point_size[b] = num
batch_gt_label.append(padded_all[b, 0:num, -1])
batch_pred_sum.append(
|
np.zeros((num, NUM_CLASSES), dtype=np.float32)
|
numpy.zeros
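# Hedged sketch of the padded-record layout handled by eval_one_epoch above: each
# example is stored as rows of [x, y, z, seg_label] plus one trailing row that
# repeats the shape class id (see parse_fn), and batches are right-padded with
# -1.0. The numbers below are synthetic; only the unpacking logic mirrors the loop.
import numpy as np

def unpack_padded_example(padded):
    """padded: [max_rows, 4] array for one example, right-padded with -1.0."""
    pad_rows = np.where(padded[:, -1] < 0)[0]
    end = pad_rows[0] if len(pad_rows) > 0 else padded.shape[0]
    num = end - 1                                # last valid row stores the class id
    cls_label = int(padded[num, 0])
    xyz = padded[:num, :3]
    seg_label = padded[:num, -1].astype(np.int32)
    return xyz, seg_label, cls_label

example = np.array([[0.1, 0.2, 0.3, 5.0],
                    [0.4, 0.5, 0.6, 5.0],
                    [7.0, 7.0, 7.0, 7.0],        # replicated class-id row
                    [-1.0, -1.0, -1.0, -1.0]])   # padding
print(unpack_padded_example(example))            # (2x3 xyz, [5 5], 7)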
|
#!/usr/bin/env python
"""
statistic.py
Description: summary statistics over several test runs
printSTAT:
    input is a dictionary holding the results of several tests:
    result['FuncEvalRuntimes']: a list of function evaluation runtimes, one per test
    result['Runtimes']: a list of total runtimes, one per test
    result['FuncEvalNums']: a list of function evaluation counts, one per test
    result['optimalYs']: a list of the optimal values obtained in each test
    FuncEvalRatio: the proportion of the total runtime spent on function evaluation
"""
import numpy as np
__all__ = ['printSTAT']
def printSTAT(result):
# result: dictionary
    # result['FuncEvalRuntimes']
    # result['Runtimes']
    # result['FuncEvalNums']
    # result['optimalYs']
# result['optimalYs'], result['FuncEvalRuntimes'] = [], []
# result['Runtimes'], result['FuncEvalNums'] = [], []
print('$--------Statistic Information--------$\n')
FuncEvalRatio = 100.0 * np.array(result['FuncEvalRuntimes']) / np.array(result['Runtimes'])
print('Optimal Y : ---- Mean & Std: {0} & {1} \n'.format(
np.array(result['optimalYs']).mean(), np.array(result['optimalYs']).std()))
print('Runtime : ---- Mean & Std: {0} & {1} \n'.format(
np.array(result['Runtimes']).mean(), np.array(result['Runtimes']).std()))
print('FE Runtime : ---- Mean & Std: {0} & {1} \n'.format(
np.array(result['FuncEvalRuntimes']).mean(), np.array(result['FuncEvalRuntimes']).std()))
print('FE Ratio : ---- Mean & Std: {0} & {1} \n'.format(
np.array(FuncEvalRatio).mean(),
|
np.array(FuncEvalRatio)
|
numpy.array
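# Hedged usage sketch for printSTAT above, with made-up numbers. It assumes the
# function is importable (e.g. `from statistic import printSTAT`); the only
# requirement on the argument is the list-valued keys documented above.
result = {
    'optimalYs':        [0.91, 0.88, 0.93],      # best objective value per run
    'Runtimes':         [120.0, 118.5, 121.3],   # total wall time per run [s]
    'FuncEvalRuntimes': [80.0, 79.2, 81.1],      # time spent in evaluations [s]
    'FuncEvalNums':     [200, 200, 200],         # number of evaluations per run
}
# printSTAT(result)  # prints mean/std of the optima, runtimes and the FE time ratio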
|
import os
import pickle
import numpy as np
import torch.utils.data as torch_data
from collections import defaultdict
import datasets.kitti_common as kitti_common
import utils.box_np_ops as box_np_ops
import utils.geometry as geometry
import utils.box_coder as box_coder_utils
from datasets.target_assigner import TargetAssigner, AnchorGeneratorRange
from config import cfg
import imageio
import copy
class KittiStereoDataset(torch_data.Dataset):
def __init__(self, root_path, class_names, split, training, logger=None):
"""
:param root_path: KITTI data path
        :param split: dataset split name, e.g. 'train', 'val' or 'test'
"""
super().__init__()
self.root_path = os.path.join(root_path, 'object')
self.root_split_path = os.path.join(self.root_path, 'training' if split != 'test' else 'testing')
self.class_names = class_names
self.split = split
self.training = training
self.logger = logger
self.img_w = cfg.INPUT_DATA.IMAGE_WIDTH
self.img_h = cfg.INPUT_DATA.IMAGE_HEIGHT
# read kitti infos, which is a list of dict with the following keys:
# image_idx, velodyne_path, img_path, image_shape, calib matrices, annos (annotations)
info_path = os.path.join(self.root_path, 'kitti_infos_%s.pkl' % split)
with open(info_path, 'rb') as f:
self.kitti_infos = pickle.load(f)
# NOTE: we are not able to use sampler any more because we cannot edit stereo images for augmentation.
# database sampler
# self.db_sampler = None
# if self.training and cfg.DB_SAMPLER.ENABLED:
# # read db info, which is a dict for all categories (Car, Ped, Cyclist, etc.)
# # For each category, for example 'Car', there is a list of dict with the following keys:
# # name, path, image_idx, gt_idx, box3d_lidar, num_points_in_gt, difficulty, group_id, etc/
    #     # actually each sample represents an object subset of lidar points
# db_info_path = os.path.join(self.root_path, 'kitti_dbinfos_%s.pkl' % split)
# with open(db_info_path, 'rb') as f:
# db_infos = pickle.load(f)
# self.db_sampler = DataBaseSampler(db_infos=db_infos, sampler_cfg=cfg.DB_SAMPLER, logger=logger)
# NOTE: voxel generator is replaced by cost volume generator, which is inside the backbone network
# voxel generator: convert point cloud into a voxel grid
# voxel_generator_cfg = cfg.VOXEL_GENERATOR
# self.voxel_generator = VoxelGenerator(voxel_size=voxel_generator_cfg.VOXEL_SIZE,
# point_cloud_range=voxel_generator_cfg.POINT_CLOUD_RANGE,
# max_num_points=voxel_generator_cfg.MAX_POINTS_PER_VOXEL,
# max_voxels=20000)
# a list of configs for each class, car, cyclist, pedestrian, etc.
# each config consists of the following keys:
# anchor_range: array [6]
# matched_threshold, unmatched_threshold
# sizes, rotations: (for each position xyz, generate anchors with different sizes and rotations)
anchor_cfg = cfg.TARGET_ASSIGNER.ANCHOR_GENERATOR
# anchor generators for each class: `generate anchors` with different x,y,z,sizes,rotations
anchor_generators = []
for a_cfg in anchor_cfg:
anchor_generator = AnchorGeneratorRange(
anchor_ranges=a_cfg['anchor_range'],
sizes=a_cfg['sizes'],
rotations=a_cfg['rotations'],
class_name=a_cfg['class_name'],
match_threshold=a_cfg['matched_threshold'],
unmatch_threshold=a_cfg['unmatched_threshold']
)
anchor_generators.append(anchor_generator)
# box coder: compute the `regression target` for the outputs (according to the anchors)
# usually it could be the residual values based on the anchor size and rotation
# will be used in target_assigner
self.box_coder = getattr(box_coder_utils, cfg.BOX_CODER)()
# target assigner: assign anchors with corresponding gt boxes to obtain training labels and regression targets
self.target_assigner = TargetAssigner(
anchor_generators=anchor_generators,
pos_fraction=cfg.TARGET_ASSIGNER.SAMPLE_POS_FRACTION,
sample_size=cfg.TARGET_ASSIGNER.SAMPLE_SIZE,
region_similarity_fn_name=cfg.TARGET_ASSIGNER.REGION_SIMILARITY_FN,
box_coder=self.box_coder
)
# the number of anchors should be D/dd/stride * W/dw/stride * 2 (angle), 7
# generate cached anchors
# because anchors are fixed, generate beforehand to save computation cost
# compute the output size factor of the 3D CNN
out_size_factor = cfg.RPN_STAGE.RPN_HEAD.LAYER_STRIDES[0] / cfg.RPN_STAGE.RPN_HEAD.UPSAMPLE_STRIDES[0]
# out_size_factor *= cfg.RPN_STAGE.COST_VOLUME_GENERATOR.DOWNSAMPLE_RATE_3D
out_size_factor = int(out_size_factor)
assert out_size_factor > 0
# the grid size of the initial cost volume
grid_size = np.array(cfg.RPN_STAGE.COST_VOLUME.GRID_SIZE, dtype=np.int64)
# the feature map size after the cost volume is processed by the 3D CNN
feature_map_size = grid_size[:2] // out_size_factor
feature_map_size = [*feature_map_size, 1][::-1]
ret = self.target_assigner.generate_anchors(feature_map_size)
anchors_dict = self.target_assigner.generate_anchors_dict(feature_map_size)
anchors = ret["anchors"].reshape([-1, 7])
self.anchor_cache = {
"anchors": anchors,
"anchors_dict": anchors_dict,
}
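    # Hypothetical worked example of the sizing above (illustrative numbers, not
    # project defaults): with GRID_SIZE = [400, 352, 10] and out_size_factor = 2,
    # feature_map_size becomes [1, 176, 200]; with one anchor size and two
    # rotations per location that yields 200 * 176 * 1 * 2 = 70_400 anchors,
    # each described by 7 box parameters.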
def get_lidar(self, idx):
lidar_file = os.path.join(self.root_split_path, 'velodyne', '%06d.bin' % idx)
assert os.path.exists(lidar_file)
points = np.fromfile(lidar_file, dtype=np.float32).reshape(-1, 4)
points = points[:, :3]
points = points[points[:, 0] > 0]
return points
def get_lidar_reduced(self, idx):
lidar_file = os.path.join(self.root_split_path, 'velodyne_reduced', '%06d.bin' % idx)
assert os.path.exists(lidar_file)
return np.fromfile(lidar_file, dtype=np.float32).reshape(-1, 4)
def get_image(self, idx, left_or_right='left'):
if left_or_right == 'left':
tag = 'image_2'
elif left_or_right == 'right':
tag = 'image_3'
else:
raise ValueError('can only be left or right')
img_file = os.path.join(self.root_split_path, tag, '%06d.png' % idx)
assert os.path.exists(img_file), 'img_file: %s not exist' % img_file
img = imageio.imread(img_file)
img = img.astype(np.float32) / 255.
img = img * 2 - 1
return img
def get_depth_image(self, idx, left_or_right='left', img_shape=None):
if left_or_right == 'left':
tag = 'depth_2'
elif left_or_right == 'right':
tag = 'depth_3'
else:
raise ValueError('can only be left or right')
img_file = os.path.join(self.root_split_path, tag, '%06d.png' % idx)
if not os.path.exists(img_file):
print('img_file: %s not exist' % img_file)
return None
img = imageio.imread(img_file)
assert np.max(img) > 255
img = img.astype(np.float32) / 256.
img[img == 0] = np.nan
return img
# def get_road_plane(self, idx):
# plane_file = os.path.join(self.root_split_path, 'planes', '%06d.txt' % idx)
# with open(plane_file, 'r') as f:
# lines = f.readlines()
# lines = [float(i) for i in lines[3].split()]
# plane = np.asarray(lines)
#
# # Ensure normal is always facing up, this is in the rectified camera coordinate
# if plane[1] > 0:
# plane = -plane
#
# norm = np.linalg.norm(plane[0:3])
# plane = plane / norm
# return planes
def __len__(self):
return len(self.kitti_infos)
def __getitem__(self, index):
info = self.kitti_infos[index]
sample_idx = info['image_idx']
rect = info['calib/R0_rect'].astype(np.float32)
Trv2c = info['calib/Tr_velo_to_cam'].astype(np.float32)
P2 = info['calib/P2'].astype(np.float32)
P3 = info['calib/P3'].astype(np.float32)
points = self.get_lidar(sample_idx)
left_img = self.get_image(sample_idx, 'left')
right_img = self.get_image(sample_idx, 'right')
left_depth = self.get_depth_image(sample_idx, 'left')
right_depth = self.get_depth_image(sample_idx, 'right')
if left_depth is not None:
assert left_img.shape[:2] == left_depth.shape[:2]
input_dict = {
'left_img': left_img,
'right_img': right_img,
'left_depth': left_depth,
'right_depth': right_depth,
'points': points,
'rect': rect,
'Trv2c': Trv2c,
'P2': P2,
'P3': P3,
'image_shape': np.array(info["img_shape"], dtype=np.int32),
'sample_idx': sample_idx,
'image_path': info['img_path']
}
if 'annos' in info:
annos = info['annos']
# we need other objects to avoid collision when sample
annos = kitti_common.remove_dontcare(annos)
loc = annos["location"]
dims = annos["dimensions"]
rots = annos["rotation_y"]
gt_names = annos["name"]
# print(gt_names, len(loc))
gt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]], axis=1).astype(np.float32)
# gt_boxes = box_np_ops.box_camera_to_lidar(gt_boxes, rect, Trv2c)
difficulty = annos["difficulty"]
input_dict.update({
'gt_boxes': gt_boxes,
'gt_names': gt_names,
'difficulty': difficulty,
})
example = self.prepare_data(input_dict=input_dict)
example["sample_idx"] = sample_idx
if "anchors_mask" in example:
example["anchors_mask"] = example["anchors_mask"].astype(np.uint8)
return example
def convert_points_to_grid(self, points):
"""
:param points [N, 3]: should be in camera coordinate (cam0 or cam2)
        :return: voxel_grid [N_h, N_w, N_d] of per-voxel point counts; axis order is the reverse of VOXEL_SIZE in config.py
"""
points = box_np_ops.camera_to_pseudo_lidar(points) # convert into D, W, H order (pseudo lidar coordinate)
cost_volume_cfg = cfg.RPN_STAGE.COST_VOLUME
self.voxel_size = np.array(cost_volume_cfg.VOXEL_SIZE) # size of each voxel
self.point_cloud_range = np.array(cost_volume_cfg.POINT_CLOUD_RANGE)
grid_size = (self.point_cloud_range[3:] - self.point_cloud_range[:3]) / self.voxel_size
self.grid_size = np.round(grid_size).astype(np.int64)
# generate voxel centers for cost volume resampling
voxel_grid = np.zeros(self.grid_size, dtype=np.float32) # in D, W, H order
voxel_ids = np.round((points - self.point_cloud_range[:3]) / self.voxel_size).astype(np.int64)
valid = np.logical_and(np.all(voxel_ids >= 0, axis=1), np.all(voxel_ids < self.grid_size, axis=1))
voxel_ids = voxel_ids[valid, :]
# print(self.grid_size, voxel_ids.min(0), voxel_ids.max(0))
np.add.at(voxel_grid, (voxel_ids[:, 0], voxel_ids[:, 1], voxel_ids[:, 2]), 1)
voxel_grid = np.transpose(voxel_grid, [2, 1, 0])
return voxel_grid
def convert_points_to_depth_map(self, points, K):
assert K.shape == (3, 3)
depth = points[:, 2:3]
uv = points @ K.T
uv = uv[:, :2] / uv[:, 2:3]
# uv -= 1 # use minus 1 to get the exact same value as KITTI matlab code
# TODO: do not fix the size, use configs or input_dict["image_shape"]
depth_map = np.zeros([self.img_h, self.img_w], dtype=np.float32)
depth_map_shape = np.array(list(depth_map.shape)[::-1])
valid = np.logical_and(np.all(uv > 0, 1), np.all(uv < depth_map_shape - 1, 1))
valid = np.logical_and(valid, depth[:, 0] > 0)
uv = uv[valid]
depth = depth[valid]
u, v, depth = uv[:, 0], uv[:, 1], depth[:, 0]
depth_map[...] = 10000. # set to a large value
np.minimum.at(depth_map, (np.floor(v).astype(np.uint32), np.floor(u).astype(np.uint32)), depth)
np.minimum.at(depth_map, (np.ceil(v).astype(np.uint32), np.floor(u).astype(np.uint32)), depth)
np.minimum.at(depth_map, (
|
np.floor(v)
|
numpy.floor
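# Hedged sketch of the depth-map construction in convert_points_to_depth_map
# above: project camera-frame points through the intrinsics K, then splat each
# depth onto the neighbouring integer pixels with np.minimum.at so the nearest
# surface wins (a simple z-buffer). All numbers below are synthetic.
import numpy as np

def points_to_depth_map_sketch(points, K, height, width):
    depth = points[:, 2]
    uv = points @ K.T
    uv = uv[:, :2] / uv[:, 2:3]
    valid = (depth > 0) & np.all(uv > 0, axis=1) \
            & (uv[:, 0] < width - 1) & (uv[:, 1] < height - 1)
    u, v, depth = uv[valid, 0], uv[valid, 1], depth[valid]
    depth_map = np.full((height, width), 10000., dtype=np.float32)
    for vv in (np.floor(v), np.ceil(v)):
        for uu in (np.floor(u), np.ceil(u)):
            np.minimum.at(depth_map, (vv.astype(np.int64), uu.astype(np.int64)), depth)
    return depth_map

K = np.array([[700., 0., 320.], [0., 700., 180.], [0., 0., 1.]])
pts = np.array([[1.0, 0.5, 10.0], [0.5, 0.25, 5.0]])  # same ray, the nearer point wins
print(points_to_depth_map_sketch(pts, K, 360, 640).min())  # 5.0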
|
import os
from src.data_management.New_DataSplitter_leave_k_out import New_DataSplitter_leave_k_out
from src.data_management.RecSys2019Reader import RecSys2019Reader
from src.data_management.RecSys2019Reader_utils import merge_UCM
from src.data_management.data_preprocessing import apply_feature_engineering_ICM, apply_filtering_ICM, \
apply_transformation_ICM, apply_discretization_ICM, build_ICM_all_from_dict, apply_feature_engineering_UCM, \
apply_transformation_UCM, apply_discretization_UCM, build_UCM_all_from_dict, apply_imputation_ICM, \
apply_imputation_UCM, apply_feature_entropy_UCM, apply_advanced_discretization_ICM
import scipy.sparse as sps
import numpy as np
from src.utils.general_utility_functions import get_project_root_path
# -------- COLD DATA MATRICES ALL --------
def read_URM_cold_all(path="../data/data_train.csv"):
"""
    :return: the full user rating matrix (URM), in CSR format
"""
import scipy.sparse as sps
import numpy as np
import pandas as pd
# Reading data
df_original = pd.read_csv(path)
user_id_list = df_original['row'].values
item_id_list = df_original['col'].values
rating_list = np.ones(len(user_id_list))
# Creating URM
URM_all = sps.coo_matrix((rating_list, (user_id_list, item_id_list)))
URM_all = URM_all.tocsr()
return URM_all
def read_UCM_cold_all(num_users, root_path="../data/"):
"""
    :return: the full UCM, in CSR format
"""
import scipy.sparse as sps
import numpy as np
import pandas as pd
import os
# Reading age data
df_age = pd.read_csv(os.path.join(root_path, "data_UCM_age.csv"))
user_id_list = df_age['row'].values
age_id_list = df_age['col'].values
UCM_age = sps.coo_matrix((np.ones(len(user_id_list)), (user_id_list, age_id_list)),
shape=(num_users, np.max(age_id_list) + 1))
# Reading region data
df_region = pd.read_csv(os.path.join(root_path, "data_UCM_region.csv"))
user_id_list = df_region['row'].values
region_id_list = df_region['col'].values
UCM_region = sps.coo_matrix((np.ones(len(user_id_list)), (user_id_list, region_id_list)),
shape=(num_users, np.max(region_id_list) + 1))
# Merge UCMs
UCM_all, _ = merge_UCM(UCM_age, UCM_region, {}, {})
UCM_all = UCM_all.tocsr()
return UCM_all
def read_UCM_cold_all_with_user_act(num_users, root_path="../data/"):
"""
    :return: the full UCM, in CSR format
"""
import scipy.sparse as sps
import numpy as np
import pandas as pd
import os
# Reading age data
df_age = pd.read_csv(os.path.join(root_path, "data_UCM_age.csv"))
user_id_list = df_age['row'].values
age_id_list = df_age['col'].values
UCM_age = sps.coo_matrix((np.ones(len(user_id_list)), (user_id_list, age_id_list)),
shape=(num_users, np.max(age_id_list) + 1))
# Reading region data
df_region = pd.read_csv(os.path.join(root_path, "data_UCM_region.csv"))
user_id_list = df_region['row'].values
region_id_list = df_region['col'].values
UCM_region = sps.coo_matrix((np.ones(len(user_id_list)), (user_id_list, region_id_list)),
shape=(num_users, np.max(region_id_list) + 1))
# Reading user_act data from URM
df_original = pd.read_csv(os.path.join(root_path, "data_train.csv"))
user_act = df_original.groupby(by='row')['data'].sum()
user_act = (user_act - 0) / (user_act.max() - 0)
user_id_list = user_act.index
feature_list = [0] * len(user_id_list)
data_list = user_act.values.astype(np.float32)
UCM_user_act = sps.coo_matrix((data_list, (user_id_list, feature_list)), shape=(num_users, 1))
# Create UCM_all_dict
UCM_all_dict = {"UCM_age": UCM_age, "UCM_region": UCM_region, "UCM_user_act": UCM_user_act}
UCM_all_dict = apply_transformation_UCM(UCM_all_dict,
UCM_name_to_transform_mapper={"UCM_user_act": np.log1p})
UCM_all_dict = apply_discretization_UCM(UCM_all_dict, UCM_name_to_bins_mapper={"UCM_user_act": 50})
# Merge UCMs
UCM_all = build_UCM_all_from_dict(UCM_all_dict)
return UCM_all
# -------- GET ITEM CONTENT MATRIX --------
def get_ICM_all(reader: RecSys2019Reader):
"""
    Returns ICM_all after applying feature engineering
    :param reader: data reader
    :return: ICM_all
"""
URM_all = reader.get_URM_all()
UCM_all_dict = reader.get_loaded_UCM_dict()
ICM_all_dict = reader.get_loaded_ICM_dict()
ICM_all_dict.pop("ICM_all")
ICM_all_dict = apply_feature_engineering_ICM(ICM_all_dict, URM_all, UCM_all_dict,
ICM_names_to_count=["ICM_sub_class"], UCM_names_to_list=["UCM_age"])
ICM_all_dict = apply_filtering_ICM(ICM_all_dict,
ICM_name_to_filter_mapper={"ICM_asset": lambda x: x < np.quantile(x, q=0.75) +
0.72 * (np.quantile(x,
q=0.75) -
np.quantile(x,
q=0.25)),
"ICM_price": lambda x: x < np.quantile(x, q=0.75) +
4 * (np.quantile(x, q=0.75) -
np.quantile(x, q=0.25))})
ICM_all_dict = apply_transformation_ICM(ICM_all_dict,
ICM_name_to_transform_mapper={"ICM_asset": lambda x: np.log1p(1 / x),
"ICM_price": lambda x: np.log1p(1 / x),
"ICM_item_pop": np.log1p,
"ICM_sub_class_count": np.log1p,
"ICM_age": lambda x: x ** (1 / 2.5)})
ICM_all_dict = apply_discretization_ICM(ICM_all_dict,
ICM_name_to_bins_mapper={"ICM_asset": 200,
"ICM_price": 200,
"ICM_item_pop": 50,
"ICM_sub_class_count": 50})
ICM_all = build_ICM_all_from_dict(ICM_all_dict)
return ICM_all
def get_ICM_all_new(reader: RecSys2019Reader):
"""
    Returns ICM_all after applying feature engineering
    :param reader: data reader
    :return: ICM_all
"""
ICM_all_dict = reader.get_loaded_ICM_dict()
ICM_all_dict.pop("ICM_all")
ICM_all_dict = apply_filtering_ICM(ICM_all_dict,
ICM_name_to_filter_mapper={"ICM_asset": lambda x: x < np.quantile(x, q=0.75) +
0.72 * (np.quantile(x,
q=0.75) -
np.quantile(x,
q=0.25)),
"ICM_price": lambda x: x < np.quantile(x, q=0.75) +
4 * (np.quantile(x, q=0.75) -
np.quantile(x, q=0.25))})
# Apply useful transformation
ICM_all_dict = apply_transformation_ICM(ICM_all_dict,
ICM_name_to_transform_mapper={"ICM_asset": lambda x: np.log1p(1 / x),
"ICM_price": lambda x: np.log1p(1 / x),
"ICM_item_pop": np.log1p})
ICM_all_dict = apply_discretization_ICM(ICM_all_dict,
ICM_name_to_bins_mapper={"ICM_asset": 200,
"ICM_price": 200,
"ICM_item_pop": 50})
# Apply feature weighting
ICM_all_dict = apply_transformation_ICM(ICM_all_dict,
ICM_name_to_transform_mapper={"ICM_price": lambda x: x * 1.8474248499810804,
"ICM_asset": lambda x: x * 1.2232716972721878,
"ICM_sub_class": lambda
x: x * 1.662671860026709,
"ICM_item_pop": lambda
x: x * 0.886528360392298})
ICM_all = build_ICM_all_from_dict(ICM_all_dict)
return ICM_all
def get_ICM_train(reader: New_DataSplitter_leave_k_out):
"""
    Returns ICM_train_all after applying feature engineering. This preprocessing is used in the new_best_models file
    :param reader: data splitter
    :return: ICM_train_all
"""
URM_train, _ = reader.get_holdout_split()
UCM_all_dict = reader.get_loaded_UCM_dict()
ICM_all_dict = reader.get_loaded_ICM_dict()
ICM_all_dict.pop("ICM_all")
ICM_all_dict = apply_feature_engineering_ICM(ICM_all_dict, URM_train, UCM_all_dict,
ICM_names_to_count=["ICM_sub_class"], UCM_names_to_list=["UCM_age"])
ICM_all_dict = apply_filtering_ICM(ICM_all_dict,
ICM_name_to_filter_mapper={"ICM_asset": lambda x: x < np.quantile(x, q=0.75) +
0.72 * (np.quantile(x,
q=0.75) -
np.quantile(x,
q=0.25)),
"ICM_price": lambda x: x < np.quantile(x, q=0.75) +
4 * (np.quantile(x, q=0.75) -
np.quantile(x, q=0.25))})
ICM_all_dict = apply_transformation_ICM(ICM_all_dict,
ICM_name_to_transform_mapper={"ICM_asset": lambda x: np.log1p(1 / x),
"ICM_price": lambda x: np.log1p(1 / x),
"ICM_item_pop": np.log1p,
"ICM_sub_class_count": np.log1p,
"ICM_age": lambda x: x ** (1 / 2.5)})
ICM_all_dict = apply_discretization_ICM(ICM_all_dict,
ICM_name_to_bins_mapper={"ICM_asset": 200,
"ICM_price": 200,
"ICM_item_pop": 50,
"ICM_sub_class_count": 50})
ICM_all = build_ICM_all_from_dict(ICM_all_dict)
return ICM_all
def get_ICM_train_new(reader: New_DataSplitter_leave_k_out):
"""
    Returns ICM_train_all after applying feature engineering.
    :param reader: data splitter
    :return: ICM_train_all
"""
ICM_all_dict = reader.get_loaded_ICM_dict()
ICM_all_dict.pop("ICM_all")
ICM_all_dict = apply_filtering_ICM(ICM_all_dict,
ICM_name_to_filter_mapper={"ICM_asset": lambda x: x < np.quantile(x, q=0.75) +
0.72 * (np.quantile(x,
q=0.75) -
np.quantile(x,
q=0.25)),
"ICM_price": lambda x: x < np.quantile(x, q=0.75) +
4 * (np.quantile(x, q=0.75) -
np.quantile(x, q=0.25))})
# Apply useful transformation
ICM_all_dict = apply_transformation_ICM(ICM_all_dict,
ICM_name_to_transform_mapper={"ICM_asset": lambda x: np.log1p(1 / x),
"ICM_price": lambda x: np.log1p(1 / x),
"ICM_item_pop": np.log1p})
ICM_all_dict = apply_discretization_ICM(ICM_all_dict,
ICM_name_to_bins_mapper={"ICM_asset": 200,
"ICM_price": 200,
"ICM_item_pop": 50})
# Apply feature weighting
ICM_all_dict = apply_transformation_ICM(ICM_all_dict,
ICM_name_to_transform_mapper={"ICM_price": lambda x: x * 1.8474248499810804,
"ICM_asset": lambda x: x * 1.2232716972721878,
"ICM_sub_class": lambda
x: x * 1.662671860026709,
"ICM_item_pop": lambda
x: x * 0.886528360392298})
ICM_all = None
item_feature_to_range_mapper = {}
last_range = 0
for idx, ICM_key_value in enumerate(ICM_all_dict.items()):
ICM_name, ICM_object = ICM_key_value
if idx == 0:
ICM_all = ICM_object
else:
ICM_all = sps.hstack([ICM_all, ICM_object], format="csr")
item_feature_to_range_mapper[ICM_name] = (last_range, last_range + ICM_object.shape[1])
last_range = last_range + ICM_object.shape[1]
return ICM_all, item_feature_to_range_mapper
# -------- GET USER CONTENT MATRIX --------
def get_UCM_all(reader: RecSys2019Reader):
URM_all = reader.get_URM_all()
UCM_all_dict = reader.get_loaded_UCM_dict()
ICM_dict = reader.get_loaded_ICM_dict()
UCM_all_dict = apply_feature_engineering_UCM(UCM_all_dict, URM_all, ICM_dict,
ICM_names_to_UCM=["ICM_sub_class"])
# These are useful feature weighting for UserCBF_CF_Warm
UCM_all_dict = apply_transformation_UCM(UCM_all_dict,
UCM_name_to_transform_mapper={"UCM_sub_class": lambda x: x / 2,
"UCM_user_act": np.log1p})
UCM_all_dict = apply_discretization_UCM(UCM_all_dict, UCM_name_to_bins_mapper={"UCM_user_act": 50})
UCM_all = build_UCM_all_from_dict(UCM_all_dict)
return UCM_all
def get_UCM_all_new(reader: RecSys2019Reader):
URM_all = reader.get_URM_all()
UCM_all_dict = reader.get_loaded_UCM_dict()
ICM_dict = reader.get_loaded_ICM_dict()
# Preprocess ICM
ICM_dict.pop("ICM_all")
ICM_dict = apply_feature_engineering_ICM(ICM_dict, URM_all, UCM_all_dict,
ICM_names_to_count=["ICM_sub_class"], UCM_names_to_list=["UCM_age"])
ICM_dict = apply_filtering_ICM(ICM_dict,
ICM_name_to_filter_mapper={"ICM_asset": lambda x: x < np.quantile(x, q=0.75) +
0.72 * (np.quantile(x,
q=0.75) -
np.quantile(x,
q=0.25)),
"ICM_price": lambda x: x < np.quantile(x, q=0.75) +
4 * (np.quantile(x, q=0.75) -
np.quantile(x, q=0.25))})
ICM_dict = apply_transformation_ICM(ICM_dict,
ICM_name_to_transform_mapper={"ICM_asset": lambda x: np.log1p(1 / x),
"ICM_price": lambda x: np.log1p(1 / x),
"ICM_item_pop": np.log1p,
"ICM_sub_class_count": np.log1p,
"ICM_age": lambda x: x ** (1 / 2.5)})
ICM_dict = apply_discretization_ICM(ICM_dict,
ICM_name_to_bins_mapper={"ICM_asset": 200,
"ICM_price": 200,
"ICM_item_pop": 50,
"ICM_sub_class_count": 50})
# Preprocess UCM
UCM_all_dict = apply_feature_engineering_UCM(UCM_all_dict, URM_all, ICM_dict,
ICM_names_to_UCM=["ICM_sub_class", "ICM_item_pop"])
UCM_all_dict = apply_feature_entropy_UCM(UCM_all_dict, UCM_names_to_entropy=["UCM_sub_class"])
# Apply useful transformation
UCM_all_dict = apply_transformation_UCM(UCM_all_dict,
UCM_name_to_transform_mapper={"UCM_user_act": np.log1p})
UCM_all_dict = apply_discretization_UCM(UCM_all_dict, UCM_name_to_bins_mapper={"UCM_user_act": 50,
"UCM_sub_class_entropy": 20})
UCM_all = build_UCM_all_from_dict(UCM_all_dict)
return UCM_all
def get_UCM_train(reader: New_DataSplitter_leave_k_out):
"""
    Returns UCM_all after applying feature engineering. This preprocessing is used in the new_best_models file
    :param reader: data splitter
    :return: UCM_all
"""
URM_train, _ = reader.get_holdout_split()
UCM_all_dict = reader.get_loaded_UCM_dict()
ICM_dict = reader.get_loaded_ICM_dict()
UCM_all_dict = apply_feature_engineering_UCM(UCM_all_dict, URM_train, ICM_dict,
ICM_names_to_UCM=["ICM_sub_class"])
# These are useful feature weighting for UserCBF_CF_Warm
UCM_all_dict = apply_transformation_UCM(UCM_all_dict,
UCM_name_to_transform_mapper={"UCM_sub_class": lambda x: x / 2,
"UCM_user_act": np.log1p})
UCM_all_dict = apply_discretization_UCM(UCM_all_dict, UCM_name_to_bins_mapper={"UCM_user_act": 50})
UCM_all = build_UCM_all_from_dict(UCM_all_dict)
return UCM_all
def get_UCM_train_new(reader: New_DataSplitter_leave_k_out):
URM_train, _ = reader.get_holdout_split()
UCM_all_dict = reader.get_loaded_UCM_dict()
ICM_dict = reader.get_loaded_ICM_dict()
# Preprocess ICM
ICM_dict.pop("ICM_all")
ICM_dict = apply_feature_engineering_ICM(ICM_dict, URM_train, UCM_all_dict,
ICM_names_to_count=["ICM_sub_class"], UCM_names_to_list=["UCM_age"])
ICM_dict = apply_filtering_ICM(ICM_dict,
ICM_name_to_filter_mapper={"ICM_asset": lambda x: x <
|
np.quantile(x, q=0.75)
|
numpy.quantile
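# Hedged sketch of the outlier rule used by the ICM_name_to_filter_mapper lambdas
# above: keep only values below Q3 + k * IQR, where IQR = Q3 - Q1 and k is the
# per-feature multiplier (0.72 for ICM_asset, 4 for ICM_price in this file).
# The data below is synthetic.
import numpy as np

def iqr_keep_mask(x, k):
    q1, q3 = np.quantile(x, q=0.25), np.quantile(x, q=0.75)
    return x < q3 + k * (q3 - q1)

x = np.array([1.0, 1.2, 1.1, 0.9, 1.3, 50.0])  # one obvious outlier
print(iqr_keep_mask(x, k=0.72))                # only the 50.0 entry is dropped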
|
import numpy as np
import os
import random
from utility import *
import tensorflow as tf
def generate_batch_skip_gram(data, batch_size, num_skips):
'''
Batch generator for Skip-gram
'''
data_index = 0
assert batch_size % num_skips == 0
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
while True:
for i in range(batch_size // num_skips):
span = len(data[data_index])
            # pick the target label at a random position within the record
label_index = random.randint(0, span - 1)
singe_data = data[data_index]
targets_to_avoid = [label_index]
target = label_index
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = singe_data[label_index]
labels[i * num_skips + j, 0] = singe_data[target]
data_index = (data_index + 1) % len(data)
yield batch, labels
def generate_batch_cbow(data, batch_size, num_skips):
'''
Batch generator for CBOW (Continuous Bag of Words).
'''
data_index = 0
batch = np.ndarray(shape=(batch_size, num_skips), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
while True:
for i in range(batch_size):
span = len(data[data_index])
label_index = random.randint(0, span - 1)
singe_data = data[data_index]
labels[i, 0] = singe_data[label_index]
targets_to_avoid = [label_index]
target = label_index
sample = []
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
sample.append(singe_data[target])
batch[i] = sample
data_index = (data_index + 1) % len(data)
yield batch, labels
class Options(object):
"""Options used by the model."""
def __init__(self):
# Model options.
# Embedding dimension.
self.embedding_size = 32
# The initial learning rate.
self.learning_rate = 1.
# Number of epochs to train. After these many epochs, the learning
# rate decays linearly to zero and the training stops.
self.epochs_to_train = 100
# Number of examples for one training step.
self.batch_size = 128
# Number of class
self.num_classes = 2
self.sequence_length = 25
self.interaction_times = 3
# self.k = self.sequence_length
self.vocabulary_size = 10000000
# Where to write out summaries.
self.save_path = './'
self.dataset = 'ipinyou'
self.eval_data = './data/' + self.dataset + '/questions.txt'
self.architecture = 'skip-gram'
self.num_skips = 4
self.field_cate_indices_path = \
'./data/ipinyou/field_cates_index_not_aligned.csv'
# for negative sampling
self.valid_size = 16
self.valid_window = 100
self.valid_examples = np.random.choice(
self.valid_window, self.valid_size, replace=False)
self.num_sampled = 64 # Number of negative examples to sample.
class Cat2Vec(object):
"""
Base Class for Cat2Vec
Based on tensorflow/embedding/word2vec.py
"""
def __init__(self, options, session, id2cate, cate2id):
self._options = options
self._cate2id = cate2id
self._id2cate = id2cate
self._session = session
self.fields_index = None
# self.embeddings = None
# self._read_analogies()
self.build_graph()
self.build_eval_graph()
def _read_analogies(self):
"""Reads through the analogy question file.
Returns:
questions: a [n, 4] numpy array containing the analogy question's
word ids.
questions_skipped: questions skipped due to unknown words.
"""
questions = []
questions_skipped = 0
with open(self._options.eval_data, "rb") as analogy_f:
for line in analogy_f:
if line.startswith(":"): # Skip comments.
continue
words = line.strip().lower().split(" ")
# print words
ids = [self._cate2id.get(w.strip()) for w in words]
# print ids
if None in ids or len(ids) != 4:
questions_skipped += 1
else:
questions.append(np.array(ids))
print("Eval analogy file: ", self._options.eval_data)
print("Questions: ", len(questions))
print("Skipped: ", questions_skipped)
questions =
|
np.array(questions, dtype=np.int32)
|
numpy.array
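# Hedged usage sketch for the batch generators defined above. Each training
# record is a list of category ids; the generators yield (batch, labels) pairs
# forever, so they are typically driven with next() inside a training loop. The
# toy ids are made up, and the snippet assumes generate_batch_cbow and
# generate_batch_skip_gram from the file above are already in scope.
toy_data = [[3, 7, 12, 5], [9, 2, 4, 8], [1, 6, 10, 11]]
cbow_gen = generate_batch_cbow(toy_data, batch_size=4, num_skips=2)
batch, labels = next(cbow_gen)        # batch: (4, 2) context ids, labels: (4, 1) targets
sg_gen = generate_batch_skip_gram(toy_data, batch_size=4, num_skips=2)
batch, labels = next(sg_gen)          # batch: (4,) center ids, labels: (4, 1) context ids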
|
import random
import math
import numpy as np
class DrawTicTacToe:
# class to represent a game
    # constructor: board scale (side length), winning combo length, empty board
def __init__(self, scale, combo):
self.scale = scale
self.combo = combo
self.board = [[" " for x in range(self.scale)] for y in range(self.scale)]
self.game_winner = ''
print(f'New game created with a scale of {scale} and a combo of {combo}.')
# function to display the board of the game
def printboard(self):
[[print(self.board[x][y], end=' ') if y < self.scale - 1 else print(self.board[x][y]) for y in range(self.scale)] for x in range(self.scale)]
    # function to place a stone at a given position
def move(self, x, y, player):
self.board[x][y] = player
# function to perform generalized row check
def check_general_rows(self, rows, player):
current_player = " "
for row in rows:
for index, place in enumerate(row):
check_space = []
begin = 0
while len(check_space) < self.combo:
if index + begin <= len(row) - 1:
if row[index + begin] == player:
check_space.append(row[index + begin])
begin = begin + 1
else:
break
else:
break
if len(check_space) == self.combo:
current_player = player
return current_player
return current_player
# function to see if we have a winner
def whowins(self):
for player in ["X", "O"]:
# check horizontal
current_player1 = self.check_general_rows(self.board, player)
if current_player1 == 'X':
return "X"
elif current_player1 == 'O':
return "O"
# check vertical
rotated = zip(*self.board[::-1])
current_player2 = self.check_general_rows(rotated, player)
if current_player2 == 'X':
return "X"
elif current_player2 == 'O':
return "O"
matrix =
|
np.array(self.board)
|
numpy.array
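# Hedged usage sketch for DrawTicTacToe above, with made-up moves; it assumes the
# full class is in scope. The class generalises tic-tac-toe to an arbitrary board
# scale (side length) and winning combo length, and check_general_rows reports a
# player once it finds `combo` of their stones in a row.
game = DrawTicTacToe(scale=3, combo=3)
game.move(0, 0, "X"); game.move(0, 1, "X"); game.move(0, 2, "X")
game.move(1, 0, "O"); game.move(1, 1, "O")
game.printboard()
print(game.check_general_rows(game.board, "X"))  # "X" -- the top row holds a full combo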
|
# Copyright (c) 2018 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
import numpy as np
import toast.timing as timing
class GlitchFlagger():
def __init__(self, fwhm=5, threshold=4.0, fsample=180.3737, twice=False,
wkernel=3):
"""
Instantiate a glitch flagging object. Parameters:
fwhm (float): Beam width [arc minutes]
threshold (float): glitch detection limit in units of the
filtered signal RMS
        fsample (float): sampling frequency [Hz]
        twice (bool): Run the glitch detection on both the regular and the convolved TOD
        wkernel (int): width of the kernel used to extend glitch flags [samples]
"""
self.fwhm = fwhm
wbin = self.fwhm
self.order = 6 # order of signal model across 3 bins
nbin_min = np.int(2 * np.pi / np.radians(wbin / 60))
nbin = 2
while nbin < nbin_min:
nbin *= 2
wbin = 2 * np.pi / nbin
self.nbin = nbin
self.wbin = wbin
self.threshold = threshold
self.fsample = fsample
self.twice = twice
self.wkernel = wkernel
def flag_glitches(self, signal_in, flag_in, phase=None, dark=False,
pntflag=None):
"""
Find and flag glitches.
"""
if not dark:
if phase is None:
raise RuntimeError('Optical detectors must provide phase')
if pntflag is None:
raise RuntimeError(
'Optical detectors must provide pointing flags')
signal_in = signal_in.copy()
self.subtract_trend(signal_in)
flag = flag_in.copy()
if dark:
self.flag_outliers(signal_in, flag)
flag_intense = np.zeros_like(flag)
else:
# POD = phase-ordered data
ind = np.argsort(phase)
reverse_ind = np.argsort(ind)
POD_signal_in = signal_in[ind]
POD_signal = POD_signal_in.copy()
POD_flag = flag[ind]
POD_pntflag = pntflag[ind]
POD_phase = phase[ind]
bin_lim = np.arange(self.nbin) * self.wbin
bin_ind = np.searchsorted(POD_phase, bin_lim)
bin_ranges = [
(bin_ind[i], bin_ind[i + 1]) for i in range(self.nbin - 1)]
bin_ranges.append((bin_ind[-1], POD_phase.size))
# Identify outliers in each phase bin
POD_signal, POD_flag, bin_rms = self.flag_outliers_by_phase(
POD_signal, POD_phase, POD_flag, POD_pntflag, bin_ranges)
POD_signal_estimate = POD_signal_in - POD_signal
POD_flag_intense = self.get_intense(
bin_ranges, bin_rms, POD_signal, POD_signal_estimate, POD_flag)
if self.twice:
POD_signal2 = np.convolve(signal_in, [.25, .5, .25],
mode='same')[ind]
# POD_signal2_in = POD_signal2.copy()
flag2_in = POD_flag[reverse_ind]
flag2_in = np.convolve(flag2_in, np.ones(3), mode='same') != 0
POD_flag2_in = flag2_in[ind]
POD_flag2 = POD_flag2_in.copy()
POD_signal2, POD_flag2, bin_rms = self.flag_outliers_by_phase(
POD_signal2, POD_phase, POD_flag2, POD_pntflag, bin_ranges)
"""
# DEBUG begin
import matplotlib.pyplot as plt
import pdb
plt.figure()
good = flag_in[ind] + POD_pntflag == 0
plt.plot(POD_phase[good] / self.wbin, POD_signal_in[good], '.',
label='input')
good = POD_flag + POD_pntflag == 0
plt.plot(POD_phase[good] / self.wbin, POD_signal_in[good],
label='unflagged')
plt.plot(POD_phase[good] / self.wbin, POD_signal_estimate[good],
label='model')
good[POD_flag_intense == 0] = False
plt.plot(POD_phase[good] / self.wbin, POD_signal_in[good], '.',
label='unflagged intense')
# plt.plot(POD_phase[good] / self.wbin, POD_signal[good], '.',
# label='unflagged - model')
if self.twice:
plt.legend(loc='best')
plt.figure()
POD_signal_estimate2 = POD_signal2_in - POD_signal2
good = POD_flag2_in + POD_pntflag == 0
plt.plot(POD_phase[good] / self.wbin, POD_signal2_in[good], '.',
label='input')
good = POD_flag2 + POD_pntflag == 0
plt.plot(POD_phase[good] / self.wbin, POD_signal2_in[good],
label='unflagged')
plt.plot(POD_phase[good] / self.wbin,
POD_signal_estimate2[good], label='model')
good[POD_flag_intense == 0] = False
plt.plot(POD_phase[good] / self.wbin, POD_signal2_in[good], '.',
label='unflagged intense')
# plt.plot(POD_phase[good] / self.wbin, POD_signal2[good], '.',
# label='unflagged - model')
plt.legend(loc='best')
plt.show()
pdb.set_trace()
# DEBUG end
"""
if self.twice:
POD_flag2[POD_flag2_in] = False
POD_flag[POD_flag2] = True
flag = POD_flag[reverse_ind]
# flag = POD_flag[reverse_ind]
flag_intense = POD_flag_intense[reverse_ind]
signal_estimate = POD_signal_estimate[reverse_ind]
if self.wkernel:
# Extend the flagging
flag[flag_in] = False
flag = np.convolve(flag, np.ones(self.wkernel), mode='same') != 0
flag = np.roll(flag, self.wkernel // 2 - 1)
flag[flag_in] = True
return flag, flag_intense, signal_estimate
def subtract_trend(self, signal):
"""
subtract a simple trend
"""
istart = 0
step = np.int(60 * self.fsample)
while istart < signal.size:
istop = istart + step
if istop + step > signal.size:
istop += step
ind = slice(istart, istop)
offset = np.median(signal[ind])
signal[ind] -= offset
istart = istop
return
def flag_outliers(self, signal, flag):
"""
Find outliers in offset-removed signal
"""
for _ in range(10):
offset = np.median(signal[flag == 0])
signal -= offset
rms = np.mean(signal[flag == 0] ** 2) ** .5
bad = np.abs(signal) > self.threshold * rms
bad[flag != 0] = False
nbad = np.sum(bad)
if nbad == 0:
break
flag[bad] = True
return
def _get_bin(self, ibin, signal, phase, flag, pntflag, bin_ranges):
"""
Return signal in the current bin with margins
"""
nbin = len(bin_ranges)
signals = []
phases = []
flags = []
pntflags = []
# previous bin
if ibin == 0:
bin_start, bin_stop = bin_ranges[-1]
else:
bin_start, bin_stop = bin_ranges[ibin - 1]
ind = slice(bin_start, bin_stop)
signals.append(signal[ind])
if ibin == 0:
phases.append(phase[ind] - 2 * np.pi)
else:
phases.append(phase[ind])
flags.append(flag[ind])
pntflags.append(pntflag[ind])
# current bin
bin_start, bin_stop = bin_ranges[ibin]
ind = slice(bin_start, bin_stop)
signals.append(signal[ind])
phases.append(phase[ind])
flags.append(flag[ind])
pntflags.append(pntflag[ind])
# next bin
if ibin < nbin - 1:
bin_start, bin_stop = bin_ranges[ibin + 1]
else:
bin_start, bin_stop = bin_ranges[0]
ind = slice(bin_start, bin_stop)
signals.append(signal[ind])
if ibin < nbin - 1:
phases.append(phase[ind])
else:
phases.append(phase[ind] + 2 * np.pi)
flags.append(flag[ind])
pntflags.append(pntflag[ind])
center = slice(signals[0].size, signals[0].size + signals[1].size)
# concatenate
signals = np.hstack(signals)
phases = np.hstack(phases)
flags = np.hstack(flags)
pntflags = np.hstack(pntflags)
return signals, phases, flags, pntflags, center
def robust_rms(self, x):
"""
        Estimate the sample scatter (robust RMS) using the interquartile range (IQR) method
"""
if len(x) < 4:
return np.std(x)
xsorted = np.sort(x)
nx = x.size
i1 = np.int(0.25 * nx)
i2 = np.int(0.75 * nx)
iqr = xsorted[i2] - xsorted[i1]
rms = iqr * 0.7412
return rms
def flag_outliers_by_phase(self, signal, phase, flag, pntflag, bin_ranges):
"""
Find outliers in the de-trended signal and derive a signal estimate.
"""
bin_rms = []
nbin = len(bin_ranges)
signal_out = np.zeros_like(signal)
flag_out = np.zeros_like(flag)
for ibin in range(nbin):
bin_start, bin_stop = bin_ranges[ibin]
ind = slice(bin_start, bin_stop)
sig, phse, flg, pntflg, center = self._get_bin(
ibin, signal, phase, flag, pntflag, bin_ranges)
rms = 0
for iiter in range(10):
good_ind = flg + pntflg == 0
ngood = np.sum(good_ind)
if ngood < 10:
# This bin is beyond hope
flg[:] = True
break
if iiter < 2:
# Signal model is an offset
offset = np.median(sig[good_ind])
else:
# Signal model is a polynomial
offset = self.fit_poly(phse, sig, good_ind)
sig -= offset
rms = self.robust_rms(sig[good_ind])
bad = np.abs(sig) > self.threshold * rms
bad[flg != 0] = False
nbad = np.sum(bad)
if nbad == 0 and iiter > 2:
break
flg[bad] = True
signal_out[ind] = sig[center]
flag_out[ind] = flg[center]
bin_rms.append(rms)
return signal_out, flag_out, np.array(bin_rms)
def get_intense(self, bin_ranges, bin_rms, noise, estimate, flag):
"""
Flag all samples falling into bins with extreme RMS as intense
"""
snr = []
for ibin, ((bin_start, bin_stop), rms) in enumerate(zip(bin_ranges,
bin_rms)):
ind = slice(bin_start, bin_stop)
good = flag[ind] == 0
rms_signal = np.std(estimate[ind][good])
rms_noise = np.std(noise[ind][good])
snr.append(rms_signal / rms_noise)
flag_intense = np.zeros_like(flag)
good = bin_rms != 0
for _ in range(10):
ngood = np.sum(good)
good_rms = bin_rms[good]
rms_median = np.median(good_rms)
rms_rms = (np.sum((good_rms - rms_median) ** 2) / (ngood - 1)) ** .5
for ibin, ((bin_start, bin_stop), rms) in enumerate(zip(bin_ranges,
bin_rms)):
if rms < max(2 * rms_median,
rms_median + 5 * rms_rms) and snr[ibin] < 1:
continue
good[ibin] = False
ind = slice(bin_start, bin_stop)
flag_intense[ind] = True
return flag_intense
def fit_poly(self, x, y, ind_fit):
templates = []
xx = (x - np.mean(x)) / np.ptp(x)
for iorder in range(self.order + 1):
templates.append(xx[ind_fit] ** iorder)
templates =
|
np.vstack(templates)
|
numpy.vstack
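# Hedged sketch of the IQR-based scale estimate used by robust_rms above: for a
# Gaussian sample the interquartile range is about 1.349 standard deviations, so
# IQR * 0.7412 (roughly 1/1.349) recovers the noise RMS while staying insensitive
# to a handful of glitches. Synthetic data below.
import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(0.0, 2.0, size=100_000)
x[:50] += 100.0                          # inject a few large glitches
q1, q3 = np.percentile(x, [25, 75])
robust = (q3 - q1) * 0.7412
print(robust, np.std(x))                 # robust stays near 2.0; np.std is inflated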
|
from .socp import Model as SOCModel
from .ro import Model as ROModel
from .lp import LinConstr, ConeConstr, CvxConstr
from .lp import Vars, Affine
from .lp import RoAffine, RoConstr
from .lp import DecVar, RandVar, DecLinConstr, DecCvxConstr
from .lp import DecRoConstr
from .lp import Scen
from .lp import Solution
from .lpg_solver import solve as def_sol
from .subroutines import *
import numpy as np
import pandas as pd
import warnings
from collections import Sized, Iterable
class Model:
"""
Returns a model object with the given number of scenarios.
Parameters
----------
scens : int or array-like objects
The number of scenarios, if it is an integer. It could also be
an array of scenario indices.
name : str
Name of the model
Returns
-------
model : rsome.dro.Model
A model object
"""
def __init__(self, scens=1, name=None):
self.ro_model = ROModel()
self.vt_model = SOCModel(mtype='V')
self.sup_model = self.ro_model.sup_model
self.exp_model = SOCModel(nobj=True, mtype='E')
self.pro_model = SOCModel(nobj=True, mtype='P')
self.obj_ambiguity = None
if isinstance(scens, int):
num_scen = scens
series = pd.Series(np.arange(num_scen).astype(int))
elif isinstance(scens, Sized):
num_scen = len(scens)
series = pd.Series(np.arange(num_scen).astype(int), index=scens)
else:
raise ValueError('Incorrect scenarios.')
self.num_scen = num_scen
self.series_scen = series
self.dec_vars = [DecVar(self, self.vt_model.vars[0])]
self.rand_vars = []
self.all_constr = []
self.var_ev_list = None
# self.affadapt_mat = None
pr = self.pro_model.dvar(num_scen, name='probabilities')
self.p = pr
self.obj = None
self.sign = 1
self.primal = None
self.dual = None
self.solution = None
self.pupdate = True
self.dupdate = True
self.solution = None
self.name = name
def rvar(self, shape=(1,), name=None):
sup_var = self.sup_model.dvar(shape, 'C', name)
exp_var = self.exp_model.dvar(shape, 'C', name)
rand_var = RandVar(sup_var, exp_var)
self.rand_vars.append(rand_var)
return rand_var
def dvar(self, shape=(1,), vtype='C', name=None):
"""
Returns an array of decision variables with the given shape
and variable type.
Parameters
----------
shape : int or tuple
Shape of the variable array.
vtype : {'C', 'B', 'I'}
Type of the decision variables. 'C' means continuous; 'B'
means binary, and 'I" means integer.
name : str
Name of the variable array
Returns
-------
new_var : rsome.lp.DecVar
An array of new decision variables
"""
dec_var = self.vt_model.dvar(shape, vtype, name)
dec_var = DecVar(self, dec_var, name=name)
self.dec_vars.append(dec_var)
return dec_var
def ambiguity(self):
if self.all_constr:
raise SyntaxError('Ambiguity set must be specified ' +
'before defining constraints.')
return Ambiguity(self)
def bou(self, *args):
if self.num_scen != 1:
raise ValueError('The uncertainty set can only be applied '
'to a one-scenario model')
bou_set = self.ambiguity()
for arg in args:
if arg.model is not self.sup_model:
raise ValueError('Constraints are not for this support.')
bou_set.sup_constr = [tuple(args)]
return bou_set
def wks(self, *args):
if self.num_scen != 1:
raise ValueError('The WKS ambiguity set can only be applied '
'to a one-scenario model')
wks_set = self.ambiguity()
sup_constr = []
exp_constr = []
for arg in args:
if arg.model is self.sup_model:
sup_constr.append(arg)
elif arg.model is self.exp_model:
exp_constr.append(arg)
else:
raise ValueError('Constraints are not defined for the '
'ambiguity support.')
wks_set.sup_constr = [tuple(sup_constr)]
wks_set.exp_constr = [tuple(exp_constr)]
wks_set.exp_constr_indices = [np.array([0], dtype=np.int32)]
return wks_set
def rule_var(self):
if self.var_ev_list is not None:
return self.var_ev_list
total = sum(dvar.size*len(dvar.event_adapt)
for dvar in self.dec_vars)
vtype = ''.join([dvar.vtype * dvar.size * len(dvar.event_adapt)
if len(dvar.vtype) == 1
else dvar.vtype * len(dvar.event_adapt)
for dvar in self.dec_vars])
var_const = self.ro_model.dvar(total, vtype=vtype)
count = 0
for dvar in self.dec_vars:
dvar.ro_first = count
count += dvar.size*len(dvar.event_adapt)
num_scen = self.num_scen
self.var_ev_list = []
for s in range(num_scen):
start = 0
index = []
total_size = 0
total_col = 0
for dvar in self.dec_vars:
edict = event_dict(dvar.event_adapt)
size = dvar.size
index.extend(list(start + size * edict[s]
+
|
np.arange(size, dtype=int)
|
numpy.arange
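# Hedged usage sketch for the distributionally robust Model above, with made-up
# dimensions. Scenarios can be given as a count or as an index-like sequence,
# dvar() creates decision variables, rvar() creates random variables living in
# the ambiguity support, and ambiguity() opens an (as yet unconstrained)
# ambiguity set. This only illustrates the calls defined in this file; it is not
# a complete optimisation model.
model = Model(scens=['low', 'normal', 'high'])  # three named scenarios
x = model.dvar(5)                               # 5 continuous decisions
y = model.dvar((2, 3), vtype='B')               # a 2x3 block of binary decisions
z = model.rvar(5)                               # 5 random variables
fset = model.ambiguity()                        # ambiguity set, constrained later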
|
#!/usr/bin/env python
u"""
ecco_monthly_harmonics.py
Written by <NAME> (10/2021)
Reads monthly ECCO ocean bottom pressure anomalies and converts to
spherical harmonic coefficients
INPUTS:
ECCO Near Real-Time models
kf080i: Kalman filter analysis
https://ecco.jpl.nasa.gov/drive/files/NearRealTime/KalmanFilter/
dr080i: RTS smoother analysis
https://ecco.jpl.nasa.gov/drive/files/NearRealTime/Smoother/
ECCO2 Cube92 models
Cube92
ECCO version 4 models
V4r3: Version 4, Revision 3
V4r4: Version 4, Revision 4
COMMAND LINE OPTIONS:
-D X, --directory X: Working data directory
-Y X, --year X: Years to run
-l X, --lmax X: maximum spherical harmonic degree
-m X, --mmax X: maximum spherical harmonic order
-n X, --love X: Load Love numbers dataset
0: Han and Wahr (1995) values from PREM
1: Gegout (2005) values from PREM
2: Wang et al. (2012) values from PREM
--reference X: Reference frame for load love numbers
CF: Center of Surface Figure (default)
CM: Center of Mass of Earth System
CE: Center of Mass of Solid Earth
-F X, --format X: Input and output data format
ascii
netcdf
HDF5
-V, --verbose: Output information for each output file
-M X, --mode X: Permission mode of directories and files
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
dateutil: powerful extensions to datetime
https://dateutil.readthedocs.io/en/stable/
netCDF4: Python interface to the netCDF C library
https://unidata.github.io/netcdf4-python/netCDF4/index.html
h5py: Pythonic interface to the HDF5 binary data format.
https://www.h5py.org/
PROGRAM DEPENDENCIES:
plm_holmes.py: computes fully-normalized associated Legendre polynomials
read_love_numbers.py: reads Load Love Numbers from Han and Wahr (1995)
ref_ellipsoid.py: calculate reference parameters for common ellipsoids
norm_gravity.py: calculates the normal gravity for locations on an ellipsoid
gen_pressure_stokes.py: converts a pressure field into spherical harmonics
harmonics.py: spherical harmonic data class for processing GRACE/GRACE-FO
destripe_harmonics.py: calculates the decorrelation (destriping) filter
and filters the GRACE/GRACE-FO coefficients for striping errors
ncdf_read_stokes.py: reads spherical harmonic netcdf files
ncdf_stokes.py: writes output spherical harmonic data to netcdf
hdf5_read_stokes.py: reads spherical harmonic HDF5 files
hdf5_stokes.py: writes output spherical harmonic data to HDF5
spatial.py: spatial data class for reading, writing and processing data
ncdf_read.py: reads input spatial data from netCDF4 files
hdf5_read.py: reads input spatial data from HDF5 files
ncdf_write.py: writes output spatial data to netCDF4
hdf5_write.py: writes output spatial data to HDF5
time.py: utilities for calculating time operations
utilities.py: download and management utilities for files
UPDATE HISTORY:
Updated 10/2021: using python logging for handling verbose output
use output harmonic file wrapper routine to write to file
Updated 09/2021: use GRACE/GRACE-FO month to calendar month converters
Updated 07/2021: can use input files to define command line arguments
Updated 05/2021: define int/float precision to prevent deprecation warning
Updated 03/2021: automatically update years to run based on current time
Updated 02/2021: separate inputs to gen_pressure_stokes
Updated 01/2021: added Cube92 choice to input model types
outputs from gen_pressure_stokes are now harmonics objects
Updated 12/2020: use argparse to set command line parameters
using spatial and harmonics modules for read/write operations
added more love number options. using utilities from time module
Updated 10/2019: changing Y/N flags to True/False
Updated 06/2019: recommending kf080i for the Kalman filtered solution
Updated 10/2018: separated gen_pressure_stokes into separate function
Updated 07/2018: output index and date files in separate loop for all files
Updated 03/2018: use realistic geometry from bathymetry and local gravity
simplified love number extrapolation if LMAX is greater than 696
Updated 01/2018: using getopt to set parameters
Updated 08/2017: convert from geodetic coordinates to geocentric
Updated 08/2016: fixed find_new_files function with previous updates
Updated 06/2016: can use dr080g model, using __future__ print option
Updated 05/2016: complete rewrite of program
Written 05/2013
"""
from __future__ import print_function
import os
import re
import logging
import netCDF4
import argparse
import numpy as np
import gravity_toolkit.time
import gravity_toolkit.spatial
import gravity_toolkit.harmonics
import gravity_toolkit.utilities as utilities
from gravity_toolkit.plm_holmes import plm_holmes
from gravity_toolkit.read_love_numbers import read_love_numbers
from model_harmonics.gen_pressure_stokes import gen_pressure_stokes
from geoid_toolkit.ref_ellipsoid import ref_ellipsoid
from geoid_toolkit.norm_gravity import norm_gravity
#-- PURPOSE: convert monthly ECCO OBP data to spherical harmonics
def ecco_monthly_harmonics(ddir, MODEL, YEARS, LMAX=0, MMAX=None,
LOVE_NUMBERS=0, REFERENCE=None, DATAFORM=None, VERBOSE=False,
MODE=0o775):
#-- create logger for verbosity level
loglevel = logging.INFO if VERBOSE else logging.CRITICAL
logging.basicConfig(level=loglevel)
#-- input and output subdirectory
input_sub = 'ECCO_{0}_AveRmvd_OBP'.format(MODEL)
output_sub = 'ECCO_{0}_AveRmvd_OBP_CLM_L{1:d}'.format(MODEL,LMAX)
#-- upper bound of spherical harmonic orders (default = LMAX)
MMAX = np.copy(LMAX) if not MMAX else MMAX
#-- output string for both LMAX == MMAX and LMAX != MMAX cases
order_str = 'M{0:d}'.format(MMAX) if (MMAX != LMAX) else ''
#-- output file format
output_file_format = 'ECCO_{0}_AveRmvd_OBP_CLM_L{1:d}{2}_{3:03d}.{4}'
#-- Creating subdirectory if it doesn't exist
if (not os.access(os.path.join(ddir,output_sub), os.F_OK)):
os.makedirs(os.path.join(ddir,output_sub),MODE)
#-- input/output data file format
suffix = dict(ascii='txt', netCDF4='nc', HDF5='H5')
#-- parameters for each model
if MODEL in ('kf080i','dr080i'):
#-- grid step size
dlon,dlat = (1.0,1.0)
#-- grid extent
LAT_MAX = 78.5
extent = [0.5,359.5,-LAT_MAX,LAT_MAX]
input_depth_file = os.path.join(ddir,'depth.nc')
input_geoid_file = os.path.join(ddir,'egm_2008.nc')
#-- indices to read
indices = np.arange(1,2*LAT_MAX+2).astype(np.int64)
elif MODEL in ('Cube92',):
#-- grid step size
dlon,dlat = (0.25,0.25)
#-- grid extent
extent = [0.125,359.875,-89.875,89.875]
input_depth_file = os.path.join(ddir,'DEPTH.2020.1440x720.nc')
input_geoid_file = os.path.join(ddir,'EGM_2008.1440x720.nc')
#-- indices to read (all)
indices = Ellipsis
elif MODEL in ('V4r3','V4r4'):
#-- grid step size
dlon,dlat = (0.5,0.5)
#-- grid extent
extent = [-179.75,179.75,-89.75,89.75]
input_depth_file = os.path.join(ddir,'DEPTH.2020.720x360.nc')
input_geoid_file = os.path.join(ddir,'EGM_2008.720x360.nc')
#-- indices to read (all)
indices = Ellipsis
#-- input grid dimensions
glon = np.arange(extent[0],extent[1]+dlon,dlon)
glat =
|
np.arange(extent[2],extent[3]+dlat,dlat)
|
numpy.arange
|
#!/usr/bin/env python
# coding: utf-8
from __future__ import division
import numpy as np
from numpy.random import rand
from numpy import linalg as LA
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.lines as mlines
import math
import sys
import os
from random import shuffle
from random import gauss
from scipy.interpolate import UnivariateSpline
from scipy.interpolate import InterpolatedUnivariateSpline
from scipy.optimize import fmin
from scipy.optimize import fsolve
from scipy import interpolate
from scipy.optimize import curve_fit
import scipy.optimize as opt
import matplotlib.colors as colors
import matplotlib.cm as cmx
from pylab import polyfit
import matplotlib.ticker as ticker
from matplotlib import gridspec
from mpl_toolkits.axes_grid1.inset_locator import (inset_axes, InsetPosition,
                                                    mark_inset)
from scipy.optimize import differential_evolution
import warnings
from matplotlib.collections import PatchCollection
from matplotlib.patches import Rectangle
from matplotlib import rc
rc('font',**{'family':'sans-serif', 'size' : 10}) #, 'sans-serif':['Arial']})
## for Palatino and other serif fonts use:
#rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
color_red = (0.73, 0.13869999999999993, 0.)
color_orange = (1., 0.6699999999999999, 0.)
color_green = (0.14959999999999996, 0.43999999999999995, 0.12759999999999994)
color_blue = (0.06673600000000002, 0.164512, 0.776)
color_purple = (0.25091600000000003, 0.137378, 0.29800000000000004)
color_ocker = (0.6631400000000001, 0.71, 0.1491)
color_pink = (0.71, 0.1491, 0.44730000000000003)
color_brown = (0.651, 0.33331200000000005, 0.054683999999999955)
color_all = [color_red, color_orange, color_green, color_blue, color_purple, color_ocker,color_pink, color_brown]
def fit_func_cv(x ,a ,b, c, d, e):
return np.exp(a)*x + b*np.absolute(x - d)**(-c) + np.exp(e)
def fit_func_cv_fix(x, b, d):
return b*np.absolute(x - d)**(-1/3)
######
#-----------------------------------------------------------------------------------------------------------------------
#######
#parameters of the code
######
#-----------------------------------------------------------------------------------------------------------------------
######
j2 = 1.0
j6 = 1.0
Kc = 0.0
lambda3 = 2.1
q_Q = 1
N_list = [60, 80, 100, 120, 140, 160, 180, 200, 300, 380]
N_og = [10, 20, 40, 60, 80, 100, 120, 140, 160, 180, 200, 300, 380]
#N_fit_cv = 200
N_fit_cv = 300
####
#code
###
cv_ind = 1
data = np.load('Delta1_data.npy',allow_pickle=True)
#create a mix of all of them:
flat_data = []
flat_error = []
flat_temp = []
Tmin_l = []
Tmax_l = []
for i in range(len(N_list)):
i_og = i + 3
flat_data.extend((N_list[i]**2)*data[i_og][2*cv_ind + 1])
flat_error.extend((N_list[i])*data[i_og][2*cv_ind + 2])
flat_temp.extend(data[i_og][0])
Tmin_l.append(np.min(data[i_og][0]))
Tmax_l.append(np.max(data[i_og][0]))
flat_data = np.array(flat_data)
flat_error = np.array(flat_error)
flat_temp = np.array(flat_temp)
all_together = np.array([flat_temp, flat_data, flat_error])
all_together = np.transpose(all_together)
all_together_sorted = all_together[all_together[:,0].argsort()]
#print(all_together_sorted)
#create duplicates list
seen = set()
uniq = []
for x in list(all_together_sorted[:,0]):
if x not in seen:
uniq.append(x)
seen.add(x)
sorted_and_filter_temp = np.sort(np.array(list(seen)))
values_avg = np.zeros(len(sorted_and_filter_temp))
values_err = np.zeros(len(sorted_and_filter_temp))
temps_all = all_together_sorted[:,0]
j=0
n = 1
for i in range(len(sorted_and_filter_temp)):
indices = np.argwhere(temps_all == sorted_and_filter_temp[i])
values_avg[i] = np.mean((all_together_sorted[:,1])[indices])
values_err[i] = np.mean((all_together_sorted[:,2])[indices])
###########
#fit for a specific Cv
jind = N_og.index(N_fit_cv)
fit_all = False
if fit_all:
data_fit = values_avg
data_err = values_err
x_range_fit = sorted_and_filter_temp
else:
data_fit = (N_og[jind]**2)*data[jind][2*cv_ind + 1]
    data_err = (N_og[jind])*data[jind][2*cv_ind + 2]  # error column (2*cv_ind + 2), consistent with flat_error above
x_range_fit = data[jind][0]
#popt2, pcov2 = curve_fit(fit_func_cv, x_range_fit, data_fit, sigma = data_err,\
# absolute_sigma = True, p0 = [0.5, 0.4, 1.2], bounds = ([0, 0.01, 0.1], [10, 1.0, 2.0]))
popt2, pcov2 = curve_fit(fit_func_cv, x_range_fit, data_fit, sigma = data_err, absolute_sigma = True, p0 = [0.0, 0.5, 0.4, 1.201, 0.0])
#popt2, pcov2 = curve_fit(fit_func_cv_fix, x_range_fit, data_fit, sigma = data_err, absolute_sigma = True, p0 = [0.5, 1.201])
print('fit vals')
print(popt2)
print('errors')
print(np.sqrt(np.diag(pcov2)))
#y_fit = fit_func_cv_fix(x_range_fit, *popt2)
y_fit = fit_func_cv(x_range_fit, *popt2)
y = data_fit
# residual sum of squares
ss_res = np.sum((y - y_fit) ** 2)
# total sum of squares
ss_tot = np.sum((y -
|
np.mean(y)
|
numpy.mean
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
gnpy.tools.cli_examples
=======================
Common code for CLI examples
'''
import argparse
import logging
import sys
from math import ceil
from numpy import linspace, mean
from pathlib import Path
import gnpy.core.ansi_escapes as ansi_escapes
from gnpy.core.elements import Transceiver, Fiber, RamanFiber
from gnpy.core.equipment import trx_mode_params
import gnpy.core.exceptions as exceptions
from gnpy.core.network import build_network
from gnpy.core.parameters import SimParams
from gnpy.core.utils import db2lin, lin2db, automatic_nch
from gnpy.topology.request import (ResultElement, jsontocsv, compute_path_dsjctn, requests_aggregation,
BLOCKING_NOPATH, correct_json_route_list,
deduplicate_disjunctions, compute_path_with_disjunction,
PathRequest, compute_constrained_path, propagate)
from gnpy.topology.spectrum_assignment import build_oms_list, pth_assign_spectrum
from gnpy.tools.json_io import load_equipment, load_network, load_json, load_requests, save_network, \
requests_from_json, disjunctions_from_json, save_json
from gnpy.tools.plots import plot_baseline, plot_results
_logger = logging.getLogger(__name__)
_examples_dir = Path(__file__).parent.parent / 'example-data'
_help_footer = '''
This program is part of GNPy, https://github.com/TelecomInfraProject/oopt-gnpy
Learn more at https://gnpy.readthedocs.io/
'''
_help_fname_json = 'FILE.json'
_help_fname_json_csv = 'FILE.(json|csv)'
def show_example_data_dir():
print(f'{_examples_dir}/')
def load_common_data(equipment_filename, topology_filename, simulation_filename, save_raw_network_filename):
'''Load common configuration from JSON files'''
try:
equipment = load_equipment(equipment_filename)
network = load_network(topology_filename, equipment)
if save_raw_network_filename is not None:
save_network(network, save_raw_network_filename)
print(f'{ansi_escapes.blue}Raw network (no optimizations) saved to {save_raw_network_filename}{ansi_escapes.reset}')
if not simulation_filename:
sim_params = {}
if next((node for node in network if isinstance(node, RamanFiber)), None) is not None:
print(f'{ansi_escapes.red}Invocation error:{ansi_escapes.reset} '
f'RamanFiber requires passing simulation params via --sim-params')
sys.exit(1)
else:
sim_params = load_json(simulation_filename)
SimParams.set_params(sim_params)
except exceptions.EquipmentConfigError as e:
print(f'{ansi_escapes.red}Configuration error in the equipment library:{ansi_escapes.reset} {e}')
sys.exit(1)
except exceptions.NetworkTopologyError as e:
print(f'{ansi_escapes.red}Invalid network definition:{ansi_escapes.reset} {e}')
sys.exit(1)
except exceptions.ParametersError as e:
print(f'{ansi_escapes.red}Simulation parameters error:{ansi_escapes.reset} {e}')
sys.exit(1)
except exceptions.ConfigurationError as e:
print(f'{ansi_escapes.red}Configuration error:{ansi_escapes.reset} {e}')
sys.exit(1)
except exceptions.ServiceError as e:
print(f'{ansi_escapes.red}Service error:{ansi_escapes.reset} {e}')
sys.exit(1)
return (equipment, network)
def _setup_logging(args):
logging.basicConfig(level={2: logging.DEBUG, 1: logging.INFO, 0: logging.CRITICAL}.get(args.verbose, logging.DEBUG))
def _add_common_options(parser: argparse.ArgumentParser, network_default: Path):
parser.add_argument('topology', nargs='?', type=Path, metavar='NETWORK-TOPOLOGY.(json|xls|xlsx)',
default=network_default,
help='Input network topology')
parser.add_argument('-v', '--verbose', action='count', default=0,
help='Increase verbosity (can be specified several times)')
parser.add_argument('-e', '--equipment', type=Path, metavar=_help_fname_json,
default=_examples_dir / 'eqpt_config.json', help='Equipment library')
parser.add_argument('--sim-params', type=Path, metavar=_help_fname_json,
default=None, help='Path to the JSON containing simulation parameters (required for Raman). '
f'Example: {_examples_dir / "sim_params.json"}')
parser.add_argument('--save-network', type=Path, metavar=_help_fname_json,
help='Save the final network as a JSON file')
parser.add_argument('--save-network-before-autodesign', type=Path, metavar=_help_fname_json,
help='Dump the network into a JSON file prior to autodesign')
parser.add_argument('--no-insert-edfas', action='store_true',
help='Disable insertion of EDFAs after ROADMs and fibers '
'as well as splitting of fibers by auto-design.')
def transmission_main_example(args=None):
parser = argparse.ArgumentParser(
description='Send a full spectrum load through the network from point A to point B',
epilog=_help_footer,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
_add_common_options(parser, network_default=_examples_dir / 'edfa_example_network.json')
parser.add_argument('--show-channels', action='store_true', help='Show final per-channel OSNR and GSNR summary')
parser.add_argument('-pl', '--plot', action='store_true')
parser.add_argument('-l', '--list-nodes', action='store_true', help='list all transceiver nodes')
parser.add_argument('-po', '--power', default=0, help='channel ref power in dBm')
parser.add_argument('source', nargs='?', help='source node')
parser.add_argument('destination', nargs='?', help='destination node')
args = parser.parse_args(args if args is not None else sys.argv[1:])
_setup_logging(args)
(equipment, network) = load_common_data(args.equipment, args.topology, args.sim_params, args.save_network_before_autodesign)
if args.plot:
plot_baseline(network)
transceivers = {n.uid: n for n in network.nodes() if isinstance(n, Transceiver)}
if not transceivers:
sys.exit('Network has no transceivers!')
if len(transceivers) < 2:
sys.exit('Network has only one transceiver!')
if args.list_nodes:
for uid in transceivers:
print(uid)
sys.exit()
# First try to find exact match if source/destination provided
if args.source:
source = transceivers.pop(args.source, None)
valid_source = True if source else False
else:
source = None
_logger.info('No source node specified: picking random transceiver')
if args.destination:
destination = transceivers.pop(args.destination, None)
valid_destination = True if destination else False
else:
destination = None
_logger.info('No destination node specified: picking random transceiver')
# If no exact match try to find partial match
if args.source and not source:
# TODO code a more advanced regex to find nodes match
source = next((transceivers.pop(uid) for uid in transceivers
if args.source.lower() in uid.lower()), None)
if args.destination and not destination:
# TODO code a more advanced regex to find nodes match
destination = next((transceivers.pop(uid) for uid in transceivers
if args.destination.lower() in uid.lower()), None)
# If no partial match or no source/destination provided pick random
if not source:
source = list(transceivers.values())[0]
del transceivers[source.uid]
if not destination:
destination = list(transceivers.values())[0]
_logger.info(f'source = {args.source!r}')
_logger.info(f'destination = {args.destination!r}')
params = {}
params['request_id'] = 0
params['trx_type'] = ''
params['trx_mode'] = ''
params['source'] = source.uid
params['destination'] = destination.uid
params['bidir'] = False
params['nodes_list'] = [destination.uid]
params['loose_list'] = ['strict']
params['format'] = ''
params['path_bandwidth'] = 0
params['effective_freq_slot'] = None
trx_params = trx_mode_params(equipment)
if args.power:
trx_params['power'] = db2lin(float(args.power)) * 1e-3
params.update(trx_params)
req = PathRequest(**params)
power_mode = equipment['Span']['default'].power_mode
print('\n'.join([f'Power mode is set to {power_mode}',
f'=> it can be modified in eqpt_config.json - Span']))
pref_ch_db = lin2db(req.power * 1e3) # reference channel power / span (SL=20dB)
pref_total_db = pref_ch_db + lin2db(req.nb_channel) # reference total power / span (SL=20dB)
try:
build_network(network, equipment, pref_ch_db, pref_total_db, args.no_insert_edfas)
except exceptions.NetworkTopologyError as e:
print(f'{ansi_escapes.red}Invalid network definition:{ansi_escapes.reset} {e}')
sys.exit(1)
except exceptions.ConfigurationError as e:
print(f'{ansi_escapes.red}Configuration error:{ansi_escapes.reset} {e}')
sys.exit(1)
path = compute_constrained_path(network, req)
spans = [s.params.length for s in path if isinstance(s, RamanFiber) or isinstance(s, Fiber)]
print(f'\nThere are {len(spans)} fiber spans over {sum(spans)/1000:.0f} km between {source.uid} '
f'and {destination.uid}')
print(f'\nNow propagating between {source.uid} and {destination.uid}:')
power_range = [0]
if power_mode:
# power cannot be changed in gain mode
try:
p_start, p_stop, p_step = equipment['SI']['default'].power_range_db
p_num = abs(int(round((p_stop - p_start) / p_step))) + 1 if p_step != 0 else 1
power_range = list(linspace(p_start, p_stop, p_num))
except TypeError:
print('invalid power range definition in eqpt_config, should be power_range_db: [lower, upper, step]')
for dp_db in power_range:
req.power = db2lin(pref_ch_db + dp_db) * 1e-3
if power_mode:
print(f'\nPropagating with input power = {ansi_escapes.cyan}{lin2db(req.power*1e3):.2f} dBm{ansi_escapes.reset}:')
else:
print(f'\nPropagating in {ansi_escapes.cyan}gain mode{ansi_escapes.reset}: power cannot be set manually')
infos = propagate(path, req, equipment)
if len(power_range) == 1:
for elem in path:
print(elem)
if power_mode:
print(f'\nTransmission result for input power = {lin2db(req.power*1e3):.2f} dBm:')
else:
print(f'\nTransmission results:')
print(f' Final GSNR (0.1 nm): {ansi_escapes.cyan}{mean(destination.snr_01nm):.02f} dB{ansi_escapes.reset}')
else:
print(path[-1])
if args.save_network is not None:
save_network(network, args.save_network)
print(f'{ansi_escapes.blue}Network (after autodesign) saved to {args.save_network}{ansi_escapes.reset}')
if args.show_channels:
print('\nThe GSNR per channel at the end of the line is:')
print(
'{:>5}{:>26}{:>26}{:>28}{:>28}{:>28}' .format(
'Ch. #',
'Channel frequency (THz)',
'Channel power (dBm)',
'OSNR ASE (signal bw, dB)',
'SNR NLI (signal bw, dB)',
'GSNR (signal bw, dB)'))
for final_carrier, ch_osnr, ch_snr_nl, ch_snr in zip(
infos.carriers, path[-1].osnr_ase, path[-1].osnr_nli, path[-1].snr):
ch_freq = final_carrier.frequency * 1e-12
ch_power = lin2db(final_carrier.power.signal * 1e3)
print(
'{:5}{:26.2f}{:26.2f}{:28.2f}{:28.2f}{:28.2f}' .format(
final_carrier.channel_number, round(
ch_freq, 2), round(
ch_power, 2), round(
ch_osnr, 2), round(
ch_snr_nl, 2), round(
ch_snr, 2)))
if not args.source:
print(f'\n(No source node specified: picked {source.uid})')
elif not valid_source:
print(f'\n(Invalid source node {args.source!r} replaced with {source.uid})')
if not args.destination:
print(f'\n(No destination node specified: picked {destination.uid})')
elif not valid_destination:
print(f'\n(Invalid destination node {args.destination!r} replaced with {destination.uid})')
if args.plot:
plot_results(network, path, source, destination)
def _path_result_json(pathresult):
return {'response': [n.json for n in pathresult]}
def path_requests_run(args=None):
parser = argparse.ArgumentParser(
description='Compute performance for a list of services provided in a json file or an excel sheet',
epilog=_help_footer,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
_add_common_options(parser, network_default=_examples_dir / 'meshTopologyExampleV2.xls')
parser.add_argument('service_filename', nargs='?', type=Path, metavar='SERVICES-REQUESTS.(json|xls|xlsx)',
default=_examples_dir / 'meshTopologyExampleV2.xls',
help='Input service file')
parser.add_argument('-bi', '--bidir', action='store_true',
help='considers that all demands are bidir')
parser.add_argument('-o', '--output', type=Path, metavar=_help_fname_json_csv,
                        help='Store satisfied requests into a JSON or CSV file')
args = parser.parse_args(args if args is not None else sys.argv[1:])
_setup_logging(args)
_logger.info(f'Computing path requests {args.service_filename} into JSON format')
(equipment, network) = load_common_data(args.equipment, args.topology, args.sim_params, args.save_network_before_autodesign)
# Build the network once using the default power defined in SI in eqpt config
    # TODO power density: db2lin(power_dbm) / power_dbm * nb channels as defined by
    # spacing, f_min and f_max
p_db = equipment['SI']['default'].power_dbm
p_total_db = p_db + lin2db(automatic_nch(equipment['SI']['default'].f_min,
equipment['SI']['default'].f_max, equipment['SI']['default'].spacing))
try:
build_network(network, equipment, p_db, p_total_db, args.no_insert_edfas)
except exceptions.NetworkTopologyError as e:
print(f'{ansi_escapes.red}Invalid network definition:{ansi_escapes.reset} {e}')
sys.exit(1)
except exceptions.ConfigurationError as e:
print(f'{ansi_escapes.red}Configuration error:{ansi_escapes.reset} {e}')
sys.exit(1)
if args.save_network is not None:
save_network(network, args.save_network)
print(f'{ansi_escapes.blue}Network (after autodesign) saved to {args.save_network}{ansi_escapes.reset}')
oms_list = build_oms_list(network, equipment)
try:
data = load_requests(args.service_filename, equipment, bidir=args.bidir,
network=network, network_filename=args.topology)
rqs = requests_from_json(data, equipment)
except exceptions.ServiceError as e:
print(f'{ansi_escapes.red}Service error:{ansi_escapes.reset} {e}')
sys.exit(1)
# check that request ids are unique. Non unique ids, may
# mess the computation: better to stop the computation
all_ids = [r.request_id for r in rqs]
if len(all_ids) != len(set(all_ids)):
for item in list(set(all_ids)):
all_ids.remove(item)
msg = f'Requests id {all_ids} are not unique'
_logger.critical(msg)
sys.exit()
rqs = correct_json_route_list(network, rqs)
# pths = compute_path(network, equipment, rqs)
dsjn = disjunctions_from_json(data)
print(f'{ansi_escapes.blue}List of disjunctions{ansi_escapes.reset}')
print(dsjn)
# need to warn or correct in case of wrong disjunction form
# disjunction must not be repeated with same or different ids
dsjn = deduplicate_disjunctions(dsjn)
# Aggregate demands with same exact constraints
print(f'{ansi_escapes.blue}Aggregating similar requests{ansi_escapes.reset}')
rqs, dsjn = requests_aggregation(rqs, dsjn)
# TODO export novel set of aggregated demands in a json file
print(f'{ansi_escapes.blue}The following services have been requested:{ansi_escapes.reset}')
print(rqs)
print(f'{ansi_escapes.blue}Computing all paths with constraints{ansi_escapes.reset}')
try:
pths = compute_path_dsjctn(network, equipment, rqs, dsjn)
except exceptions.DisjunctionError as this_e:
print(f'{ansi_escapes.red}Disjunction error:{ansi_escapes.reset} {this_e}')
sys.exit(1)
print(f'{ansi_escapes.blue}Propagating on selected path{ansi_escapes.reset}')
propagatedpths, reversed_pths, reversed_propagatedpths = compute_path_with_disjunction(network, equipment, rqs, pths)
# Note that deepcopy used in compute_path_with_disjunction returns
# a list of nodes which are not belonging to network (they are copies of the node objects).
# so there can not be propagation on these nodes.
pth_assign_spectrum(pths, rqs, oms_list, reversed_pths)
print(f'{ansi_escapes.blue}Result summary{ansi_escapes.reset}')
    header = ['req id', ' demand', ' GSNR@bandwidth A-Z (Z-A)', ' GSNR@0.1nm A-Z (Z-A)',
' Receiver minOSNR', ' mode', ' Gbit/s', ' nb of tsp pairs',
'N,M or blocking reason']
data = []
data.append(header)
for i, this_p in enumerate(propagatedpths):
rev_pth = reversed_propagatedpths[i]
if rev_pth and this_p:
psnrb = f'{round(
|
mean(this_p[-1].snr)
|
numpy.mean
|
import numpy as np
import h5py
import time
import os
# functions (to be moved to utils.py)
def add_meta_keys(fn, pars_keys, image_keys=[]):
with h5py.File(fn, 'r') as f:
for key in f.keys():
if key not in pars_keys and key not in image_keys:
pars_keys.append(key)
return 0
def get_square_images_fn(cdict, file_number=None):
# in the future: load the first n_events from a file with more images
event_type = cdict['event_type']
n_events = cdict['n_events']
mode = cdict['mode']
Etrue_min = cdict['Etrue_min']
fn = '%s_%i_images_%s' % (event_type, n_events, mode)
if Etrue_min is not None and Etrue_min != 'None':
fn += '_Etrue_min%.1fTeV' % Etrue_min
    if cdict.get('tel') is not None:
fn += '_%s' % cdict['tel']
if file_number is not None:
fn += '_file%i' % file_number
fn += '.h5'
return fn
def get_images_fns(cdict, folder=None, exists=False, nfiles=200):
ev_types = cdict['model_events']
#n_events = cdict['n_events']
#n_events_tot = cdict.get('n_events_tot', None)
#if n_events_tot == None:
# n_events_tot = n_events
#nfiles = int(n_events_tot / n_events)
out_dict = {}
for k, event_type in enumerate(ev_types):
cdict['event_type'] = event_type
out_dict[event_type] = [get_square_images_fn(cdict, file_number=j+1) for j in range(nfiles)]
if folder is not None and exists:
out_dict[event_type] = [fn for fn in out_dict[event_type] if os.path.isfile(folder + fn)]
return out_dict
def get_zeta_fns(cdict, folder=None, exists=False):
out_dict = get_images_fns(cdict)
for key in out_dict.keys():
out_dict[key] = [fn.replace('.h5', '_zeta.h5') for fn in out_dict[key]]
if folder is not None and exists:
out_dict[key] = [fn for fn in out_dict[key] if os.path.isfile(folder + fn)]
return out_dict
def load_images(folder, cdict):
ev_types = cdict['model_events']
n_events = cdict['n_events']
n_events_tot = cdict.get('n_events_tot', None)
    if n_events_tot is None:
n_events_tot = n_events
nfiles = int(n_events_tot / n_events)
data_key = cdict['data_key']
print('load images')
for k, event_type in enumerate(ev_types):
print('load %s images' % event_type)
cdict['event_type'] = event_type
for j in range(nfiles):
fn = folder + get_square_images_fn(cdict, file_number=j+1)
with h5py.File(fn, 'r') as f:
if k == 0 and j == 0:
dims = f[data_key].shape
out_dims = list(dims)
out_dims[0] = n_events_tot * len(ev_types)
images = np.zeros(out_dims, dtype=np.float32)
ind_start = n_events_tot * k + dims[0] * j
ind_end = n_events_tot * k + dims[0] * j + dims[0]
fill_inds = list(range(ind_start, ind_end))
images[fill_inds] = f[data_key][:]
return images
def load_images_from_file(fn, key):
with h5py.File(fn, 'r') as f:
return f[key][:]
def get_group_key(key, f):
for gkey in f.keys():
if type(f[gkey]) != h5py._hl.dataset.Dataset and key in f[gkey].keys():
return gkey
return None
def load_meta_data(folder, cdict):
ev_types = cdict['model_events']
n_events = cdict['n_events']
n_events_tot = cdict.get('n_events_tot', None)
    if n_events_tot is None:
n_events_tot = n_events
nfiles = int(n_events_tot / n_events)
data_key = cdict['data_key']
pars_keys = cdict['pars_keys']
print('load meta data')
for k, event_type in enumerate(ev_types):
print(event_type)
cdict['event_type'] = event_type
for j in range(nfiles):
fn = folder + get_square_images_fn(cdict, file_number=j+1)
with h5py.File(fn, 'r') as f:
if k == 0 and j == 0:
pars_dict = {}
for key in pars_keys:
gkey = get_group_key(key, f)
if gkey is None:
dims = [n_events]
out_dims = n_events_tot * len(ev_types)
else:
dims = f[gkey][key].shape
out_dims = list(dims)
out_dims[0] = n_events_tot * len(ev_types)
pars_dict[key] =
|
np.zeros(out_dims, dtype=np.float32)
|
numpy.zeros
|
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME> and <NAME>
# --------------------------------------------------------
import numpy as np
import yaml
from fast_rcnn.config import cfg
from rpn_msr.generate_anchors import generate_anchors_bv, generate_anchors
from rpn_msr.anchor_target_layer_tf import clip_anchors
from fast_rcnn.bbox_transform import bbox_transform_inv, clip_boxes, bbox_transform_inv_3d
from fast_rcnn.nms_wrapper import nms
from utils.transform import bv_anchor_to_lidar, lidar_to_bv, lidar_3d_to_bv, lidar_3d_to_corners, lidar_cnr_to_img
import pdb,time
#DEBUG = False
"""
Outputs object detection proposals by applying estimated bounding-box
transformations to a set of regular boxes (called "anchors").
"""
def proposal_layer_3d_debug(rpn_cls_prob_reshape,rpn_bbox_pred,im_info,calib,cfg_in, _feat_stride = [8,], anchor_scales=[1.0, 1.0],debug_state=True):
#copy part of the code from proposal_layer_3d for debug
_anchors = generate_anchors_bv()
# _anchors = generate_anchors(scales=np.array(anchor_scales))
_num_anchors = _anchors.shape[0]
im_info = im_info[0]
assert rpn_cls_prob_reshape.shape[0] == 1, \
'Only single item batches are supported'
# cfg_key = str(self.phase) # either 'TRAIN' or 'TEST'
# the first set of _num_anchors channels are bg probs
# the second set are the fg probs, which we want
# print rpn_cls_prob_reshape.shape
height, width = rpn_cls_prob_reshape.shape[1:3]
# scores = rpn_cls_prob_reshape[:, _num_anchors:, :, :]
scores = np.reshape(np.reshape(rpn_cls_prob_reshape, [1, height, width, _num_anchors, 2])[:,:,:,:,1],[1, height, width, _num_anchors])
bbox_deltas = rpn_bbox_pred
if debug_state:
print ('im_size: ({}, {})'.format(im_info[0], im_info[1]))
print ('scale: {}'.format(im_info[2]))
if debug_state:
print ('score map size: {}'.format(scores.shape))
# Enumerate all shifts
shift_x = np.arange(0, width) * _feat_stride
shift_y = np.arange(0, height) * _feat_stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
shift_x.ravel(), shift_y.ravel())).transpose()
# Enumerate all shifted anchors:
#
# add A anchors (1, A, 4) to
# cell K shifts (K, 1, 4) to get
# shift anchors (K, A, 4)
# reshape to (K*A, 4) shifted anchors
A = _num_anchors
K = shifts.shape[0]
anchors = _anchors.reshape((1, A, 4)) + \
shifts.reshape((1, K, 4)).transpose((1, 0, 2))
anchors = anchors.reshape((K * A, 4))
bbox_deltas = bbox_deltas.reshape((-1, 6))
scores = scores.reshape((-1, 1))
# convert anchors bv to anchors_3d
anchors_3d = bv_anchor_to_lidar(anchors)
# Convert anchors into proposals via bbox transformations
proposals_3d = bbox_transform_inv_3d(anchors_3d, bbox_deltas)
# convert back to lidar_bv
proposals_bv = lidar_3d_to_bv(proposals_3d) #[x1,y1,x2,y2]
lidar_corners = lidar_3d_to_corners(proposals_3d)
proposals_img = lidar_cnr_to_img(lidar_corners,
calib[3], calib[2], calib[0])
if debug_state:
# print "bbox_deltas: ", bbox_deltas[:10]
# print "proposals number: ", proposals_3d[:10]
print ("proposals_bv shape: ", proposals_bv.shape)
print ("proposals_3d shape: ", proposals_3d.shape)
print ("scores shape:", scores.shape)
# 2. clip predicted boxes to image
#WZN: delete those not in image
ind_inside = clip_anchors(anchors, im_info[:2])
#ind_inside = np.logical_and(ind_inside,clip_anchors(proposals_bv, im_info[:2]))
proposals_bv = proposals_bv[ind_inside,:]
proposals_3d = proposals_3d[ind_inside,:]
proposals_img = proposals_img[ind_inside,:]
scores = scores[ind_inside,:]
proposals_bv = clip_boxes(proposals_bv, im_info[:2])
# TODO: pass real image_info
#keep = _filter_img_boxes(proposals_img, [375, 1242])
#proposals_bv = proposals_bv[keep, :]
#proposals_3d = proposals_3d[keep, :]
#proposals_img = proposals_img[keep, :]
#scores = scores[keep]
if debug_state:
print ("proposals after clip")
print ("proposals_bv shape: ", proposals_bv.shape)
print ("proposals_3d shape: ", proposals_3d.shape)
print ("proposals_img shape: ", proposals_img.shape)
# 4. sort all (proposal, score) pairs by score from highest to lowest
# 5. take top pre_nms_topN (e.g. 6000)
order = scores.ravel().argsort()[::-1]
if cfg_in['pre_keep_topN'] > 0:
order = order[:cfg_in['pre_keep_topN']]
#keep = keep[order]
proposals_bv = proposals_bv[order, :]
proposals_3d = proposals_3d[order, :]
proposals_img = proposals_img[order, :]
scores = scores[order]
# 6. apply nms (e.g. threshold = 0.7)
# 7. take after_nms_topN (e.g. 300)
# 8. return the top proposals (-> RoIs top)
if cfg_in['use_nms']:
keep = nms(np.hstack((proposals_bv, scores)), cfg_in['nms_thresh'])
if cfg_in['nms_topN'] > 0:
keep = keep[:cfg_in['nms_topN']]
proposals_bv = proposals_bv[keep, :]
proposals_3d = proposals_3d[keep, :]
proposals_img = proposals_img[keep, :]
scores = scores[keep]
if debug_state:
print ("proposals after nms")
print ("proposals_bv shape: ", proposals_bv.shape)
print ("proposals_3d shape: ", proposals_3d.shape)
# debug only: keep probabilities above a threshold
if cfg_in['prob_thresh']:
keep_ind = scores[:,0]>cfg_in['prob_thresh']
print ('scores: ',scores)
print ('threshold: ', cfg_in['prob_thresh'])
print ('score shape:', scores.shape)
#print keep_ind.shape
#print keep.shape
#keep = keep[keep_ind]
proposals_bv = proposals_bv[keep_ind, :]
proposals_3d = proposals_3d[keep_ind, :]
proposals_img = proposals_img[keep_ind, :]
scores = scores[keep_ind]
return proposals_bv,proposals_3d,proposals_img,scores
def proposal_layer_3d(rpn_cls_prob_reshape,rpn_bbox_pred,im_info,calib,cfg_key, _feat_stride = [8,], anchor_scales=[1.0, 1.0],DEBUG = False):
# Algorithm:
#
# for each (H, W) location i
# generate A anchor boxes centered on cell i
# apply predicted bbox deltas at cell i to each of the A anchors
# clip predicted boxes to image
# remove predicted boxes with either height or width < threshold
# sort all (proposal, score) pairs by score from highest to lowest
# take top pre_nms_topN proposals before NMS
# apply NMS with threshold 0.7 to remaining proposals
# take after_nms_topN proposals after NMS
# return the top proposals (-> RoIs top, scores top)
#layer_params = yaml.load(self.param_str_)
#t0 = time.time()
_anchors = generate_anchors_bv()
# _anchors = generate_anchors(scales=np.array(anchor_scales))
_num_anchors = _anchors.shape[0]
#print 'time for anchors: ', time.time()-t0
#t0 = time.time()
im_info = im_info[0]
assert rpn_cls_prob_reshape.shape[0] == 1, \
'Only single item batches are supported'
# cfg_key = str(self.phase) # either 'TRAIN' or 'TEST'
if type(cfg_key) is bytes:
cfg_key = cfg_key.decode('UTF-8','ignore')
pre_score_filt = cfg[cfg_key].RPN_SCORE_FILT
pre_nms_topN = cfg[cfg_key].RPN_PRE_NMS_TOP_N
post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N
nms_thresh = cfg[cfg_key].RPN_NMS_THRESH
min_size = cfg[cfg_key].RPN_MIN_SIZE
# the first set of _num_anchors channels are bg probs
# the second set are the fg probs, which we want
# print rpn_cls_prob_reshape.shape
height, width = rpn_cls_prob_reshape.shape[1:3]
# scores = rpn_cls_prob_reshape[:, _num_anchors:, :, :]
scores = np.reshape(np.reshape(rpn_cls_prob_reshape, [1, height, width, _num_anchors, 2])[:,:,:,:,1],[1, height, width, _num_anchors])
bbox_deltas = rpn_bbox_pred
if DEBUG:
print ('im_size: ({}, {})'.format(im_info[0], im_info[1]))
print ('scale: {}'.format(im_info[2]))
# 1. Generate proposals from bbox deltas and shifted anchors
if DEBUG:
print ('score map size: {}'.format(scores.shape))
# Enumerate all shifts
shift_x = np.arange(0, width) * _feat_stride
shift_y = np.arange(0, height) * _feat_stride
shift_x, shift_y =
|
np.meshgrid(shift_x, shift_y)
|
numpy.meshgrid
|
#!/usr/bin/env python
# coding: utf-8
# <a href="https://colab.research.google.com/github/korymath/public_notebooks/blob/master/Building_Equal_Size_Clusters_Kyle_Mathewson_Sept_2019.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# # Imports
# In[1]:
# get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from collections import Counter
import numpy as np
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
random_state = 1017
# # Generate and Visualize Data
# In[2]:
n_samples = 6
n_groups = 3
n_members = 2
# ensure that the arithmetic works out
assert n_groups * n_members == n_samples
X, y_true = make_blobs(n_samples=n_samples, centers=n_groups,
cluster_std=0.50, random_state=random_state)
plt.scatter(X[:, 0], X[:, 1], s=50);
# In[3]:
for x in X:
print('{};'.format(x))
# # K-Means Clustering
# In[4]:
kmeans = KMeans(n_clusters=n_groups, n_init=100, max_iter=1000)
kmeans.fit(X)
labels = kmeans.predict(X)
plt.scatter(X[:, 0], X[:, 1], c=labels, s=50, cmap='viridis')
centers = kmeans.cluster_centers_
plt.scatter(centers[:, 0], centers[:, 1], c='black', s=200, alpha=0.5);
# In[5]:
# test the group size, AssertionError on failure
C = Counter(labels)
print('Group sizes: {}'.format(C))
try:
assert list(C.values()) == [n_members] * n_groups
except AssertionError as e:
print('Unequal group sizes')
# # (optional) Explicit Algorithm Details
# In[6]:
from sklearn.metrics import pairwise_distances_argmin
def find_clusters(X, n_groups, rseed=random_state):
# 1. Randomly choose clusters
rng = np.random.RandomState(rseed)
i = rng.permutation(X.shape[0])[:n_groups]
centers = X[i]
while True:
# 2a. Assign labels based on closest center
labels = pairwise_distances_argmin(X, centers)
# 2b. Find new centers from means of points
new_centers = np.array([X[labels == i].mean(0)
for i in range(n_groups)])
# 2c. Check for convergence
if np.all(centers == new_centers):
break
centers = new_centers
return centers, labels
centers, labels = find_clusters(X=X, n_groups=n_groups)
plt.scatter(X[:, 0], X[:, 1], c=labels, s=50, cmap='viridis');
# # Limitations of K-Means
#
# 1. Global optimum not guaranteed
# 2. n_groups must be selected beforehand
# 3. limited to linear cluster boundaries
# 4. slow for large n_samples
# 5. group sizes unequal
# In[7]:
# To address limitation 1, we can increase n_init for different random
# starting points on centroids. We can also increase the number of iterations
# particularly if there is a small n_samples
# To address limitation 3, we can use spectral clustering
# use a kernel transformation to project the data into a higher dimension where
# a linear separation is possible.
# Allow k-means to discover non-linear boundaries.
from sklearn.cluster import SpectralClustering
model = SpectralClustering(n_clusters=n_groups, affinity='nearest_neighbors',
assign_labels='kmeans', n_neighbors=n_members,
n_init=100, random_state=random_state)
labels = model.fit_predict(X)
plt.scatter(X[:, 0], X[:, 1], c=labels, s=50, cmap='viridis');
# In[8]:
# test the group size, AssertionError on failure
C = Counter(labels)
print('Group sizes: {}'.format(C))
try:
assert list(C.values()) == [n_members] * n_groups
except AssertionError as e:
print('Unequal group sizes')
# # Constrained Group Size k-means Clustering
# In[9]:
def average_data_distance_error(n_groups, memberships, distances):
'''Calculate average distance between data in clusters.'''
error = 0
for k in range(n_groups):
# indices of datapoints belonging to class k
i = np.where(memberships == k)[0]
error += np.mean(distances[tuple(np.meshgrid(i, i))])
return error / n_groups
def cluster_equal_groups(data, n_groups=None, n_members=None, verbose=False):
# equal-size clustering based on data exchanges between pairs of clusters
# given two of three num_points, num_clusters, group_size
# the third is trivial to calculate
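    # e.g. (made-up numbers) 32 points with n_groups=8 gives n_members = 32 // 8 = 4;
    # conversely n_members=4 gives n_groups = 32 // 4 = 8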
n_samples, _ = data.shape
if n_members is None and n_groups is not None:
n_members = n_samples // n_groups
elif n_groups is None and n_members is not None:
n_groups = n_samples // n_members
else:
raise Exception('must specify either n_members or n_groups')
# distance matrix
distances = squareform(pdist(data))
# print(distances)
# Random initial membership
# np.random.seed(random_state)
# memberships = np.random.permutation(n_samples) % n_groups
# Initial membership
kmeans = KMeans(n_clusters=n_groups, n_init=100, max_iter=1000)
kmeans.fit(data)
memberships = kmeans.predict(data)
current_err = average_data_distance_error(n_groups, memberships, distances)
# print(n_groups, memberships)
t = 1
while True:
past_err = current_err
for a in range(n_samples):
for b in range(a):
# exchange membership
memberships[a], memberships[b] = memberships[b], memberships[a]
# calculate new error
test_err = average_data_distance_error(n_groups, memberships, distances)
if verbose:
print("{}: {}<->{} E={}".format(t, a, b, current_err))
if test_err < current_err:
current_err = test_err
else:
# put them back
memberships[a], memberships[b] = memberships[b], memberships[a]
if past_err == current_err:
break
t += 1
return memberships
# In[10]:
import time
n_samples = 32
n_groups = 8
n_members = n_samples // n_groups
# ensure that the arithmetic works out
assert n_groups * n_members == n_samples
X, y_true = make_blobs(n_samples=n_samples,
centers=n_groups,
cluster_std=0.50,
random_state=random_state)
plt.scatter(X[:, 0], X[:, 1], s=50);
t0 = time.time()
labels = cluster_equal_groups(X, n_groups=n_groups, verbose=False)
t1 = time.time()
# test the group size, AssertionError on failure
C = Counter(labels)
print('Group sizes: {}'.format(C))
try:
assert list(C.values()) == [n_members] * n_groups
print('Success, group sizes are equal!')
except AssertionError as e:
print('Unequal group sizes')
print('Equal group memberships found in {} s'.format(round(t1-t0, 2)))
# Plot the memberships
plt.scatter(X[:, 0], X[:, 1], c=labels, s=50, cmap='viridis');
# In[14]:
nx, ny = 4, 8
xs = np.linspace(0, 1, nx)
ys = np.linspace(0, 1, ny)
x, y = np.meshgrid(xs, ys) + np.random.normal(scale=0.01, size=(ny, nx))
print(x.shape, y.shape)
# In[15]:
X = np.zeros(shape=(len(x.flatten()), 2))
X[:, 0] = x.flatten()
X[:, 1] = y.flatten()
plt.scatter(X[:, 0], X[:, 1], s=50);
# In[16]:
labels = cluster_equal_groups(X, n_groups=n_groups, verbose=False)
plt.scatter(X[:, 0], X[:, 1], c=labels, s=50, cmap='jet');
# test the group size, AssertionError on failure
C = Counter(labels)
print('Group sizes: {}'.format(C))
try:
assert list(C.values()) == [n_members] * n_groups
except AssertionError as e:
print('Unequal group sizes')
# In[17]:
# This import registers the 3D projection, but is otherwise unused.
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
X, y_true = make_blobs(n_samples=n_samples,
centers=n_groups,
n_features=3,
cluster_std=0.50,
random_state=random_state)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], s=50)
# In[18]:
labels = cluster_equal_groups(X, n_groups=n_groups)
# In[19]:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=labels, s=50, cmap='viridis');
# In[20]:
np.random.permutation(n_samples) % 4
# In[21]:
distances = squareform(pdist(X))
distances
# In[22]:
distances[
|
np.meshgrid((0,1), (0,1))
|
numpy.meshgrid
|
import cv2
import functions as fc
import numpy as np
import os
import time
from dataset import Dataset
def main():
dataset = Dataset('highway',1,20)
imgs = dataset.readInput()
imgs_GT = dataset.readGT()
    # read the ground truth and the two result sets evaluated below
    ori_imgs = fc.readImages("../../Datasets/highway/groundtruth", "png")
    A_imgs = fc.readImages("results/highway/A", "png")
    B_imgs = fc.readImages("results/highway/B", "png")
    # TASK 1.1 - Background Subtraction Evaluation
# test A
t = time.time()
A_CMatrix = fc.ConfusionMatrix(ori_imgs, A_imgs)
A_Metrics = fc.Metrics(A_CMatrix)
elapsed = time.time() - t
print('Elapsed time is ' + str(elapsed) + ' seconds')
# test B
t = time.time()
B_CMatrix = fc.ConfusionMatrix_2(ori_imgs, B_imgs)
B_Metrics = fc.Metrics_2(B_CMatrix)
elapsed = time.time() - t
print('Elapsed time is ' + str(elapsed) + ' seconds')
# TASK 2.1 - Plot TP vs Time
A_CMatrix, TPFv_A, TPGTv_A, F1v_A = fc.ConfusionMatrix(ori_imgs, A_imgs)
B_CMatrix, TPFv_B, TPGTv_B, F1v_B = fc.ConfusionMatrix(ori_imgs, B_imgs)
x = [np.arange(0, 200, 1), np.arange(0, 200, 1), np.arange(0, 200, 1)]
y = [np.array(TPFv_A), np.array(TPFv_B), np.array(TPGTv_A)]
axis = ["Time", "#Pixels"]
labels = ["True Positives A", "True Positives B", "Total Positives"]
fc.plotGraphics(x, y, axis, labels)
# TASK 2.2 - Plot F1-score vs Time
x = [np.arange(0, 200, 1),
|
np.arange(0, 200, 1)
|
numpy.arange
|
"""
Library Features:
Name: lib_data_io_binary
Author(s): <NAME> (<EMAIL>), <NAME> (<EMAIL>)
Date: '20210603'
Version: '1.0.0'
"""
#######################################################################################
# Library
import logging
import os
import struct
from copy import deepcopy
import numpy as np
import pandas as pd
import xarray as xr
from lib_info_args import logger_name
# Logging
log_stream = logging.getLogger(logger_name)
# Debug
import matplotlib.pylab as plt
#######################################################################################
# --------------------------------------------------------------------------------
# Method to find the geographical reference whose grid size matches the binary file size
def search_geo_reference(file_name, info_dict, tag_geo_reference=None,
tag_cols='ncols', tag_rows='nrows', scale_factor=4):
file_handle = open(file_name, 'rb')
file_stream = file_handle.read(-1)
    stream_n = len(file_stream)
data_tag = None
for info_key, info_fields in info_dict.items():
data_n = int(info_fields[tag_cols]) * int(info_fields[tag_rows]) * scale_factor
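        # each candidate grid holds nrows * ncols values of scale_factor bytes, so a
        # matching byte count identifies the geographical reference behind this file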
        if data_n == stream_n:
data_info = info_fields
data_tag = info_key
break
file_handle.close()
assert data_tag == tag_geo_reference, " ===> Geographical reference set and found are not equal. " \
"Check your settings and datasets"
return data_tag
# --------------------------------------------------------------------------------
# --------------------------------------------------------------------------------
# Method to read 2d variable in binary format (saved as 1d integer array)
def read_data_binary(file_name, var_geo_x, var_geo_y, var_geo_attrs=None, var_format='i', var_scale_factor=10,
var_name=None, var_time=None, var_geo_1d=True,
coord_name_geo_x='west_east', coord_name_geo_y='south_north', coord_name_time='time',
dim_name_geo_x='west_east', dim_name_geo_y='south_north', dim_name_time='time',
dims_order=None):
if dims_order is None:
dims_order = [dim_name_geo_y, dim_name_geo_x, dim_name_time]
if os.path.exists(file_name):
# Open file handle
file_handle = open(file_name, 'rb')
rows = var_geo_y.shape[0]
cols = var_geo_x.shape[0]
# Values shape (1d)
var_n = rows * cols
# Values format
data_format = var_format * var_n
# Open and read binary file
data_stream = file_handle.read(-1)
array_data = struct.unpack(data_format, data_stream)
# Close file handle
file_handle.close()
# Reshape binary file in Fortran order and scale Data (float32)
file_values = np.reshape(array_data, (rows, cols), order='F')
file_values = np.float32(file_values / var_scale_factor)
if var_geo_1d:
var_geo_x_2d, var_geo_y_2d = np.meshgrid(var_geo_x, var_geo_y)
else:
var_geo_x_2d = var_geo_x
var_geo_y_2d = var_geo_y
geo_y_upper = var_geo_y_2d[0, 0]
geo_y_lower = var_geo_y_2d[-1, 0]
if geo_y_lower > geo_y_upper:
var_geo_y_2d = np.flipud(var_geo_y_2d)
file_dims = file_values.shape
file_high = file_dims[0]
file_wide = file_dims[1]
var_data =
|
np.zeros(shape=[var_geo_x_2d.shape[0], var_geo_y_2d.shape[1], 1])
|
numpy.zeros
|
import numpy as np
def root_mean_squared_error(y_true, y_pred):
rmse = np.sqrt(np.square(np.subtract(y_true, y_pred)).mean())
return rmse
def root_mean_squared_log_error(y_true, y_pred):
rmsle = np.sqrt(np.square(np.subtract(np.log(y_pred + 1), np.log(y_true + 1))).mean())
return rmsle
def root_mean_squared_precentage_error(y_true, y_pred):
rmspe = (np.sqrt(np.mean(np.square(np.subtract(y_true, y_pred) / y_true)))) * 100
return rmspe
def symmetric_mean_absolute_precentage_error(y_true, y_pred):
smape = 100/len(y_true) * np.sum(2 * np.abs(np.subtract(y_pred, y_true)) / (np.abs(y_true) + np.abs(y_pred)))
return smape
def mean_bias_error(y_true, y_pred):
mbe = np.mean(np.subtract(y_pred, y_true))
return mbe
def relative_squared_error(y_true, y_pred):
rse = np.sum(np.square(np.subtract(y_true, y_pred))) / np.sum(np.square(np.subtract(np.average(y_true), y_true)))
return rse
def root_relative_squared_error(y_true, y_pred):
rrse = np.sqrt(np.sum(np.square(np.subtract(y_true, y_pred))) / np.sum(np.square(np.subtract(
|
np.average(y_true)
|
numpy.average
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright © 2022 <NAME>
"""Wavefront aberration calculations
Functions for setting up and calculating wavefront aberrations for
(fld, wvl, foc), including focus and image shift.
.. Created on Thu Mar 31 22:28:44 2022
.. codeauthor: <NAME>
"""
from math import sqrt
import numpy as np
from rayoptics.optical import model_constants as mc
from rayoptics.elem.transform import transform_after_surface
from rayoptics.util.misc_math import normalize
def calculate_reference_sphere(opt_model, fld, wvl, foc,
chief_ray_pkg, image_pt_2d=None):
"""Compute the reference sphere for a defocussed image point at **fld**.
Args:
opt_model: :class:`~.OpticalModel` instance
fld: :class:`~.Field` point for wave aberration calculation
wvl: wavelength of ray (nm)
foc: defocus amount
chief_ray_pkg: input tuple of chief_ray, cr_exp_seg
image_pt_2d: x, y image point in (defocussed) image plane, if None, use
the chief ray coordinate.
Returns:
ref_sphere: tuple of image_pt, ref_dir, ref_sphere_radius
"""
cr, cr_exp_seg = chief_ray_pkg
# cr_exp_pt: E upper bar prime: pupil center for pencils from Q
# cr_exp_pt, cr_b4_dir, cr_dst
# cr_exp_pt = cr_exp_seg[mc.p]
if image_pt_2d is None:
# get distance along cr corresponding to a z shift of the defocus
dist = foc / cr.ray[-1][mc.d][2]
image_pt = cr.ray[-1][mc.p] + dist*cr.ray[-1][mc.d]
else:
image_pt = np.array([image_pt_2d[0], image_pt_2d[1], foc])
# get the image point wrt the final surface
image_thi = opt_model['seq_model'].gaps[-1].thi
img_pt = np.array(image_pt)
img_pt[2] += image_thi
# R' radius of reference sphere for O'
ref_sphere_vec = img_pt - cr_exp_seg[mc.p]
ref_sphere_radius = np.linalg.norm(ref_sphere_vec)
ref_dir = normalize(ref_sphere_vec)
ref_sphere = (image_pt, ref_dir, ref_sphere_radius)
return ref_sphere
def transfer_to_exit_pupil(interface, ray_seg, exp_dst_parax):
"""Given the exiting interface and chief ray data, return exit pupil ray coords.
Args:
interface: the exiting :class:'~.Interface' for the path sequence
ray_seg: ray segment exiting from **interface**
exp_dst_parax: z distance to the paraxial exit pupil
Returns:
(**exp_pt**, **exp_dir**, **exp_dst**)
- **exp_pt** - ray intersection with exit pupil plane
- **exp_dir** - direction cosine of the ray in exit pupil space
- **exp_dst** - distance from interface to exit pupil pt
"""
b4_pt, b4_dir = transform_after_surface(interface, ray_seg)
# h = b4_pt[0]**2 + b4_pt[1]**2
# u = b4_dir[0]**2 + b4_dir[1]**2
# handle field points in the YZ plane
h = b4_pt[1]
u = b4_dir[1]
if abs(u) < 1e-14:
exp_dst = exp_dst_parax
else:
# exp_dst = -np.sign(b4_dir[2])*sqrt(h/u)
exp_dst = -h/u
exp_pt = b4_pt + exp_dst*b4_dir
exp_dir = b4_dir
return exp_pt, exp_dir, exp_dst, interface, b4_pt, b4_dir
# --- Wavefront aberration
def eic_distance(r, r0):
""" calculate equally inclined chord distance between 2 rays
Args:
r: (p, d), where p is a point on the ray r and d is the direction
cosine of r
r0: (p0, d0), where p0 is a point on the ray r0 and d0 is the direction
cosine of r0
Returns:
float: distance along r from equally inclined chord point to p
"""
# eq 3.9
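    # i.e. e = ((d + d0) . (p - p0)) / (1 + d . d0), matching the expression below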
e = (np.dot(r[mc.d] + r0[mc.d], r[mc.p] - r0[mc.p]) /
(1. +
|
np.dot(r[mc.d], r0[mc.d])
|
numpy.dot
|
"""
keras.py
Methods to create, use, save and load pilots. Pilots contain the high-level
logic used to determine the angle and throttle of a vehicle. Pilots can
include one or more models to help direct the vehicle's motion.
"""
from abc import ABC, abstractmethod
import numpy as np
from typing import Dict, Any, Tuple, Optional, Union
import donkeycar as dk
from donkeycar.utils import normalize_image, linear_bin, process_image, linear_unbin
from donkeycar.pipeline.types import TubRecord
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.layers import Conv2D, MaxPooling2D, \
BatchNormalization
from tensorflow.keras.layers import Activation, Dropout, Flatten
from tensorflow.keras.layers import LSTM
from tensorflow.keras.layers import TimeDistributed as TD
from tensorflow.keras.layers import Conv3D, MaxPooling3D, Conv2DTranspose
from tensorflow.keras.backend import concatenate
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.optimizers import Optimizer
ONE_BYTE_SCALE = 1.0 / 255.0
# type of x
XY = Union[float, np.ndarray, Tuple[float, ...], Tuple[np.ndarray, ...]]
class KerasPilot(ABC):
"""
Base class for Keras models that will provide steering and throttle to
guide a car.
"""
def __init__(self) -> None:
self.model: Optional[Model] = None
self.optimizer = "adam"
self.s_t = None
print(f'Created {self}')
def load(self, model_path: str) -> None:
self.model = keras.models.load_model(model_path, compile=False)
def load_weights(self, model_path: str, by_name: bool = True) -> None:
assert self.model, 'Model not set'
self.model.load_weights(model_path, by_name=by_name)
def shutdown(self) -> None:
pass
def compile(self) -> None:
pass
def set_optimizer(self, optimizer_type: str,
rate: float, decay: float) -> None:
assert self.model, 'Model not set'
if optimizer_type == "adam":
self.model.optimizer = keras.optimizers.Adam(lr=rate, decay=decay)
elif optimizer_type == "sgd":
self.model.optimizer = keras.optimizers.SGD(lr=rate, decay=decay)
elif optimizer_type == "rmsprop":
self.model.optimizer = keras.optimizers.RMSprop(lr=rate, decay=decay)
else:
raise Exception("unknown optimizer type: %s" % optimizer_type)
def get_input_shape(self) -> tf.TensorShape:
assert self.model, 'Model not set'
return self.model.inputs[0].shape
def run(self, img_arr: np.ndarray, other_arr: np.ndarray = None) \
-> Tuple[Union[float, np.ndarray], ...]:
"""
Donkeycar parts interface to run the part in the loop.
:param img_arr: uint8 [0,255] numpy array with image data
:param other_arr: numpy array of additional data to be used in the
pilot, like IMU array for the IMU model or a
state vector in the Behavioural model
:return: tuple of (angle, throttle)
"""
norm_arr = normalize_image(img_arr)
x_t = process_image(norm_arr)
        if self.s_t is None:  # first frame: build the initial 4-frame stack
self.s_t = np.stack((x_t,x_t,x_t,x_t),axis=2)
# In Keras, need to reshape
self.s_t = self.s_t.reshape(1, self.s_t.shape[0], self.s_t.shape[1], self.s_t.shape[2]) #1*80*80*4
else:
x_t = x_t.reshape(1, x_t.shape[0], x_t.shape[1], 1) #1x80x80x1
self.s_t =
|
np.append(x_t, self.s_t[:, :, :, :3], axis=3)
|
numpy.append
|
import numpy as np
import matplotlib as plt
import os
import serial
import time
import h5py
import sys
import matplotlib.pyplot as plt
from frgpl.camera import camera
from frgpl.stage import stage
from frgpl.kepco import kepco
from frgpl.daq import daq
from frgpl.laser import laser
from frgpl.tec import omega
import datetime
import time
from mpl_toolkits.axes_grid1 import make_axes_locatable
from tqdm import tqdm
import threading
import pdb
import winsound
from frgpl.checkPLmaps import plotPL
soundpath='C:\\Users\\Operator\\Documents\\GitHub\\Instruments\\FRG Hardware\\frgpl\\frgpl\\tada.wav'
root = 'C:\\Users\\Operator\\Desktop\\frgPL' #default folder to save data
if not os.path.exists(root):
os.mkdir(root)
datafolder = os.path.join(root, 'Data')
if not os.path.exists(datafolder):
os.mkdir(datafolder)
calibrationfolder = os.path.join(root, 'Calibration')
if not os.path.exists(calibrationfolder):
os.mkdir(calibrationfolder)
class control:
def __init__(self, kepcoport = 'COM5',laserport = 'COM1', spotmapnumber = None):
# hardware properties
self.kepcoport = kepcoport
self.laserport = laserport
self.__laserON = False
self.__kepcoON = False
self.__cameraON = False
# measurement settings
self.bias = 0 #bias applied to sample
self.laserpower = 0 #current supplied to laser ###may replace this with n_suns, if calibration is enabled
self.saturationtime = 0.5 #delay between applying voltage/illumination and beginning measurement
self.numIV = 20 #number of IV measurements to average
self.numframes = 50 #number of image frames to average
self.__temperature = 25 #TEC stage temperature setpoint (C) during measurement
self.temperatureTolerance = 0.2 #how close to the setpoint we need to be to take a measurement (C)
self.maxSoakTime = 60 # max soak time, in seconds, to wait for temperature to reach set point. If we reach this point, just go ahead with the measurement
self.note = ''
self._spotMap = None # optical power map of laser spot, used for PL normalization
self._sampleOneSun = None # fractional laser power with which to approximate one-sun injection levels
self._sampleOneSunJsc = None # target Jsc, matching of which is used for one-sun injection level is approximated
self._sampleOneSunSweep = None # fractional laser power vs photocurrent (Isc), fit to provide one-sun estimate
self.__previewFigure = None #handle for matplotlib figure, used for previewing most recent image results
self.__previewAxes = [None, None] # handle for matplotib axes, used to hold the image and colorbar
self.__backgroundImage = None
# data saving settings
todaysDate = datetime.datetime.now().strftime('%Y%m%d')
self.outputDirectory = os.path.join(root, 'Data', todaysDate) #default save locations is desktop/frgPL/Data/(todaysDate)
self.sampleName = None
self.__dataBuffer = [] # buffer to hold data files during sequential measurements of single sample. Held until a batch export
# stage/positioning constants
self.__sampleposition = (52000, 56000) #position where TEC stage is centered in camera FOV, um
self.__detectorposition = (68000, 117000) #delta position between detector and sampleposition, um.
self.__fov = (77000, 56000) #dimensions of FOV, um
self.connect()
self.loadSpotCalibration(spotmapnumber)
@property
def temperature(self):
return self.__temperature
@temperature.setter
def temperature(self, t):
if self.tec.setSetPoint(t):
self.__temperature = t
def connect(self):
self.camera = camera() # connect to FLIR camera
self.kepco = kepco() # connect to Kepco
self.kepco.set(voltage=0) # set voltage to 0, seems to solve current compliance issues
self.laser = laser() # Connect to OSTECH Laser
self.daq = daq() # connect to NI-USB6000 DAQ
self.stage = stage(sampleposition = self.__sampleposition) # connect to FRG stage
self.tec = omega() # connect to omega PID controller, which is driving the TEC stage.
def disconnect(self):
try:
self.camera.disconnect()
except:
print('Could not disconnect camera')
try:
self.kepco.disconnect()
except:
print('Could not disconnect Kepco')
try:
self.laser.disconnect()
except:
print('Could not disconnect OSTech Laser')
try:
self.daq.disconnect()
except:
print('Could not disconnect DAQ')
try:
self.stage.disconnect()
except:
print('Could not disconnect stage')
try:
self.tec.disconnect()
except:
print('Could not disconnect TEC controller')
### basic use functions
def setMeas(self, bias = None, laserpower = None, suns = None, saturationtime = None, temperature = None, numIV = None, numframes = None, note = ''):
if bias is None:
bias = self.bias
if laserpower is None:
if suns is None:
laserpower = self.laserpower
else:
if self._sampleOneSun is None:
print('Error: can\'t use "suns =" without calibration - please run .findOneSun to calibrate one-sun power level for this sample.')
return False
else:
laserpower = suns * self._sampleOneSun
if (laserpower > 1) or (laserpower < 0):
maxsuns = 1/self._sampleOneSun
print('Error: {0} suns is out of range! Based on laser power and current sample, allowed suns range = 0 - {1}.'.format(suns, maxsuns))
if laserpower > 1:
print('Setting to max laser power ({0} suns)'.format(maxsuns))
laserpower = 1
else:
print('Setting laser off')
laserpower = 0
# return False
if saturationtime is None:
saturationtime = self.saturationtime
if temperature is None:
temperature = self.__temperature
if numIV is None:
numIV = self.numIV
if numframes is None:
numframes = self.numframes
result = self.kepco.set(voltage = bias)
if result:
self.bias = bias
else:
print('Error setting kepco')
# return False
result = self.laser.set(power = laserpower)
if result:
self.laserpower = laserpower
else:
print('Error setting laser')
# return False
result = self.tec.setSetPoint(temperature)
if result:
self.__temperature = temperature
else:
print('Error setting TEC temperature')
# return False
self.numIV = numIV
self.numframes = numframes
self.note = note
def takeMeas(self, lastmeasurement = True, preview = True, imputeHotPixels = False):
### takes a measurement with settings stored in method (can be set with .setMeas()).
# measurement settings + results are appended to .__dataBuffer
#
# if .__dataBuffer is empty (ie, no measurements have been taken yet), takeMeas() will
# automatically take a 0 bias, 0 laser power baseline measurement before the scheduled
# measurement.
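# A minimal usage sketch (assumes the hardware opened in connect() is present; the values are illustrative only):
#   c = control()
#   c.setMeas(bias = 0.6, laserpower = 0.1, note = 'example condition')
#   c.takeMeas()                 # the first call also appends the automatic 0 bias / 0 laser baseline
#   c.save(samplename = 'sampleA')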
if len(self.__dataBuffer) == 0: # sample is being measured for the first time, take a baseline image
print('New sample: taking a 0 bias, 0 illumination baseline image.')
# store scheduled measurement parameters
savedlaserpower = self.laserpower
savedbias = self.bias
savednote = self.note
# take a 0 bias, 0 laserpower measurement, append to .__dataBuffer
self.setMeas(bias = 0, laserpower = 0, note = 'automatic baseline image')
self._waitForTemperature()
measdatetime = datetime.datetime.now()
temperature = self.tec.getTemperature()
im, _, _ = self.camera.capture(frames = self.numframes, imputeHotPixels = imputeHotPixels)
v, i = self.kepco.read(counts = self.numIV)
irradiance = self._getOpticalPower()
temperature = (temperature + self.tec.getTemperature()) / 2 #average the temperature from just before and after the measurement. Typically averaging >1 second of time here.
meas = {
'sample': self.sampleName,
'note': self.note,
'date': measdatetime.strftime('%Y-%m-%d'),
'time': measdatetime.strftime('%H:%M:%S'),
'cameraFOV':self.__fov,
'bias': self.bias,
'laserpower': self.laserpower,
'saturationtime': self.saturationtime,
'numIV': self.numIV,
'numframes':self.numframes,
'v_meas': v,
'i_meas': i,
'image': im,
'image_bgcorrected': im-im,
'irradiance_ref': irradiance,
'temperature': temperature,
'temperature_setpoint': self.temperature
}
self.__dataBuffer.append(meas)
self.__backgroundImage = im #store background image for displaying preview
# restore scheduled measurement parameters + continue
self.setMeas(bias = savedbias, laserpower = savedlaserpower, note = savednote)
if not self.__laserON and self.laserpower > 0:
self.laser.on()
self.__laserON = True
if not self.__kepcoON: #and self.bias is not 0:
self.kepco.on() #turn on the kepco source
self.__kepcoON = True
time.sleep(self.saturationtime)
#take image, take IV meas during image
self._waitForTemperature()
measdatetime = datetime.datetime.now()
temperature = self.tec.getTemperature()
im, _, _ = self.camera.capture(frames = self.numframes, imputeHotPixels = imputeHotPixels)
v, i = self.kepco.read(counts = self.numIV)
#pdb.set_trace()
irradiance = self._getOpticalPower()
temperature = (temperature + self.tec.getTemperature()) / 2 #average the temperature from just before and after the measurement. Typically averaging >1 second of time here.
if self.__laserON and lastmeasurement:
self.laser.off()
self.__laserON = False
if self.__kepcoON and lastmeasurement:
self.kepco.off()
self.__kepcoON = False
meas = {
'sample': self.sampleName,
'note': self.note,
'date': measdatetime.strftime('%Y-%m-%d'),
'time': measdatetime.strftime('%H:%M:%S'),
'cameraFOV':self.__fov,
'bias': self.bias,
'laserpower': self.laserpower,
'saturationtime': self.saturationtime,
'numIV': self.numIV,
'numframes':self.numframes,
'v_meas': v,
'i_meas': i,
'image': im,
'image_bgcorrected': self._backgroundCorrection(im),
'irradiance_ref': irradiance,
'temperature': temperature,
'temperature_setpoint': self.temperature
}
self.__dataBuffer.append(meas)
if preview:
self.displayPreview(self._backgroundCorrection(im), v, i)
return im, v, i
def displayPreview(self, img, v, i):
def handle_close(evt, self):
self.__previewFigure = None
self.__previewAxes = [None, None]
if self.__previewFigure is None: #preview window is not created yet, lets make it
plt.ioff()
self.__previewFigure, self.__previewAxes[0] = plt.subplots()
divider = make_axes_locatable(self.__previewAxes[0])
self.__previewAxes[1] = divider.append_axes('right', size='5%', pad=0.05)
self.__previewFigure.canvas.mpl_connect('close_event', lambda x: handle_close(x, self)) # if preview figure is closed, lets clear the figure/axes handles so the next preview properly recreates the handles
plt.ion()
plt.show()
for ax in self.__previewAxes: #clear the axes
ax.clear()
img_handle = self.__previewAxes[0].imshow(img)
self.__previewFigure.colorbar(img_handle, cax = self.__previewAxes[1])
self.__previewAxes[0].set_title('{0} V, {1} A, {2} Laser'.format(v, i, self.laserpower))
self.__previewFigure.canvas.draw()
self.__previewFigure.canvas.flush_events()
time.sleep(1e-4) #pause allows plot to update during series of measurements
def save(self, samplename = None, note = '', outputdirectory = None, reset = True):
if len(self.__dataBuffer) == 0:
print('Data buffer is empty - no data to save!')
return False
## figure out the sample directory, name, total filepath
if samplename is not None:
self.sampleName = samplename
if outputdirectory is not None:
self.outputDirectory = outputdirectory
if not os.path.exists(self.outputDirectory):
os.mkdir(self.outputDirectory)
fids = os.listdir(self.outputDirectory)
sampleNumber = 1
for fid in fids:
if 'frgPL' in fid:
sampleNumber = sampleNumber + 1
todaysDate = datetime.datetime.now().strftime('%Y%m%d')
if self.sampleName is not None:
fname = 'frgPL_{0}_{1:04d}_{2}.h5'.format(todaysDate, sampleNumber, self.sampleName)
else:
fname = 'frgPL_{0}_{1:04d}.h5'.format(todaysDate, sampleNumber)
self.sampleName = ''
fpath = os.path.join(self.outputDirectory, fname)
## build each category in h5 file
### example dataset saved to _dataBuffer by .takeMeas
# meas = {
# 'sample': self.sampleName,
# 'date': measdatetime.strftime('%Y-%m-%d'),
# 'time': measdatetime.strftime('%H:%M:%S'),
# 'cameraFOV':self.__fov,
# 'bias': self.bias,
# 'laserpower': self.laserpower,
# 'saturationtime': self.saturationtime,
# 'numIV': self.numIV,
# 'numframes':self.numframes,
# 'v_meas': v,
# 'i_meas': i,
# 'image': im,
# }
numData = len(self.__dataBuffer)
data = {}
for field in self.__dataBuffer[0].keys():
data[field] = []
### field to store normalized PL images
# if self._spotmap is not None:
# data['image_norm'] = []
for meas in self.__dataBuffer:
for field, measdata in meas.items():
data[field].append(measdata)
### normalize PL images here
# if field is 'image' and self._spotmap is not None:
# data['image_norm']
## write h5 file
with h5py.File(fpath, 'w') as f:
# sample info
info = f.create_group('/info')
info.attrs['description'] = 'Metadata describing sample, datetime, etc.'
temp = info.create_dataset('name', data = self.sampleName.encode('utf-8'))
temp.attrs['description'] = 'Sample name.'
temp = info.create_dataset('notes', data = np.array(note.encode('utf-8')))
temp.attrs['description'] = 'Any notes describing each measurement.'
temp = info.create_dataset('date', data = np.array([x.encode('utf-8') for x in data['date']]))
temp.attrs['description'] = 'Measurement date.'
temp = info.create_dataset('time', data = np.array([x.encode('utf-8') for x in data['time']]))
temp.attrs['description'] = 'Measurement time of day.'
# measurement settings
settings = f.create_group('/settings')
settings.attrs['description'] = 'Settings used for measurements.'
temp = settings.create_dataset('vbias', data = np.array(data['bias']))
temp.attrs['description'] = 'Nominal voltage bias set by Kepco during measurement.'
temp = settings.create_dataset('notes', data = np.array([x.encode('utf-8') for x in data['note']]))
temp.attrs['description'] = 'Any notes describing each measurement.'
temp = settings.create_dataset('laserpower', data = np.array(data['laserpower']))
temp.attrs['description'] = 'Fractional laser power during measurement. Calculated as normalized laser current (max current = 55 A). Laser is operated at steady state.'
temp = settings.create_dataset('sattime', data = np.array(data['saturationtime']))
temp.attrs['description'] = 'Saturation time for laser/bias conditioning prior to sample measurement. Delay between applying condition and measuring, in seconds.'
temp = settings.create_dataset('numIV', data = np.array(data['numIV']))
temp.attrs['description'] = 'Number of current/voltage measurements averaged by Kepco when reading IV.'
temp = settings.create_dataset('numframes', data = np.array(data['numframes']))
temp.attrs['description'] = 'Number of camera frames averaged when taking image.'
temp = settings.create_dataset('tempsp', data = np.array(data['temperature_setpoint']))
temp.attrs['description'] = 'TEC stage temperature setpoint for each measurement.'
if self.stage.position[0] is None:
stagepos = self.__sampleposition
else:
stagepos = self.stage.position
temp = settings.create_dataset('position', data = np.array(stagepos))
temp.attrs['description'] = 'Stage position during measurement.'
if self._sampleOneSun is not None:
suns = [x/self._sampleOneSun for x in data['laserpower']]
temp = settings.create_dataset('suns', data = np.array(suns))
temp.attrs['description'] = 'PL injection level in terms of suns. Only present if sample was calibrated with .findOneSun to match measured Isc to provided expected value, presumably from solar simulator JV curve.'
# calibrations
calibrations = f.create_group('/calibrations')
calibrations.attrs['description'] = 'Instrument calibrations to be used for data analysis.'
temp = calibrations.create_dataset('samplepos', data = np.array(self.__sampleposition))
temp.attrs['description'] = 'Stage position (um)[x,y] where sample is centered in camera field of view'
temp = calibrations.create_dataset('detectorpos', data = np.array(self.__detectorposition))
temp.attrs['description'] = 'Stage position (um) [x,y] where photodetector is centered in camera field of view'
temp = calibrations.create_dataset('camerafov', data = np.array(self.__fov))
temp.attrs['description'] = 'Camera field of view (um) [x,y]'
if self._spotMap is not None:
temp = calibrations.create_dataset('spot', data = np.array(self._spotMap))
temp.attrs['description'] = 'Map [y, x] of incident optical power across camera FOV, can be used to normalize PL images. Laser power set to 0.5 during spot mapping.'
temp = calibrations.create_dataset('spotx', data = np.array(self._spotMapX))
temp.attrs['description'] = 'X positions (um) for map of incident optical power across camera FOV, can be used to normalize PL images.'
temp = calibrations.create_dataset('spoty', data = np.array(self._spotMapY)) # assumed Y-position attribute, mirroring _spotMapX above
temp.attrs['description'] = 'Y positions (um) for map of incident optical power across camera FOV, can be used to normalize PL images.'
if self._sampleOneSunSweep is not None:
temp = calibrations.create_dataset('onesunsweep', data = np.array(self._sampleOneSunSweep))
temp.attrs['description'] = 'Laser current vs photocurrent, measured for this sample. Column 1: fractional laser current. Column 2: total photocurrent (Isc), NOT current density (Jsc). Only present if sample was calibrated with .findOneSun to match measured Isc to provided expected value, presumably from solar simulator JV curve.'
temp = calibrations.create_dataset('onesun', data =
|
np.array(self._sampleOneSun)
|
numpy.array
|
# -*- coding: utf-8 -*-
# @Time : 2019/3/19 20:19
# @Author : Alan
# @Email : <EMAIL>
# @File : train.py
# @Software: PyCharm
import time
import logging
import numpy as np
import tensorflow as tf
import os
import tqdm
import sys
from copy import deepcopy
stdout = sys.stdout
from data_helper import *
from model import SiameseQACNN
from model_utils import *
# create a logger
logger = logging.getLogger('mylogger')
logger.setLevel(logging.DEBUG)
# create a handler that writes to a log file
timestamp = str(int(time.time()))
fh = logging.FileHandler('./log/log_' + timestamp +'.txt')
fh.setLevel(logging.DEBUG)
# define the handler's output format
formatter = logging.Formatter('[%(asctime)s][%(levelname)s] ## %(message)s')
fh.setFormatter(formatter)
# ch.setFormatter(formatter)
# attach the handler to the logger
logger.addHandler(fh)
# logger.addHandler(ch)
class NNConfig(object):
def __init__(self, embeddings=None):
# input question (sentence) length
self.ques_length = 25
# input answer length
self.ans_length = 90
# number of epochs
self.num_epochs = 100
# batch size
self.batch_size = 128
# different filter types, each corresponding to a different window size
self.window_sizes = [1, 2, 3, 5, 7, 9]
# hidden layer size
self.hidden_size = 128
self.output_size = 128
self.keep_prob = 0.5
# number of filters per window size
self.n_filters = 128
# margin size
self.margin = 0.5
# word embedding size
self.embeddings = np.array(embeddings).astype(np.float32)
# learning rate
self.learning_rate = 0.001
# weight of the positive-loss part of the contrastive loss
self.pos_weight = 0.25
# optimizer
self.optimizer = 'adam'
self.clip_value = 5
self.l2_lambda = 0.0001
# evaluation
self.eval_batch = 100
# self.cf = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
# self.cf.gpu_options.per_process_gpu_memory_fraction = 0.2
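# A minimal usage sketch (embeddings is assumed to be a pre-loaded [vocab_size, dim] array):
#   config = NNConfig(embeddings=embeddings)
#   model = SiameseQACNN(config)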
def evaluate(sess, model, corpus, config):
iterator = Iterator(corpus)
count = 0
total_qids = []
total_aids = []
total_pred = []
total_labels = []
total_loss = 0.
Acc = []
for batch_x in iterator.next(config.batch_size, shuffle=False):
batch_qids, batch_q, batch_aids, batch_a, batch_qmask, batch_amask, labels = zip(*batch_x)
batch_q = np.asarray(batch_q)
batch_a = np.asarray(batch_a)
q_ap_cosine, loss, acc = sess.run([model.q_a_cosine, model.total_loss, model.accu],
feed_dict={model._ques: batch_q,
model._ans: batch_a,
model._ans_neg: batch_a,
model.dropout_keep_prob: 1.0})
total_loss += loss
Acc.append(acc)
count += 1
total_qids.append(batch_qids)
total_aids.append(batch_aids)
total_pred.append(q_ap_cosine)
total_labels.append(labels)
# print(batch_qids[0], [id2word[_] for _ in batch_q[0]],
# batch_aids[0], [id2word[_] for _ in batch_ap[0]])
total_qids = np.concatenate(total_qids, axis=0)
total_aids = np.concatenate(total_aids, axis=0)
total_pred = np.concatenate(total_pred, axis=0)
total_labels = np.concatenate(total_labels, axis=0)
MAP, MRR = eval_map_mrr(total_qids, total_aids, total_pred, total_labels)
acc_ = np.sum(Acc)/count
ave_loss = total_loss/count
# print('Eval loss:{}'.format(total_loss / count))
return MAP, MRR, ave_loss, acc_
def test(corpus, config):
with tf.Session() as sess:
model = SiameseQACNN(config)
saver = tf.train.Saver()
saver.restore(sess, tf.train.latest_checkpoint(best_path))
test_MAP, test_MRR, _, acc = evaluate(sess, model, corpus, config)
print('start test...............')
print("-- test MAP %.5f -- test MRR %.5f" % (test_MAP, test_MRR))
def train(train_corpus, val_corpus, test_corpus, config, eval_train_corpus=None):
iterator = Iterator(train_corpus)
if not os.path.exists(save_path):
os.makedirs(save_path)
if not os.path.exists(best_path):
os.makedirs(best_path)
with tf.Session() as sess:
# training
print('Start training and evaluating ...')
start_time = time.time()
model = SiameseQACNN(config)
saver = tf.train.Saver(tf.global_variables(), max_to_keep=10)
best_saver = tf.train.Saver(tf.global_variables(), max_to_keep=5)
ckpt = tf.train.get_checkpoint_state(save_path)
print('Configuring TensorBoard and Saver ...')
summary_writer = tf.summary.FileWriter(save_path, graph=sess.graph)
if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
print('Reloading model parameters..')
saver.restore(sess, ckpt.model_checkpoint_path)
else:
print('Created new model parameters..')
sess.run(tf.global_variables_initializer())
# count trainable parameters
total_parameters = count_parameters()
print('Total trainable parameters : {}'.format(total_parameters))
current_step = 0
best_map_val = 0.0
best_mrr_val = 0.0
last_dev_map = 0.0
last_dev_mrr = 0.0
for epoch in range(config.num_epochs):
print("----- Epoch {}/{} -----".format(epoch + 1, config.num_epochs))
count = 0
for batch_x in iterator.next(config.batch_size, shuffle=True):
batch_q, batch_a_pos, batch_a_neg, batch_qmask, batch_a_pos_mask, batch_a_neg_mask = zip(*batch_x)
batch_q =
|
np.asarray(batch_q)
|
numpy.asarray
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Methods for working with the CNOT-dihedral group.
Example:
from dihedral import CNOTDihedral
g = CNOTDihedral(3) # create identity element on 3 qubits
g.cnot(0,1) # apply CNOT from qubit 0 to qubit 1
g.flip(2) # apply X on qubit 2
g.phase(3, 1) # apply T^3 on qubit 1
print(g) # pretty print g
phase polynomial =
0 + 3*x_0 + 3*x_1 + 2*x_0*x_1
affine function =
(x_0,x_0 + x_1,x_2 + 1)
This means that |x_0 x_1 x_2> transforms to omega^{p(x)}|f(x)>,
where omega = exp(i*pi/4) from which we can read that
T^3 on qubit 1 AFTER CNOT_{0,1} is the same as
T^3 on qubit 0, T^3 on qubit 1, and CS_{0,1} BEFORE CNOT_{0,1}.
"""
import itertools
from itertools import combinations
import copy
from functools import reduce
from operator import mul
import numpy as np
from numpy.random import RandomState
from qiskit.exceptions import QiskitError
from qiskit.quantum_info.operators.base_operator import BaseOperator
from qiskit.quantum_info.operators.operator import Operator
from qiskit.quantum_info.operators.pauli import Pauli
from qiskit.quantum_info.operators.scalar_op import ScalarOp
from qiskit.circuit import QuantumCircuit, Instruction
from qiskit.circuit.library import U1Gate
class SpecialPolynomial():
"""Multivariate polynomial with special form.
Maximum degree 3, n Z_2 variables, coefficients in Z_8.
"""
def __init__(self, n_vars):
"""Construct the zero polynomial on n_vars variables."""
# 1 constant term
# n linear terms x_1, ..., x_n
# {n choose 2} quadratic terms x_1x_2, x_1x_3, ..., x_{n-1}x_n
# {n choose 3} cubic terms x_1x_2x_3, ..., x_{n-2}x_{n-1}x_n
# and coefficients in Z_8
if n_vars < 1:
raise QiskitError("n_vars for SpecialPolynomial is too small.")
self.n_vars = n_vars
self.nc2 = int(n_vars * (n_vars-1) / 2)
self.nc3 = int(n_vars * (n_vars-1) * (n_vars-2) / 6)
self.weight_0 = 0
self.weight_1 = np.zeros(n_vars, dtype=np.int8)
self.weight_2 = np.zeros(self.nc2, dtype=np.int8)
self.weight_3 = np.zeros(self.nc3, dtype=np.int8)
def mul_monomial(self, indices):
"""Multiply by a monomial given by indices.
Returns the product.
"""
length = len(indices)
if length >= 4:
raise QiskitError("There is no term with on more than 3 indices.")
indices_arr = np.array(indices)
if (indices_arr < 0).any() and (indices_arr > self.n_vars).any():
raise QiskitError("Indices are out of bounds.")
if length > 1 and (np.diff(indices_arr) <= 0).any():
raise QiskitError("Indices are non-increasing!")
result = SpecialPolynomial(self.n_vars)
if length == 0:
result = copy.deepcopy(self)
else:
terms0 = [[]]
terms1 = list(combinations(range(self.n_vars), r=1))
terms2 = list(combinations(range(self.n_vars), r=2))
terms3 = list(combinations(range(self.n_vars), r=3))
for term in terms0 + terms1 + terms2 + terms3:
value = self.get_term(term)
new_term = list(set(term).union(set(indices)))
result.set_term(new_term, (result.get_term(new_term) + value) % 8)
return result
def __mul__(self, other):
"""Multiply two polynomials."""
if not isinstance(other, SpecialPolynomial):
other = int(other)
result = SpecialPolynomial(self.n_vars)
if isinstance(other, int):
result.weight_0 = (self.weight_0 * other) % 8
result.weight_1 = (self.weight_1 * other) % 8
result.weight_2 = (self.weight_2 * other) % 8
result.weight_3 = (self.weight_3 * other) % 8
else:
if self.n_vars != other.n_vars:
raise QiskitError("Multiplication on different n_vars.")
terms0 = [[]]
terms1 = list(combinations(range(self.n_vars), r=1))
terms2 = list(combinations(range(self.n_vars), r=2))
terms3 = list(combinations(range(self.n_vars), r=3))
for term in terms0 + terms1 + terms2 + terms3:
value = other.get_term(term)
if value != 0:
temp = copy.deepcopy(self)
temp = temp.mul_monomial(term)
temp = temp * value
result = result + temp
return result
def __rmul__(self, other):
"""Right multiplication.
This operation is commutative.
"""
return self.__mul__(other)
def __add__(self, other):
"""Add two polynomials."""
if not isinstance(other, SpecialPolynomial):
raise QiskitError("Element to add is not a SpecialPolynomial.")
if self.n_vars != other.n_vars:
raise QiskitError("Addition on different n_vars.")
result = SpecialPolynomial(self.n_vars)
result.weight_0 = (self.weight_0 + other.weight_0) % 8
result.weight_1 = (self.weight_1 + other.weight_1) % 8
result.weight_2 = (self.weight_2 + other.weight_2) % 8
result.weight_3 = (self.weight_3 + other.weight_3) % 8
return result
def evaluate(self, xval):
"""Evaluate the multinomial at xval.
if xval is a length n z2 vector, return element of Z8.
if xval is a length n vector of multinomials, return
a multinomial. The multinomials must all be on n vars.
"""
if len(xval) != self.n_vars:
raise QiskitError("Evaluate on wrong number of variables.")
check_int = list(map(lambda x: isinstance(x, int), xval))
check_poly = list(map(lambda x: isinstance(x, SpecialPolynomial), xval))
if False in check_int and False in check_poly:
raise QiskitError("Evaluate on a wrong type.")
is_int = (False not in check_int)
if not is_int:
if False in [i.n_vars == self.n_vars for i in xval]:
raise QiskitError("Evaluate on incompatible polynomials.")
else:
xval = xval % 2
# Examine each term of this polynomial
terms0 = [[]]
terms1 = list(combinations(range(self.n_vars), r=1))
terms2 = list(combinations(range(self.n_vars), r=2))
terms3 = list(combinations(range(self.n_vars), r=3))
# Set the initial result and start for each term
if is_int:
result = 0
start = 1
else:
result = SpecialPolynomial(self.n_vars)
start = SpecialPolynomial(self.n_vars)
start.weight_0 = 1
# Compute the new terms and accumulate
for term in terms0 + terms1 + terms2 + terms3:
value = self.get_term(term)
if value != 0:
newterm = reduce(mul, [xval[j] for j in term], start)
result = result + value * newterm
if isinstance(result, int):
result = result % 8
return result
def set_pj(self, indices):
"""Set to special form polynomial on subset of variables.
p_J(x) := sum_{a subseteq J,|a| neq 0} (-2)^{|a|-1}x^a
"""
indices_arr = np.array(indices)
if (indices_arr < 0).any() or (indices_arr >= self.n_vars).any():
raise QiskitError("Indices are out of bounds.")
indices = sorted(indices)
subsets_2 = itertools.combinations(indices, 2)
subsets_3 = itertools.combinations(indices, 3)
self.weight_0 = 0
self.weight_1 = np.zeros(self.n_vars)
self.weight_2 = np.zeros(self.nc2)
self.weight_3 = np.zeros(self.nc3)
for j in indices:
self.set_term([j], 1)
for j in subsets_2:
self.set_term(list(j), 6)
for j in subsets_3:
self.set_term(list(j), 4)
def get_term(self, indices):
"""Get the value of a term given the list of variables.
Example: indices = [] returns the constant
indices = [0] returns the coefficient of x_0
indices = [0,3] returns the coefficient of x_0x_3
indices = [0,1,3] returns the coefficient of x_0x_1x_3
If len(indices) > 3 the method fails.
If the indices are out of bounds the method fails.
If the indices are not increasing the method fails.
"""
length = len(indices)
if length >= 4:
return 0
indices_arr = np.array(indices)
if (indices_arr < 0).any() or (indices_arr >= self.n_vars).any():
raise QiskitError("Indices are out of bounds.")
if length > 1 and (np.diff(indices_arr) <= 0).any():
raise QiskitError("Indices are non-increasing.")
if length == 0:
return self.weight_0
if length == 1:
return self.weight_1[indices[0]]
if length == 2:
# sum(self.n_vars-j, {j, 1, indices[0]})
offset_1 = int(indices[0] * self.n_vars -
((indices[0] + 1) * indices[0])/2)
offset_2 = int(indices[1] - indices[0] - 1)
return self.weight_2[offset_1 + offset_2]
# handle length = 3
tmp_1 = self.n_vars - indices[0]
offset_1 = int((tmp_1 - 3) * (tmp_1 - 2) * (tmp_1 - 1) / 6)
tmp_2 = self.n_vars - indices[1]
offset_2 = int((tmp_2 - 2) * (tmp_2 - 1) / 2)
offset_3 = self.n_vars - indices[2]
offset = int(self.n_vars * (self.n_vars - 1) * (self.n_vars - 2) / 6 -
offset_1 - offset_2 - offset_3)
return self.weight_3[offset]
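# Example of the flattened ordering for n_vars = 3: weight_2 holds the coefficients of
# [x_0*x_1, x_0*x_2, x_1*x_2] in that order (so get_term([0, 2]) reads weight_2[1]),
# and weight_3 holds the single x_0*x_1*x_2 coefficient.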
def set_term(self, indices, value):
"""Set the value of a term given the list of variables.
Example: indices = [] returns the constant
indices = [0] returns the coefficient of x_0
indices = [0,3] returns the coefficient of x_0x_3
indices = [0,1,3] returns the coefficient of x_0x_1x_3
If len(indices) > 3 the method fails.
If the indices are out of bounds the method fails.
If the indices are not increasing the method fails.
The value is reduced modulo 8.
"""
length = len(indices)
if length >= 4:
return
indices_arr = np.array(indices)
if (indices_arr < 0).any() or (indices_arr >= self.n_vars).any():
raise QiskitError("Indices are out of bounds.")
if length > 1 and (np.diff(indices_arr) <= 0).any():
raise QiskitError("Indices are non-increasing.")
value = value % 8
if length == 0:
self.weight_0 = value
elif length == 1:
self.weight_1[indices[0]] = value
elif length == 2:
# sum(self.n_vars-j, {j, 1, indices[0]})
offset_1 = int(indices[0] * self.n_vars -
((indices[0] + 1) * indices[0])/2)
offset_2 = int(indices[1] - indices[0] - 1)
self.weight_2[offset_1 + offset_2] = value
else: # length = 3
tmp_1 = self.n_vars - indices[0]
offset_1 = int((tmp_1 - 3) * (tmp_1 - 2) * (tmp_1 - 1) / 6)
tmp_2 = self.n_vars - indices[1]
offset_2 = int((tmp_2 - 2) * (tmp_2 - 1) / 2)
offset_3 = self.n_vars - indices[2]
offset = int(self.n_vars * (self.n_vars - 1) * (self.n_vars - 2) / 6 -
offset_1 - offset_2 - offset_3)
self.weight_3[offset] = value
@property
def key(self):
"""Return a string representation."""
tup = (self.weight_0, tuple(self.weight_1),
tuple(self.weight_2), tuple(self.weight_3))
return str(tup)
def __eq__(self, x):
"""Test equality."""
return isinstance(x, SpecialPolynomial) and self.key == x.key
def __str__(self):
"""Return formatted string representation."""
out = str(self.weight_0)
for i in range(self.n_vars):
value = self.get_term([i])
if value != 0:
out += " + "
if value != 1:
out += (str(value) + "*")
out += ("x_" + str(i))
for i in range(self.n_vars-1):
for j in range(i+1, self.n_vars):
value = self.get_term([i, j])
if value != 0:
out += " + "
if value != 1:
out += (str(value) + "*")
out += ("x_" + str(i) + "*x_" + str(j))
for i in range(self.n_vars-2):
for j in range(i+1, self.n_vars-1):
for k in range(j+1, self.n_vars):
value = self.get_term([i, j, k])
if value != 0:
out += " + "
if value != 1:
out += (str(value) + "*")
out += ("x_" + str(i) + "*x_" + str(j) +
"*x_" + str(k))
return out
class CNOTDihedral(BaseOperator):
"""CNOT-dihedral Object Class.
The CNOT-dihedral group on num_qubits qubits is generated by the gates
CNOT, T and X.
References:
1. <NAME> and <NAME>, *On the structure of the CNOT-Dihedral group*,
`arXiv:2006.12042 [quant-ph] <https://arxiv.org/abs/2006.12042>`_
2. <NAME>, Easwar Magesan, <NAME>, <NAME> and <NAME>,
*Scalable randomised benchmarking of non-Clifford gates*,
npj Quantum Inf 2, 16012 (2016).
"""
def __init__(self, data, validate=True):
"""Initialize a CNOTDihedral operator object."""
# Initialize from another CNOTDihedral by sharing the underlying
# poly, linear and shift
if isinstance(data, CNOTDihedral):
self.linear = data.linear
self.shift = data.shift
self.poly = data.poly
# Initialize from ScalarOp as N-qubit identity discarding any global phase
elif isinstance(data, ScalarOp):
if not data.is_unitary() or set(data._input_dims) != {2} or \
data.num_qubits is None:
raise QiskitError("Can only initialize from N-qubit identity ScalarOp.")
self._num_qubits = data.num_qubits
# phase polynomial
self.poly = SpecialPolynomial(self._num_qubits)
# n x n invertible matrix over Z_2
self.linear = np.eye(self._num_qubits, dtype=np.int8)
# binary shift, n coefficients in Z_2
self.shift = np.zeros(self._num_qubits, dtype=np.int8)
# Initialize from a QuantumCircuit or Instruction object
elif isinstance(data, (QuantumCircuit, Instruction)):
self._num_qubits = data.num_qubits
elem = self.from_circuit(data)
self.poly = elem.poly
self.linear = elem.linear
self.shift = elem.shift
# Construct the identity element on num_qubits qubits.
elif isinstance(data, int):
self._num_qubits = data
# phase polynomial
self.poly = SpecialPolynomial(self._num_qubits)
# n x n invertible matrix over Z_2
self.linear = np.eye(self._num_qubits, dtype=np.int8)
# binary shift, n coefficients in Z_2
self.shift = np.zeros(self._num_qubits, dtype=np.int8)
elif isinstance(data, Pauli):
self._num_qubits = data.num_qubits
elem = self.from_circuit(data.to_instruction())
self.poly = elem.poly
self.linear = elem.linear
self.shift = elem.shift
# Initialize BaseOperator
super().__init__(num_qubits=self._num_qubits)
# Validate the CNOTDihedral element
if validate and not self.is_cnotdihedral():
raise QiskitError('Invalid CNOTDihedral element.')
def _z2matmul(self, left, right):
"""Compute product of two n x n z2 matrices."""
prod = np.mod(np.dot(left, right), 2)
return prod
def _z2matvecmul(self, mat, vec):
"""Compute mat*vec of n x n z2 matrix and vector."""
prod = np.mod(np.dot(mat, vec), 2)
return prod
def __mul__(self, other):
"""Left multiplication self * other."""
if self.num_qubits != other.num_qubits:
raise QiskitError("Multiplication on different number of qubits.")
result = CNOTDihedral(self.num_qubits)
result.shift = [(x[0] + x[1]) % 2
for x in zip(self._z2matvecmul(self.linear, other.shift), self.shift)]
result.linear = self._z2matmul(self.linear, other.linear)
# Compute x' = B1*x + c1 using the p_j identity
new_vars = []
for i in range(self.num_qubits):
support =
|
np.arange(self.num_qubits)
|
numpy.arange
|
import unittest
import nose.tools as nt
import numpy as np
from daps.utils.pooling import pyramid1d
@unittest.skip("A contribution is required")
def test_concat1d():
return None
def test_pyramid1d():
x = np.array([[0, 4],
[4, 2],
[0, 4],
[2, 0],
[1, 2],
[1, 4],
[3, 4],
[1, 4],
[1, 1],
[4, 2]])
nt.assert_equal((2,), pyramid1d(x).shape)
py1_x = pyramid1d(x, 1)
nt.assert_equal((6,), py1_x.shape)
rst = np.array([2, 3])/np.sqrt(13)
np.testing.assert_array_almost_equal(rst, py1_x[4:6])
py2_x = pyramid1d(x, 2)
nt.assert_equal((14,), py2_x.shape)
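# Inferred shape pattern (hedged reading of pyramid1d): each extra level concatenates twice as many
# segment descriptors, giving 2*(2**(levels+1) - 1) values for this 2-column input: 2, 6, 14 for levels 0, 1, 2.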
rst = np.array([1, 2])/np.sqrt(5)
|
np.testing.assert_array_almost_equal(rst, py2_x[8:10])
|
numpy.testing.assert_array_almost_equal
|
import sys
import itertools
import numpy as np
from lab5 import input_matrix, input_vector
def print_matrix(matrix):
return(f'\n'.join([' '.join(list(map(str, map(int, i)))) for i in matrix]))
def square_task(c, D, A, x, J, J_adv):
m = len(A)
n = len(A[0])
itr = 1
while True:
print(f'\nIteration {itr}:\n')
# STEP 1
print('X:\t' + np.array2string(x))
cx = c + x @ D
print(f'Cx:\t{np.array2string(cx)}')
cbx = np.array([cx[J[i] - 1] for i in range(len(J))])
A_b = np.array([(A[:, J[i] - 1]) for i in range(len(J))])
A_b_inv = np.linalg.inv(A_b)
print(f'A_b_inv:\n{print_matrix(A_b_inv)}')
ux = -cbx @ A_b_inv
print(f'ux:\t{ux}')
deltax = ux @ A + cx
print(f'deltax:\t{deltax}')
# STEP 2
if min(deltax) >= 0:
return f'ANSWER:\t{x}' # return
# STEP 3
j0 = list(deltax).index(min(deltax)) # python index
print(f'j0:\t{j0}')
# STEP 4
vector_l = np.zeros(n)
vector_l[j0] = 1
print(f'J_adv:\t{J_adv}')
l_adv = np.delete(vector_l, J_adv - 1, axis=0)
print(f'l_adv:\t{l_adv}')
# STEP 4.a
# all linear combinations of J(coordinates of Ds elements)
D_adv_indx = list(
itertools.combinations_with_replacement(J_adv - 1, 2))
lJ = len(J_adv)
D_adv = np.zeros([lJ, lJ])
k = 0
for i in range(lJ):
for j in range(i, lJ):
if D_adv_indx[k][0] != D_adv_indx[k][1]:
D_adv[j][i] = D[D_adv_indx[k][1]][D_adv_indx[k][0]]
D_adv[i][j] = D[D_adv_indx[k][0]][D_adv_indx[k][1]]
k += 1
A_adv_b = np.array([(A[:, J_adv[i] - 1]) for i in range(len(J_adv))])
At_adv_b = np.transpose(A_adv_b)
print(f'D_adv:\n{print_matrix(D_adv)}')
print(f'A_adv_b:\n{print_matrix(A_adv_b)}')
Matrix1 = np.row_stack((D_adv, At_adv_b))
Matrix2 = np.row_stack(
(A_adv_b, np.zeros([len(A_adv_b[0]), len(At_adv_b)])))
H = np.column_stack((Matrix1, Matrix2))
# STEP 4.b
b_up = np.array([D[:, j0][J_adv[i] - 1] for i in range(len(J_adv))])
print(f'b_up:\t{b_up}')
b_down = A[:, j0]
print(f'b_down:\t{b_down}')
b = np.concatenate((b_up, b_down))
# STEP 4.c
print(f'H:\n{print_matrix(H)}')
print(f'b:\t{b}')
x_hb = -np.linalg.inv(H) @ b
print(f'x_hb:\t{x_hb}')
ladv = np.array([x_hb[i] for i in range(len(J_adv))])
print(f'ladv:\t{ladv}')
vector_l = np.concatenate((ladv, l_adv))
# STEP 5
print(f'vector_l:\t{vector_l}')
print(f'D:\n{print_matrix(D)}')
delta = vector_l @ D @ vector_l[:, np.newaxis]
print(f'delta:\t{delta}')
teta = np.full(len(J_adv), np.inf)
teta_j0 = np.inf
if delta > 0:
teta_j0 = abs(deltax[j0]) / delta
print(f'teta_j0:\t{teta_j0}')
for i in range(len(teta)):
if vector_l[i] < 0:
teta[i] = -x[i] / vector_l[i]
teta = np.append(teta, teta_j0)
print(f'teta:\t{teta}')
teta0 = min(teta)
print(f'teta0:\t{teta0}')
if teta0 == np.inf or teta0 > 1e+16:
return ('TARGET FUNCTION IS UNBOUNDED') # return
js = j0 # j*
if teta0 != teta_j0:
js = J_adv[list(teta).index(teta0)] - 1 # python index
print(f'js:\t{js}')
# STEP 6(UPDATE)
x = x + teta0 * vector_l
print(f'teta0 * vector_l:\t{teta0 * vector_l}')
print(f'J:\t{J}\nJ_adv:\t{J_adv}')
last_condition = True
if js == j0:
J_adv = np.append(J_adv, js + 1)
last_condition = False
elif js + 1 in J_adv and js+1 not in J:
J_adv = np.delete(J_adv, np.where(J_adv == js + 1))
last_condition = False
elif js + 1 in J:
s = list(J).index(js + 1)
J_adv_tmp = set(J_adv) - set(J)
J_adv_tmp = list(J_adv_tmp)
print(f'J_adv_tmp:\t{J_adv_tmp}')
for i in range(len(J_adv_tmp)):
j_plus = J_adv_tmp[i] # not python index
vector_tmp = A_b_inv @ A[:, j_plus - 1]
print(vector_tmp)
if vector_tmp[s] != 0:
J = np.where(J == js + 1, j_plus, J)
J_adv = np.delete(J_adv, np.where(J_adv == j_plus))
last_condition = False
break
if last_condition:
J = np.where(J == js + 1, j0 + 1, J)
J_adv = np.where(J_adv == js + 1, j0 + 1, J_adv)
itr += 1
def test1():
c = np.array([0, -1, 0])
D = np.array([[2, -1, 0],
[-1, 2, -1],
[0, -1, 2]])
A = np.array([[2, 1, 0],
[0, 1, 2]])
x = np.array([0, 2, 1])
J = np.array([2, 3])
J_adv =
|
np.array([2, 3])
|
numpy.array
|
#- This simulation with gpu (with the below parameters) took 14h
#- In this experiment we also set lr from 0.01 to 0.0025
# but here, with masking, it behaves like the no-masking case (exp2a-d) with lr from 0.03 to 0.0075;
# the correction factor is approx 3.
# So: next time, for the masking case, we should probably set lr=0.005-0.001
# ssh no100
# screen -S exp2e
# cd /export/lv4/user/jfajardourbina/dws_ulf_getm_2D_depth_avg/experiments_post_proc/lagrangian_simulation_36years/machine_learning_github/Lagrangian_ML/notebooks/convlstm
# conda activate phd_parcelsv221
# python3 convlstm_dws_exp2e_use_only_displacement_standarized_5std_train_test_adaptive_lr_masking_loss_gpu.py &
# to comeback: screen -r exp2e
import os
import sys
import numpy as np
import torch
import torch.nn.functional
from torch.autograd import Variable
import matplotlib.pyplot as plt
from copy import deepcopy
import matplotlib as mpl
import glob
import xarray as xr
import dask as da
from tqdm import tqdm
# import convlstm
sys.path.append("../../../src")
import convlstm
#path to files---
homee = "/export/lv4/user/jfajardourbina/"
dir_post_proc_data=f"{homee}dws_ulf_getm_2D_depth_avg/experiments_post_proc/lagrangian_simulation_36years/machine_learning_github/Lagrangian_ML/post_proc_data/"
dir_displacement="net_displacement"
dir_interp_wind="wind"
#for output after train and test---
exp="exp2e"
dir_convlstm_model_out="ouput_convlstm_model_data/"
case_train="training"; file_out_train=f"{exp}_train.nc"
case_test="testing"; file_out_test=f"{exp}_test.nc"
#for plotting---
#dir_wind="{homee}dws_ulf_getm_2D_depth_avg/data/atmosphere/" #winds
dir_dws_bound=f"{homee}dws_ulf_getm_2D_depth_avg/experiments_post_proc/analysis_eulerian_data_36years/data_dws_boundaries/" #DWS boundarie with contour0
file_dws_bound0="dws_boundaries_contour0.nc"
file_grid=f"{homee}dws_ulf_getm_2D_depth_avg/experiments_post_proc/analysis_eulerian_data_36years/data_bathy_grid/" #topo data
#dir_vel = f'{homee}dws_ulf_getm_2D_depth_avg/data/velocity/' #vel data
#file_vel="RE.DWS200m.uvz.20090301.nc" #any vel file
#file_wind="UERRA.2009.nc4" #any wind file
#
#savee='everyM2' #saving track data every m2
#deploy='everyM2'#deploy set of particles every m2
#minTsim=60 #mimimum time of simulation (days)
#maxTsim=91 #maximum time of simulation (days)
#dir_tracks = f"{homee}dws_ulf_getm_2D_depth_avg/experiments_post_proc/lagrangian_simulation_36years/exp-deployHighVolume_coords-xcyc_save-{savee}_deploy-{deploy}_Tsim-{minTsim}-{maxTsim}d/tracks/"
#
#parameters
#npa_per_dep=12967 #number of particles per deployment
m2=int(12.42*3600+2) #period in seconds
#dx=400/1e3;dy=400/1e3 #particle grid reso
#Hyper-parameter of neural network---
run_model = True #True = run model; False = open results from old run
input_channels = 2 # number of input channels: u10,v10 wind
output_channels = 2 #number of output channels: dx, dy displacement
hidden_channels = [6, 3, output_channels] # the last digit is the output channel of each ConvLSTMCell (so we are using 3 layers)
#hidden_channels = [4, output_channels] # the last digit is the output channel of each ConvLSTMCell (so we are using 2 layers)
kernel_size = 3 #3, does not work with kernel=2
batch_size = 1 #not used in this tutorial
num_epochs = 3000
#learning parameters:
adaptive_learning=True #False: lr=learning_rate; True: lr=[learning_rate - learning_rate_end]
#learning_rate = 0.0025 ##too slow convergence if used since the beginning of simulation
learning_rate = 0.01 #initial lr
learning_rate_end=0.0025 #final lr
#
std_fac=5 #standarize using "std_fac" times the standard deviation
#
#if: hidden_channels = [6, 3, output_channels]
#the model will create 6GB of data in GPU memory after 400 training time steps
#so, after nt_steps=2000 (around 3y) we will exceed the mem limit of GPU (around 30GB)
#2.5years for training needs approx 26GB for the above model and with: input_channels = 2; output_channels = 2; kernel_size = 3
#this is because at every time step the computation graph is stored in the cumulative loss (after calling the model), so that backpropagation can be performed later
#for this reason it is sometimes important to use mini_batches and perform backpropagation after finishing each one,
#then use the next mini_batch and so on until all the data has been used and 1 epoch finishes.
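#A minimal sketch of that mini-batch idea (chunk_size and the per-step data_in/data_out tensors are hypothetical):
#  for start in range(0, nt_train, chunk_size):
#      loss = 0.
#      for t in range(start, min(start + chunk_size, nt_train)):
#          pred, _ = model(data_in[t], t)
#          loss = loss + loss_fn(pred, data_out[t])
#      loss.backward()                  # frees the graph built for this chunk only
#      optimizer.step(); optimizer.zero_grad()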
#use cuda if possible---
print ("Pytorch version {}".format(torch.__version__))
# check if CUDA is available
use_cuda = torch.cuda.is_available()
# use GPU if possible
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("Device to be used for computation: {}".format(device))
print(f"{torch.cuda.get_device_name(0)}")
#open files----
#open DWS contours
dsb0=xr.open_dataset(dir_dws_bound+file_dws_bound0)
bdr_dws0=dsb0.bdr_dws.values #points that define DWS with contour0
#open topo file
dsto=xr.open_dataset(file_grid+"DWS200m.2012.v03.nc")
xct0=dsto.xc.min().values/1e3; yct0=dsto.yc.min().values/1e3 #=(0,0)
mask_topo=dsto.bathymetry.copy(); mask_topo=xr.where(np.isfinite(mask_topo),1,0) #mask ocean=1, land=0
#open net displacement files---
files_displacement=sorted(glob.glob(f'{dir_post_proc_data}{dir_displacement}/*.nc',recursive=True))
#files_displacement=files_displacement[29:31] #2009-2010
#concat all the files
dsdis=xr.open_mfdataset(files_displacement,concat_dim="time",parallel='True',chunks={'time': -1},
decode_cf=True, decode_times=True)#.load() #this are default decodes
#data_vars='minimal', coords='minimal', compat='override') #takes 1second more with this, see https://xarray.pydata.org/en/stable/io.html#reading-multi-file-datasets
#open interp files for wind---
files_interp_wind=sorted(glob.glob(f'{dir_post_proc_data}{dir_interp_wind}/*.nc',recursive=True))
#files_interp_wind=files_interp_wind[29:31]
#concat all the files
dswi=xr.open_mfdataset(files_interp_wind,concat_dim="time",parallel='True',chunks={'time': -1},
decode_cf=True, decode_times=True)#.load() #this are default decodes
#data_vars='minimal', coords='minimal', compat='override') #takes 1second more with this, see https://xarray.pydata.org/en/stable/io.html#reading-multi-file-datasets
#set training data---
#
#wind (input)
#dswi_train=dswi.sel(time=slice("2009-10-01","2009-12-31"),x=slice(60000,80000),y=slice(60000,70000))
#dswi_train=dswi_train.fillna(0) #fill nan with 0s in case displacement is in land (not neccesary for the above small domain)
#in_u10_train,in_v10_train=da.compute(dswi_train.u10.values.astype('float32'),dswi_train.v10.values.astype('float32'))
#
#displacement (output)
#dsdis_train=dsdis.sel(time=slice("2009-01-01","2012-12-31"))#,x=slice(70000,80000),y=slice(60000,70000))
#dsdis_train=dsdis.sel(time=slice("2009-10-01","2009-12-31"))#,x=slice(70000,80000),y=slice(60000,70000))
dsdis_train=dsdis.sel(time=slice("2009-06-01","2011-12-31"))#,x=slice(70000,80000),y=slice(60000,70000))
#dsdis_train=dsdis_train.fillna(0) #fill nan with 0s in case displacement is on land (not neccesary for the above small domain)
dx_train,dy_train=da.compute(dsdis_train.dx.values.astype('float32'),dsdis_train.dy.values.astype('float32'))
#
#use the past displacement to predict the displacement after M2=12.42h
in_u10_train,in_v10_train=dx_train[0:-1,...],dy_train[0:-1,...]
out_dx_train,out_dy_train=dx_train[1:,...],dy_train[1:,...]
#
del dx_train,dy_train
times_train=dsdis_train.time[0:-1].values
nt_train,ny,nx=out_dx_train.shape
print(times_train[[0,-1]],out_dx_train.shape)
#set testing data---
#
#wind (input)
#dswi_test=dswi.sel(time=slice("2009-10-01","2009-12-31"),x=slice(60000,80000),y=slice(60000,70000))
#dswi_test=dswi_test.fillna(0) #fill nan with 0s in case displacement is in land (not neccesary for the above small domain)
#in_u10_test,in_v10_test=da.compute(dswi_test.u10.values.astype('float32'),dswi_test.v10.values.astype('float32'))
#
#displacement (output)
#dsdis_test=dsdis.sel(time=slice("2013-01-01",None))#,x=slice(70000,80000),y=slice(60000,70000))
#dsdis_test=dsdis.sel(time=slice("2010-01-01","2010-03-31"))#,x=slice(70000,80000),y=slice(60000,70000))
dsdis_test=dsdis.sel(time=slice("2012-01-01",None))#,x=slice(70000,80000),y=slice(60000,70000))
#dsdis_test=dsdis_test.fillna(0) #fill nan with 0s in case displacement is on land (not neccesary for the above small domain)
dx_test,dy_test=da.compute(dsdis_test.dx.values.astype('float32'),dsdis_test.dy.values.astype('float32'))
#
#use the past displacement to predict the displacement after m2
in_u10_test,in_v10_test=dx_test[0:-1,...],dy_test[0:-1,...]
out_dx_test,out_dy_test=dx_test[1:,...],dy_test[1:,...]
#
del dx_test,dy_test
times_test=dsdis_test.time[0:-1].values
nt_test,ny,nx=out_dx_test.shape
print(times_test[[0,-1]],out_dx_test.shape)
#for plotting maps of predictions---
#mask: ocean=1, land=nan
mask=out_dx_train[0,...]*1.; mask[np.isfinite(mask)]=1.; mask[np.isnan(mask)]=np.nan
xx=dsdis_train.x/1e3; yy=dsdis_train.y/1e3; xx,yy=np.meshgrid(xx,yy)
#for masking values on land when computing loss---
mask_torch=torch.tensor(np.where(np.isnan(mask),0,1)[np.newaxis,np.newaxis,...]*np.ones((output_channels,ny,nx)))*1.
mask_numpy=mask_torch.numpy()*1.
def standarization(var,fac=3):
mean=np.nanmean(var)
std=np.nanstd(var)*fac #using fac times the std (seems to work better than just 1 std)
var[np.isnan(var)]=0. #fill with 0 in case of nan. Note this modifies the input array in place
return ((var-mean)/std),mean,std #.astype('float32')
def destandarization(var,mean,std):
return (var*std+mean) #.astype('float32')
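#note: destandarization exactly inverts standarization for finite values; positions that were nan come back as 0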
#standarization of data---
#training---
#inputs
in_u10_train, in_u10_mean_train, in_u10_std_train = standarization(in_u10_train,std_fac)
in_v10_train, in_v10_mean_train, in_v10_std_train = standarization(in_v10_train,std_fac)
#outputs
out_dx_train, out_dx_mean_train, out_dx_std_train = standarization(out_dx_train,std_fac)
out_dy_train, out_dy_mean_train, out_dy_std_train = standarization(out_dy_train,std_fac)
print("train info:")
print(f"steps={nt_train}; (ny,nx)=({ny},{nx})")
print("input approx output")
print(f"dx_mean, dx_std*{std_fac}, dy_mean, dy_std*{std_fac}:")
print(in_u10_mean_train, in_u10_std_train, in_v10_mean_train, in_v10_std_train)
print()
#testing---
#inputs
in_u10_test, in_u10_mean_test, in_u10_std_test = standarization(in_u10_test,std_fac)
in_v10_test, in_v10_mean_test, in_v10_std_test = standarization(in_v10_test,std_fac)
#outputs
out_dx_test, out_dx_mean_test, out_dx_std_test = standarization(out_dx_test,std_fac)
out_dy_test, out_dy_mean_test, out_dy_std_test = standarization(out_dy_test,std_fac)
print("test info:")
print(f"steps={nt_test}; (ny,nx)=({ny},{nx})")
print("input approx output")
print(f"dx_mean, dx_std*{std_fac}, dy_mean, dy_std*{std_fac}:")
print(in_u10_mean_test, in_u10_std_test, in_v10_mean_test, in_v10_std_test)
# initialize model---
if run_model:
#loss functions with and without masking---
class initialization:
def __init__(self, masking=False, mask=None):
self.masking=masking
self.mask=mask
class loss_function:
class mse(initialization):
#we call this function without using its name
def __call__(self, predict=torch.zeros(1), target=torch.zeros(1)):
if self.masking:
#masking land points---
#
#- the masking affects:
# the value of the total loss (which only includes points inside DWS) and hence the last gradient of the backpropagation
# loss=sum((prediction-output)**2)/N; dloss/dpred=2*(prediction-output)/N,
# with masking N is smaller because we don't consider land points, so the effect is similar to increasing the overall lr
#- a similar effect to masking can be obtained without using it:
# if we use another custom loss like torch.nn.MSELoss(reduction='sum'),
# masking is irrelevant since we don't divide by N
#
#disregard land points (=0) for the mean, so the loss value will increase
#mask_torch: 0=land, 1=ocean
#however, because we only have particles inside DWS, mask_torch=0 for the land and all points outside DWS
loss_val = torch.mean(((predict-target)[self.mask==1])**2)
else:
#original---
loss_val = torch.mean((predict-target)**2) #=torch.nn.MSELoss()
#
return loss_val
class mse_numpy(initialization):
#we call this function without using its name
def __call__(self, predict=np.zeros(1), target=np.zeros(1)):
if self.masking:
#masking land points---
#disregard land points (=0) for the mean, so the loss value will increase
#probably because land points decrease the loss, the model doesn't perform so well
#mask_torch: 0=land, 1=ocean
#however, because we only have particles inside DWS, mask_torch=0 at all points except inside it
loss_val = np.mean(((predict-target)[self.mask==1])**2)
else:
#original---
loss_val = np.mean((predict-target)**2) #=torch.nn.MSELoss()
#
return loss_val
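# Quick illustration of the masking behaviour (shapes follow the (1, output_channels, ny, nx) tensors used below):
#   loss_masked = loss_function.mse(masking=True, mask=mask_torch)
#   loss_plain = loss_function.mse()
#   # for the same prediction/target pair, loss_masked averages only over mask==1 (ocean, inside-DWS) points,
#   # while loss_plain also averages the land points, so the two values generally differ.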
# initialize our model---
model = convlstm.ConvLSTM(input_channels, hidden_channels, kernel_size).to(device)
# choose loss function
#loss_fn = torch.nn.MSELoss()
#loss_fn = loss_function.mse() #for training (the same as above)
masking=True
loss_fn = loss_function.mse(masking=masking,mask=mask_torch) #for training (masking land points)
#loss_fn_np = loss_function.mse_numpy() #for testing
loss_fn_np = loss_function.mse_numpy(masking=masking,mask=mask_numpy) #for testing
# choose optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# check the model / loss function and optimizer
print(model)
print(loss_fn.__class__.__name__) #this works for pytorch, but also for our custom class
#print(loss_fn) #only works for pytorch
print(optimizer)
def training(epoch,num_epochs,nt,model):
predict=[]
# Clear stored gradient
model.zero_grad()
# loop through all timesteps
for t in range(nt):
#stack data---
#
#old method using torch.autograd.Variable and .view()---
#
#data_in=np.stack((in_u10_train[t,...],in_v10_train[t,...]))
#data_out=np.stack((out_dx_train[t,...],out_dy_train[t,...]))
#data_in = torch.autograd.Variable(torch.Tensor(data_in).view(-1,input_channels,ny,nx)).to(device)
#data_out = torch.autograd.Variable(torch.Tensor(data_out).view(-1,input_channels,ny,nx)).to(device)
#
#new method using torch.tensor and np.newaxis (the same results as above)---
data_in = torch.tensor(np.stack((in_u10_train[t,...],
in_v10_train[t,...]),axis=0)[np.newaxis,...]).to(device) #(1,input_channels,ny,nx)
data_out = torch.tensor(np.stack((out_dx_train[t,...],
out_dy_train[t,...]),axis=0)[np.newaxis,...]).to(device) #(1,input_channels,ny,nx)
# Forward process---
#print(torch.cuda.memory_summary()) #check GPU memory usage
predict0, _ = model(data_in, t) #data_in=(1,input_channels,ny,nx) pred_y=(1,output_channels,ny,nx)
#del data_in; torch.cuda.empty_cache() #to delete this variable from GPU memory
# compute loss (and the cumulative loss from all the time steps)---
if t == 0:
loss0 = loss_fn(predict0, data_out) #data_out=(1,output_channels,ny,nx)
if masking:
mae0 = np.mean((abs(predict0-data_out).detach().cpu().numpy())[mask_numpy==1])
else:
mae0 = np.mean(abs(predict0-data_out).detach().cpu().numpy())
#mape0=np.mean( abs((predict0-data_out)/data_out).detach().numpy() ) #problems with mape if denominator = 0
else:
loss0 += loss_fn(predict0, data_out)
if masking:
mae0 += np.mean((abs(predict0-data_out).detach().cpu().numpy())[mask_numpy==1])
else:
mae0 += np.mean(abs(predict0-data_out).detach().cpu().numpy())
#mape0 += np.mean( abs((predict0-data_out)/data_out).detach().numpy() )
#del data_out; torch.cuda.empty_cache()
#cumulative loss from all the time steps (the loss we use for backward propagation)---
if epoch % 50 == 0: print("Train epoch ", epoch, "; mean(MSE(t)) = ", loss0.item()/nt*std_fac**2, "; mean(MAE(t)) = ", mae0/nt*std_fac)
# Zero out gradient, else they will accumulate between epochs---
optimizer.zero_grad()
# Backward pass---
# Backward propagation is kicked off when we call .backward() on the error tensor
# Autograd then calculates and stores the gradients for each model parameter in the parameter’s ".grad" attribute.
loss0.backward()
# Update parameters---
optimizer.step() #to initiate gradient descent
# Clear stored gradient---
model.zero_grad()
# save lr
lr0=optimizer.param_groups[0]["lr"]
#predict train data for the last epoch, after updating model parameters
if epoch == num_epochs-1:
with torch.no_grad():
for t in range(nt):
data_in = torch.from_numpy(np.stack((in_u10_train[t,...],
in_v10_train[t,...]),axis=0)[np.newaxis,...]).to(device) #(1,input_channels,ny,nx)
predict0, _ = model(data_in, t) #data_in=(1,input_channels,ny,nx) predict=(1,output_channels,ny,nx)
predict0 = predict0.detach().cpu().numpy()
predict.append(predict0) #save the predictions for the last epoch
predict=
|
np.reshape(predict,(nt,output_channels,ny,nx))
|
numpy.reshape
|
import numpy as np
from scipy import signal
from scipy.interpolate import splev, splrep
class Record:
sr = 20000 # Sample Rate - 20 kHz
def __init__(self,array):
self.array = array
## Instance Variables
# Control System Features
self.dom_pp = []
self.rec_pp = []
self.dom_bp = []
self.rec_bp = []
self.dom_pt = []
self.rec_pt = []
self.dom_ssv = []
self.rec_ssv = []
self.dom_sse = []
self.rec_sse = []
self.dom_po = []
self.rec_po = []
self.dom_st_s = []
self.rec_st_s = []
self.dom_rt_s = []
self.rec_rt_s = []
self.dom_dt_s = []
self.rec_dt_s = []
# Spectral Analysis Features
self.dom_pulse_data = []
self.rec_pulse_data = []
self.dom_sd = []
self.rec_sd = []
self.dom_snr = []
self.rec_snr = []
self.dom_mdfr = []
self.rec_mdfr = []
self.dom_mnfr = []
self.rec_mnfr = []
# Outlier Check Results
self.outlier_count = []
## Instance Methods
# Control System Processing
self.PeakDetection()
self.PeakTime()
self.SteadyStateValErr()
self.PercentOvershoot()
self.SettlingTime()
self.RiseTime()
self.DelayTime()
# Spectral Analysis Processing
self.RunSpectralAnalysis()
self.total_rec = np.min((len(self.dom_sd), len(self.rec_sd)))
# # Outlier Detection and Removal
# self.OutlierCount()
# self.RemoveOutliers()
# # Build Feature Datastructure
self.features = self.GenerateFeatures()
self.headers = []
self.headers.append('Dom_Peak_Time')
self.headers.append('Dom_Steady_State_Value')
self.headers.append('Dom_Steady_State_Error')
self.headers.append('Dom_Percent_Overshoot')
self.headers.append('Dom_Settling_Time')
self.headers.append('Dom_Rise_Time')
self.headers.append('Dom_Delay_Time')
for i in np.arange(len(self.dom_sd[0])):
self.headers.append('Dom_Spectral_Bin_%d' % i)
self.headers.append('Dom_SNR')
self.headers.append('Dom_Mean_Freq')
self.headers.append('Dom_Median_Freq')
self.headers.append('Rec_Peak_Time')
self.headers.append('Rec_Steady_State_Value')
self.headers.append('Rec_Steady_State_Error')
self.headers.append('Rec_Percent_Overshoot')
self.headers.append('Rec_Settling_Time')
self.headers.append('Rec_Rise_Time')
self.headers.append('Rec_Delay_Time')
for i in np.arange(len(self.rec_sd[0])):
self.headers.append('Rec_Spectral_Bin_%d' % i)
self.headers.append('Rec_SNR')
self.headers.append('Rec_Mean_Freq')
self.headers.append('Rec_Median_Freq')
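## A minimal usage sketch (array is assumed to be a 1-D numpy signal sampled at Record.sr = 20 kHz):
# rec = Record(array)
# features, headers = rec.features, rec.headers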
def PeakDetection(self):
##### PeakDetection
# Input: array - raw signal data for record
# Output: dom_pp - dominant Pulse Peak index
# dom_bp - dominant Before Pulse peak index
# rec_pp - recessive Pulse Peak index
# rec_bp - recessive Before Pulse peak index
### Pulse Peak Detection ###
# Calculate difference array
arr_diff = np.diff(self.array, prepend=self.array[0])
# Perform moving average filter, width=3, x2
w = 3
arr_diff = np.convolve(arr_diff, np.ones(w), 'valid') / w
arr_diff = np.convolve(arr_diff, np.ones(w), 'valid') / w
# Prepend zeros to offset processing delay
arr_diff = np.insert(arr_diff, 0, np.zeros((w-1)*2), axis=0)
# Crossing filter to detect dominant and recessive leading edge zones
dom_pp_ts = (arr_diff > 0.2).astype(float)
rec_pp_ts = (arr_diff < -0.2).astype(float)
# Find peak for each zone (dominant)
a = np.where(dom_pp_ts == 1)[0].astype(float)
b = np.diff(a, prepend=0)
c = np.where(b > 1)[0]
dom_pp = a[c].astype(int)
# Remove errant peaks (dominant)
corr_idx = np.concatenate((np.diff(dom_pp),[np.average(np.diff(dom_pp))]))
if np.min(np.diff(corr_idx)) < 100:
corr_idx = np.where(corr_idx > np.average(corr_idx/4))[0]
dom_pp = dom_pp[corr_idx]
# Find peak for each zone (recessive)
a = np.where(rec_pp_ts == 1)[0].astype(float)
b = np.diff(a, prepend=0)
c = np.where(b > 1)[0]
rec_pp = a[c].astype(int)
# Remove errant peaks (recessive)
corr_idx = np.concatenate((np.diff(rec_pp),[np.average(np.diff(rec_pp))]))
if np.min(np.diff(corr_idx)) < 15:
corr_idx = np.where(corr_idx > np.average(corr_idx/4))[0]
rec_pp = rec_pp[corr_idx]
# Pair dom and rec indices
dom_len = len(dom_pp)
rec_len = len(rec_pp)
dom_is_larger = []
if dom_len > rec_len + 1:
dom_is_larger = 1
elif rec_len > dom_len + 1:
dom_is_larger = 0
if not dom_is_larger == []:
len_min = np.min((dom_len, rec_len))
len_dif = np.abs(dom_len - rec_len) + 1
dif_amt = []
for i in np.arange(len_dif):
                if dom_is_larger:
                    # Slide the (longer) dominant index list against the recessive one
                    temp = dom_pp[i:rec_len+i] - rec_pp[0:rec_len]
                else:
                    # Slide the (longer) recessive index list against the dominant one
                    temp = dom_pp[0:dom_len] - rec_pp[i:dom_len+i]
temp = np.abs(temp)
temp = np.sum(temp)
dif_amt.append(temp)
dif_loc = np.where(np.min(dif_amt) == dif_amt)[0]
if dom_is_larger:
dom_pp = dom_pp[dif_loc[0]:rec_len+dif_loc[0]+1]
else:
rec_pp = rec_pp[dif_loc[0]:dom_len+dif_loc[0]+1]
# Create timestamps using indices
dom_pp_ts = np.zeros(dom_pp_ts.size)
dom_pp_ts[dom_pp] = 1
self.dom_pp = np.where(dom_pp_ts == 1)[0]
rec_pp_ts = np.zeros(rec_pp_ts.size)
rec_pp_ts[rec_pp] = 1
self.rec_pp = np.where(rec_pp_ts == 1)[0]
### Pre-Peak Detection ###
# Crossing filter to detect pre-dominant steady state (Before Leading-edge)
dom_bp_ts = np.abs(np.diff(self.array - 2.5, prepend = self.array[0]))
w = 5
dom_bp_ts = np.convolve(dom_bp_ts, np.ones(w), 'valid') / w
dom_bp_ts = np.insert(dom_bp_ts, 0, np.zeros(w-1), axis=0)
dom_bp_ts = 1-(dom_bp_ts > 0.05).astype(float)
# Crossing filter to detect pre-recessive steady state (Before Leading-edge)
rec_bp_ts = np.abs(np.diff(3.5 - self.array, prepend = self.array[0]))
w = 5
rec_bp_ts = np.convolve(rec_bp_ts, np.ones(w), 'valid') / w
rec_bp_ts = np.insert(rec_bp_ts, 0, np.zeros(w-1), axis=0)
rec_bp_ts = 1-(rec_bp_ts > 0.05).astype(float)
## Find the last instance of steady state prior to dominant peaks
jj = np.zeros(dom_pp.size).astype(int)
for k in np.arange(0,dom_pp.size):
# "Dominant-low steady state" indices before peak
j = np.where(dom_bp_ts[0:dom_pp[k]] == 1)
j = j[0]
# Find nearest index before dominant peak
min_idx = j-dom_pp[k]
min_idx = min_idx[np.where(np.min(np.abs(min_idx)) == np.abs(min_idx))[0]]
jj[k] = ((min_idx + dom_pp[k])[0])
# Dominant prior-to-peak steady-state indices
dom_bp_ts2 = np.zeros(dom_bp_ts.size, dtype=int)
dom_bp_ts2[jj] = 1
self.dom_bp = jj
## Find the last instance of steady state prior to recessive peaks
jj = np.zeros(rec_pp.size).astype(int)
for k in np.arange(0,rec_pp.size):
# "Recesive-low steady state" indices before peak
j = np.where(rec_bp_ts[0:rec_pp[k]] == 1)
j = j[0]
# Find nearest index before recessive peak
min_idx = j-rec_pp[k]
min_idx = min_idx[np.where(np.min(np.abs(min_idx)) == np.abs(min_idx))[0]]
jj[k] = ((min_idx + rec_pp[k])[0])
# Recessive prior-to-peak steady-state indices
rec_bp_ts2 = np.zeros(rec_bp_ts.size, dtype=int)
rec_bp_ts2[jj] = 1
self.rec_bp = jj
def PeakTime(self):
##### PeakTime
# Input: dom_pp - dominant Pulse Peak index
# dom_bp - dominant Before Pulse peak index
# rec_pp - recessive Pulse Peak index
# rec_bp - recessive Before Pulse peak index
# sr - sample rate of the raw data
# Output: dom_pt - dominant Peak Time
# rec_pt - recessive Peak Time
self.dom_pt = (self.dom_pp-self.dom_bp)/Record.sr
self.rec_pt = (self.rec_pp-self.rec_bp)/Record.sr
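    # Worked example (illustrative only, not part of the pipeline): with the
    # class sample rate of 20 kHz, a dominant peak at sample index 150 whose
    # last pre-pulse steady-state sample sits at index 100 gives a peak time of
    # (150 - 100) / 20000 = 2.5 ms. The hypothetical values below only sketch
    # the arithmetic performed by PeakTime():
    #
    #   dom_pp = np.array([150]); dom_bp = np.array([100])
    #   dom_pt = (dom_pp - dom_bp) / Record.sr   # -> array([0.0025]) seconds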
def SteadyStateValErr(self):
##### Steady State Value and Error
# Input: array - raw signal data for record
# dom_bp - dominant Before Pulse peak index
# rec_bp - recessive Before Pulse peak index
# Output: dom_ssv - dominant Steady State Value
# rec_ssv - recessive Steady State Value
# dom_sse - dominant Steady State Error
# rec_sse - recessive Steady State Error
# Perform moving average filter, width=19
w = 19
arr_avg = np.convolve(self.array, np.ones(w), 'valid') / w
arr_avg = np.insert(arr_avg, 0, arr_avg[0]*np.ones(w-1), axis=0)
# Extract Steady State Value from previous Steady State Index
dom_ssv_idx = self.rec_bp
rec_ssv_idx = self.dom_bp
self.dom_ssv = arr_avg[dom_ssv_idx]
self.rec_ssv = arr_avg[rec_ssv_idx]
# Calculate Steady State Error
self.dom_sse = arr_avg[dom_ssv_idx] - 3.5
self.rec_sse = arr_avg[rec_ssv_idx] - 2.5
def PercentOvershoot(self):
##### Percent Overshoot
# Input: array - raw signal data for record
# dom_pp - dominant Before Pulse peak index
# rec_pp - recessive Before Pulse peak index
# dom_ssv - dominant Steady State Value
# rec_ssv - recessive Steady State Value
# Output: dom_po - dominant Percent Overshoot
# rec_po - recessive Percent Overshoot
dom_pv = self.array[self.dom_pp]
rec_pv = self.array[self.rec_pp]
try:
self.dom_po = 100 * (dom_pv - self.dom_ssv) / self.dom_ssv
self.rec_po = 100 * (self.rec_ssv - rec_pv) / self.rec_ssv
except:
self.dom_po = 100 * (dom_pv - np.average(self.dom_ssv)) / np.average(self.dom_ssv)
self.rec_po = 100 * (np.average(self.rec_ssv) - rec_pv) / np.average(self.rec_ssv)
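    # Worked example (illustrative): for a dominant pulse that peaks at 4.2 V
    # and settles to a steady-state value of 3.5 V, the overshoot computed
    # above is 100 * (4.2 - 3.5) / 3.5 = 20 %. The recessive case mirrors this
    # with the peak below the steady-state value, e.g. 100 * (2.5 - 1.9) / 2.5 = 24 %.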
def SettlingTime(self):
##### Settling Time
# Input: array - raw signal data for record
# dom_pp - dominant Before Pulse peak index
# rec_pp - recessive Before Pulse peak index
# dom_ssv - dominant Steady State Value
# rec_ssv - recessive Steady State Value
# sr - sample rate of the raw data
# Output: dom_st_s - dominant Settling Time (s)
# rec_st_s - recessive Settling Time (s)
ss_rng = 0.05 # 5% Steady State Range of 1V Vpp design
# Find index and time of settling point (dominant)
w = 3
arr_avg1 = np.convolve(np.abs(self.array-np.average(self.dom_ssv)), np.ones(w), 'valid') / w
arr_avg1 = np.insert(arr_avg1, 0, arr_avg1[0]*np.ones(w-1), axis=0)
arr_avg11 = np.abs(np.round(arr_avg1,decimals=2))
dom_st_idx = np.where(arr_avg11 <= ss_rng)[0]
dom_st = np.zeros(self.dom_pp.size)
if dom_st_idx.size != 0:
for i in np.arange(self.dom_pp.size):
dom_st_idx[dom_st_idx <= self.dom_pp[i]] = -self.array.size
j = np.where(
np.min(np.abs(dom_st_idx - self.dom_pp[i]))
== np.abs(dom_st_idx - self.dom_pp[i])
)[0][-1]
dom_st[i] = dom_st_idx[j]
dom_st = dom_st.astype(int)
else:
            # No settling index found: fall back to the next peak / end of record
            dom_st = np.concatenate((self.dom_pp[1:], [self.array.size])).astype(int)
        self.dom_st_s = (dom_st - self.dom_pp)/Record.sr
        # Find index and time of settling point (recessive)
w = 3
arr_avg2 = np.convolve(np.average(self.dom_ssv)-self.array, np.ones(w), 'valid') / w
arr_avg2 = np.insert(arr_avg2, 0, arr_avg2[0]*np.ones(w-1), axis=0)
arr_avg22 = np.abs(np.round(arr_avg2,decimals=2))
rec_st_idx = np.where(arr_avg22 <= ss_rng)[0]
rec_st = np.zeros(self.rec_pp.size)
for i in np.arange(self.rec_pp.size):
rec_st_idx[rec_st_idx <= self.rec_pp[i]] = -self.array.size
j = np.where(
np.min(np.abs(rec_st_idx - self.rec_pp[i]))
== np.abs(rec_st_idx - self.rec_pp[i])
)[0][-1]
rec_st[i] = rec_st_idx[j]
rec_st = rec_st.astype(int)
self.rec_st_s = (rec_st - self.rec_pp)/Record.sr
def RiseTime(self):
##### Rise Time
# Input: array - raw signal data for record
# dom_pp - dominant Pulse Peak index
# rec_pp - recessive Pulse Peak index
# dom_bp - dominant Before Pulse peak index
# rec_bp - recessive Before Pulse peak index
# dom_ssv - dominant Steady State Value
# rec_ssv - recessive Steady State Value
# sr - sample rate of the raw data
        # Output: dom_rt_s - dominant Rise Time (s)
        #         rec_rt_s - recessive Rise Time (s)
# Find index and time of rise point (dominant)
dom_rt_ts = (self.array.copy() - np.average(self.rec_ssv) <= 1).astype(int)
dom_rt_idx = np.where(dom_rt_ts == 1)[0]
dom_rt = np.zeros(self.dom_pp.size)
for i in np.arange(self.dom_pp.size):
j = np.where(np.min(np.abs(dom_rt_idx - self.dom_pp[i]))
== np.abs(dom_rt_idx - self.dom_pp[i]))[0][-1]
dom_rt[i] = dom_rt_idx[j]
dom_rt = dom_rt.astype(int)
self.dom_rt_s = (dom_rt - self.dom_bp)/Record.sr
# Find index and time of rise point (recessive)
rec_rt_ts = (-self.array.copy() + np.average(self.dom_ssv) <= 1).astype(int)
rec_rt_idx = np.where(rec_rt_ts == 1)[0]
rec_rt = np.zeros(self.rec_pp.size)
for i in np.arange(self.rec_pp.size):
j = np.where(np.min(np.abs(rec_rt_idx - self.rec_pp[i]))
== np.abs(rec_rt_idx - self.rec_pp[i]))[0][-1]
rec_rt[i] = rec_rt_idx[j]
rec_rt = rec_rt.astype(int)
self.rec_rt_s = (rec_rt - self.rec_bp)/Record.sr
def DelayTime(self):
##### Delay Time
# Input: array - raw signal data for record
# dom_pp - dominant Pulse Peak index
# rec_pp - recessive Pulse Peak index
# dom_bp - dominant Before Pulse peak index
# rec_bp - recessive Before Pulse peak index
# dom_ssv - dominant Steady State Value
# rec_ssv - recessive Steady State Value
# sr - sample rate of the raw data
        # Output: dom_dt_s - dominant Delay Time (s)
        #         rec_dt_s - recessive Delay Time (s)
# Find index and time of delay point (dominant)
dom_dt_ts = (self.array.copy() - np.average(self.rec_ssv) <= 0.5).astype(int)
dom_dt_idx = np.where(dom_dt_ts == 1)[0]
dom_dt = np.zeros(self.dom_pp.size)
for i in np.arange(self.dom_pp.size):
j = np.where(np.min(np.abs(dom_dt_idx - self.dom_pp[i]))
== np.abs(dom_dt_idx - self.dom_pp[i]))[0][-1]
dom_dt[i] = dom_dt_idx[j]
dom_dt = dom_dt.astype(int)
self.dom_dt_s = (dom_dt - self.dom_bp)/Record.sr
# Find index and time of delay point (recessive)
rec_dt_ts = (-self.array.copy() + np.average(self.dom_ssv) <= 0.5).astype(int)
rec_dt_idx = np.where(rec_dt_ts == 1)[0]
rec_dt = np.zeros(self.rec_pp.size)
for i in np.arange(self.rec_pp.size):
j = np.where(np.min(np.abs(rec_dt_idx - self.rec_pp[i]))
== np.abs(rec_dt_idx - self.rec_pp[i]))[0][-1]
rec_dt[i] = rec_dt_idx[j]
rec_dt = rec_dt.astype(int)
self.rec_dt_s = (rec_dt - self.rec_bp)/Record.sr
def RunSpectralAnalysis(self):
##### Spectral Analysis
# Run the following methods:
#
# + Spectral Density Binning
# + Signal-to-Noise Ratio
# + Median Frequency
# + Mean Frequency
#
# Features will be processed for both
# Dominant and Recessive CAN High bits
self.SpectralDensityBinning()
self.SignalToNoiseRatio()
self.MeanMedianFrequency()
def SpectralDensityBinning(self):
##### Bin Spectral Density
index_shift = -5 # Include some steady state info from prev pulse
dom_pp_sd = self.dom_pp.copy() + index_shift
rec_pp_sd = self.rec_pp.copy() + index_shift
# Find the start/end pulse indices
if self.dom_pp[0] <= self.rec_pp[0]:
if len(self.dom_pp) > len(self.rec_pp):
dom_pp_sd = dom_pp_sd[0:-1]
idx_dom_se = np.array([dom_pp_sd,rec_pp_sd])
idx_rec_se = np.array([rec_pp_sd[0:-1],dom_pp_sd[1:]])
else:
if len(self.rec_pp) > len(self.dom_pp):
rec_pp_sd = rec_pp_sd[0:-1]
idx_rec_se = np.array([rec_pp_sd,dom_pp_sd])
idx_dom_se = np.array([dom_pp_sd[0:-1],rec_pp_sd[1:]])
# Remove pulses that don't provide enough steady-state information from the prev pulse
if idx_dom_se[0][0] < -index_shift:
idx_dom_se = np.array([idx_dom_se[0][1:],idx_dom_se[1][1:]])
if idx_rec_se[0][0] < -index_shift:
idx_rec_se = np.array([idx_rec_se[0][1:],idx_rec_se[1][1:]])
        # Check for out-of-order index error
if idx_dom_se[0][0] > idx_dom_se[1][0]:
temp1 = np.array([idx_dom_se[1],idx_dom_se[0]])
temp2 = np.array([idx_dom_se[0],idx_rec_se[1]])
idx_dom_se = temp2
idx_rec_se = temp1
# Save dom pulse info to parent method variable dom_pulse_data
for i in np.arange(idx_dom_se.shape[1]):
self.dom_pulse_data.append(self.array[idx_dom_se[0][i]:idx_dom_se[1][i]])
# Save dom pulse info to parent method variable rec_pulse_data
for i in np.arange(idx_rec_se.shape[1]):
self.rec_pulse_data.append(self.array[idx_rec_se[0][i]:idx_rec_se[1][i]])
# Reset indices
idx_dom_se = idx_dom_se - index_shift
idx_rec_se = idx_rec_se - index_shift
# Bin power densities
def binned_sd(Pxx_den, nbins):
bs = Pxx_den.size/nbins
bs = round(bs)
Pxx_hist = []
for i in np.arange(nbins):
idx_s = i*bs
idx_e = (i+1)*bs
if idx_e >= Pxx_den.size:
idx_e = Pxx_den.size - 1
Pxx_hist.append(np.average(Pxx_den[idx_s:idx_e]))
Pxx_hist = np.nan_to_num(Pxx_hist)
return Pxx_hist
# Select bin sizes
bin_sel = 2
dom_nbin = [15,13,10] # Bin size limited by pulse length
# Perform binning of spectral density
self.dom_sd = []
for i in np.arange(len(self.dom_pulse_data)):
f, pd = signal.welch(self.dom_pulse_data[i], Record.sr, nperseg=len(self.dom_pulse_data[i]));
self.dom_sd.append(binned_sd(pd, dom_nbin[bin_sel]))
rec_nbin = [10, 8, 5] # Bin size limited by pulse length
self.rec_sd = []
for i in np.arange(len(self.rec_pulse_data)):
f, pd = signal.welch(self.rec_pulse_data[i], Record.sr, nperseg=len(self.rec_pulse_data[i]));
self.rec_sd.append(binned_sd(pd, rec_nbin[bin_sel]))
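    # Minimal sketch of the binning idea used above (synthetic input assumed,
    # not taken from a real record): estimate the power spectral density of a
    # short pulse with Welch's method and average it into a fixed number of
    # bins so every pulse yields a feature vector of the same length.
    #
    #   pulse = np.concatenate([2.5 * np.ones(5), 3.5 * np.ones(35)])  # fake step
    #   f, pxx = signal.welch(pulse, Record.sr, nperseg=len(pulse))
    #   bs = round(pxx.size / 10)                                      # ~10 bins
    #   feats = [np.average(pxx[i*bs:(i+1)*bs]) for i in range(10)]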
def SignalToNoiseRatio(self):
index_shift = -5
self.dom_snr = []
for i in np.arange(len(self.dom_pulse_data)):
cur_array = self.dom_pulse_data[i]
signl = (np.arange(len(cur_array)) > -index_shift-1).astype(float)*np.average(self.dom_ssv) + \
(np.arange(len(cur_array)) <= -index_shift-1).astype(float)*np.average(self.rec_ssv)
noise = signl - cur_array
f, s_pd = signal.welch(signl, Record.sr, nperseg=len(signl));
f, n_pd = signal.welch(noise, Record.sr, nperseg=len(noise));
Ps = sum(s_pd)
Pn = sum(n_pd)
            if Pn == 0:
                self.dom_snr.append(np.nan)
                continue
self.dom_snr.append(10*np.log10(Ps/Pn))
self.rec_snr = []
for i in np.arange(len(self.rec_pulse_data)):
cur_array = self.rec_pulse_data[i]
signl = (np.arange(len(cur_array)) > -index_shift-2).astype(float)*np.average(self.rec_ssv) + \
(np.arange(len(cur_array)) <= -index_shift-2).astype(float)*np.average(self.dom_ssv)
noise = signl - cur_array
f, s_pd = signal.welch(signl, Record.sr, nperseg=len(signl))
f, n_pd = signal.welch(noise, Record.sr, nperseg=len(noise))
Ps = sum(s_pd)
Pn = sum(n_pd)
if Pn == 0:
self.rec_snr.append(np.nan)
continue
self.rec_snr.append(10*np.log10(Ps/Pn))
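    # Worked example (illustrative): if the ideal step carries Ps = 2.0 of
    # summed spectral power and the residual (ideal minus measured) carries
    # Pn = 0.02, the SNR recorded above is 10 * log10(2.0 / 0.02) = 20 dB.
    # A noisier transceiver lowers Ps/Pn and therefore the dB value.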
def MeanMedianFrequency(self):
        self.dom_mdfr = []
        self.dom_mnfr = []
for i in np.arange(len(self.dom_pulse_data)):
cur_pulse = self.dom_pulse_data[i]
f, pd = signal.welch(cur_pulse, Record.sr, nperseg=len(cur_pulse))
spl = splrep(f, pd, k=1)
x2 = np.arange(f[0], f[-1],0.01)
y2 = splev(x2, spl)
y21 = y2/np.sum(y2) # Normalize spectra
            y22 = np.cumsum(y21) # Cumulative sum (CDF of the PSD)
y23 = y22-0.5 # Subtract 50% of energy
y24 = abs(y23) # Abs value to create a minima
y25 = np.where(np.min(y24) == y24)[0][-1] # Locate minima index
self.dom_mdfr.append(x2[y25]) # Retrieve minima frequency
self.dom_mnfr.append(np.sum(pd*f)/np.sum(pd))
self.rec_mnfr = []
self.rec_mdfr = []
for i in np.arange(len(self.rec_pulse_data)):
cur_pulse = self.rec_pulse_data[i]
f, pd = signal.welch(cur_pulse, Record.sr, nperseg=len(cur_pulse))
spl = splrep(f, pd, k=1)
x2 = np.arange(f[0], f[-1],0.01)
y2 = splev(x2, spl)
y21 = y2/np.sum(y2) # Normalize spectra
            y22 = np.cumsum(y21) # Cumulative sum (CDF of the PSD)
y23 = y22-0.5 # Subtract 50% of energy
y24 = abs(y23) # Abs value to create a minima
y25 = np.where(np.min(y24) == y24)[0][-1] # Locate minima index
self.rec_mdfr.append(x2[y25]) # Retrieve minima frequency
self.rec_mnfr.append(np.sum(pd*f)/np.sum(pd))
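    # Sketch of the median/mean frequency definitions used above (toy spectrum
    # assumed, for illustration only): given frequencies f and densities pd,
    # the mean frequency is the PSD-weighted average and the median frequency
    # is where the cumulative normalized PSD crosses 0.5.
    #
    #   f  = np.array([0., 100., 200., 300.])
    #   pd = np.array([1., 4., 4., 1.])
    #   mean_freq   = np.sum(pd * f) / np.sum(pd)        # 150.0 Hz
    #   cdf         = np.cumsum(pd / np.sum(pd))         # [0.1, 0.5, 0.9, 1.0]
    #   median_freq = f[np.argmin(np.abs(cdf - 0.5))]    # 100.0 Hz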
def OutlierCount(self):
##### Outlier Count
# Calculates the standard deviation for each feature and creates a binary
# mask of pulses that exceed the standard deviation threshold
# Binary masks are added to determine total number of deviations per pulse
# across all features
std = 1.5 # Threshold
def fix_size_disparity(in1, in2):
if in1.size > in2.size:
in2 = np.concatenate((in2,np.zeros(in1.size - in2.size))).astype(int)
elif in2.size > in1.size:
in1 = np.concatenate((in1,np.zeros(in2.size - in1.size))).astype(int)
return in1, in2
# Outlier check and size correction
self.dom_pp, self.rec_pp = fix_size_disparity(self.dom_pp, self.rec_pp)
self.dom_bp, self.rec_bp = fix_size_disparity(self.dom_bp, self.rec_bp)
self.dom_pt, self.rec_pt = fix_size_disparity(self.dom_pt, self.rec_pt)
dom_pt_out = (np.abs(self.dom_pt-np.average(self.dom_pt)) >
std*np.std(self.dom_pt)).astype(int)
rec_pt_out = (np.abs(self.rec_pt-np.average(self.rec_pt)) >
std*np.std(self.rec_pt)).astype(int)
pt_out = dom_pt_out + rec_pt_out
self.dom_ssv, self.rec_ssv = fix_size_disparity(self.dom_ssv, self.rec_ssv)
dom_ssv_out = (np.abs(self.dom_ssv-np.average(self.dom_ssv)) >
std*np.std(self.dom_ssv)).astype(int)
rec_ssv_out = (np.abs(self.rec_ssv-np.average(self.rec_ssv)) >
std*np.std(self.rec_ssv)).astype(int)
ssv_out = dom_ssv_out + rec_ssv_out
self.dom_sse, self.rec_sse = fix_size_disparity(self.dom_sse, self.rec_sse)
dom_sse_out = (np.abs(self.dom_sse-np.average(self.dom_sse)) >
std*np.std(self.dom_sse)).astype(int)
rec_sse_out = (np.abs(self.rec_sse-np.average(self.rec_sse)) >
std*np.std(self.rec_sse)).astype(int)
sse_out = dom_sse_out + rec_sse_out
self.dom_po, self.rec_po = fix_size_disparity(self.dom_po, self.rec_po)
dom_po_out = (np.abs(self.dom_po-np.average(self.dom_po)) >
std*np.std(self.dom_po)).astype(int)
rec_po_out = (np.abs(self.rec_po-np.average(self.rec_po)) >
std*np.std(self.rec_po)).astype(int)
po_out = dom_po_out + rec_po_out
self.dom_st_s, self.rec_st_s = fix_size_disparity(self.dom_st_s, self.rec_st_s)
dom_st_s_out = (np.abs(self.dom_st_s-np.average(self.dom_st_s)) >
std*np.std(self.dom_st_s)).astype(int)
rec_st_s_out = (np.abs(self.rec_st_s-np.average(self.rec_st_s)) >
std*np.std(self.rec_st_s)).astype(int)
st_s_out = dom_st_s_out + rec_st_s_out
self.dom_rt_s, self.rec_rt_s = fix_size_disparity(self.dom_rt_s, self.rec_rt_s)
dom_rt_s_out = (np.abs(self.dom_rt_s-np.average(self.dom_rt_s)) >
std*np.std(self.dom_rt_s)).astype(int)
rec_rt_s_out = (np.abs(self.rec_rt_s-np.average(self.rec_rt_s)) >
std*np.std(self.rec_rt_s)).astype(int)
rt_s_out = dom_rt_s_out + rec_rt_s_out
self.dom_dt_s, self.rec_dt_s = fix_size_disparity(self.dom_dt_s, self.rec_dt_s)
dom_dt_s_out = (np.abs(self.dom_dt_s-np.average(self.dom_dt_s)) >
std*np.std(self.dom_dt_s)).astype(int)
rec_dt_s_out = (np.abs(self.rec_dt_s-np.average(self.rec_dt_s)) >
std*np.std(self.rec_dt_s)).astype(int)
dt_s_out = dom_dt_s_out + rec_dt_s_out
self.outlier_count = pt_out + ssv_out + sse_out + \
po_out + st_s_out + rt_s_out + dt_s_out
return self.outlier_count
def RemoveOutliers(self):
##### Remove Outlier Pulses
# Checks outlier count for each pulse and removes pulses that exceed
# the deviation threshold
dev = 6
noutlier_idx = np.where(self.outlier_count < dev + 1)[0]
self.dom_pp = self.dom_pp[noutlier_idx]
self.rec_pp = self.rec_pp[noutlier_idx]
self.dom_bp = self.dom_bp[noutlier_idx]
self.rec_bp = self.rec_bp[noutlier_idx]
self.dom_pt = self.dom_pt[noutlier_idx]
self.rec_pt = self.rec_pt[noutlier_idx]
self.dom_ssv = self.dom_ssv[noutlier_idx]
self.rec_ssv = self.rec_ssv[noutlier_idx]
self.dom_sse = self.dom_sse[noutlier_idx]
self.rec_sse = self.rec_sse[noutlier_idx]
self.dom_po = self.dom_po[noutlier_idx]
self.rec_po = self.rec_po[noutlier_idx]
self.dom_st_s = self.dom_st_s[noutlier_idx]
self.rec_st_s = self.rec_st_s[noutlier_idx]
self.dom_rt_s = self.dom_rt_s[noutlier_idx]
self.rec_rt_s = self.rec_rt_s[noutlier_idx]
self.dom_dt_s = self.dom_dt_s[noutlier_idx]
self.rec_dt_s = self.rec_dt_s[noutlier_idx]
self.OutlierCount()
def summary(self):
print('Peak Time (s):')
print(' dom: ', self.dom_pt)
print(' avg: ', np.average(self.dom_pt))
print(' std: ', np.std(self.dom_pt))
print(' dev: ', np.abs(self.dom_pt-np.average(self.dom_pt)))
# print(' out: ', dom_pt_out)
print(' rec: ', self.rec_pt)
print(' avg: ', np.average(self.rec_pt))
print(' std: ', np.std(self.rec_pt))
print(' dev: ', np.abs(self.rec_pt-np.average(self.rec_pt)))
# print(' out: ', rec_pt_out)
print('')
print('Steady State Value (V):')
print(' dom: ', self.dom_ssv)
print(' avg: ', np.average(self.dom_ssv))
print(' std: ', np.std(self.dom_ssv))
print(' dev: ', np.abs(self.dom_ssv-np.average(self.dom_ssv)))
# print(' out: ', dom_ssv_out)
print(' rec: ', self.rec_ssv)
print(' avg: ', np.average(self.rec_ssv))
print(' std: ', np.std(self.rec_ssv))
print(' dev: ', np.abs(self.rec_ssv-np.average(self.rec_ssv)))
# print(' out: ', rec_ssv_out)
print('')
print('Steady State Error (V):')
print(' dom: ', self.dom_sse)
print(' avg: ', np.average(self.dom_sse))
print(' std: ', np.std(self.dom_sse))
print(' dev: ', np.abs(self.dom_sse-np.average(self.dom_sse)))
# print(' out: ', dom_sse_out)
print(' rec: ', self.rec_sse)
print(' avg: ', np.average(self.rec_sse))
print(' std: ', np.std(self.rec_sse))
print(' dev: ', np.abs(self.rec_sse-np.average(self.rec_sse)))
# print(' out: ', rec_sse_out)
print('')
print('Percent Overshoot')
print(' dom: ', self.dom_po)
print(' avg: ', np.average(self.dom_po))
print(' std: ', np.std(self.dom_po))
print(' dev: ', np.abs(self.dom_po-np.average(self.dom_po)))
# print(' out: ', dom_po_out)
print(' rec: ', self.rec_po)
print(' avg: ', np.average(self.rec_po))
print(' std: ', np.std(self.rec_po))
print(' dev: ', np.abs(self.rec_po-np.average(self.rec_po)))
# print(' out: ', rec_po_out)
print('')
print('Settling Time (s)')
print(' dom: ', self.dom_st_s)
print(' avg: ', np.average(self.dom_st_s))
print(' std: ', np.std(self.dom_st_s))
print(' dev: ', np.abs(self.dom_st_s-np.average(self.dom_st_s)))
# print(' out: ', dom_st_s_out)
print(' rec: ', self.rec_st_s)
print(' avg: ', np.average(self.rec_st_s))
print(' std: ', np.std(self.rec_st_s))
print(' dev: ', np.abs(self.rec_st_s-np.average(self.rec_st_s)))
# print(' out: ', rec_st_s_out)
print('')
print('Rise Time (s)')
print(' dom: ', self.dom_rt_s)
print(' avg: ', np.average(self.dom_rt_s))
print(' std: ', np.std(self.dom_rt_s))
print(' dev: ', np.abs(self.dom_rt_s-np.average(self.dom_rt_s)))
# print(' out: ', dom_rt_s_out)
print(' rec: ', self.rec_rt_s)
print(' avg: ', np.average(self.rec_rt_s))
print(' std: ', np.std(self.rec_rt_s))
print(' dev: ', np.abs(self.rec_rt_s-np.average(self.rec_rt_s)))
# print(' out: ', rec_rt_s_out)
print('')
print('Delay Time (s)')
print(' dom: ', self.dom_dt_s)
print(' avg: ',
|
np.average(self.dom_dt_s)
|
numpy.average
|
import numpy as np; import pandas as pd
from pyg_timeseries._math import stdev_calculation_ewm, skew_calculation, cor_calculation_ewm, covariance_calculation, corr_calculation_ewm, LR_calculation_ewm, variance_calculation_ewm, _w
from pyg_timeseries._decorators import compiled, first_, _data_state
from pyg_base import pd2np, clock, loop_all, loop, is_pd, is_df, presync, df_concat
__all__ = ['ewma', 'ewmstd', 'ewmvar', 'ewmskew', 'ewmrms', 'ewmcor', 'ewmcorr', 'ewmLR', 'ewmGLM',
'ewma_', 'ewmstd_', 'ewmskew_', 'ewmrms_', 'ewmcor_', 'ewmvar_','ewmLR_', 'ewmGLM_',]
############################################
##
## compiled functions; unfortunately, both of the alternative methods commented out below are much slower
##
###########################################
# import numba
# from numba import int32, float32 # import the types
# from numba.experimental import jitclass
# spec = [
# ('t0', float32), # a simple scalar field
# ('t1', float32), # a simple scalar field
# ('t', float32), # a simple scalar field
# ('a', float32), # a simple scalar field
# ('w', float32), # a simple scalar field
# ]
# @jitclass(spec)
# class c_ewma(object):
# def __init__(self, a, t, t0, t1, w):
# self.a = a
# self.t = t
# self.t0 = t0
# self.t1 = t1
# self.w = w
# def push(self, a, t):
# if np.isnan(a):
# return np.nan
# if t == self.t:
# self.t1 = self.t1 + (1-self.w) * (a - self.a)
# return self.t1/self.t0
# else:
# p = self.w if np.isnan(t) else self.w**(t-self.t)
# self.t0 = self.t0 * p + (1-self.w)
# self.t1 = self.t1 * p + (1-self.w) * a
# return self.t1/self.t0
# @compiled
# def _ewma_(ai, ti, ai0, ti0, t0, t1, w):
# """
# we receive
# - current values, (ai, ti)
# - previous values (ai0, ti0)
# - current state of the moments t0, t1
# - parameters, w
# We return:
# result, current values, updated moments
# res, a, t, t0, t1
# """
# if np.isnan(ai):
# res = np.nan
# return res, ai0, ti0, t0, t1
# else:
# if ti == ti0:
# t1 = t1 + (1-w) * (ai - ai0)
# res = t1/t0
# return res, ai, ti, t0, t1
# else:
# p = w**(ti-ti0)
# t0 = t0 * p + (1-w)
# t1 = t1 * p + (1-w) * ai
# res = t1/t0
# return res, ai, ti, t0, t1
@pd2np
@compiled
def _ewma(a, n, time, t = np.nan, t0 = 0, t1 = 0):
if n == 1:
return a, t, t0, t1
w = _w(n)
res = np.empty_like(a)
i0 = 0
for i in range(a.shape[0]):
if np.isnan(a[i]):
res[i] = np.nan
else:
if time[i] == t:
t1 = t1 + (1-w) * (a[i] - a[i0])
else:
p = w if np.isnan(time[i]) else w**(time[i]-t)
t0 = t0 * p + (1-w)
t1 = t1 * p + (1-w) * a[i]
t = time[i]
i0 = i
res[i] = np.nan if t0 == 0 else t1/t0
return res, t, t0, t1
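# A minimal, uncompiled reference for the recursion implemented in _ewma above.
# Illustrative only: it assumes an evenly spaced clock (time increments of
# exactly 1), so the decay factor p collapses to the constant w = _w(n). For
# fully observed, regularly spaced data this should broadly track a pandas
# ewm(n).mean(), though the compiled version above is the one actually used.
def _ewma_reference(a, n):
    w = _w(n)
    t0 = t1 = 0.0
    out = np.full(len(a), np.nan)
    for i, v in enumerate(a):
        if np.isnan(v):
            continue
        t0 = t0 * w + (1 - w)       # decayed total weight
        t1 = t1 * w + (1 - w) * v   # decayed weighted sum
        out[i] = t1 / t0            # weight-normalised mean
    return out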
@pd2np
@compiled
def _ewmrms(a, n, time, t = np.nan, t0 = 0., t2 = 0.):
if n == 1:
return a, t, t0, t2
w = _w(n)
res = np.empty_like(a)
i0 = 0
for i in range(a.shape[0]):
if np.isnan(a[i]):
res[i] = np.nan
else:
if time[i] == t:
t2 = t2 + (1-w) * (a[i]**2 - a[i0]**2)
else:
p = w if np.isnan(time[i]) else w**(time[i]-t)
v = a[i]
t0 = t0 * p + (1-w)
t2 = t2 * p + (1-w) * v**2
t = time[i]
i0 = i
res[i] = np.nan if t0 == 0 else np.sqrt(t2/t0)
return res, t, t0, t2
@pd2np
@compiled
def _ewmstd(a, n, time, t = np.nan, t0 = 0, t1 = 0, t2 = 0, w2 = 0, min_sample = 0.25, bias = False, calculator = stdev_calculation_ewm):
if n == 1:
return np.full_like(a, 0.0), t, t0, t1, t2, w2
w = _w(n)
res = np.empty_like(a)
i0 = 0
for i in range(a.shape[0]):
if np.isnan(a[i]):
res[i] = np.nan
else:
if time[i] == t:
t1 = t1 + (1-w) * (a[i] - a[i0])
t2 = t2 + (1-w) * (a[i]**2 - a[i0]**2)
else:
p = w if np.isnan(time[i]-t) else w**(time[i]-t)
v = a[i]
t0 = t0 * p + (1-w)
w2 = w2 * p**2 + (1-w)**2
t1 = t1 * p + (1-w) * v
t2 = t2 * p + (1-w) * v**2
t = time[i]
i0 = i
res[i] = calculator(t0, t1, t2, w2 = w2, min_sample = min_sample, bias = bias)
return res, t, t0, t1, t2, w2
@pd2np
@compiled
def _ewmcor(a, b, ba, n, time, t = np.nan, t0 = 0, a1 = 0, a2 = 0, b1 = 0, b2 = 0, ab = 0, w2 = 0, min_sample = 0.25, bias = False):
"""
_ewmcor(a, b, ba, n, time, t)
n = 50
t = np.nan; t0 = 0; a1 = 0; a2 = 0; b1 = 0; b2 = 0; ab = 0; w2 = 0; min_sample = 0.25; bias = False
data, t, t0, a1, a2, b1, b2, ab, w2 = _ewmcor(a, b, ba, 200, time, t = np.nan, t0 = 0, a1 = 0, a2 = 0, b1 = 0, b2 = 0, ab = 0, w2 = 0, min_sample = 0.25, bias = False)
pd.Series(data, drange(-9999)).plot()
"""
if n == 1:
return np.full_like(a, np.nan), t, t0, a1, a2, b1, b2, ab, w2
w = _w(n)
res = np.empty_like(a)
i0 = 0
for i in range(a.shape[0]):
if np.isnan(a[i]) or np.isnan(b[i]):
res[i] = np.nan
else:
if time[i] == t:
a1 = a1 + (1-w) * (a[i] - a[i0])
a2 = a2 + (1-w) * (a[i]**2 - a[i0]**2)
b1 = b1 + (1-w) * (b[i] - b[i0])
b2 = b2 + (1-w) * (b[i]**2 - b[i0]**2)
ab = ab + (1-w) * (ba[i] - ba[i0])
else:
p = w if np.isnan(time[i]) else w**(time[i]-t)
t0 = t0 * p + (1-w)
w2 = w2 * p**2 + (1-w)**2
a1 = a1 * p + (1-w) * a[i]
a2 = a2 * p + (1-w) * a[i]**2
b1 = b1 * p + (1-w) * b[i]
b2 = b2 * p + (1-w) * b[i]**2
ab = ab * p + (1-w) * ba[i]
t = time[i]
i0 = i
            res[i] = cor_calculation_ewm(t0 = t0, a1 = a1, a2 = a2, b1 = b1, b2 = b2, ab = ab, w2 = w2, min_sample = min_sample, bias = bias)
return res, t, t0, a1, a2, b1, b2, ab, w2
@compiled
def _ewmcorr(a, n, a0 = None, a1 = None, a2 = None, aa0 = None, aa1 = None, w2 = None, min_sample = 0.25, bias = False):
"""
"""
m = a.shape[1]
if n == 1:
return np.full((a.shape[0], m, m), np.nan), a0, a1, a2, aa0, aa1, w2
p = w = _w(n)
v = 1 - w
res = np.zeros((a.shape[0], m, m))
a0 = np.zeros(m) if a0 is None else a0
a1 = np.zeros(m) if a1 is None else a1
a2 = np.zeros(m) if a2 is None else a2
aa1 = np.zeros((m,m)) if aa1 is None else aa1
aa0 = np.zeros((m,m)) if aa0 is None else aa0
w2 = np.zeros(m) if w2 is None else w2
for i in range(a.shape[0]):
for j in range(m):
if ~np.isnan(a[i,j]):
w2[j] = w2[j] * p**2 + v**2
a0[j] = a0[j] * p + v
a1[j] = a1[j] * p + v * a[i,j]
a2[j] = a2[j] * p + v * a[i,j] ** 2
for j in range(m):
res[i, j, j] = 1.
if np.isnan(a[i,j]):
res[i, j, :] = np.nan #if i == 0 else res[i-1, j, :] # we ffill correlations
res[i, :, j] = np.nan #if i == 0 else res[i-1, :, j]
else:
for k in range(j):
if ~np.isnan(a[i,k]):
aa0[j,k] = aa0[j,k] * p + v
aa1[j,k] = aa1[j,k] * p + v * a[i, j] * a[i, k]
res[i, k, j] = res[i, j, k] = corr_calculation_ewm(a0 = a0[j], a1 = a1[j], a2 = a2[j], aw2 = w2[j],
b0 = a0[k], b1 = a1[k], b2 = a2[k], bw2 = w2[k],
ab = aa1[j,k], ab0 = aa0[j,k],
min_sample = min_sample, bias = bias)
return res, a0, a1, a2, aa0, aa1, w2
@compiled
def _ewmcovar(a, n, a0 = None, a1 = None, aa0 = None, aa1 = None, min_sample = 0.25, bias = False):
"""
"""
m = a.shape[1]
if n == 1:
return np.full((a.shape[0], m, m), np.nan), a0, a1, aa0, aa1
p = w = _w(n)
v = 1 - w
res = np.zeros((a.shape[0], m, m))
a0 =
|
np.zeros(m)
|
numpy.zeros
|
import argparse
import configparser
from datetime import datetime
import json
import matplotlib.pyplot as plt
import numpy as np
import os
import pickle
import torch
import torch.nn as nn
from torch.nn.functional import softplus
from torch.optim import Adam
from torch.utils.data import DataLoader, TensorDataset
import pyro
import pyro.distributions as dist
from pyro import poutine
from pyro.distributions.torch_distribution import TorchDistribution
from pyro.infer import SVI, JitTrace_ELBO, Trace_ELBO
from pyro.optim import MultiStepLR
class Jorganizer(nn.Module):
"""Construct structured J matrix."""
def __init__(self, N):
super().__init__()
self.N = N
self._make_transfer()
def _make_transfer(self):
N = self.N
self.register_buffer("J_transf", torch.zeros(N*N, dtype=torch.long))
pos = 0
offset1 = int(N * (N - 1) / 2)
offset2 = int(N * (N + 1) / 2)
for j in range(N-1):
for jp in range(j+1, N):
self.J_transf[j * N + jp] = pos
self.J_transf[jp * N + j] = offset2 + pos
pos += 1
for j in range(N):
self.J_transf[j * N + j] = offset1 + j
def forward(self, Jraw):
diag = torch.zeros((self.N, 2, 2))
flip = torch.transpose(Jraw, 1, 2)
concat = torch.cat([Jraw, diag, flip])
reorder = concat[self.J_transf, :, :]
Jmat = reorder.reshape([self.N, self.N, 2, 2])
return Jmat
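# Illustrative note (N = 3 case, not executed anywhere in this module): the
# transfer vector built in _make_transfer maps a flat tensor of N*(N-1)/2 = 3
# upper-triangular couplings into a full N x N block matrix, so
# Jmat[j, k] = Jraw[pair(j, k)] for j < k, Jmat[k, j] is its 2x2 transpose and
# Jmat[j, j] = 0. A hypothetical check:
#
#   jorg = Jorganizer(3)
#   Jraw = torch.randn(3, 2, 2)
#   Jmat = jorg(Jraw)                                    # shape (3, 3, 2, 2)
#   assert torch.allclose(Jmat[0, 1], Jraw[0])
#   assert torch.allclose(Jmat[1, 0], Jraw[0].T)
#   assert torch.allclose(Jmat[1, 1], torch.zeros(2, 2))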
class SteinGlass(TorchDistribution):
"""
Continuous glass model with a normalized kernelized Stein discrepancy
instead of a log probability.
"""
arg_constraints = {}
def __init__(self, gene_scale, gene_mean, glass_h, glass_J,
kernel_c=1., kernel_beta=-0.5,
nksd_T=1., validate_args=None):
self.gene_scale = gene_scale
self.gene_mean = gene_mean
self.ngenes = glass_h.shape[0]
self.glass_h = glass_h
self.glass_J = glass_J
self.kernel_c = kernel_c
self.kernel_beta = torch.tensor(kernel_beta)
self.nksd_T = nksd_T
super(SteinGlass, self).__init__(
torch.Size([1]), torch.Size([self.ngenes]),
validate_args=validate_args
)
def stein_score(self, value):
"""Stein score function for model."""
# Compute inverse logits (effective spins).
invlogit = 1/(1 + torch.exp(-self.gene_scale * (
value - self.gene_mean)))
sigma = torch.cat([(1-invlogit.unsqueeze(-1)), invlogit.unsqueeze(-1)],
-1)
# Compute derivative of inverse logits.
dinvlogit = self.gene_scale * torch.prod(sigma, -1)
dsigma = torch.cat([-dinvlogit.unsqueeze(-1), dinvlogit.unsqueeze(-1)],
-1)
# Order-one term.
term_one = torch.einsum('ijk,jk->ij', dsigma, self.glass_h)
# Order-two term
term_two = torch.einsum('ijk,jakb,iab->ij', dsigma, self.glass_J,
sigma)
return term_one + term_two
def kernel_terms(self, value):
"""Compute kernel terms in NKSD."""
c, beta = self.kernel_c, self.kernel_beta / self.ngenes
xmy = value[:, None, :] - value[None, :, :]
fl2 = xmy**2
K = torch.prod((c*c + fl2)**beta, dim=2)
K = K - torch.diag(torch.diag(K))
# First derivative.
Kp = 2 * beta * xmy * (1/(c*c + fl2)) * K[:, :, None]
# Trace of second derivative.
Kpp = torch.sum(- 2 * beta / (c*c + fl2)
- 4 * beta * (beta - 1) * fl2 / (c*c + fl2)**2,
dim=2) * K
# Normalization.
Kbar = torch.sum(K)
return K, Kp, Kpp, Kbar
def log_prob(self, value):
"""
The normalized kernelized Stein discrepancy acts as a generalized
log likelihood.
"""
# Stein score.
sscore = self.stein_score(value)
# Kernel terms.
K, Kp, Kpp, Kbar = self.kernel_terms(value)
# Kernelized Stein discrepancy.
ksd = (torch.einsum('ia,ja,ij->', sscore, sscore, K) +
2 * torch.einsum('ija,ja->', Kp, sscore) +
torch.sum(Kpp))
# Normalized KSD (NKSD).
nksd = ksd / Kbar
# Negative loss.
nloss = - (value.shape[0]/self.nksd_T) * nksd
return nloss
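# Summary of the quantity returned by SteinGlass.log_prob above (describing the
# code as written, not an external reference): with kernel K(x_i, x_j), score
# s = stein_score and n data points,
#
#   KSD  = sum_ij [ s(x_i) . s(x_j) K_ij + 2 grad_xi K_ij . s(x_j) + tr(grad_xi grad_xj K_ij) ]
#   NKSD = KSD / sum_ij K_ij        (self-terms K_ii are zeroed out)
#
# and the method returns -(n / nksd_T) * NKSD, so maximising this pseudo
# log-likelihood is equivalent to minimising the normalised discrepancy.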
class DataSelector:
def __init__(self, datapoints, ngenes, PY_mix_d=2, PY_conc=-0.25,
loorf_init=0.99, learning_rate=0.01,
milestones=[], learning_gamma=1.):
# Set up SVC volume correction.
self.PY_mix_d = torch.tensor(PY_mix_d)
self.PY_conc = torch.tensor(PY_conc)
self.PY_alpha = torch.tensor(0.5)
assert PY_conc > -self.PY_alpha, (
'Pitman-Yor model constraint violated.')
# Compute Pitman-Yor mixture model effective dimension prefactor.
cB = (self.PY_mix_d / self.PY_alpha) * torch.exp(
torch.lgamma(self.PY_conc + 1.) -
torch.lgamma(self.PY_conc + self.PY_alpha))
self.cfactor = (0.5 * cB * np.sqrt(datapoints)
* (np.log(2 * np.pi) - np.log(datapoints)))
# Set up stochastic optimization of SVC.
self.select_phi = (loorf_init * torch.ones(ngenes)
).requires_grad_(True)
self.loorf_optimizer = Adam([self.select_phi], lr=learning_rate)
self.loorf_scheduler = torch.optim.lr_scheduler.MultiStepLR(
self.loorf_optimizer, milestones, gamma=learning_gamma)
def correction(self, select):
"""SVC volume correction."""
return (1 - select).sum(-1) * self.cfactor
def _logit(self):
"""Logit of stochastic selection weights."""
return 1/(1 + torch.exp(-self.select_phi))
def sample_select(self):
"""Sample selection variable."""
probs = self._logit()
bern = torch.distributions.bernoulli.Bernoulli(probs)
return bern.sample().to(torch.bool)
def step(self, selects, nelbos):
"""Update stochastic selection biases with LOORF estimator."""
selects = selects.to(torch.double)
# Compute total SVC.
fb = self.correction(selects) - nelbos
# LOORF gradient estimator.
n = len(fb)
baseline = fb.mean()
grad_est = (1/(n-1)) * torch.sum((fb - baseline)[:, None] * (
selects - self._logit()[None, :]),
dim=0)
# Update.
self.select_phi.grad = -grad_est
self.loorf_optimizer.step()
self.loorf_scheduler.step()
return baseline
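# Note on DataSelector.step above (describing the code as written): with k
# Bernoulli selection samples b_1..b_k, objective values f_1..f_k (volume
# correction minus negative ELBO) and inclusion probabilities p, the
# leave-one-out REINFORCE (LOORF) gradient estimate used is
#
#   g = 1/(k-1) * sum_s (f_s - mean(f)) * (b_s - p)
#
# Assigning -g to select_phi.grad and stepping Adam therefore ascends g, moving
# phi toward selections with higher expected objective.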
class SparseGlass(nn.Module):
"""
Stein glass with sparsity promoting prior on interactions.
"""
def __init__(self, ngenes,
prior_gene_scale_mn=0., prior_gene_scale_sd=1.,
prior_gene_scale_lbound=1.,
prior_gene_mean_mn=1., prior_gene_mean_sd=0.,
prior_glass_h_sd=1., prior_glass_J_scale=0.1,
kernel_c=1., kernel_beta=-0.5, nksd_T=1.,
cuda=False, pin_memory=False):
super().__init__()
self.ngenes = ngenes
self.ninteracts = int(self.ngenes * (self.ngenes - 1) / 2)
self.prior_gene_scale_mn = prior_gene_scale_mn
self.prior_gene_scale_sd = prior_gene_scale_sd
self.prior_gene_scale_lbound = prior_gene_scale_lbound
self.prior_gene_mean_mn = prior_gene_mean_mn
self.prior_gene_mean_sd = prior_gene_mean_sd
self.prior_glass_h_sd = prior_glass_h_sd
self.prior_glass_J_scale = prior_glass_J_scale
self.kernel_c = kernel_c
self.kernel_beta = kernel_beta
self.nksd_T = nksd_T
self.cuda = cuda
self.pin_memory = pin_memory
self.jorganizer = Jorganizer(ngenes)
def model(self, data, select, local_scale):
# Effective spin parameters.
gene_scale = pyro.sample(
"gene_scale",
dist.Normal(torch.tensor(self.prior_gene_scale_mn),
torch.tensor(self.prior_gene_scale_sd)))
gene_scale = softplus(gene_scale) + self.prior_gene_scale_lbound
gene_mean = pyro.sample(
"gene_mean",
dist.Normal(torch.tensor(self.prior_gene_mean_mn),
torch.tensor(self.prior_gene_mean_sd)))
gene_mean = softplus(gene_mean)
# First order energies.
glass_h = pyro.sample(
"glass_h",
dist.Normal(torch.zeros((self.ngenes, 2)),
torch.tensor(self.prior_glass_h_sd)).to_event(2)
)
# Second order energies.
glass_J = pyro.sample(
"glass_J",
dist.Laplace(torch.zeros((self.ninteracts, 2, 2)),
torch.tensor(self.prior_glass_J_scale)).to_event(3)
)
glass_J = self.jorganizer.forward(glass_J)
# Take selected subset.
data = data[:, select]
glass_h = glass_h[select]
glass_J = glass_J[select][:, select]
# Compute NKSD term.
with pyro.plate("batch", data.shape[0]):
with poutine.scale(scale=local_scale):
# Observations.
pyro.sample(
"obs_seq",
SteinGlass(
gene_scale, gene_mean, glass_h, glass_J,
kernel_c=self.kernel_c, kernel_beta=self.kernel_beta,
nksd_T=self.nksd_T
),
obs=data,
)
def guide(self, data, select, local_scale):
gene_scale_mn = pyro.param(
"gene_scale_mn", torch.tensor(0.)
)
gene_scale_sd = pyro.param(
"gene_scale_sd", torch.tensor(0.)
)
pyro.sample("gene_scale",
dist.Normal(gene_scale_mn,
softplus(gene_scale_sd)))
gene_mean_mn = pyro.param(
"gene_mean_mn", torch.tensor(0.)
)
gene_mean_sd = pyro.param(
"gene_mean_sd", torch.tensor(0.)
)
pyro.sample("gene_mean",
dist.Normal(gene_mean_mn,
softplus(gene_mean_sd)))
glass_h_mn = pyro.param(
"glass_h_mn", torch.zeros((self.ngenes, 2))
)
glass_h_sd = pyro.param(
"glass_h_sd", torch.zeros((self.ngenes, 2))
)
pyro.sample("glass_h",
dist.Normal(glass_h_mn,
softplus(glass_h_sd)).to_event(2))
glass_J_mn = pyro.param(
"glass_J_mn", torch.zeros((self.ninteracts, 2, 2))
)
glass_J_sd = pyro.param(
"glass_J_sd", torch.zeros((self.ninteracts, 2, 2))
)
pyro.sample("glass_J",
dist.Laplace(glass_J_mn,
softplus(glass_J_sd)).to_event(3))
def fit_svi(self, dataset, epochs=2, batch_size=100, scheduler=None,
jit=False, learning_rate=0.01,
milestones=[], learning_gamma=1., PY_mix_d=2, PY_conc=-0.25,
loorf_samples=10, loorf_init=1.0, select_all=False,
early_stop=True, smooth_wind=2000):
"""Fit via stochastic variational inference."""
# GPU.
if self.cuda:
device = torch.device("cuda")
else:
device = torch.device("cpu")
# Initialize guide.
self.guide(None, None, None)
dataload = DataLoader(
dataset,
batch_size=batch_size,
shuffle=True,
pin_memory=self.pin_memory,
generator=torch.Generator(device=device),
)
N = len(dataset)
# Optimizer for model variational approximation.
scheduler = MultiStepLR(
{
"optimizer": Adam,
"optim_args": {"lr": learning_rate},
"milestones": milestones,
"gamma": learning_gamma,
}
)
if not select_all:
# Optimizer for data selection.
dataselector = DataSelector(
N, self.ngenes, PY_mix_d=PY_mix_d, PY_conc=PY_conc,
loorf_init=loorf_init, learning_rate=learning_rate,
milestones=milestones, learning_gamma=learning_gamma)
else:
select = torch.ones(self.ngenes).to(torch.bool)
select_prob = torch.ones(self.ngenes)
select_gap = torch.tensor(0.5)
# Setup stochastic variational inference.
if jit:
elbo = JitTrace_ELBO(ignore_jit_warnings=True)
else:
elbo = Trace_ELBO()
svi = SVI(self.model, self.guide, scheduler, loss=elbo)
# Run inference.
svcs = []
select_gaps = []
t0 = datetime.now()
stop = False
for epoch in range(epochs):
for data in dataload:
data = data[0]
if self.cuda:
data = data.cuda()
# Take SVI step.
if not select_all:
select = dataselector.sample_select()
nelbo = svi.step(data, select, torch.tensor(N / data.shape[0]))
scheduler.step()
# Draw LOORF samples.
if not select_all:
selects = torch.zeros((loorf_samples, self.ngenes),
dtype=torch.bool)
nelbos = torch.zeros(loorf_samples)
for s in range(loorf_samples):
selects[s] = dataselector.sample_select()
nelbos[s] = svi.evaluate_loss(
data, selects[s],
torch.tensor(N / data.shape[0]))
# Update stochastic selection.
svc = dataselector.step(selects, nelbos)
select_prob = dataselector._logit().detach()
select_gap = torch.abs(select_prob - 0.5).min()
else:
svc = torch.tensor(-nelbo)
# Record.
svcs.append(svc.cpu())
select_gaps.append(select_gap.cpu())
if early_stop and len(svcs) > 2 * smooth_wind:
mu_m_var_0 = (np.mean(svcs[-smooth_wind:])
- np.std(svcs[-smooth_wind:]))
mu_m_var_1 = (np.mean(svcs[-2*smooth_wind:-smooth_wind])
- np.std(svcs[-2*smooth_wind:-smooth_wind]))
if mu_m_var_0 < mu_m_var_1:
stop = True
break
print(epoch, svc, select_gap, torch.sum(select_prob > 0.5),
" ", datetime.now() - t0)
if stop:
print('Stopped early based on mean - std criterion.')
break
return np.array(svcs),
|
np.array(select_gaps)
|
numpy.array
|
import mdtraj as md
import numpy as np
from scipy.interpolate import interp1d
from scipy.spatial import ConvexHull
def save_pdb(t, protein, filename, bfactors=None):
"""Save a PDB file. It supports adding contact information
in a beta-factor column.
Parameters
----------
t : MDTraj.Trajectory
protein : ProLint.Protein
filename : str
bfactors : array
"""
df = protein.dataframe[0]
if protein.resolution == 'atomistic':
indices = df[(df.name == "CA")].index.to_numpy()
elif protein.resolution == 'martini':
indices = df[(df.name == "BB")].index.to_numpy()
t = t[0].atom_slice(indices)
t.save_pdb(filename, bfactors=bfactors)
def unit_poly_verts(theta, centre):
    """Return vertices of polygon for subplot axes.
    This polygon is circumscribed by a unit circle centered at (centre, centre).
    """
    x0, y0, r = [centre] * 3
verts = [(r*np.cos(t) + x0, r*
|
np.sin(t)
|
numpy.sin
|
import cv2
import numpy as np
import albumentations as A
transforms = A.Compose([
A.CenterCrop(1350,1350, True,1),
], p=1.0, bbox_params=A.BboxParams(format='pascal_voc', min_area=0, min_visibility=0.99, label_fields=['labels']))
def switch_image(img) :
h, w = img.shape[:2]
if (h, w) == (4032, 1960) or (h, w) == (4000, 1800) :
img = np.flip(img, 1)
img = np.transpose(img, (1, 0, 2))
return img
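# Minimal usage sketch for the Compose pipeline defined above (hypothetical
# helper, not part of the original script). Boxes are pascal_voc
# [x_min, y_min, x_max, y_max] as declared in BboxParams, and boxes that lose
# more than 1% of their visibility in the centre crop are dropped.
def apply_center_crop(img, bboxes, labels):
    out = transforms(image=img, bboxes=bboxes, labels=labels)
    return out['image'], out['bboxes'], out['labels']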
# def crop_image(img):
# h, w = img.shape[:2]
# # Case 1
# if (h, w) == (1960, 4032):
# h_margin = 305
# w_margin = 1341
# elif (h, w) == (1800, 4000):
# h_margin = 225
# w_margin = 1325
# else: #(1560, 1632)
# h_margin = 0
# w_margin = 0
# wid = w - w_margin*2
# hgt = h - h_margin*2
# img = img[h_margin:-h_margin, w_margin:-w_margin, :]
# return img
# def transform_bbox_points(img, bbox_point):
# h, w = img.shape[:2]
# # Case 1
# if (h, w) == (1960, 4032):
# h_margin = 305
# w_margin = 1341
# elif (h, w) == (1800, 4000):
# h_margin = 225
# w_margin = 1325
# else: #(1560, 1632)
# h_margin = 0
# w_margin = 0
# xmin, ymin, xmax, ymax = bbox_point
# xmin -= w_margin
# ymin -= h_margin
# xmax -= w_margin
# ymax -= h_margin
# new_bbox_point = [xmin, ymin, xmax, ymax]
# return new_bbox_point
def shift(img, val_x, val_y, points_2d=None, is_normalized=True):
""" Shift Image and Points
"""
h, w = img.shape[:2]
shift_x = int(val_x * w)
shift_y = int(val_y * h)
# Get Affine transform matrix
M =
|
np.float32([[1, 0, shift_x], [0, 1, shift_y]])
|
numpy.float32
|
# This code is based on: https://github.com/stefanknegt/Probabilistic-Unet-Pytorch/
# Author: <NAME> https://github.com/stefanknegt/Probabilistic-Unet-Pytorch/
# Modifications: <NAME>
# This software is licensed under the Apache License 2.0
import torch
from torch.utils.data.dataset import Dataset
from torch.utils.data import DataLoader
import numpy as np
import os
import random
import pickle
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from mypath import Path
def load_data_into_loader(sys_config, name, batch_size, transform=None):
location = os.path.join(sys_config.data_root, name)
dataset = LIDC_IDRI(dataset_location=location, transform=transform)
dataset_size = len(dataset)
indices = list(range(dataset_size))
split = int(np.floor(0.1 * dataset_size))
np.random.shuffle(indices)
    # Disjoint 80/10/10 split over the shuffled indices
    train_indices, test_indices, val_indices = indices[2*split:], indices[split:2*split], indices[:split]
    train_sampler = SubsetRandomSampler(train_indices)
    test_sampler = SubsetRandomSampler(test_indices)
    val_sampler = SubsetRandomSampler(val_indices)
    train_loader = DataLoader(dataset, batch_size=batch_size, sampler=train_sampler)
test_loader = DataLoader(dataset, batch_size=1, sampler=test_sampler)
validation_loader = DataLoader(dataset, batch_size=1, sampler=val_sampler)
print("Number of training/test/validation patches:", (len(train_indices), len(test_indices), len(val_indices)))
return train_loader, test_loader, validation_loader
class LIDC_IDRI(Dataset):
images = []
labels = []
series_uid = []
MEAN = 0.22223
STD = 0.1843
def __init__(self, dataset_location=Path.getPath('lidc'), transform=None, mode='ged'):
"""
mode = choices(['ged', 'qubiq'])
"""
self.transform = transform
self.mode = mode
max_bytes = 2**31 - 1
data = {}
for file in os.listdir(dataset_location):
filename = os.fsdecode(file)
if '.pickle' in filename:
print("Loading file", filename)
                file_path = os.path.join(dataset_location, filename)
bytes_in = bytearray(0)
input_size = os.path.getsize(file_path)
with open(file_path, 'rb') as f_in:
for _ in range(0, input_size, max_bytes):
bytes_in += f_in.read(max_bytes)
new_data = pickle.loads(bytes_in)
data.update(new_data)
for key, value in data.items():
self.images.append(value['image'].astype(float))
self.labels.append(value['masks'])
self.series_uid.append(value['series_uid'])
assert (len(self.images) == len(self.labels) == len(self.series_uid))
for img in self.images:
assert np.max(img) <= 1 and np.min(img) >= 0
for label in self.labels:
assert np.max(label) <= 1 and np.min(label) >= 0
del new_data
del data
def __getitem__(self, index):
image = np.expand_dims(self.images[index], axis=0)
label = self.labels[index][np.random.randint(4)][None, ...]
labels = np.stack(self.labels[index], axis=0)
# Convert image and label to torch tensors
image = (torch.from_numpy(image) - self.MEAN) / self.STD
label = torch.from_numpy(label)
#Convert uint8 to float tensors
image = image.type(torch.FloatTensor)
label = label.type(torch.FloatTensor)
# Normalise inputs
if self.transform:
image = self.transform(image)
label = self.transform(label)
if self.mode == 'ged':
return {'image': image, 'label': label, 'labels': labels}
elif self.mode == 'qubiq':
return {'image': image, 'label': labels}
else:
raise NotImplementedError
# Override to give PyTorch size of dataset
def __len__(self):
return len(self.images)
def mean(self, ):
return np.mean(self.images)
def std(self, ):
return
|
np.std(self.images)
|
numpy.std
|
import pickle
import itertools
import os
import math
from sklearn.preprocessing import normalize
import re
from operator import add
import matplotlib.pyplot as plt
#%matplotlib inline
import numpy as np
import argparse
import pylab as pl
def normalize_by_row(arr):
row_sums = np.sqrt((arr*arr).sum(axis=1))
new_arr = arr / row_sums[:, np.newaxis]
return new_arr
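# Quick sanity sketch (illustrative): for non-zero rows this should match the
# sklearn call used in compute_embds_matrix below, since both divide each row
# by its L2 norm.
#
#   x = np.array([[3.0, 4.0], [0.0, 2.0]])
#   np.allclose(normalize_by_row(x), normalize(x, axis=1, norm='l2'))  # True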
def grep(pat, txt, ind):
    # Return capture group `ind` of the first match as an integer
    r = re.search(pat, txt)
    return int(r.group(ind))
def compute_embds_matrix(path, M):
pkls = os.listdir(path)
pkls.sort(key=lambda txt: grep(r"(\d+)_(\d+)\.pkl", txt, 1))
print(pkls)
A_lst = []
for pkl in pkls:
print(pkl)
with open(os.path.join(path, pkl), 'rb') as handle:
samples = pickle.load(handle)
keys = list(samples.keys())
keys.sort(key=lambda txt: grep(r"(\d+)\.png", txt, 1))
samples = [samples[key] for key in keys]
chunks = [normalize(np.asarray(samples[i:i+M]), axis=1, norm='l2') for i in range(0, len(samples), M)]
print(chunks[0].shape)
print(len(chunks))
A_lst.extend(chunks)
return A_lst
def compute_nearest_neighbors(A_lst, epsilon, N):
neighbors_count_lstoflst = []
final_neighbors_count_lstoflst = []
for i in range(N):
print('i={}'.format(i))
Ai = A_lst[i]
Bi = np.transpose(Ai)
AiBi = np.matmul(Ai, Bi)
np.fill_diagonal(AiBi, 1)
AiBi = np.arccos(AiBi) / math.pi
np.fill_diagonal(AiBi, np.inf)
AiBi = AiBi - np.ones(AiBi.shape)*epsilon
neighbors_count = list(
|
np.sum(AiBi <= 0, axis=1)
|
numpy.sum
|
#! /usr/bin/env python
"""Provides an artificial problem to test multi-fidelity optimizers on.
"""
import numpy as np
import rospy
import random
from percepto_msgs.srv import GetCritique, GetCritiqueRequest, GetCritiqueResponse
class MultiFidelityFunctions(object):
def __init__(self, num_fidelities, noise=0.1, gamma=0.1):
self.gamma = np.arange(num_fidelities)[::-1] * gamma
self.theta_offsets = np.random.uniform(-self.gamma, self.gamma,
size=num_fidelities)
rospy.loginfo('Gammas: ' + str(self.gamma))
rospy.loginfo('Offsets: ' + str(self.theta_offsets))
self.noise = noise
def __call__(self, fid, x):
if round(fid) != fid:
raise ValueError('Fidelity must be integer')
fid = int(round(fid))
if fid >= len(self.gamma):
raise ValueError('Cannot query fidelity %d out of %d' %
(fid, len(self.gamma)))
mean = -
|
np.linalg.norm(x)
|
numpy.linalg.norm
|
import unittest
import numpy as np
from activations import sigmoid, sigmoid_derivative, relu, relu_derivative
from propagate import propagate, _cost, _step_forward, _propagate_forward, _step_backward, _propagate_back, _tf_propagate_forward, _tf_cost
from utils import vectorize, normalize_rows, softmax, loss
from initializations import initialize_parameters, initialize_lr_with_zeros, initialize_tf_parameters
from gradient_checking import _dictionary_to_vector, check_gradient
from convolutions import zero_pad, conv_step, conv_forward, pool_forward, conv_backward, create_mask_from_window, distribute_value, pool_backward, create_placeholders, initialize_tf_parameters as initialize_tf_conv_parameters
import tensorflow as tf
from tf_helpers import create_placeholders
class TestActivations(unittest.TestCase):
def test_sigmoid(self):
self.assertEqual(sigmoid(0), 0.5)
self.assertGreater(sigmoid(100), .99)
self.assertLess(sigmoid(-100), .01)
Z = np.array([1,2,3])
expected = np.array([0.73105858, 0.88079708, 0.95257413])
self.assertTrue(np.allclose(sigmoid(Z), expected))
def test_sigmoid_derivative(self):
self.assertEqual(sigmoid_derivative(0), 0.25)
self.assertLess(sigmoid_derivative(100), .001)
self.assertLess(sigmoid_derivative(-100), .001)
Z = np.array([1, 2, 3])
expected = np.array([0.1966119, 0.1049935, 0.04517666])
self.assertTrue(np.allclose(sigmoid_derivative(Z), expected))
def test_relu(self):
self.assertEqual(relu(1), 1)
self.assertEqual(relu(0), 0)
self.assertEqual(relu(-1), 0)
def test_relu_derivative(self):
self.assertTrue(np.allclose(relu_derivative(np.array([[2,1,0,-1,-2]])), np.array([[1,1,0,0,0]])))
class TestUtils(unittest.TestCase):
def test_vectorize(self):
with self.assertRaises(AssertionError):
vectorize([])
array = vectorize(np.array([]))
self.assertEqual(array.shape, (0,1))
array = vectorize(np.zeros((2,1)))
self.assertEqual(array.shape, (2,1))
array = vectorize(np.zeros((1,3)))
self.assertEqual(array.shape, (3,1))
array = vectorize(np.zeros((3,4,2,5)))
self.assertEqual(array.shape, (120,1))
image = np.array([
[
[0.67826139, 0.29380381],
[0.90714982, 0.52835647],
[0.4215251 , 0.45017551]
],
[
[0.92814219, 0.96677647],
[0.85304703, 0.52351845],
[0.19981397, 0.27417313]
],
[
[0.60659855, 0.00533165],
[0.10820313, 0.49978937],
[0.34144279, 0.94630077]
]
])
expected = np.array([
[0.67826139],
[0.29380381],
[0.90714982],
[0.52835647],
[0.4215251 ],
[0.45017551],
[0.92814219],
[0.96677647],
[0.85304703],
[0.52351845],
[0.19981397],
[0.27417313],
[0.60659855],
[0.00533165],
[0.10820313],
[0.49978937],
[0.34144279],
[0.94630077]
])
self.assertTrue(np.allclose(vectorize(image), expected))
def test_normalize_rows(self):
arg = np.zeros((3,2))
self.assertTrue(np.array_equal(normalize_rows(arg), arg))
arg = np.array([
[0, 3, 4],
[1, 6, 4]
])
expected = np.array([
[0, 0.6, 0.8],
[0.13736056, 0.82416338, 0.54944226]
])
self.assertTrue(np.allclose(normalize_rows(arg), expected))
def test_softmax(self):
arg = np.zeros((3,2))
self.assertTrue(np.array_equal(softmax(arg), arg + 0.5))
arg = np.array([
[9, 2, 5, 0, 0],
[7, 5, 0, 0 ,0]
])
expected = np.array([
[9.80897665e-01, 8.94462891e-04, 1.79657674e-02, 1.21052389e-04, 1.21052389e-04],
[8.78679856e-01, 1.18916387e-01, 8.01252314e-04, 8.01252314e-04, 8.01252314e-04]
])
self.assertTrue(np.allclose(softmax(arg), expected))
def test_loss(self):
with self.assertRaises(AssertionError):
loss([], [], L=0)
with self.assertRaises(AssertionError):
loss([], [], L=3)
size = 10
y = np.random.randint(2, size=size)
yhat = np.copy(y)
self.assertEqual(loss(yhat, y, L=1), 0)
self.assertEqual(loss(yhat, y, L=2), 0)
yhat = (y == 0).astype(int)
self.assertEqual(loss(yhat, y, L=1), size)
self.assertEqual(loss(yhat, y, L=2), size)
y = np.array([1, 0, 0, 1, 1])
yhat = np.array([.9, 0.2, 0.1, .4, .9])
self.assertEqual(loss(yhat, y, L=1), 1.1)
self.assertEqual(loss(yhat, y, L=2), 0.43)
class TestInitialization(unittest.TestCase):
def test_initialize_parameters(self):
for i in range(5, 8):
for o in range(1, 4):
layer_dimensions = [dimension for dimension in [i,o] if dimension != 0]
parameters = initialize_parameters(layer_dimensions, use="zeros")
self.assertEqual(len(parameters), 2)
self.assertTrue(np.array_equal(parameters["W1"], np.zeros((o,i))))
self.assertTrue(np.array_equal(parameters["b1"], np.zeros((o, 1))))
for h1 in range(1, 4):
layer_dimensions = [dimension for dimension in [i, h1, o] if dimension != 0]
parameters = initialize_parameters(layer_dimensions, use="zeros")
self.assertEqual(len(parameters), 4)
self.assertTrue(np.array_equal(parameters["W1"], np.zeros((h1,i))))
self.assertTrue(np.array_equal(parameters["b1"], np.zeros((h1, 1))))
self.assertTrue(np.array_equal(parameters["W2"], np.zeros((o, h1))))
self.assertTrue(np.array_equal(parameters["b2"], np.zeros((o, 1))))
for h2 in range(7, 9):
layer_dimensions = [dimension for dimension in [i,h1,h2,o] if dimension != 0]
parameters = initialize_parameters(layer_dimensions, use="zeros")
self.assertEqual(len(parameters), 6)
self.assertTrue(np.array_equal(parameters["W1"], np.zeros((h1,i))))
self.assertTrue(np.array_equal(parameters["b1"], np.zeros((h1, 1))))
self.assertTrue(np.array_equal(parameters["W2"], np.zeros((h2, h1))))
self.assertTrue(np.array_equal(parameters["b2"], np.zeros((h2, 1))))
self.assertTrue(np.array_equal(parameters["W3"], np.zeros((o, h2))))
self.assertTrue(np.array_equal(parameters["b3"], np.zeros((o, 1))))
np.random.seed(3)
expected = {
'W1': np.array([
[17.88628473, 4.36509851, 0.96497468],
[-18.63492703, -2.77388203, -3.54758979]
]),
'b1': np.array([[0], [0]]),
'W2': np.array([[-0.82741481, -6.27000677]]),
'b2': np.array([[0]])
}
parameters = initialize_parameters([3,2,1], use="random", W_multiplier=10)
self.assertTrue(np.allclose(parameters["W1"], expected['W1']))
self.assertTrue(np.allclose(parameters["b1"], expected['b1']))
self.assertTrue(np.allclose(parameters["W2"], expected['W2']))
self.assertTrue(np.allclose(parameters["b2"], expected['b2']))
np.random.seed(3)
expected = {
'W1': np.array([
[1.78862847, 0.43650985],
[0.09649747, -1.8634927],
[-0.2773882, -0.35475898],
[-0.08274148, -0.62700068]
]),
'b1': np.array([
[0.],
[0.],
[0.],
[0.]
]),
'W2': np.array([[-0.03098412, -0.33744411, -0.92904268, 0.62552248]]),
'b2': np.array([[0]])
}
parameters = initialize_parameters([2,4,1], use="he")
self.assertTrue(np.allclose(parameters["W1"], expected['W1']))
self.assertTrue(np.allclose(parameters["b1"], expected['b1']))
self.assertTrue(np.allclose(parameters["W2"], expected['W2']))
self.assertTrue(np.allclose(parameters["b2"], expected['b2']))
def test_initialize_lr_with_zeros(self):
with self.assertRaises(ValueError):
initialize_lr_with_zeros(0)
for l in range(1,4):
parameters = initialize_lr_with_zeros(l)
self.assertTrue(np.array_equal(parameters['W1'], np.zeros((1,l))))
self.assertEqual(parameters['b1'], 0)
self.assertEqual(parameters['b1'].shape, (1, 1))
def test_initialize_tf_parameters(self):
parameters = initialize_tf_parameters([12288, 25, 12, 6], seed=1)
tf.get_variable_scope().reuse_variables()
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
parameters = sess.run(parameters)
self.assertEqual(parameters['W1'].shape, (25, 12288))
self.assertAlmostEqual(parameters['W1'][0][0], -0.01962241)
self.assertEqual(parameters['b1'].shape, (25, 1))
self.assertEqual(np.sum(parameters['b1']), 0)
self.assertEqual(parameters['W2'].shape, (12, 25))
self.assertAlmostEqual(parameters['W2'][0][0], -0.35795909)
self.assertEqual(parameters['b2'].shape, (12, 1))
self.assertEqual(np.sum(parameters['b2']), 0)
self.assertEqual(parameters['W3'].shape, (6, 12))
self.assertAlmostEqual(parameters['W3'][0][0], -0.5132134)
self.assertEqual(parameters['b3'].shape, (6, 1))
self.assertEqual(np.sum(parameters['b3']), 0)
class TestPropagate(unittest.TestCase):
def test__step_forward(self):
A_prev = np.array([
[-0.41675785, -0.05626683],
[-2.1361961, 1.64027081],
[-1.79343559, -0.84174737]
])
W = np.array([[0.50288142, -1.24528809, -1.05795222]])
b = np.array([[-0.90900761]])
activation = 'sigmoid'
A, D, ((cA_prev, cW, cb), (cZ, cActivation)) = _step_forward(A_prev, W, b, activation)
self.assertTrue(np.allclose(A, [[0.96890023, 0.11013289]]))
self.assertIs(A_prev, cA_prev)
self.assertIs(W, cW)
self.assertIs(b, cb)
self.assertTrue(np.allclose(cZ, np.array([[3.43896134, -2.08938436]])))
self.assertIs(activation, cActivation)
activation = 'relu'
A, D, ((cA_prev, cW, cb), (cZ, cActivation)) = _step_forward(A_prev, W, b, activation)
self.assertTrue(np.allclose(A, [[3.43896131, 0.]]))
self.assertIs(A_prev, cA_prev)
self.assertIs(W, cW)
self.assertIs(b, cb)
self.assertTrue(np.allclose(cZ, np.array([[3.43896131, -2.08938436]])))
self.assertIs(activation, cActivation)
def test__propagate_forward(self):
X = np.array([
[-0.31178367, 0.72900392, 0.21782079, -0.8990918 ],
[-2.48678065, 0.91325152, 1.12706373, -1.51409323],
[1.63929108, -0.4298936, 2.63128056, 0.60182225],
[-0.33588161, 1.23773784, 0.11112817, 0.12915125],
[0.07612761, -0.15512816, 0.63422534, 0.810655]
])
parameters = {
'W1': np.array([
[0.35480861, 1.81259031, -1.3564758 , -0.46363197, 0.82465384],
[-1.17643148, 1.56448966, 0.71270509, -0.1810066 , 0.53419953],
[-0.58661296, -1.48185327, 0.85724762, 0.94309899, 0.11444143],
[-0.02195668, -2.12714455, -0.83440747, -0.46550831, 0.23371059]
]),
'b1': np.array([
[1.38503523],
[-0.51962709],
[-0.78015214],
[0.95560959]
]),
'W2': np.array([
[-0.12673638, -1.36861282, 1.21848065, -0.85750144],
[-0.56147088, -1.0335199 , 0.35877096, 1.07368134],
[-0.37550472, 0.39636757, -0.47144628, 2.33660781]
]),
'b2': np.array([
[1.50278553],
[-0.59545972],
[0.52834106]
]),
'W3': np.array([
[0.9398248, 0.42628539, -0.75815703]
]),
'b3': np.array([[-0.16236698]])
}
AL, Ds, caches = _propagate_forward(parameters, X)
self.assertTrue(np.allclose(AL, np.array([[0.03921668, 0.70498921, 0.19734387, 0.04728177]])))
self.assertEqual(len(caches), 3)
X = np.array([
[ 1.62434536, -0.61175641, -0.52817175, -1.07296862, 0.86540763],
[-2.3015387, 1.74481176, -0.7612069, 0.3190391, -0.24937038],
[ 1.46210794, -2.06014071, -0.3224172, -0.38405435, 1.13376944]
])
parameters = {
'W1': np.array([
[-1.09989127, -0.17242821, -0.87785842],
[ 0.04221375, 0.58281521, -1.10061918]
]),
'b1': np.array([[ 1.14472371], [ 0.90159072]]),
'W2': np.array([
[ 0.50249434, 0.90085595],
[-0.68372786, -0.12289023],
[-0.93576943, -0.26788808]
]),
'b2': np.array([[ 0.53035547], [-0.69166075], [-0.39675353]]),
'W3': np.array([[-0.6871727 , -0.84520564, -0.67124613]]),
'b3': np.array([[-0.0126646]])
}
np.random.seed(1)
AL, Ds, caches = _propagate_forward(parameters, X, keep_prob=0.7)
self.assertTrue(np.allclose(AL, np.array([[0.36974721, 0.00305176, 0.04565099, 0.49683389, 0.36974721]])))
self.assertEqual(len(caches), 3)
def test__step_backward(self):
dAL = np.array([[-0.41675785, -0.05626683]])
A_prev = np.array([
[-2.1361961, 1.64027081],
[-1.79343559, -0.84174737],
[0.50288142, -1.24528809]
])
W = np.array([[-1.05795222, -0.90900761, 0.55145404]])
b = np.array([[2.29220801]])
Z = np.array([[0.04153939, -1.11792545]])
cache = ((A_prev, W, b), (Z, 'sigmoid'))
dA_prev, dW, db = _step_backward(dAL, 1, cache, None, None, 1)
self.assertTrue(np.allclose(dA_prev, np.array([[0.11017994, 0.01105339], [0.09466817, 0.00949723], [-0.05743092, -0.00576154]])))
self.assertTrue(np.allclose(dW, np.array([[0.10266786, 0.09778551, -0.01968084]])))
self.assertTrue(np.allclose(db, [[-0.05729622]]))
cache = ((A_prev, W, b), (Z, 'relu'))
dA_prev, dW, db = _step_backward(dAL, 1, cache, None, None, 1)
self.assertTrue(np.allclose(dA_prev, np.array([[0.44090989, 0.], [0.37883606, 0.], [-0.2298228, 0.]])))
self.assertTrue(np.allclose(dW, np.array([[0.44513824, 0.37371418, -0.10478989]])))
self.assertTrue(np.allclose(db, [[-0.20837892]]))
def test__propagate_back(self):
# AL = np.array([[1.78862847, 0.43650985]])
# Y = np.array([[1, 0]])
# A_prev = np.array([
# [0.09649747, -1.8634927],
# [-0.2773882 , -0.35475898],
# [-0.08274148, -0.62700068],
# [-0.04381817, -0.47721803]
# ])
# W = np.array([
# [-1.31386475, 0.88462238, 0.88131804, 1.70957306],
# [0.05003364, -0.40467741, -0.54535995, -1.54647732],
# [0.98236743, -1.10106763, -1.18504653, -0.2056499]
# ])
# b = np.array([
# [1.48614836],
# [0.23671627],
# [-1.02378514]
# ])
# Z = np.array([
# [-0.7129932 , 0.62524497],
# [-0.16051336, -0.76883635],
# [-0.23003072, 0.74505627]
# ])
# first_cache = ((A_prev, W, b), (Z, 'relu'))
# A_prev = np.array([
# [1.97611078, -1.24412333],
# [-0.62641691, -0.80376609],
# [-2.41908317, -0.92379202]
# ])
# W = np.array([[-1.02387576, 1.12397796, -0.13191423]])
# b = np.array([[-1.62328545]])
# Z = np.array([[0.64667545, -0.35627076]])
# second_cache = ((A_prev, W, b), (Z, 'sigmoid'))
# caches = [first_cache, second_cache]
# expected = {
# 'dW1': np.array([
# [ 0.41010002, 0.07807203, 0.13798444, 0.10502167],
# [0.,0.,0.,0.],
# [0.05283652, 0.01005865, 0.01777766, 0.0135308]
# ]),
# 'db1': np.array([[-0.22007063], [0.], [-0.02835349]]),
# 'dW2': np.array([[-0.39202432, -0.13325855, -0.04601089]]),
# 'db2': np.array([[ 0.15187861]])
# }
# grads = _propagate_back(AL, Y, [1,1], caches, None, None, 1)
# self.assertTrue(np.allclose(expected['dW1'], grads['dW1']))
# self.assertTrue(np.allclose(expected['db1'], grads['db1']))
# self.assertTrue(np.allclose(expected['dW2'], grads['dW2']))
# self.assertTrue(np.allclose(expected['db2'], grads['db2']))
# # regularization
# X = np.array([
# [1.62434536, -0.61175641, -0.52817175, -1.07296862, 0.86540763],
# [-2.3015387, 1.74481176, -0.7612069, 0.3190391, -0.24937038],
# [1.46210794, -2.06014071, -0.3224172, -0.38405435, 1.13376944]
# ])
# AL = np.array([[ 0.40682402, 0.01629284, 0.16722898, 0.10118111, 0.40682402]])
# Y = np.array([[1, 1, 0, 1, 0]])
# caches = (
# ((
# X,
# np.array([
# [-1.09989127, -0.17242821, -0.87785842],
# [0.04221375, 0.58281521, -1.10061918]
# ]),
# np.array([[1.14472371], [0.90159072]])
# ),
# (
# np.array([
# [-1.52855314, 3.32524635, 2.13994541, 2.60700654, -0.75942115],
# [-1.98043538, 4.1600994 , 0.79051021, 1.46493512, -0.45506242]
# ]),
# "relu"
# )),
# ((
# np.array([
# [0., 3.32524635, 2.13994541, 2.60700654, 0.],
# [0., 4.1600994, 0.79051021, 1.46493512, 0.]
# ]),
# np.array([
# [0.50249434, 0.90085595],
# [-0.68372786, -0.12289023],
# [-0.93576943, -0.26788808]
# ]),
# np.array([
# [0.53035547],
# [-0.69166075],
# [-0.39675353]
# ])
# ),
# (
# np.array([
# [0.53035547, 5.94892323, 2.31780174, 3.16005701, 0.53035547],
# [-0.69166075, -3.47645987, -2.25194702, -2.65416996, -0.69166075],
# [-0.39675353, -4.62285846, -2.61101729, -3.22874921, -0.39675353]
# ]),
# "relu"
# )),
# ((
# np.array([
# [0.53035547, 5.94892323, 2.31780174, 3.16005701, 0.53035547],
# [0., 0., 0., 0., 0.],
# [0., 0., 0., 0., 0.]
# ]),
# np.array([[-0.6871727, -0.84520564, -0.67124613]]),
# np.array([[-0.0126646]])
# ),
# (
# np.array([[-0.3771104, -4.10060224, -1.60539468, -2.18416951, -0.3771104 ]]),
# "sigmoid"
# ))
# )
# expected = {
# 'dW1': np.array([
# [-0.25604646, 0.12298827, -0.28297129],
# [-0.17706303, 0.34536094, -0.4410571 ]
# ]),
# 'db1': np.array([[0.11845855], [0.21236874]]),
# 'dW2': np.array([
# [0.79276486, 0.85133918],
# [-0.0957219, -0.01720463],
# [-0.13100772, -0.03750433]
# ]),
# 'db2': np.array([[ 0.26135226], [ 0.], [ 0.]]),
# 'dW3': np.array([[-1.77691347, -0.11832879, -0.09397446]]),
# 'db3': np.array([[-0.38032981]])
# }
# grads = _propagate_back(AL, Y, [1, 1, 1], caches, "L2", 0.7, 1)
# self.assertTrue(np.allclose(grads['dW1'], expected['dW1']))
# self.assertTrue(np.allclose(grads['db1'], expected['db1']))
# self.assertTrue(np.allclose(grads['dW2'], expected['dW2']))
# self.assertTrue(np.allclose(grads['db2'], expected['db2']))
# self.assertTrue(np.allclose(grads['dW3'], expected['dW3']))
# self.assertTrue(np.allclose(grads['db3'], expected['db3']))
# Dropout
AL = np.array([[0.32266394, 0.49683389, 0.00348883, 0.49683389, 0.32266394]])
Y = np.array([[1, 1, 0, 1, 0]])
caches = (
((
np.array([
[1.62434536, -0.61175641, -0.52817175, -1.07296862, 0.86540763],
[-2.3015387, 1.74481176, -0.7612069, 0.3190391, -0.24937038],
[ 1.46210794, -2.06014071, -0.3224172, -0.38405435, 1.13376944]
]),
np.array([[-1.09989127, -0.17242821, -0.87785842], [0.04221375, 0.58281521, -1.10061918]]),
np.array([[1.14472371], [0.90159072]])
),
(
np.array([
[-1.52855314, 3.32524635, 2.13994541, 2.60700654, -0.75942115],
[-1.98043538, 4.1600994, 0.79051021, 1.46493512, -0.45506242]
]),
"relu"
)),
((
np.array([
[0., 0., 4.27989081, 5.21401307, 0.],
[0., 8.32019881, 1.58102041, 2.92987024, 0.]
]),
np.array([
[0.50249434, 0.90085595],
[-0.68372786, -0.12289023],
[-0.93576943, -0.26788808]
]),
np.array([[0.53035547], [-0.69166075], [-0.39675353]])
),
(
np.array([
[0.53035547, 8.02565606, 4.10524802, 5.78975856, 0.53035547],
[-0.69166075, -1.71413186, -3.81223329, -4.61667916, -0.69166075],
[-0.39675353, -2.62563561, -4.82528105, -6.0607449, -0.39675353]
]),
"relu"
)),
((
np.array([
[1.06071093, 0., 8.21049603, 0., 1.06071093],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]
]),
np.array([[-0.6871727, -0.84520564, -0.67124613]]),
np.array([[-0.0126646]])
),
(
np.array([[-0.7415562, -0.0126646, -5.65469333, -0.0126646, -0.7415562 ]]),
"sigmoid"
))
)
expected = {
'dW1': np.array([[0.00019884, 0.00028657, 0.00012138], [ 0.00035647, 0.00051375, 0.00021761]]),
'db1': np.array([[-0.00037647], [-0.00067492]]),
'dW2': np.array([[-0.00256518, -0.0009476 ], [0., 0.], [0., 0.]]),
'db2': np.array([[ 0.06033089], [0.], [0.]]),
'dW3': np.array([[-0.06951191, 0., 0.]]),
'db3': np.array([[-0.2715031]])
}
Ds = [
1,
np.array([[1, 0, 1, 1, 1], [1, 1, 1, 1, 0]]),
np.array([[1, 0, 1, 0, 1], [0, 1, 0, 1, 1], [0, 0, 1, 0, 0]])
]
grads = _propagate_back(AL, Y, Ds, caches, None, None, 0.8)
self.assertTrue(np.allclose(grads['dW1'], expected['dW1']))
self.assertTrue(np.allclose(grads['db1'], expected['db1']))
self.assertTrue(np.allclose(grads['dW2'], expected['dW2']))
self.assertTrue(np.allclose(grads['db2'], expected['db2']))
self.assertTrue(np.allclose(grads['dW3'], expected['dW3']))
self.assertTrue(np.allclose(grads['db3'], expected['db3']))
def test__cost(self):
Y, A = np.array([[1, 1, 1]]), np.array([[0.8, 0.9, 0.4]])
self.assertEqual(_cost(A, Y, None, None, None), 0.414931599615397)
Y, A = np.array([[1, 1, 0, 1, 0]]), np.array([[0.40682402, 0.01629284, 0.16722898, 0.10118111, 0.40682402]])
parameters = {
'W1': np.array([
[1.62434536, -0.61175641, -0.52817175],
[-1.07296862, 0.86540763, -2.3015387]
]),
'b1': np.array([
[1.74481176],
[-0.7612069 ]
]),
'W2': np.array([
[0.3190391, -0.24937038],
[1.46210794, -2.06014071],
[-0.3224172, -0.38405435]
]),
'b2': np.array([
[1.13376944],
[-1.09989127],
[-0.17242821]
]),
'W3': np.array([[-0.87785842, 0.04221375, 0.58281521]]),
'b3': np.array([[-1.10061918]])
}
self.assertEqual(_cost(A, Y, parameters, regularization="L2", lambd=0.1), 1.786485945169561)
def test_propagate(self):
w, b, X, Y = np.array([[1., 2.]]), 2., np.array([[1.,2.,-1.],[3.,4.,-3.2]]), np.array([[1,0,1]])
parameters = {
'W1': w,
'b1': b
}
grads, cost = propagate(parameters, X, Y)
self.assertTrue(np.allclose(grads['dW1'], np.array([[0.99845601, 2.39507239]])))
self.assertTrue(np.allclose(grads['db1'], np.array([[0.00145558]])))
self.assertEqual(cost, 5.801545319394553)
def test__tf_propagate_forward(self):
X, Y = create_placeholders(12288, 6)
parameters = initialize_tf_parameters([12288, 25, 12, 6], seed=1)
tf.get_variable_scope().reuse_variables()
Z = _tf_propagate_forward(parameters, X)
np.random.seed(1)
dict_X = np.random.randn(12288, 1080)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
Z = sess.run(Z, feed_dict={X: dict_X})
self.assertAlmostEqual(Z[0][0], -2.46408725)
self.assertAlmostEqual(Z[5][1079], -0.9831996)
self.assertEqual(Z.shape, (6, 1080))
def test__tf_cost(self):
X, Y = create_placeholders(12288, 6)
parameters = initialize_tf_parameters([12288, 25, 12, 6], seed=1)
tf.get_variable_scope().reuse_variables()
Z = _tf_propagate_forward(parameters, X)
cost = _tf_cost(Z, Y)
np.random.seed(1)
dict_X = np.random.randn(12288, 1080)
dict_Y = np.random.randn(6, 1080)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
cost = sess.run(cost, feed_dict={X: dict_X, Y: dict_Y})
self.assertAlmostEqual(cost, 0.11581959)
class TestGradientChecking(unittest.TestCase):
def parameters(self):
return {
'W1': np.array([
[-0.3224172, -0.38405435, 1.13376944, -1.09989127],
[-0.17242821, -0.87785842, 0.04221375, 0.58281521],
[-1.10061918, 1.14472371, 0.90159072, 0.50249434],
[ 0.90085595, -0.68372786, -0.12289023, -0.93576943],
[-0.26788808, 0.53035547, -0.69166075, -0.39675353]
]),
'b1': np.array([
[-0.6871727],
[-0.84520564],
[-0.67124613],
[-0.0126646],
[-1.11731035]
]),
'W2': np.array([
[ 0.2344157, 1.65980218, 0.74204416, -0.19183555, -0.88762896],
[-0.74715829, 1.6924546, 0.05080775, -0.63699565, 0.19091548],
[ 2.10025514, 0.12015895, 0.61720311, 0.30017032, -0.35224985]
]),
'b2': np.array([[-1.1425182], [-0.34934272], [-0.20889423]]),
'W3': np.array([[ 0.58662319, 0.83898341, 0.93110208]]),
'b3': np.array([[ 0.28558733]])
}
def gradients(self):
return {
'dW1': np.array([
[-0.37347779, -1.47903216, 0.17596143, -1.33685036],
[-0.01967514, -0.08573553, 0.01188465, -0.07674312],
[0.03916037, -0.05539735, 0.04872715, -0.09359393],
[-0.05337778, -0.21138458, 0.02514856, -0.19106384],
[0., 0., 0., 0.]
]),
'db1': np.array([
[0.63290787],
[0.0372514 ],
[-0.06401301],
[0.09045575],
[0.]
]),
'dW2': np.array([
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0.91580165, 0.02451548, -0.10797954, 0.90281891, 0.]
]),
'db2': np.array([[0.], [0.], [0.19763343]]),
'dW3': np.array([[0., 0., 2.24404238]]),
'db3': np.array([[0.21225753]]),
}
def test__dictionary_to_vector_on_parameters(self):
expected = np.array([
[-0.3224172], [-0.38405435], [1.13376944], [-1.09989127], [-0.17242821], [-0.87785842],
[0.04221375], [0.58281521], [-1.10061918], [1.14472371], [0.90159072], [0.50249434],
[0.90085595], [-0.68372786], [-0.12289023], [-0.93576943], [-0.26788808], [0.53035547],
[-0.69166075], [-0.39675353], [-0.6871727 ], [-0.84520564], [-0.67124613], [-0.0126646],
[-1.11731035], [ 0.2344157], [1.65980218], [0.74204416], [-0.19183555], [-0.88762896],
[-0.74715829], [1.6924546], [0.05080775], [-0.63699565], [0.19091548], [2.10025514],
[0.12015895], [0.61720311], [0.30017032], [-0.35224985], [-1.1425182], [-0.34934272],
[-0.20889423], [0.58662319], [0.83898341], [0.93110208], [0.28558733]
])
parameter_values = _dictionary_to_vector(self.parameters())
self.assertTrue(np.allclose(parameter_values, expected))
def test__dictionary_to_vector_on_gradients(self):
expected = np.array([
[-0.37347779],
[-1.47903216],
[ 0.17596143],
[-1.33685036],
[-0.01967514],
[-0.08573553],
[0.01188465],
[-0.07674312],
[0.03916037],
[-0.05539735],
[0.04872715],
[-0.09359393],
[-0.05337778],
[-0.21138458],
[0.02514856],
[-0.19106384],
[0.],
[0.],
[0.],
[0.],
[0.63290787],
[0.0372514 ],
[-0.06401301],
[0.09045575],
[0.],
[0.],
[0.],
[0.],
[0.],
[0.],
[0.],
[0.],
[0.],
[0.],
[0.],
[0.91580165],
[0.02451548],
[-0.10797954],
[0.90281891],
[0.],
[0.],
[0.],
[0.19763343],
[0.],
[0.],
[2.24404238],
[0.21225753]
])
gradient_values = _dictionary_to_vector(self.gradients(), key_bases=["dW", "db"])
self.assertTrue(np.allclose(gradient_values, expected))
def test_check_gradient(self):
X = np.array([
[1.62434536, -0.61175641, -0.52817175],
[-1.07296862, 0.86540763, -2.3015387],
[ 1.74481176, -0.7612069, 0.3190391],
[-0.24937038, 1.46210794, -2.06014071]
])
Y = np.array([[1, 1, 0]])
difference = check_gradient(self.parameters(), self.gradients(), X, Y)
self.assertEqual(difference, 7.058343235500508e-08)
class TestTF_Helpers(unittest.TestCase):
def test_create_placeholders(self):
X, Y = create_placeholders(12288, 6)
print ("X = " + str(X))
print ("Y = " + str(Y))
self.assertEqual(X.shape[0], 12288)
self.assertEqual(Y.shape[0], 6)
self.assertEqual(X.dtype, tf.float32)
self.assertEqual(Y.dtype, tf.float32)
class TestConvolutions(unittest.TestCase):
def test_zero_pad(self):
np.random.seed(1)
x = np.random.randn(4, 3, 3, 2)
x_pad = zero_pad(x, 2)
oneone = np.array([[0.90085595, -0.68372786], [-0.12289023, -0.93576943], [-0.26788808, 0.53035547]])
padone = np.zeros((7,2))
self.assertEqual(x.shape, (4,3,3,2))
self.assertEqual(x_pad.shape, (4,7,7,2))
self.assertTrue(np.allclose(x[1,1], oneone))
self.assertTrue(np.allclose(x_pad[1,1], padone))
def test_conv_step(self):
np.random.seed(1)
a_slice_prev = np.random.randn(4, 4, 3)
W = np.random.randn(4, 4, 3)
b = np.random.randn(1, 1, 1)
Z = conv_step(a_slice_prev, W, b)
self.assertAlmostEqual(Z, -6.99908945068)
def test_conv_forward(self):
np.random.seed(1)
A_prev = np.random.randn(10,4,4,3)
W = np.random.randn(2,2,3,8)
b = np.random.randn(1,1,1,8)
hparameters = { "pad" : 2, "stride": 2 }
Z, cache_conv = conv_forward(A_prev, W, b, hparameters)
zthreetwoone = np.array([-0.61490741, -6.7439236, -2.55153897, 1.75698377, 3.56208902, 0.53036437, 5.18531798, 8.75898442])
cczeroonetwothree = np.array([-0.20075807, 0.18656139, 0.41005165])
self.assertAlmostEqual(np.mean(Z), 0.0489952035289)
self.assertTrue(np.allclose(Z[3,2,1], zthreetwoone))
self.assertTrue(np.allclose(cache_conv[0][1][2][3], cczeroonetwothree))
def test_pool_forward(self):
np.random.seed(1)
A_prev = np.random.randn(2, 4, 4, 3)
hparameters = { "stride" : 2, "f": 3 }
A, cache = pool_forward(A_prev, hparameters)
eA = np.array([[[[1.74481176, 0.86540763, 1.13376944]]], [[[1.13162939, 1.51981682, 2.18557541]]]])
self.assertTrue(np.allclose(A, eA))
A, cache = pool_forward(A_prev, hparameters, mode = "average")
eA = np.array([[[[0.02105773, -0.20328806, -0.40389855]]], [[[-0.22154621, 0.51716526, 0.48155844]]]])
self.assertTrue(np.allclose(A, eA))
def test_conv_backward(self):
np.random.seed(1)
A_prev = np.random.randn(10,4,4,3)
W =
|
np.random.randn(2,2,3,8)
|
numpy.random.randn
|
import gym
import math
import json
import torch
import pathlib
import numpy as np
from torch.utils.tensorboard import SummaryWriter
def sample(batch, p):
sampled_batch_len = int(batch.shape[0] * p)
idxs = np.random.choice(batch.shape[0], size=sampled_batch_len, replace=False)
sampled_batch = batch[idxs]
return sampled_batch, idxs
def get_stepsize(policy, max_kl):
grads = torch.cat([p.og_grad.flatten() for p in policy.parameters()])
ngrads = torch.cat([p.grad.flatten() for p in policy.parameters()])
alpha = torch.sqrt((2 * max_kl) / ((grads @ ngrads) + 1e-8))
return alpha
def zero_grad(model, set_none=False):
for p in model.parameters():
if set_none:
p.og_grad = None
p.grad = None
else:
p.og_grad = torch.zeros_like(p)
p.grad = torch.zeros_like(p)
def sgd_step(model, step_size):
for p in model.parameters():
p.data = p.data - step_size * p.grad.data
def orthog_layer_init(layer, std=np.sqrt(2), bias_const=0.0):
torch.nn.init.orthogonal_(layer.weight, std)
torch.nn.init.constant_(layer.bias, bias_const)
return layer
def set_seeds(experiments):
for exp in experiments:
exp.seed = np.random.randint(1e5)
def setup_writer(base_path, name, hparams):
bpath = pathlib.Path(f"{base_path}/")
bpath.mkdir(exist_ok=True, parents=True)
writer_path = f"{base_path}/{name}"
writer = SummaryWriter(writer_path) # metrics
print("saving to:", writer_path, "...")
# save hyperparams for run
dfile = open(f"{writer_path}/hparams.json", "w")
dfile.write(json.dumps(hparams, indent=4, sort_keys=True))
dfile.close()
return writer, writer_path
def normal_entropy(std):
var = std.pow(2)
entropy = 0.5 + 0.5 * torch.log(2 * var * math.pi)
return entropy.sum(1, keepdim=True)
def normal_log_density(x, mean, log_std, std):
var = std.pow(2)
log_density = -(x - mean).pow(2) / (2 * var) - 0.5 * math.log(2 * math.pi) - log_std
return log_density.sum(1, keepdim=True)
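# Added illustrative check (hypothetical helper, not part of the original utilities):
# the closed-form Gaussian entropy and log-density above should agree with
# torch.distributions.Normal.
def _check_normal_density_helpers():
    mean, std = torch.zeros(4, 2), torch.full((4, 2), 0.5)
    x = torch.randn(4, 2)
    ref = torch.distributions.Normal(mean, std)
    assert torch.allclose(normal_entropy(std), ref.entropy().sum(1, keepdim=True), atol=1e-5)
    assert torch.allclose(normal_log_density(x, mean, std.log(), std),
                          ref.log_prob(x).sum(1, keepdim=True), atol=1e-5)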
n1_vec = lambda x: x.flatten()[:, None]
vec = lambda xs: torch.cat([x.reshape(-1) for x in xs])
def set_flat_grad_to(model, flat_params):
prev_ind = 0
for param in model.parameters():
flat_size = int(np.prod(list(param.size())))
param.grad = flat_params[prev_ind : prev_ind + flat_size].view(param.size())
prev_ind += flat_size
def set_flat_params_to(model, flat_params):
prev_ind = 0
for param in model.parameters():
flat_size = int(np.prod(list(param.size())))
param.data.copy_(
flat_params[prev_ind : prev_ind + flat_size].view(param.size())
)
prev_ind += flat_size
def get_flat_grad_from(net):
fgrads = torch.cat([param.grad.view(-1) for param in net.parameters()])
return fgrads
def vector_to_parameter_list(vec, parameters):
params_new = []
pointer = 0
for param in parameters:
num_param = param.numel()
param_new = vec[pointer : pointer + num_param].view_as(param).data
params_new.append(param_new)
pointer += num_param
return list(params_new)
# from https://github.com/joschu/modular_rl
# http://www.johndcook.com/blog/standard_deviation/
import numpy as onp
class RunningStat(object):
def __init__(self, shape):
self._n = 0
self._M = onp.zeros(shape)
self._S = onp.zeros(shape)
def push(self, x):
x = onp.asarray(x)
assert x.shape == self._M.shape
self._n += 1
if self._n == 1:
self._M[...] = x
else:
oldM = self._M.copy()
self._M[...] = oldM + (x - oldM) / self._n
self._S[...] = self._S + (x - oldM) * (x - self._M)
@property
def n(self):
return self._n
@property
def mean(self):
return self._M
@property
def var(self):
return self._S / (self._n - 1) if self._n > 1 else onp.square(self._M)
@property
def std(self):
return onp.sqrt(self.var)
@property
def shape(self):
return self._M.shape
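# Added sanity sketch (illustrative only; the helper name is hypothetical):
# the Welford-style online update above should reproduce batch numpy
# statistics (sample variance, ddof=1) after pushing rows one at a time.
def _check_running_stat():
    data = onp.random.randn(200, 3)
    rs = RunningStat((3,))
    for row in data:
        rs.push(row)
    assert onp.allclose(rs.mean, data.mean(axis=0))
    assert onp.allclose(rs.std, data.std(axis=0, ddof=1))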
class ZFilter:
"""
y = (x-mean)/std
using running estimates of mean,std
"""
def __init__(self, shape, demean=True, destd=True, clip=10.0):
self.demean = demean
self.destd = destd
self.clip = clip
self.rs = RunningStat(shape)
def __call__(self, x, update=True):
if update:
self.rs.push(x)
if self.demean:
x = x - self.rs.mean
if self.destd:
x = x / (self.rs.std + 1e-8)
if self.clip:
x =
|
onp.clip(x, -self.clip, self.clip)
|
numpy.clip
|
""" DustAttnCalc
Given stellar masses, star formation rates, stellar metallicities, redshifts, axis ratios, calculate dust attenuation curves based on the hierarchical Bayesian model from Nagaraj+22a (in prep)
Author: <NAME>--<EMAIL>
"""
__all__ = ["regular_grid_interp_scipy","mass_completeness","get_dust_attn_curve_d2","get_dust_attn_curve_d1","getMargSample","getTraceInfo","getModelSamplesI","plotDustAttn","plotDust12","DustAttnCalc"]
import numpy as np
import os.path as op
from sedpy.attenuation import noll
from scipy.stats import truncnorm
import arviz as az
import argparse as ap
from glob import glob
import fnmatch
from astropy.table import Table
from distutils.dir_util import mkpath
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
from scipy.interpolate import RegularGridInterpolator as RGIScipy
from dynesty.utils import resample_equal
import pkg_resources
import seaborn as sns
sns.set_style("ticks")
sns.set_style({"xtick.direction": "in","ytick.direction": "in",
"xtick.top":True, "ytick.right":True,
"xtick.major.size":12, "xtick.minor.size":4,
"ytick.major.size":12, "ytick.minor.size":4,
})
def parse_args(argv=None):
""" Tool to parse arguments from the command line. The entries should be self-explanatory. They include all of the options in the DustAttnCalc class. """
parser = ap.ArgumentParser(description="DustAttnCalc",
formatter_class=ap.RawTextHelpFormatter)
parser.add_argument('-logM','--logM',help='Log stellar mass of galaxy',type=float,default=None)
parser.add_argument('-sfr','--sfr',help='Log SFR of galaxy',type=float,default=None)
parser.add_argument('-logZ','--logZ',help='Log stellar metallicity of galaxy',type=float,default=None)
parser.add_argument('-z','--z',help='Redshift of galaxy',type=float,default=None)
parser.add_argument('-i','--i',help='Axis ratio b/a of galaxy',type=float,default=None)
parser.add_argument('-d2','--d2',help='Diffuse dust optical depth of galaxy',type=float,default=None)
parser.add_argument('-de','--de',help='Effective dust optical depth of galaxy',type=float,default=None)
parser.add_argument('-f1','--f1',help='Name of file with one to five columns with independent variable values',type=str,default=None)
parser.add_argument('-f2','--f2',help='Name of file with diffuse dust optical depths in order to get birth cloud dust optical depths',type=str,default=None)
parser.add_argument('-pl','--plot',help='Whether or not to plot dust attenuation curve',action='count',default=0)
parser.add_argument('-bv','--bivar',help='Whether or not to use bivariate model',action='count',default=0)
parser.add_argument('-eff','--effective',help='Whether to use effective dust attenuation (default is diffuse)',action='count',default=0)
parser.add_argument('-sa','--samples',help='Number of samples per galaxy desired for dependent variables',type=int,default=50)
parser.add_argument('-nm','--nummarg',help='Number of Prospector likelihood samples desired for marginalization purposes',type=int,default=20)
parser.add_argument('-mnp','--max_num_plot',help='Maximum number of plots that will be made in a run',type=int,default=10)
parser.add_argument('-dir','--img_dir',help='Image directory for created plots (does not have to be created beforehand) ',type=str,default='DustAttnCurves')
parser.add_argument('-inb','--img_name_base',help='Image name base (minus extension); each plot made will be numbered on top of the image name base',type=str,default=None)
return parser.parse_args(args=argv)
def regular_grid_interp_scipy(points, values, coords, *, fill_value=None):
"""Perform a linear interpolation in N-dimensions w a regular grid
The data must be defined on a filled regular grid, but the spacing may be
uneven in any of the dimensions.
This implementation uses the ``scipy.interpolate.RegularGridInterpolator`` class which, in turn, is
based on the implementation from <NAME>'s ``regulargrid``
package https://github.com/JohannesBuchner/regulargrid.
Args:
points: A list of vectors with shapes ``(m1,), ... (mn,)``. These
define the grid points in each dimension.
values: A tensor defining the values at each point in the grid
defined by ``points``. This must have the shape
``(m1, ... mn, ..., nout)``.
coords: A matrix defining the coordinates where the interpolation
should be evaluated. This must have the shape ``(ntest, ndim)``.
"""
rgi = RGIScipy(points,values,bounds_error=False,fill_value=fill_value)
return rgi(coords)
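# Added usage sketch (illustrative only; not part of the original module):
# linear interpolation of the plane z = x + y on a small 2-D grid is exact,
# so the two query points below return roughly [0.75, 1.0].
def _example_regular_grid_interp():
    points = [np.linspace(0.0, 1.0, 5), np.linspace(0.0, 1.0, 5)]
    values = points[0][:, None] + points[1][None, :]
    coords = np.array([[0.25, 0.5], [0.9, 0.1]])
    return regular_grid_interp_scipy(points, values, coords)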
def mass_completeness(zred):
"""Uses mass-completeness estimates from Tal+14, for FAST masses
then applied M_PROSP / M_FAST to estimate Prospector completeness
Credit: <NAME>
Parameters
----------
zred: 1-D Array of redshifts
Returns
-------
Minimum masses in ``log(M*/M_sun)`` for completeness at zred
"""
zref = np.array([0.65,1,1.5,2.1,3.0])
mcomp_prosp = np.array([8.71614882,9.07108637,9.63281923,9.79486727,10.15444536])
return np.interp(zred, zref, mcomp_prosp)
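# Added example (illustrative only): completeness limits at a few redshifts;
# np.interp clamps to the end-point masses outside the 0.65 < z < 3.0 anchors.
def _example_mass_completeness():
    return mass_completeness(np.array([0.3, 1.0, 2.5]))  # ~[8.72, 9.07, 9.95]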
def get_dust_attn_curve_d2(wave,n=0.0,d2=1.0):
""" Calculate diffuse dust attenuation curve
Parameters
----------
wave: Float or 1-D Array
Wavelengths (Angstroms) at which attenuation curve should be evaluated
n: Float
Slope parameter of Noll+09 dust attenuation parametrization--difference between true slope and that of Calzetti+00 curve, with positive values signifying shallower curves
d2: Float
Diffuse dust optical depth; also referred to as tau throughout this document
Returns
-------
Diffuse dust attenuation curve at given wavelength(s)
"""
Eb = 0.85 - 1.9*n
return noll(wave,tau_v=d2,delta=n,c_r=0.0,Ebump=Eb)
def get_dust_attn_curve_d1(wave,d1=1.0):
""" Calculate birth cloud dust attenuation curve
Parameters
----------
wave: Float or 1-D Array
Wavelengths (Angstroms) at which attenuation curve should be evaluated
d1: Float
Birth cloud dust optical depth
Returns
-------
Birth cloud dust attenuation curve at given wavelength(s); inverse-wavelength law from Charlot+Fall 00 assumed
"""
return d1*(wave/5500.0)**(-1)
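# Added sketch (illustrative only, not part of the original API): evaluate the
# diffuse (Noll+09 parametrization) and birth cloud (1/lambda) curves at a few
# rest-frame wavelengths for nominal parameter values.
def _example_attn_curves():
    wave = np.array([1500.0, 2500.0, 5500.0])  # Angstroms
    tau_diffuse = get_dust_attn_curve_d2(wave, n=-0.2, d2=1.0)
    tau_birth = get_dust_attn_curve_d1(wave, d1=0.5)  # 0.5 * (wave/5500)**-1
    return tau_birth, tau_diffuse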
def getMargSample(num=10,bv=1,eff=1):
""" Get samples from the Prospector likelihood distribution for proper marginalization
Parameters
----------
num: Integer
Number of marginal samples desired
bv: Boolean
Whether or not bivariate case for code is being used
eff: Boolean
Whether or not single-component dust attenuation model is being used
Returns
-------
Marg_samples: (5 x num) 2D array
Samples from the likelihood distribution to be used for marginalization
"""
fn = 'b%de%d.dat'%(bv,eff)
try:
stream = pkg_resources.resource_filename('duste',op.join('Marg',fn))
except:
stream = op.join('Marg',fn)
info = np.loadtxt(stream)
totnum = len(info[0])
inds = np.random.choice(totnum,num,replace=False)
return info[:,inds]
def getTraceInfo(trace, bivar=False):
""" Parse the hierarchical Bayesian model trace object in order to get all samples of the model parameters
Parameters
----------
trace: Trace object
bivar: Boolean
Whether or not the model is bivariate (two dependent variables) or not
Returns
-------
ngrid, taugrid: Multi-D Arrays
Posterior samples of values of the dependent variables (n and/or tau) at the grid points in the interpolation model (taugrid = None if univariate model)
log_width, log_width2: 1-D Arrays
Posterior samples of the log_width parameters (log_width2 = None if univariate), which is a measure of the natural spread in reality around the model values
rho: 1-D Array
Posterior samples of the correlation between the errors in ngrid and taugrid (None if univariate)
"""
ngrid_0, log_width_0 = np.array(getattr(trace.posterior,'ngrid')), np.array(getattr(trace.posterior,'log_width'))
sh = ngrid_0.shape
ngrid, log_width = ngrid_0.reshape(sh[0]*sh[1],*sh[2:]), log_width_0.reshape(sh[0]*sh[1])
if bivar:
taugrid_0, log_width2_0 = np.array(getattr(trace.posterior,'taugrid')), np.array(getattr(trace.posterior,'log_width2'))
taugrid, log_width2 = taugrid_0.reshape(sh[0]*sh[1],*sh[2:]), log_width2_0.reshape(sh[0]*sh[1])
rho_0 = np.array(getattr(trace.posterior,'rho'))
rho = rho_0.reshape(sh[0]*sh[1])
else:
taugrid, log_width2, rho = None, None, None
return ngrid, log_width, taugrid, log_width2, rho
def getModelSamplesI(xtup, indep_samp, ngrid, log_width, taugrid, log_width2, rho, numsamp=50, nlim=None, taulim=None, return_other=False):
""" Calculate samples of n and/or tau (the dependent variables of the model) at a given set of points
Parameters
----------
xtup: List of arrays
Values of grid points for independent variables in the model; use DustAttnCalc.getPostModelData() for easy generation
indep_samp: 2-D or 3-D array
Points at which to evaluate model; variables are differentiated at the outermost dimension; for example, in a 2-D case, each row is a different variable
ngrid, log_width, taugrid, log_width2, rho: Outputs of getTraceInfo
numsamp: Integer
Number of samples desired per galaxy
nlim, taulim: Two-element 1-D arrays
Limits for n and tau to keep sample values within reasonable bounds; see DustAttnCalc.make_prop_dict() for values to use; those bounds can be guaranteed only if the independent variables are within the correct bounds as well
return_other: Boolean
Whether or not to also return Gaussian width and bivariate correlation parameter values
Returns
-------
n_sim, tau_sim: 2-D or 3-D arrays (same dimension as indep_samp but different outer dimension size)
Samples of n and/or tau at the given points; tau_sim is None if univariate
width, width2, rho: 1-D Arrays
Samples of Gaussian width and bivariate correlation if desired
"""
npts = len(log_width)
if indep_samp.ndim == 2: indepI = indep_samp.T
else: indepI = indep_samp.reshape(len(indep_samp),np.prod(indep_samp.shape[1:])).T
inds = np.random.choice(npts,size=numsamp,replace=False)
sh = list(indep_samp[0].shape)
sh.insert(0,numsamp)
n_sim = np.empty(tuple(sh))
if taugrid is not None: tau_sim = np.empty(tuple(sh))
else: tau_sim = None
for i, ind in enumerate(inds):
ngrid_mod, width_mod = ngrid[ind], np.exp(log_width[ind])
r1, r2 = np.random.randn(*n_sim[i].shape), np.random.randn(*n_sim[i].shape)
interp_part = regular_grid_interp_scipy(xtup, ngrid_mod, indepI).reshape(n_sim[0].shape) # Direct model values (without adding a random component to represent the natural error in the model)
n_sim[i] = interp_part + width_mod * r1 # Simulated values include the direct interpolated model values plus a random component based on the width parameter
# Try to ensure that all samples of n are within the desired bounds, but ignore if there are direct model values (without adding a random component) that are considerably below or above the limits
if np.amin(interp_part)>=nlim[0]-0.1*width_mod and np.amax(interp_part)<=nlim[1]+0.1*width_mod:
ind_bad = np.where(np.logical_or(n_sim[i]<nlim[0],n_sim[i]>nlim[1]))
while len(ind_bad[0])>0:
r1[ind_bad] = np.random.randn(len(ind_bad[0]))
n_sim[i] = interp_part + width_mod * r1
ind_bad = np.where(np.logical_or(n_sim[i]<nlim[0],n_sim[i]>nlim[1]))
# Repeat the same process for tau if bivariate model
if taugrid is not None:
taugrid_mod, width2_mod, rho_mod = taugrid[ind], np.exp(log_width2[ind]), rho[ind]
interp_part = regular_grid_interp_scipy(xtup, taugrid_mod, indepI).reshape(tau_sim[0].shape)
tau_sim[i] = interp_part + width2_mod * (rho_mod*r1 + np.sqrt(1.0-rho_mod**2)*r2)
if np.amin(interp_part)>=taulim[0]-0.1*width2_mod and np.amax(interp_part)<=taulim[1]+0.1*width2_mod:
ind_bad = np.where(np.logical_or(tau_sim[i]<taulim[0],tau_sim[i]>taulim[1]))
while len(ind_bad[0])>0:
r2[ind_bad] = np.random.randn(len(ind_bad[0]))
tau_sim[i] = interp_part + width2_mod * (rho_mod*r1 + np.sqrt(1.0-rho_mod**2)*r2)
ind_bad = np.where(np.logical_or(tau_sim[i]<taulim[0],tau_sim[i]>taulim[1]))
if not return_other: return n_sim, tau_sim
if taugrid is not None: return n_sim, tau_sim, np.exp(log_width[inds]), np.exp(log_width2[inds]), rho[inds]
else: return n_sim, tau_sim, np.exp(log_width[inds]), None, None
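# Added minimal sketch (illustrative only; the helper name is hypothetical) of
# the redraw loop used above: draw mu + width * eps and redraw only the
# out-of-bounds entries until every sample falls in [lo, hi]. The original only
# enters this loop when the interpolated values are within (or only marginally
# outside) the requested bounds, which keeps it from running indefinitely.
def _truncated_draw_sketch(mu, width, lo, hi):
    eps = np.random.randn(*np.shape(mu))
    vals = mu + width * eps
    bad = np.where(np.logical_or(vals < lo, vals > hi))
    while len(bad[0]) > 0:
        eps[bad] = np.random.randn(len(bad[0]))
        vals = mu + width * eps
        bad = np.where(np.logical_or(vals < lo, vals > hi))
    return vals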
def plotDustAttn(nvals,tauvals,img_name,wvs,effective=False,label=None):
""" Plot either diffuse or effective dust attenuation curves
Parameters
----------
nvals, tauvals: 1-D Arrays
Samples of n and tau for 1 galaxy only
img_name: String
Name for image (can choose desired extension, but if vector graphics format, will need to remove the dpi argument)
wvs: 1-D Array
Array of wavelengths (in Angstroms) at which dust attenuation curve will be plotted
effective: Boolean
Whether or not effective dust (or diffuse dust if False) is the quantity in question
label: String
Text to help describe plot; optimally a list of independent variable values
"""
fig, ax = plt.subplots()
lnv, lwv = len(nvals), len(wvs)
attn_curve = np.empty((lnv,lwv))
for i in range(lnv):
attn_curve[i] = get_dust_attn_curve_d2(wvs,n=nvals[i],d2=tauvals[i])
attn_mean, attn_std = np.mean(attn_curve,axis=0), np.std(attn_curve,axis=0)
ax.plot(wvs,attn_mean,color='r',linestyle='-',linewidth=2)
ax.fill_between(wvs,attn_mean-attn_std,attn_mean+attn_std,color='b',alpha=0.1)
ax.set_xlabel(r'$\lambda$ ($\AA$)')
if effective: taustr = r"$\hat{\tau}_{\lambda,{\rm eff}}$"
else: taustr = r"$\hat{\tau}_{\lambda,2}$"
ax.set_ylabel(taustr)
ax.set_xlim(min(wvs),max(wvs))
if label is not None: ax.text(0.4,0.9,label,transform=ax.transAxes,fontsize='x-small')
fig.savefig(img_name,bbox_inches='tight',dpi=150)
def plotDust12(tau1,tau2,img_name,n,wvs,label=None):
""" Plot either diffuse or effective dust attenuation curves
Parameters
----------
tau1, tau2, n: 1-D Arrays
Samples of birth cloud dust optical depth (tau1), diffuse dust optical depth (tau2), and dust slope index (n) for 1 galaxy only
img_name: String
Name for image (can choose desired extension, but if vector graphics format, will need to remove the dpi argument)
wvs: 1-D Array
Array of wavelengths (in Angstroms) at which dust attenuation curve will be plotted
label: String
Text to help describe plot; optimally a list of independent variable values
"""
fig, ax = plt.subplots()
ltau, lwv = len(tau1), len(wvs)
attn_curve1, attn_curve2 = np.empty((ltau,lwv)),
|
np.empty((ltau,lwv))
|
numpy.empty
|
import datetime as dt
import numpy as np
import os
from nose.tools import assert_raises
from nose.plugins import skip
import pandas as pds
import pysat
from pysat.instruments import sw_kp, sw_f107
from pysat.instruments.methods import sw as sw_meth
class TestSWKp():
def setup(self):
"""Runs before every method to create a clean testing setup"""
# Load a test instrument
self.testInst = pysat.Instrument()
self.testInst.data = pds.DataFrame({'Kp': np.arange(0, 4, 1.0/3.0),
'ap_nan': np.full(shape=12, \
fill_value=np.nan),
'ap_inf': np.full(shape=12, \
fill_value=np.inf)},
index=[pysat.datetime(2009, 1, 1)
+ pds.DateOffset(hours=3*i)
for i in range(12)])
self.testInst.meta = pysat.Meta()
self.testInst.meta.__setitem__('Kp', {self.testInst.meta.fill_label:
np.nan})
self.testInst.meta.__setitem__('ap_nan', {self.testInst.meta.fill_label:
np.nan})
self.testInst.meta.__setitem__('ap_inf', {self.testInst.meta.fill_label:
np.inf})
# Load a test Metadata
self.testMeta = pysat.Meta()
def teardown(self):
"""Runs after every method to clean up previous testing."""
del self.testInst, self.testMeta
def test_convert_kp_to_ap(self):
""" Test conversion of Kp to ap"""
sw_kp.convert_3hr_kp_to_ap(self.testInst)
assert '3hr_ap' in self.testInst.data.columns
assert '3hr_ap' in self.testInst.meta.keys()
assert(self.testInst['3hr_ap'].min() >=
self.testInst.meta['3hr_ap'][self.testInst.meta.min_label])
assert(self.testInst['3hr_ap'].max() <=
self.testInst.meta['3hr_ap'][self.testInst.meta.max_label])
def test_convert_kp_to_ap_fill_val(self):
""" Test conversion of Kp to ap with fill values"""
# Set the first value to a fill value, then calculate ap
fill_label = self.testInst.meta.fill_label
self.testInst['Kp'][0] = self.testInst.meta['Kp'][fill_label]
sw_kp.convert_3hr_kp_to_ap(self.testInst)
# Test non-fill ap values
assert '3hr_ap' in self.testInst.data.columns
assert '3hr_ap' in self.testInst.meta.keys()
assert(self.testInst['3hr_ap'][1:].min() >=
self.testInst.meta['3hr_ap'][self.testInst.meta.min_label])
assert(self.testInst['3hr_ap'][1:].max() <=
self.testInst.meta['3hr_ap'][self.testInst.meta.max_label])
# Test the fill value in the data and metadata
assert np.isnan(self.testInst['3hr_ap'][0])
assert np.isnan(self.testInst.meta['3hr_ap'][fill_label])
del fill_label
def test_convert_kp_to_ap_bad_input(self):
""" Test conversion of Kp to ap with bad input"""
self.testInst.data.rename(columns={"Kp": "bad"}, inplace=True)
assert_raises(ValueError, sw_kp.convert_3hr_kp_to_ap, self.testInst)
def test_initialize_kp_metadata(self):
"""Test default Kp metadata initialization"""
sw_kp.initialize_kp_metadata(self.testInst.meta, 'Kp')
assert self.testInst.meta['Kp'][self.testInst.meta.units_label] == ''
assert self.testInst.meta['Kp'][self.testInst.meta.name_label] == 'Kp'
assert(self.testInst.meta['Kp'][self.testInst.meta.desc_label] ==
'Planetary K-index')
assert self.testInst.meta['Kp'][self.testInst.meta.plot_label] == 'Kp'
assert self.testInst.meta['Kp'][self.testInst.meta.axis_label] == 'Kp'
assert(self.testInst.meta['Kp'][self.testInst.meta.scale_label] ==
'linear')
assert self.testInst.meta['Kp'][self.testInst.meta.min_label] == 0
assert self.testInst.meta['Kp'][self.testInst.meta.max_label] == 9
assert self.testInst.meta['Kp'][self.testInst.meta.fill_label] == -1
def test_uninit_kp_metadata(self):
"""Test Kp metadata initialization with uninitialized Metadata"""
sw_kp.initialize_kp_metadata(self.testMeta, 'Kp')
assert self.testMeta['Kp'][self.testMeta.units_label] == ''
assert self.testMeta['Kp'][self.testMeta.name_label] == 'Kp'
assert(self.testMeta['Kp'][self.testMeta.desc_label] ==
'Planetary K-index')
assert self.testMeta['Kp'][self.testMeta.plot_label] == 'Kp'
assert self.testMeta['Kp'][self.testMeta.axis_label] == 'Kp'
assert self.testMeta['Kp'][self.testMeta.scale_label] == 'linear'
assert self.testMeta['Kp'][self.testMeta.min_label] == 0
assert self.testMeta['Kp'][self.testMeta.max_label] == 9
assert self.testMeta['Kp'][self.testMeta.fill_label] == -1
def test_fill_kp_metadata(self):
"""Test Kp metadata initialization with user-specified fill value"""
sw_kp.initialize_kp_metadata(self.testInst.meta, 'Kp', fill_val=666)
assert self.testInst.meta['Kp'][self.testInst.meta.fill_label] == 666
def test_long_name_kp_metadata(self):
"""Test Kp metadata initialization with a long name"""
dkey = 'high_lat_Kp'
sw_kp.initialize_kp_metadata(self.testInst.meta, dkey)
assert self.testInst.meta[dkey][self.testInst.meta.name_label] == dkey
assert(self.testInst.meta[dkey][self.testInst.meta.desc_label] ==
'Planetary K-index')
assert(self.testInst.meta[dkey][self.testInst.meta.plot_label] ==
'High lat Kp')
assert(self.testInst.meta[dkey][self.testInst.meta.axis_label] ==
'High lat Kp')
del dkey
def test_convert_ap_to_kp(self):
""" Test conversion of ap to Kp"""
sw_kp.convert_3hr_kp_to_ap(self.testInst)
kp_out, kp_meta = sw_meth.convert_ap_to_kp(self.testInst['3hr_ap'])
# Assert the original Kp and the Kp converted there and back are equal
assert all(abs(kp_out - self.testInst.data['Kp']) < 1.0e-4)
# Assert the converted Kp meta data exists and is reasonable
assert 'Kp' in kp_meta.keys()
assert(kp_meta['Kp'][kp_meta.fill_label] == -1)
del kp_out, kp_meta
def test_convert_ap_to_kp_middle(self):
""" Test conversion of ap to Kp where ap is not an exact Kp value"""
sw_kp.convert_3hr_kp_to_ap(self.testInst)
self.testInst['3hr_ap'][8] += 1
kp_out, kp_meta = sw_meth.convert_ap_to_kp(self.testInst['3hr_ap'])
# Assert the original Kp and the Kp converted there and back are equal
assert all(abs(kp_out - self.testInst.data['Kp']) < 1.0e-4)
# Assert the converted Kp meta data exists and is reasonable
assert 'Kp' in kp_meta.keys()
assert(kp_meta['Kp'][kp_meta.fill_label] == -1)
del kp_out, kp_meta
def test_convert_ap_to_kp_nan_input(self):
""" Test conversion of ap to Kp where ap is NaN"""
kp_out, kp_meta = sw_meth.convert_ap_to_kp(self.testInst['ap_nan'])
# Assert the fill value is returned when the ap input is NaN
assert all(kp_out == -1)
# Assert the converted Kp meta data exists and is reasonable
assert 'Kp' in kp_meta.keys()
assert(kp_meta['Kp'][kp_meta.fill_label] == -1)
del kp_out, kp_meta
def test_convert_ap_to_kp_inf_input(self):
""" Test conversion of ap to Kp where ap is Inf"""
kp_out, kp_meta = sw_meth.convert_ap_to_kp(self.testInst['ap_inf'])
# Assert the fill value is returned when the ap input is Inf
assert all(kp_out[1:] == -1)
# Assert the converted Kp meta data exists and is reasonable
assert 'Kp' in kp_meta.keys()
assert(kp_meta['Kp'][kp_meta.fill_label] == -1)
del kp_out, kp_meta
def test_convert_ap_to_kp_fill_val(self):
""" Test conversion of ap to Kp with fill values"""
# Set the first value to a fill value, then calculate ap
fill_label = self.testInst.meta.fill_label
self.testInst['Kp'][0] = self.testInst.meta['Kp'][fill_label]
sw_kp.convert_3hr_kp_to_ap(self.testInst)
kp_out, kp_meta = sw_meth.convert_ap_to_kp(self.testInst['3hr_ap'], \
fill_val=self.testInst.meta['Kp'][fill_label])
# Test non-fill ap values
assert all(abs(kp_out[1:] - self.testInst.data['Kp'][1:]) < 1.0e-4)
# Test the fill value in the data and metadata
assert np.isnan(kp_out[0])
assert np.isnan(kp_meta['Kp'][fill_label])
del fill_label, kp_out, kp_meta
class TestSwKpCombine():
def setup(self):
"""Runs before every method to create a clean testing setup"""
# Switch to test_data directory
self.saved_path = pysat.data_dir
pysat.utils.set_data_dir(pysat.test_data_path, store=False)
# Set combination testing input
self.test_day = pysat.datetime(2019, 3, 18)
self.combine = {"standard_inst": pysat.Instrument("sw", "kp", ""),
"recent_inst": pysat.Instrument("sw", "kp", "recent"),
"forecast_inst":
pysat.Instrument("sw", "kp", "forecast"),
"start": self.test_day - dt.timedelta(days=30),
"stop": self.test_day + dt.timedelta(days=3),
"fill_val": -1}
def teardown(self):
"""Runs after every method to clean up previous testing."""
pysat.utils.set_data_dir(self.saved_path)
del self.combine, self.test_day, self.saved_path
def test_combine_kp_none(self):
""" Test combine_kp failure when no input is provided"""
assert_raises(ValueError, sw_meth.combine_kp)
def test_combine_kp_one(self):
""" Test combine_kp failure when only one instrument is provided"""
# Load a test instrument
testInst = pysat.Instrument()
testInst.data = pds.DataFrame({'Kp': np.arange(0, 4, 1.0/3.0)},
index=[pysat.datetime(2009, 1, 1)
+ pds.DateOffset(hours=3*i)
for i in range(12)])
testInst.meta = pysat.Meta()
testInst.meta['Kp'] = {testInst.meta.fill_label: np.nan}
combo_in = {"standard_inst": testInst}
assert_raises(ValueError, sw_meth.combine_kp, combo_in)
del combo_in, testInst
def test_combine_kp_no_time(self):
"""Test combine_kp failure when no times are provided"""
combo_in = {kk: self.combine[kk] for kk in
['standard_inst', 'recent_inst', 'forecast_inst']}
assert_raises(ValueError, sw_meth.combine_kp, combo_in)
del combo_in
def test_combine_kp_no_data(self):
"""Test combine_kp when no data is present for specified times"""
combo_in = {kk: self.combine['forecast_inst'] for kk in
['standard_inst', 'recent_inst', 'forecast_inst']}
combo_in['start'] = pysat.datetime(2014, 2, 19)
combo_in['stop'] = pysat.datetime(2014, 2, 24)
kp_inst = sw_meth.combine_kp(**combo_in)
assert kp_inst.data.isnull().all()["Kp"]
del combo_in, kp_inst
def test_combine_kp_inst_time(self):
"""Test combine_kp when times are provided through the instruments"""
combo_in = {kk: self.combine[kk] for kk in
['standard_inst', 'recent_inst', 'forecast_inst']}
combo_in['standard_inst'].load(date=self.combine['start'])
combo_in['recent_inst'].load(date=self.test_day)
combo_in['forecast_inst'].load(date=self.test_day)
combo_in['stop'] = combo_in['forecast_inst'].index[-1]
kp_inst = sw_meth.combine_kp(**combo_in)
assert kp_inst.index[0] >= self.combine['start']
# kp_inst contains times up to 21:00:00, combine['stop'] is midnight
assert kp_inst.index[-1].date() <= self.combine['stop'].date()
assert len(kp_inst.data.columns) == 1
assert kp_inst.data.columns[0] == 'Kp'
assert np.isnan(kp_inst.meta['Kp'][kp_inst.meta.fill_label])
assert len(kp_inst['Kp'][
|
np.isnan(kp_inst['Kp'])
|
numpy.isnan
|
from __future__ import absolute_import, division, print_function
import numpy as np
from functools import partial, wraps
from math import factorial
from toolz import compose
from .core import _concatenate2, Array, atop, sqrt, elemwise
from .slicing import insert_many
from .numpy_compat import divide
from ..core import flatten
from . import chunk
from ..utils import ignoring, getargspec
def reduction(x, chunk, aggregate, axis=None, keepdims=None, dtype=None):
""" General version of reductions
>>> reduction(my_array, np.sum, np.sum, axis=0, keepdims=False) # doctest: +SKIP
"""
if axis is None:
axis = tuple(range(x.ndim))
if isinstance(axis, int):
axis = (axis,)
axis = tuple(i if i >= 0 else x.ndim + i for i in axis)
if dtype and 'dtype' in getargspec(chunk).args:
chunk = partial(chunk, dtype=dtype)
if dtype and 'dtype' in getargspec(aggregate).args:
aggregate = partial(aggregate, dtype=dtype)
chunk2 = partial(chunk, axis=axis, keepdims=True)
aggregate2 = partial(aggregate, axis=axis, keepdims=keepdims)
inds = tuple(range(x.ndim))
tmp = atop(chunk2, inds, x, inds)
inds2 = tuple(i for i in inds if i not in axis)
result = atop(compose(aggregate2, partial(_concatenate2, axes=axis)),
inds2, tmp, inds, dtype=dtype)
if keepdims:
dsk = result.dask.copy()
for k in flatten(result._keys()):
k2 = (k[0],) + insert_many(k[1:], axis, 0)
dsk[k2] = dsk.pop(k)
chunks = insert_many(result.chunks, axis, [1])
return Array(dsk, result.name, chunks=chunks, dtype=dtype)
else:
return result
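# Added plain-numpy sketch (illustrative only) of the chunk/aggregate pattern
# implemented above: apply `chunk` per block with keepdims=True, concatenate
# the partial results along the reduced axis, then apply `aggregate` once.
def _reduction_sketch(blocks, chunk=np.sum, aggregate=np.sum, axis=0):
    partials = [chunk(b, axis=axis, keepdims=True) for b in blocks]
    return aggregate(np.concatenate(partials, axis=axis), axis=axis)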
@wraps(chunk.sum)
def sum(a, axis=None, dtype=None, keepdims=False):
if dtype is not None:
dt = dtype
elif a._dtype is not None:
dt =
|
np.empty((1,), dtype=a._dtype)
|
numpy.empty
|
from __future__ import division, print_function
import copy
import fnmatch
import os
import subprocess
import wave
import struct
import hashlib
import h5py
from copy import deepcopy
from math import ceil
from numpy.fft import fftshift, ifftshift
import numpy as np
from scipy.io.wavfile import read as read_wavfile
from scipy.fftpack import fft, ifft, fftfreq, fft2, ifft2, dct
from scipy.signal import resample, firwin, filtfilt
from scipy.linalg import inv, toeplitz
from scipy.optimize import leastsq
import matplotlib.pyplot as plt
import matplotlib.cm as cmap
import matplotlib.colors as pltcolors
import matplotlib.mlab as mlab
import colorsys
from soundsig.signal import lowpass_filter, gaussian_window, correlation_function
from soundsig.timefreq import gaussian_stft
from soundsig.detect_peaks import detect_peaks
class WavFile():
""" Class for representing a sound and writing it to a .wav file """
def __init__(self, file_name=None, log_spectrogram=True):
self.log_spectrogram = log_spectrogram
if file_name is None:
self.sample_depth = 2 # in bytes
self.sample_rate = 44100.0 # in Hz
self.data = None
self.num_channels = 1
else:
wr = wave.open(file_name, 'r')
self.num_channels = wr.getnchannels()
self.sample_depth = wr.getsampwidth()
wr.close()
self.sample_rate, data = read_wavfile(file_name)
# If stereo make mono
if self.num_channels == 1:
self.data = data
else:
self.data = data.mean(axis=1)
self.analyzed = False
def to_wav(self, output_file, normalize=False, max_amplitude=32767.0):
wf = wave.open(output_file, 'w')
wf.setparams( (self.num_channels, self.sample_depth, self.sample_rate, len(self.data), 'NONE', 'not compressed') )
# normalize the sample
if normalize:
nsound = ((self.data / np.abs(self.data).max())*max_amplitude).astype('int')
else:
nsound = self.data
#print 'nsound.min=%d, max=%d' % (nsound.min(), nsound.max())
hex_sound = [struct.pack('h', x) for x in nsound]
wf.writeframes(b''.join(hex_sound))
wf.close()
def analyze(self, min_freq=0, max_freq=None, spec_sample_rate=1000.0, freq_spacing=125.0, envelope_cutoff_freq=200.0, noise_level_db=80, rectify=True, cmplx=False):
if self.analyzed:
return
self.data_t = np.arange(0.0, len(self.data), 1.0) / self.sample_rate
#compute the temporal envelope
self.envelope = temporal_envelope(self.data, self.sample_rate, envelope_cutoff_freq)
#compute log power spectrum
fftx = fft(self.data)
ps_f = fftfreq(len(self.data), d=(1.0 / self.sample_rate))
if max_freq is None:
findx = (ps_f > min_freq) & (ps_f < np.inf)
else:
findx = (ps_f > min_freq) & (ps_f < max_freq)
self.power_spectrum = np.log10(np.abs(fftx[findx]))
self.power_spectrum_f = ps_f[findx]
#estimate fundamental frequency from log power spectrum in the simplest way possible
ps = np.abs(fftx[findx])
peak_index = ps.argmax()
try:
self.fundamental_freq = self.power_spectrum_f[peak_index]
except IndexError:
print('Could not identify fundamental frequency!')
self.fundamental_freq = 0.0
#compute log spectrogram
t,f,spec,spec_rms = spectrogram(self.data, self.sample_rate, spec_sample_rate=spec_sample_rate,
freq_spacing=freq_spacing, min_freq=min_freq, max_freq=max_freq)
self.spectrogram_t = t
self.spectrogram_f = f
self.spectrogram = spec
self.spectrogram_rms = spec_rms
self.analyzed = True
def reanalyze(self, min_freq=0, max_freq=None, spec_sample_rate=1000.0, freq_spacing=25.0, envelope_cutoff_freq=200.0, noise_level_db=80, rectify=True, cmplx=False):
self.analyzed = False
return self.analyze(min_freq, max_freq, spec_sample_rate, freq_spacing, envelope_cutoff_freq, noise_level_db, rectify, cmplx)
def plot(self, fig=None, show_envelope=True, min_freq=0.0, max_freq=10000.0, colormap=cmap.gist_yarg, noise_level_db=80,
start_time=0, end_time=np.inf):
self.analyze(min_freq=min_freq, max_freq=max_freq, noise_level_db=noise_level_db)
if show_envelope:
spw_size = 15
spec_size = 35
else:
spw_size = 25
spec_size = 75
raw_ti = (self.data_t > start_time) & (self.data_t < end_time)
if fig is None:
fig = plt.figure()
gs = plt.GridSpec(100, 1)
ax = fig.add_subplot(gs[:spw_size])
plt.plot(self.data_t[raw_ti], self.data[raw_ti], 'k-')
plt.axis('tight')
plt.ylabel('Sound Pressure')
s = (spw_size+5)
e = s + spec_size
ax = fig.add_subplot(gs[s:e])
spec_ti = (self.spectrogram_t > start_time) & (self.spectrogram_t < end_time)
plot_spectrogram(self.spectrogram_t[spec_ti], self.spectrogram_f, self.spectrogram[:, spec_ti], ax=ax, ticks=True, colormap=colormap, colorbar=False)
if show_envelope:
ax = fig.add_subplot(gs[(e+5):95])
plt.plot(self.spectrogram_t, self.spectrogram_rms, 'g-')
plt.xlabel('Time (s)')
plt.ylabel('Envelope')
plt.axis('tight')
class BioSound(object):
""" Class for representing a communication sound using multiple feature spaces"""
def __init__(self, soundWave=np.array(0.0), fs=np.array(0.0), emitter='Unknown', calltype = 'U' ):
# Note that all the fields are numpy arrays for saving to h5 files.
self.sound = soundWave # sound pressure waveform
self.hashid = np.string_(hashlib.md5(np.array_str(soundWave).encode('utf-8')).hexdigest())
self.samprate = float(fs) if isinstance(fs,int) else fs # sampling rate
self.emitter = np.string_(emitter) # string for id of emitter
self.type = np.string_(calltype) # string for call type
self.spectro = np.asarray([]) # Log spectrogram
self.to = np.asarray([]) # Time scale for spectrogram
self.fo = np.asarray([]) # Frequency scale for spectrogram
self.mps = np.asarray([]) # Modulation Power Spectrum
self.wf = np.asarray([]) # Spectral modulations
self.wt = np.asarray([]) # Temporal modulations
self.f0 = np.asarray([]) # time varying fundamental
self.f0_2 = np.asarray([]) # time varying fundamental of second voice
self.F1 = np.asarray([]) # time varying formant 1
self.F2 = np.asarray([]) # time varying formant 2
self.F3 = np.asarray([]) # time varying formant 3
self.fund = np.asarray([]) # Average fundamental
self.sal = np.asarray([]) # time varying saliency
self.meansal = np.asarray([]) # mean saliency
self.fund2 = np.asarray([]) # Average fundamental of 2nd peak
self.voice2percent = np.asarray([]) # Average percent of presence of second peak
self.maxfund = np.asarray([])
self.minfund = np.asarray([])
self.cvfund = np.asarray([])
self.meanspect = np.asarray([])
self.stdspect = np.asarray([])
self.skewspect = np.asarray([])
self.kurtosisspect = np.asarray([])
self.entropyspect = np.asarray([])
self.q1 =
|
np.asarray([])
|
numpy.asarray
|
import os
import cv2
import glob
import scipy
import torch
import random
import numpy as np
import torchvision.transforms.functional as F
from torch.utils.data import DataLoader
from PIL import Image
from scipy.misc import imread
from skimage.feature import canny
from skimage.color import rgb2gray, gray2rgb
from .utils import create_mask
import src.region_fill as rf
class Dataset(torch.utils.data.Dataset):
def __init__(self, config, flist, edge_flist, mask_flist, augment=True, training=True):
super(Dataset, self).__init__()
self.augment = augment
self.training = training
self.flo = config.FLO
self.norm = config.NORM
self.data = self.load_flist(flist, self.flo)
self.edge_data = self.load_flist(edge_flist, 0)
self.mask_data = self.load_flist(mask_flist, 0)
self.input_size = config.INPUT_SIZE
self.sigma = config.SIGMA
self.edge = config.EDGE
self.mask = config.MASK
self.nms = config.NMS
# in test mode, there's a one-to-one relationship between mask and image
# masks are loaded non-randomly
if config.MODE == 2:
self.mask = 6
def __len__(self):
return len(self.data)
def __getitem__(self, index):
try:
item = self.load_item(index)
except:
print('loading error: ' + self.data[index])
item = self.load_item(0)
return item
def load_name(self, index):
name = self.data[index]
return os.path.basename(name)
def load_item(self, index):
size = self.input_size
factor = 1.
if self.flo == 0:
# load image
img = imread(self.data[index])
# gray to rgb
if len(img.shape) < 3:
img = gray2rgb(img)
# resize/crop if needed
if size != 0:
img = self.resize(img, size[0], size[1])
# create grayscale image
img_gray = rgb2gray(img)
# load mask
mask = self.load_mask(img, index)
edge = self.load_edge(img_gray, index, mask)
img_filled = img
else:
img = self.readFlow(self.data[index])
# resize/crop if needed
if size != 0:
img = self.flow_tf(img, [size[0], size[1]])
img_gray = (img[:, :, 0] ** 2 + img[:, :, 1] ** 2) ** 0.5
if self.norm == 1:
# normalization
# factor = (np.abs(img[:, :, 0]).max() ** 2 + np.abs(img[:, :, 1]).max() ** 2) ** 0.5
factor = img_gray.max()
img /= factor
# load mask
mask = self.load_mask(img, index)
edge = self.load_edge(img_gray, index, mask)
img_gray = img_gray / img_gray.max()
img_filled = np.zeros(img.shape)
img_filled[:, :, 0] = rf.regionfill(img[:, :, 0], mask)
img_filled[:, :, 1] = rf.regionfill(img[:, :, 1], mask)
# augment data
if self.augment and np.random.binomial(1, 0.5) > 0:
img = img[:, ::-1, ...].copy()
img_filled = img_filled[:, ::-1, ...].copy()
img_gray = img_gray[:, ::-1, ...]
edge = edge[:, ::-1, ...]
mask = mask[:, ::-1, ...]
return self.to_tensor(img), self.to_tensor(img_filled), self.to_tensor(img_gray), self.to_tensor(edge), self.to_tensor(mask), factor
def load_edge(self, img, index, mask):
sigma = self.sigma
# in test mode images are masked (with masked regions),
# the 'mask' parameter prevents canny from detecting edges in the masked regions
mask = None if self.training else (1 - mask / 255).astype(np.bool)
# canny
if self.edge == 1:
# no edge
if sigma == -1:
return np.zeros(img.shape).astype(np.float)
# random sigma
if sigma == 0:
sigma = random.randint(1, 4)
return canny(img, sigma=sigma, mask=mask).astype(np.float)
# external
else:
imgh, imgw = img.shape[0:2]
edge = imread(self.edge_data[index])
edge = self.resize(edge, imgh, imgw)
# non-max suppression
if self.nms == 1:
edge = edge * canny(img, sigma=sigma, mask=mask)
return edge
def load_mask(self, img, index):
imgh, imgw = img.shape[0:2]
mask_type = self.mask
# external + random block
if mask_type == 4:
mask_type = 1 if np.random.binomial(1, 0.5) == 1 else 3
# external + random block + half
elif mask_type == 5:
mask_type = np.random.randint(1, 4)
# random block
if mask_type == 1:
return create_mask(imgw, imgh, imgw // 2, imgh // 2)
# half
if mask_type == 2:
# randomly choose right or left
return create_mask(imgw, imgh, imgw // 2, imgh, 0 if random.random() < 0.5 else imgw // 2, 0)
# external
if mask_type == 3:
mask_index = random.randint(0, len(self.mask_data) - 1)
mask = imread(self.mask_data[mask_index])
mask = self.resize(mask, imgh, imgw, centerCrop=False)
mask = (mask > 0).astype(np.uint8) * 255 # threshold due to interpolation
return mask
# test mode: load mask non random
if mask_type == 6:
mask = imread(self.mask_data[index])
mask = self.resize(mask, imgh, imgw, centerCrop=False)
mask = rgb2gray(mask)
mask = (mask > 0).astype(np.uint8) * 255
return mask
def to_tensor(self, img):
if (len(img.shape) == 3 and img.shape[2] == 2):
return F.to_tensor(img).float()
img = Image.fromarray(img)
img_t = F.to_tensor(img).float()
return img_t
def resize(self, img, height, width, centerCrop=True):
imgh, imgw = img.shape[0:2]
if centerCrop and imgh != imgw:
# center crop
side = np.minimum(imgh, imgw)
j = (imgh - side) // 2
i = (imgw - side) // 2
img = img[j:j + side, i:i + side, ...]
img = scipy.misc.imresize(img, [height, width])
return img
def load_flist(self, flist, flo=0):
if isinstance(flist, list):
return flist
# flist: image file path, image directory path, text file flist path
if flo == 0:
if isinstance(flist, str):
if os.path.isdir(flist):
flist = list(glob.glob(flist + '/*.jpg')) + list(glob.glob(flist + '/*.png'))
flist.sort()
return flist
if os.path.isfile(flist):
try:
return np.genfromtxt(flist, dtype=np.str, encoding='utf-8')
except:
return [flist]
else:
if isinstance(flist, str):
if os.path.isdir(flist):
flist = list(glob.glob(flist + '/*.flo'))
flist.sort()
return flist
if os.path.isfile(flist):
try:
return np.genfromtxt(flist, dtype=np.str, encoding='utf-8')
except:
return [flist]
return []
def create_iterator(self, batch_size):
while True:
sample_loader = DataLoader(
dataset=self,
batch_size=batch_size,
drop_last=True
)
for item in sample_loader:
yield item
def readFlow(self, fn):
with open(fn, 'rb') as f:
magic = np.fromfile(f, np.float32, count=1)
if 202021.25 != magic:
print('Magic number incorrect. Invalid .flo file')
return None
else:
w = np.fromfile(f, np.int32, count=1)
h = np.fromfile(f, np.int32, count=1)
data = np.fromfile(f, np.float32, count=2 * int(w) * int(h))
# Reshape data into 3D array (columns, rows, bands)
# The reshape here is for visualization, the original code is (w,h,2)
return np.resize(data, (int(h), int(w), 2))
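# .flo layout handled above: a float32 magic value (202021.25), followed by
# int32 width and height, then width*height interleaved (u, v) float32 pairs,
# reshaped here to (height, width, 2).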
def flow_to_image(self, flow):
UNKNOWN_FLOW_THRESH = 1e7
u = flow[:, :, 0]
v = flow[:, :, 1]
maxu = -999.
maxv = -999.
minu = 999.
minv = 999.
idxUnknow = (abs(u) > UNKNOWN_FLOW_THRESH) | (abs(v) > UNKNOWN_FLOW_THRESH)
u[idxUnknow] = 0
v[idxUnknow] = 0
maxu = max(maxu, np.max(u))
minu = min(minu, np.min(u))
maxv = max(maxv, np.max(v))
minv = min(minv, np.min(v))
rad = np.sqrt(u ** 2 + v ** 2)
maxrad = max(-1, np.max(rad))
# print("max flow: %.4f\nflow range:\nu = %.3f .. %.3f\nv = %.3f .. %.3f" % (maxrad, minu,maxu, minv, maxv))
u = u/(maxrad + np.finfo(float).eps)
v = v/(maxrad + np.finfo(float).eps)
img = self.compute_color(u, v)
idx = np.repeat(idxUnknow[:, :, np.newaxis], 3, axis=2)
img[idx] = 0
return np.uint8(img)
def compute_color(self, u, v):
"""
compute optical flow color map
:param u: optical flow horizontal map
:param v: optical flow vertical map
:return: optical flow in color code
"""
[h, w] = u.shape
img = np.zeros([h, w, 3])
nanIdx = np.isnan(u) | np.isnan(v)
u[nanIdx] = 0
v[nanIdx] = 0
colorwheel = self.make_color_wheel()
ncols = np.size(colorwheel, 0)
rad = np.sqrt(u**2+v**2)
a = np.arctan2(-v, -u) / np.pi
fk = (a+1) / 2 * (ncols - 1) + 1
k0 = np.floor(fk).astype(int)
k1 = k0 + 1
k1[k1 == ncols+1] = 1
f = fk - k0
for i in range(0, np.size(colorwheel,1)):
tmp = colorwheel[:, i]
col0 = tmp[k0-1] / 255
col1 = tmp[k1-1] / 255
col = (1-f) * col0 + f * col1
idx = rad <= 1
col[idx] = 1-rad[idx]*(1-col[idx])
notidx = np.logical_not(idx)
col[notidx] *= 0.75
img[:, :, i] = np.uint8(np.floor(255 * col*(1-nanIdx)))
return img
def make_color_wheel(self):
"""
Generate color wheel according Middlebury color code
:return: Color wheel
"""
RY = 15
YG = 6
GC = 4
CB = 11
BM = 13
MR = 6
ncols = RY + YG + GC + CB + BM + MR
colorwheel = np.zeros([ncols, 3])
col = 0
# RY
colorwheel[0:RY, 0] = 255
colorwheel[0:RY, 1] = np.transpose(np.floor(255*np.arange(0, RY) / RY))
col += RY
# YG
colorwheel[col:col+YG, 0] = 255 - np.transpose(np.floor(255*np.arange(0, YG) / YG))
colorwheel[col:col+YG, 1] = 255
col += YG
# GC
colorwheel[col:col+GC, 1] = 255
colorwheel[col:col+GC, 2] = np.transpose(np.floor(255*np.arange(0, GC) / GC))
col += GC
# CB
colorwheel[col:col+CB, 1] = 255 - np.transpose(np.floor(255*np.arange(0, CB) / CB))
colorwheel[col:col+CB, 2] = 255
col += CB
# BM
colorwheel[col:col+BM, 2] = 255
colorwheel[col:col+BM, 0] = np.transpose(np.floor(255*np.arange(0, BM) / BM))
col += BM
# MR
colorwheel[col:col+MR, 2] = 255 - np.transpose(np.floor(255 * np.arange(0, MR) / MR))
colorwheel[col:col+MR, 0] = 255
return colorwheel
#!/usr/bin/env python
# coding: utf-8
from mat4py import loadmat
import numpy as np
import tensorflow as tf
import tensorflow_model_optimization as tfmot
import matplotlib.pyplot as plt
import json
import tempfile
from sklearn.metrics import mean_squared_error
from tqdm import tqdm
import matplotlib.ticker as tick
print(tf.__version__)
# # Data pre-processing
# In[ ]:
def downscale(data, resolution):
# Average 1-minute samples into `resolution`-minute bins: (n, 3, 1440) -> (n, 3, 1440/resolution), e.g. (n, 3, 144) at 10 min resolution
# Use ~12 input timesteps to predict 2-5 timesteps ahead (use ~2 hours of history to predict 20-50 mins)
return np.mean(data.reshape(data.shape[0], data.shape[1], int(data.shape[2]/resolution), resolution), axis=3)
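# Quick shape check (illustrative only; synthetic data, not part of the pipeline):
# one day of 1-minute data for 2 subjects and 3 signals, averaged into 10-minute bins.
_demo = np.random.rand(2, 3, 1440)
print(downscale(_demo, 10).shape)  # -> (2, 3, 144)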
def process_data(aligned_data, time_horizon, ph):
# 10 min resolution.. breaks each (3,144) trajectory into (144-ph-time_horizon,3,time_horizon) samples
data = np.zeros((aligned_data.shape[0] * (aligned_data.shape[2]-ph-time_horizon), aligned_data.shape[1], time_horizon))
label = np.zeros((aligned_data.shape[0] * (aligned_data.shape[2]-ph-time_horizon), ph))
count = 0
for i in range(aligned_data.shape[0]): # for each sample
for j in range(aligned_data.shape[2]-ph-time_horizon): # TH length sliding window across trajectory
data[count] = aligned_data[i,:,j:j+time_horizon]
label[count] = aligned_data[i,0,j+time_horizon:j+time_horizon+ph]
count+=1
return data, label
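# Sliding-window sketch (illustrative only; synthetic data): each (3, 144) trajectory
# with time_horizon=12 and ph=6 yields 144-12-6 = 126 (data, label) pairs,
# data shaped (3, 12) and labels shaped (6,).
_d, _l = process_data(np.random.rand(2, 3, 144), time_horizon=12, ph=6)
print(_d.shape, _l.shape)  # -> (252, 3, 12) (252, 6)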
def load_mpc(time_horizon, ph, resolution, batch): # int, int, int, bool
# Load train data
g = np.loadtxt('CGM_prediction_data/glucose_readings_train.csv', delimiter=',')
c = np.loadtxt('CGM_prediction_data/meals_carbs_train.csv', delimiter=',')
it = np.loadtxt('CGM_prediction_data/insulin_therapy_train.csv', delimiter=',')
# Load test data
g_ = np.loadtxt('CGM_prediction_data/glucose_readings_test.csv', delimiter=',')
c_ = np.loadtxt('CGM_prediction_data/meals_carbs_test.csv', delimiter=',')
it_ = np.loadtxt('CGM_prediction_data/insulin_therapy_test.csv', delimiter=',')
# Time align train & test data
aligned_train_data = downscale(np.array([(g[i,:], c[i,:], it[i,:]) for i in range(g.shape[0])]), resolution)
aligned_test_data = downscale(np.array([(g_[i,:], c_[i,:], it_[i,:]) for i in range(g_.shape[0])]), resolution)
print(aligned_train_data.shape)
# Break time aligned data into train & test samples
if batch:
train_data, train_label = process_data(aligned_train_data, time_horizon, ph)
test_data, test_label = process_data(aligned_test_data, time_horizon, ph)
return np.swapaxes(train_data,1,2), train_label, np.swapaxes(test_data,1,2), test_label
else:
return aligned_train_data, aligned_test_data
def load_uva(time_horizon, ph, resolution, batch):
data = loadmat('uva-padova-data/sim_results.mat')
train_data = np.zeros((231,3,1440))
test_data = np.zeros((99,3,1440))
# Separate train and test sets.. last 3 records of each patient will be used for testing
count_train = 0
count_test = 0
for i in range(33):
for j in range(10):
if j>=7:
test_data[count_test,0,:] = np.asarray(data['data']['results']['sensor'][count_test+count_train]['signals']['values']).flatten()[:1440]
test_data[count_test,1,:] = np.asarray(data['data']['results']['CHO'][count_test+count_train]['signals']['values']).flatten()[:1440]
test_data[count_test,2,:] = np.asarray(data['data']['results']['BOLUS'][count_test+count_train]['signals']['values']).flatten()[:1440] + np.asarray(data['data']['results']['BASAL'][i]['signals']['values']).flatten()[:1440]
count_test+=1
else:
train_data[count_train,0,:] = np.asarray(data['data']['results']['sensor'][count_test+count_train]['signals']['values']).flatten()[:1440]
train_data[count_train,1,:] = np.asarray(data['data']['results']['CHO'][count_test+count_train]['signals']['values']).flatten()[:1440]
train_data[count_train,2,:] = np.asarray(data['data']['results']['BOLUS'][count_test+count_train]['signals']['values']).flatten()[:1440] + np.asarray(data['data']['results']['BASAL'][i]['signals']['values']).flatten()[:1440]
count_train+=1
train_data = downscale(train_data, resolution)
test_data = downscale(test_data, resolution)
if batch:
train_data, train_label = process_data(train_data, time_horizon, ph)
test_data, test_label = process_data(test_data, time_horizon, ph)
return np.swapaxes(train_data,1,2)*0.0555, train_label*0.0555, np.swapaxes(test_data,1,2)*0.0555, test_label*0.0555 # convert to mmol/L
else:
return train_data, test_data
# # Monte Carlo dropout
# In[ ]:
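# Assumption about the saved models: Monte Carlo dropout only produces a different
# prediction on every forward pass if the dropout layers stay active at inference
# time (e.g. Dropout(...) called with training=True); otherwise the `samples`
# predictions gathered below are identical.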
def sampling(model, test_data, test_label, samples):
model.compile(optimizer='adam', loss='mean_squared_error', metrics=[tf.keras.metrics.RootMeanSquaredError()])
mc_ensemble = np.zeros(test_label.shape)
mc_mse = np.zeros(samples)
predictions = np.zeros((samples, test_label.shape[0], test_label.shape[1])) # (mc_samples, test samples, ph)
for j in tqdm(range(0,samples)):
sample_predictions = model.predict(test_data, batch_size=1000)
predictions[j,:,:] = sample_predictions
mc_mse[j] = mean_squared_error(sample_predictions, test_label)
mc_ensemble = [a+b for a,b in zip(mc_ensemble, sample_predictions)]
return mean_squared_error([i/samples for i in mc_ensemble], test_label), mc_mse, predictions
def credible_interval(predictions, alpha):
# Sort predictions from smallest to largest
predictions = np.sort(predictions, axis=0)
# print(predictions.shape)
# Upper & lower bounds
ci = np.zeros((predictions.shape[1], 2))
# print(ci.shape)
# print(np.floor(predictions.shape[0]*(alpha/2)))
ci[:,0] = predictions[int(np.floor(predictions.shape[0]*(alpha/2))),:]
ci[:,1] = predictions[int(-np.floor(predictions.shape[0]*(alpha/2))),:]
return ci
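# Sketch (illustrative only): the bounds are the empirical alpha/2 and 1-alpha/2
# quantiles taken across the MC samples for each predicted time step.
_mc = np.random.randn(100, 6)            # (MC samples, prediction horizon)
_ci = credible_interval(_mc, alpha=0.1)  # ~90% interval
print(_ci.shape)                         # -> (6, 2): [lower, upper] per step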
# # MPC Results
# ## Bidirectional LSTM
# In[ ]:
PH = 6
TIME_HORIZON = 12
RESOLUTION = 10
SAMPLES = 100
BATCH = True
# Load data & model
_, __, test_data, test_label = load_mpc(TIME_HORIZON, PH, RESOLUTION, BATCH)
bilstm = tf.keras.models.load_model('../saved/postgraduate_dissertation/saved_models/mpc_guided_bilstm.h5', compile=False)
bilstm_ensemble, bilstm_mse, bilstm_pred = sampling(bilstm, test_data, test_label, SAMPLES)
# Construct CI for 10 min prediction
fig, axes = plt.subplots(2,3)
plt.rcParams["legend.loc"] = 'upper left'
plt.rcParams["figure.figsize"] = (20,10)
bilstm_CI_10mins_90percent = credible_interval(bilstm_pred[:,:,0],alpha=0.1)
bilstm_CI_10mins_98percent = credible_interval(bilstm_pred[:,:,0],alpha=0.02)
axes[0,0].scatter(np.arange(144-TIME_HORIZON-PH)*10, test_label[:144-TIME_HORIZON-PH,0], s=5, c='r', marker='o', label='test label', zorder=10)
axes[0,0].fill_between(np.arange(144-TIME_HORIZON-PH)*10, bilstm_CI_10mins_90percent[:144-TIME_HORIZON-PH,1], bilstm_CI_10mins_90percent[:144-TIME_HORIZON-PH,0], color='k', label='90% CI', zorder=5)
axes[0,0].fill_between(np.arange(144-TIME_HORIZON-PH)*10, bilstm_CI_10mins_98percent[:144-TIME_HORIZON-PH,1],bilstm_CI_10mins_98percent[:144-TIME_HORIZON-PH,0], color='m', label='98% CI', zorder=0)
axes[0,0].set_xlabel('Minutes')
axes[0,0].set_ylabel('Predicted BG (mmol/L)')
axes[0,0].title.set_text('10 minute prediction')
axes[0,0].legend()
# Construct CI for 20 min prediction
bilstm_CI_20mins_90percent = credible_interval(bilstm_pred[:,:,1],alpha=0.1)
bilstm_CI_20mins_98percent = credible_interval(bilstm_pred[:,:,1],alpha=0.02)
axes[0,1].scatter(np.arange(144-TIME_HORIZON-PH)*10, test_label[:144-TIME_HORIZON-PH,1], s=5, c='r', marker='o', label='test label', zorder=10)
axes[0,1].fill_between(np.arange(144-TIME_HORIZON-PH)*10, bilstm_CI_20mins_90percent[:144-TIME_HORIZON-PH,1], bilstm_CI_20mins_90percent[:144-TIME_HORIZON-PH,0], color='k', label='90% CI', zorder=5)
axes[0,1].fill_between(np.arange(144-TIME_HORIZON-PH)*10, bilstm_CI_20mins_98percent[:144-TIME_HORIZON-PH,1],bilstm_CI_20mins_98percent[:144-TIME_HORIZON-PH,0], color='m', label='98% CI', zorder=0)
axes[0,1].set_xlabel('Minutes')
axes[0,1].set_ylabel('Predicted BG (mmol/L)')
axes[0,1].title.set_text('20 minute prediction')
axes[0,1].legend()
# Construct CI for 30 min prediction
bilstm_CI_30mins_90percent = credible_interval(bilstm_pred[:,:,2],alpha=0.1)
bilstm_CI_30mins_98percent = credible_interval(bilstm_pred[:,:,2],alpha=0.02)
axes[0,2].scatter(np.arange(144-TIME_HORIZON-PH)*10, test_label[:144-TIME_HORIZON-PH,2], s=5, c='r', marker='o', label='test label', zorder=10)
axes[0,2].fill_between(np.arange(144-TIME_HORIZON-PH)*10, bilstm_CI_30mins_90percent[:144-TIME_HORIZON-PH,1], bilstm_CI_30mins_90percent[:144-TIME_HORIZON-PH,0], color='k', label='90% CI', zorder=5)
axes[0,2].fill_between(np.arange(144-TIME_HORIZON-PH)*10, bilstm_CI_30mins_98percent[:144-TIME_HORIZON-PH,1], bilstm_CI_30mins_98percent[:144-TIME_HORIZON-PH,0], color='m', label='98% CI', zorder=0)
axes[0,2].set_xlabel('Minutes')
axes[0,2].set_ylabel('Predicted BG (mmol/L)')
axes[0,2].title.set_text('30 minute prediction')
axes[0,2].legend()
# Construct CI for 40 min prediction
bilstm_CI_40mins_90percent = credible_interval(bilstm_pred[:,:,3],alpha=0.1)
bilstm_CI_40mins_98percent = credible_interval(bilstm_pred[:,:,3],alpha=0.02)
axes[1,0].scatter(np.arange(144-TIME_HORIZON-PH)*10, test_label[:144-TIME_HORIZON-PH,3], s=5, c='r', marker='o', label='test label', zorder=10)
axes[1,0].fill_between(np.arange(144-TIME_HORIZON-PH)*10, bilstm_CI_40mins_90percent[:144-TIME_HORIZON-PH,1], bilstm_CI_40mins_90percent[:144-TIME_HORIZON-PH,0], color='k', label='90% CI', zorder=5)
axes[1,0].fill_between(np.arange(144-TIME_HORIZON-PH)*10, bilstm_CI_40mins_98percent[:144-TIME_HORIZON-PH,1], bilstm_CI_40mins_98percent[:144-TIME_HORIZON-PH,0], color='m', label='98% CI', zorder=0)
axes[1,0].set_xlabel('Minutes')
axes[1,0].set_ylabel('Predicted BG (mmol/L)')
axes[1,0].title.set_text('40 minute prediction')
axes[1,0].legend()
# Construct CI for 50 min prediction
bilstm_CI_50mins_90percent = credible_interval(bilstm_pred[:,:,4],alpha=0.1)
bilstm_CI_50mins_98percent = credible_interval(bilstm_pred[:,:,4],alpha=0.02)
axes[1,1].scatter(np.arange(144-TIME_HORIZON-PH)*10, test_label[:144-TIME_HORIZON-PH,4], s=5, c='r', marker='o', label='test label', zorder=10)
axes[1,1].fill_between(np.arange(144-TIME_HORIZON-PH)*10, bilstm_CI_50mins_90percent[:144-TIME_HORIZON-PH,1], bilstm_CI_50mins_90percent[:144-TIME_HORIZON-PH,0], color='k', label='90% CI', zorder=5)
axes[1,1].fill_between(np.arange(144-TIME_HORIZON-PH)*10, bilstm_CI_50mins_98percent[:144-TIME_HORIZON-PH,1], bilstm_CI_50mins_98percent[:144-TIME_HORIZON-PH,0], color='m', label='98% CI', zorder=0)
axes[1,1].set_xlabel('Minutes')
axes[1,1].set_ylabel('Predicted BG (mmol/L)')
axes[1,1].title.set_text('50 minute prediction')
axes[1,1].legend()
# Construct CI for 60 min prediction
bilstm_CI_60mins_90percent = credible_interval(bilstm_pred[:,:,5],alpha=0.1)
bilstm_CI_60mins_98percent = credible_interval(bilstm_pred[:,:,5],alpha=0.02)
axes[1,2].scatter(np.arange(144-TIME_HORIZON-PH)*10, test_label[:144-TIME_HORIZON-PH,5], s=5, c='r', marker='o', label='test label', zorder=10)
axes[1,2].fill_between(np.arange(144-TIME_HORIZON-PH)*10, bilstm_CI_60mins_90percent[:144-TIME_HORIZON-PH,1], bilstm_CI_60mins_90percent[:144-TIME_HORIZON-PH,0], color='k', label='90% CI', zorder=5)
axes[1,2].fill_between(np.arange(144-TIME_HORIZON-PH)*10, bilstm_CI_60mins_98percent[:144-TIME_HORIZON-PH,1], bilstm_CI_60mins_98percent[:144-TIME_HORIZON-PH,0], color='m', label='98% CI', zorder=0)
axes[1,2].set_xlabel('Minutes')
axes[1,2].set_ylabel('Predicted BG (mmol/L)')
axes[1,2].title.set_text('60 minute prediction')
axes[1,2].legend()
#fig.suptitle('Figure 15: Bidirectional LSTM Credible Intervals', fontsize='x-large')
plt.show()
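# Equivalent compact form of the six panels above (illustrative sketch; it simply
# redraws the same figure with a loop instead of repeated per-panel code).
fig, axes = plt.subplots(2, 3)
t = np.arange(144 - TIME_HORIZON - PH) * 10
for step, ax in enumerate(axes.flat):
    ci90 = credible_interval(bilstm_pred[:, :, step], alpha=0.1)
    ci98 = credible_interval(bilstm_pred[:, :, step], alpha=0.02)
    ax.scatter(t, test_label[:len(t), step], s=5, c='r', marker='o', label='test label', zorder=10)
    ax.fill_between(t, ci90[:len(t), 1], ci90[:len(t), 0], color='k', label='90% CI', zorder=5)
    ax.fill_between(t, ci98[:len(t), 1], ci98[:len(t), 0], color='m', label='98% CI', zorder=0)
    ax.set_xlabel('Minutes')
    ax.set_ylabel('Predicted BG (mmol/L)')
    ax.title.set_text('%d minute prediction' % ((step + 1) * 10))
    ax.legend()
plt.show()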
# ## LSTM
# In[ ]:
PH = 6
TIME_HORIZON = 12
RESOLUTION = 10
SAMPLES = 100
BATCH = True
# Load data & model
plt.rcParams["legend.loc"] = 'best'
_, __, test_data, test_label = load_mpc(TIME_HORIZON, PH, RESOLUTION, BATCH)
lstm = tf.keras.models.load_model('../saved/postgraduate_dissertation/saved_models/mpc_guided_lstm.h5', compile=False)
lstm_ensemble, lstm_mse, lstm_pred = sampling(lstm, test_data, test_label, SAMPLES)
# Construct CI for 10 min prediction
fig, axes = plt.subplots(2,3)
plt.rcParams["figure.figsize"] = (20,10)
lstm_CI_10mins_90percent = credible_interval(lstm_pred[:,:,0],alpha=0.1)
lstm_CI_10mins_98percent = credible_interval(lstm_pred[:,:,0],alpha=0.02)
axes[0,0].scatter(np.arange(144-TIME_HORIZON-PH)*10, test_label[:144-TIME_HORIZON-PH,0], s=5, c='r', marker='o', label='test label', zorder=10)
axes[0,0].fill_between(np.arange(144-TIME_HORIZON-PH)*10, lstm_CI_10mins_90percent[:144-TIME_HORIZON-PH,1], lstm_CI_10mins_90percent[:144-TIME_HORIZON-PH,0], color='k', label='90% CI', zorder=5)
axes[0,0].fill_between(np.arange(144-TIME_HORIZON-PH)*10, lstm_CI_10mins_98percent[:144-TIME_HORIZON-PH,1], lstm_CI_10mins_98percent[:144-TIME_HORIZON-PH,0], color='m', label='98% CI', zorder=0)
axes[0,0].set_xlabel('Minutes')
axes[0,0].set_ylabel('Predicted BG (mmol/L)')
axes[0,0].title.set_text('10 minute prediction')
axes[0,0].legend()
# Construct CI for 20 min prediction
lstm_CI_20mins_90percent = credible_interval(lstm_pred[:,:,1],alpha=0.1)
lstm_CI_20mins_98percent = credible_interval(lstm_pred[:,:,1],alpha=0.02)
axes[0,1].scatter(np.arange(144-TIME_HORIZON-PH)*10, test_label[:144-TIME_HORIZON-PH,1], s=5, c='r', marker='o', label='test label', zorder=10)
axes[0,1].fill_between(np.arange(144-TIME_HORIZON-PH)*10, lstm_CI_20mins_90percent[:144-TIME_HORIZON-PH,1], lstm_CI_20mins_90percent[:144-TIME_HORIZON-PH,0], color='k', label='90% CI', zorder=5)
axes[0,1].fill_between(np.arange(144-TIME_HORIZON-PH)*10, lstm_CI_20mins_98percent[:144-TIME_HORIZON-PH,1], lstm_CI_20mins_98percent[:144-TIME_HORIZON-PH,0], color='m', label='98% CI', zorder=0)
axes[0,1].set_xlabel('Minutes')
axes[0,1].set_ylabel('Predicted BG (mmol/L)')
axes[0,1].title.set_text('20 minute prediction')
axes[0,1].legend()
# Construct CI for 30 min prediction
lstm_CI_30mins_90percent = credible_interval(lstm_pred[:,:,2],alpha=0.1)
lstm_CI_30mins_98percent = credible_interval(lstm_pred[:,:,2],alpha=0.02)
axes[0,2].scatter(np.arange(144-TIME_HORIZON-PH)*10, test_label[:144-TIME_HORIZON-PH,2], s=5, c='r', marker='o', label='test label', zorder=10)
axes[0,2].fill_between(np.arange(144-TIME_HORIZON-PH)*10, lstm_CI_30mins_90percent[:144-TIME_HORIZON-PH,1], lstm_CI_30mins_90percent[:144-TIME_HORIZON-PH,0], color='k', label='90% CI', zorder=5)
axes[0,2].fill_between(np.arange(144-TIME_HORIZON-PH)*10, lstm_CI_30mins_98percent[:144-TIME_HORIZON-PH,1], lstm_CI_30mins_98percent[:144-TIME_HORIZON-PH,0], color='m', label='98% CI', zorder=0)
axes[0,2].set_xlabel('Minutes')
axes[0,2].set_ylabel('Predicted BG (mmol/L)')
axes[0,2].title.set_text('30 minute prediction')
axes[0,2].legend()
# Construct CI for 40 min prediction
lstm_CI_40mins_90percent = credible_interval(lstm_pred[:,:,3],alpha=0.1)
lstm_CI_40mins_98percent = credible_interval(lstm_pred[:,:,3],alpha=0.02)
axes[1,0].scatter(np.arange(144-TIME_HORIZON-PH)*10, test_label[:144-TIME_HORIZON-PH,3], s=5, c='r', marker='o', label='test label', zorder=10)
axes[1,0].fill_between(np.arange(144-TIME_HORIZON-PH)*10, lstm_CI_40mins_90percent[:144-TIME_HORIZON-PH,1], lstm_CI_40mins_90percent[:144-TIME_HORIZON-PH,0], color='k', label='90% CI', zorder=5)
axes[1,0].fill_between(np.arange(144-TIME_HORIZON-PH)*10, lstm_CI_40mins_98percent[:144-TIME_HORIZON-PH,1], lstm_CI_40mins_98percent[:144-TIME_HORIZON-PH,0], color='m', label='98% CI', zorder=0)
axes[1,0].set_xlabel('Minutes')
axes[1,0].set_ylabel('Predicted BG (mmol/L)')
axes[1,0].title.set_text('40 minute prediction')
axes[1,0].legend()
# Construct CI for 50 min prediction
lstm_CI_50mins_90percent = credible_interval(lstm_pred[:,:,4],alpha=0.1)
lstm_CI_50mins_98percent = credible_interval(lstm_pred[:,:,4],alpha=0.02)
axes[1,1].scatter(np.arange(144-TIME_HORIZON-PH)*10, test_label[:144-TIME_HORIZON-PH,4], s=5, c='r', marker='o', label='test label', zorder=10)
axes[1,1].fill_between(np.arange(144-TIME_HORIZON-PH)*10, lstm_CI_50mins_90percent[:144-TIME_HORIZON-PH,1], lstm_CI_50mins_90percent[:144-TIME_HORIZON-PH,0], color='k', label='90% CI', zorder=5)
axes[1,1].fill_between(np.arange(144-TIME_HORIZON-PH)*10, lstm_CI_50mins_98percent[:144-TIME_HORIZON-PH,1], lstm_CI_50mins_98percent[:144-TIME_HORIZON-PH,0], color='m', label='98% CI', zorder=0)
axes[1,1].set_xlabel('Minutes')
axes[1,1].set_ylabel('Predicted BG (mmol/L)')
axes[1,1].title.set_text('50 minute prediction')
axes[1,1].legend()
# Construct CI for 60 min prediction
lstm_CI_60mins_90percent = credible_interval(lstm_pred[:,:,5],alpha=0.1)
lstm_CI_60mins_98percent = credible_interval(lstm_pred[:,:,5],alpha=0.02)
axes[1,2].scatter(np.arange(144-TIME_HORIZON-PH)*10, test_label[:144-TIME_HORIZON-PH,5], s=5, c='r', marker='o', label='test label', zorder=10)
axes[1,2].fill_between(np.arange(144-TIME_HORIZON-PH)*10, lstm_CI_60mins_90percent[:144-TIME_HORIZON-PH,1], lstm_CI_60mins_90percent[:144-TIME_HORIZON-PH,0], color='k', label='90% CI', zorder=5)
axes[1,2].fill_between(np.arange(144-TIME_HORIZON-PH)*10, lstm_CI_60mins_98percent[:144-TIME_HORIZON-PH,1], lstm_CI_60mins_98percent[:144-TIME_HORIZON-PH,0], color='m', label='98% CI', zorder=0)
axes[1,2].set_xlabel('Minutes')
axes[1,2].set_ylabel('Predicted BG (mmol/L)')
axes[1,2].title.set_text('60 minute prediction')
axes[1,2].legend()
plt.show()
"""
=======================================================
:mod:`go_benchmark` -- Benchmark optimization functions
=======================================================
This module provides a set of benchmark problems for global optimization.
.. Copyright 2013 <NAME>
.. module:: go_benchmark
.. moduleauthor:: <NAME> <<EMAIL>>
.. modifiedby:: <NAME> <<EMAIL>> 2016
"""
# Array math module implemented in C / C++
import numpy
# Optimized mathematical functions
from numpy import abs, arctan2, cos, dot, exp, floor, inf, log, log10, pi, prod, sin, sqrt, sum, tan, tanh
# Array functions
from numpy import arange, asarray, atleast_1d, ones, roll, seterr, sign, where, zeros, zeros_like
from numpy.random import uniform
from math import factorial
# Tell numpy to ignore errors
seterr(all='ignore')
# -------------------------------------------------------------------------------- #
class Benchmark(object):
"""
Defines a global optimization benchmark problem.
This abstract class defines the basic structure of a global
optimization problem. Subclasses should implement the ``evaluator`` method
for a particular optimization problem.
Public Attributes:
- *dimensions* -- the number of inputs to the problem
- *fun_evals* -- stores the number of function evaluations, as some crappy
optimization frameworks (e.g., `nlopt`) do not return this value
- *change_dimensionality* -- whether we can change the benchmark function `x`
variable length (i.e., the dimensionality of the problem)
- *custom_bounds* -- a set of lower/upper bounds for plot purposes (if needed).
- *spacing* -- the spacing to use to generate evenly spaced samples across the
lower/upper bounds on the variables, for plotting purposes
"""
def __init__(self, dimensions):
self.dimensions = dimensions
self.fun_evals = 0
self.change_dimensionality = False
self.custom_bounds = None
self.record = [] # A record of objective values per evaluations
if dimensions == 1:
self.spacing = 1001
else:
self.spacing = 201
def __str__(self):
return "%s (%i dimensions)"%(self.__class__.__name__, self.dimensions)
def __repr__(self):
return self.__class__.__name__
def generator(self):
"""The generator function for the benchmark problem."""
return [uniform(l, u) for l, u in self.bounds]
def evaluator(self, candidates):
"""The evaluator function for the benchmark problem."""
raise NotImplementedError
def set_dimensions(self, ndim):
self.dimensions = ndim
def lower_bounds_constraints(self, x):
lower = asarray([b[0] for b in self.bounds])
return asarray(x) - lower
def upper_bounds_constraints(self, x):
upper = asarray([b[1] for b in self.bounds])
return upper - asarray(x)
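# Usage sketch (illustrative; the `Sphere` subclass below is hypothetical and not
# part of this suite): implement `evaluator` and set `bounds`, `global_optimum`
# and `fglob` in the constructor, then sample and evaluate points.
#
#     class Sphere(Benchmark):
#         def __init__(self, dimensions=2):
#             Benchmark.__init__(self, dimensions)
#             self.bounds = list(zip([-5.0] * dimensions, [5.0] * dimensions))
#             self.global_optimum = [0.0] * dimensions
#             self.fglob = 0.0
#
#         def evaluator(self, x, *args):
#             self.fun_evals += 1
#             return sum(x ** 2)
#
#     problem = Sphere(dimensions=3)
#     x0 = asarray(problem.generator())   # random feasible point from the bounds
#     fx = problem.evaluator(x0)          # objective value; fun_evals is incremented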
#-----------------------------------------------------------------------
# SINGLE-OBJECTIVE PROBLEMS
#-----------------------------------------------------------------------
class Ackley(Benchmark):
"""
Ackley test objective function.
This class defines the Ackley global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Ackley}}(\\mathbf{x}) = -20e^{-0.2 \\sqrt{\\frac{1}{n} \\sum_{i=1}^n x_i^2}} - e^{ \\frac{1}{n} \\sum_{i=1}^n \\cos(2 \\pi x_i)} + 20 + e
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-32, 32]` for :math:`i=1,...,n`.
.. figure:: figures/Ackley.png
:alt: Ackley function
:align: center
**Two-dimensional Ackley function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-32.0] * self.dimensions,
[ 32.0] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
a = 20.0; b = 0.2; c = 2.0*pi
return -a*exp(-b*sqrt(1./self.dimensions*sum(x**2)))-exp(1./self.dimensions*sum(cos(c*x)))+a+exp(1.)
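# Worked check: at the optimum x = [0, 0] the expression reduces to
# -20*exp(0) - exp(1) + 20 + exp(1) = 0, matching fglob.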
#-----------------------------------------------------------------------
class Adjiman(Benchmark):
"""
Adjiman test objective function.
This class defines the Adjiman global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Adjiman}}(\\mathbf{x}) = \\cos(x_1)\\sin(x_2) - \\frac{x_1}{(x_2^2 + 1)}
Here, :math:`n` represents the number of dimensions and :math:`x_1 \\in [-1, 2]` and :math:`x_2 \\in [-1, 1]`.
.. figure:: figures/Adjiman.png
:alt: Adjiman function
:align: center
**Two-dimensional Adjiman function**
*Global optimum*: :math:`f(x_i) = -2.02181` for :math:`\\mathbf{x} = [2, 0.10578]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, 2)
self.bounds = [(-1.0, 2.0), (-1.0, 1.0)]
self.global_optimum = [2.0, 0.10578]
self.fglob = -2.02180678
self.change_dimensionality = False
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return cos(x1)*sin(x2) - x1/(x2**2.0 + 1)
# -------------------------------------------------------------------------------- #
class Alpine01(Benchmark):
"""
Alpine 1 test objective function.
This class defines the Alpine 1 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Alpine01}}(\\mathbf{x}) = \\sum_{i=1}^{n} \\lvert {x_i \\sin \\left( x_i \\right) + 0.1 x_i} \\rvert
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,...,n`.
.. figure:: figures/Alpine01.png
:alt: Alpine 1 function
:align: center
**Two-dimensional Alpine 1 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return sum(abs(x*sin(x) + 0.1*x))
# -------------------------------------------------------------------------------- #
class Alpine02(Benchmark):
"""
Alpine 2 test objective function.
This class defines the Alpine 2 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Alpine02}}(\\mathbf{x}) = \\prod_{i=1}^{n} \\sqrt{x_i} \\sin(x_i)
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 10]` for :math:`i=1,...,n`.
.. figure:: figures/Alpine02.png
:alt: Alpine 2 function
:align: center
**Two-dimensional Alpine 2 function**
*Global optimum*: :math:`f(x_i) = -6.1295` for :math:`x_i = 7.917` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 0.0] * self.dimensions,
[10.0] * self.dimensions))
self.global_optimum = [7.91705268, 4.81584232]
self.fglob = -6.12950
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return prod(sqrt(x)*sin(x))
# -------------------------------------------------------------------------------- #
class AMGM(Benchmark):
"""
AMGM test objective function.
This class defines the Arithmetic Mean - Geometric Mean Equality global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{AMGM}}(\\mathbf{x}) = \\left ( \\frac{1}{n} \\sum_{i=1}^{n} x_i - \\sqrt[n]{ \\prod_{i=1}^{n} x_i} \\right )^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 10]` for :math:`i=1,...,n`.
.. figure:: figures/AMGM.png
:alt: AMGM function
:align: center
**Two-dimensional Arithmetic Mean - Geometric Mean Equality function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_1 = x_2 = ... = x_n` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 0.0] * self.dimensions,
[10.0] * self.dimensions))
self.global_optimum = [1, 1]
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
n = self.dimensions
f1 = sum(x)
f2 = prod(x)
xsum = f1
f1 = f1/n
f2 = f2**(1.0/n)
return (f1 - f2)**2
# -------------------------------------------------------------------------------- #
class BartelsConn(Benchmark):
"""
Bartels-Conn test objective function.
This class defines the Bartels-Conn global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{BartelsConn}}(\\mathbf{x}) = \\lvert {x_1^2 + x_2^2 + x_1x_2} \\rvert + \\lvert {\\sin(x_1)} \\rvert + \\lvert {\\cos(x_2)} \\rvert
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-50, 50]` for :math:`i=1,...,n`.
.. figure:: figures/BartelsConn.png
:alt: Bartels-Conn function
:align: center
**Two-dimensional Bartels-Conn function**
*Global optimum*: :math:`f(x_i) = 1` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self):
Benchmark.__init__(self, 2)
self.bounds = list(zip([-5.0] * self.dimensions,
[ 5.0] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = 1.0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return abs(x1**2.0 + x2**2.0 + x1*x2) + abs(sin(x1)) + abs(cos(x2))
# -------------------------------------------------------------------------------- #
class Beale(Benchmark):
"""
Beale test objective function.
This class defines the Beale global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Beale}}(\\mathbf{x}) = \\left(x_1 x_2 - x_1 + 1.5\\right)^{2} + \\left(x_1 x_2^{2} - x_1 + 2.25\\right)^{2} + \\left(x_1 x_2^{3} - x_1 + 2.625\\right)^{2}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/Beale.png
:alt: Beale function
:align: center
**Two-dimensional Beale function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [3, 0.5]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, 2)
self.bounds = list(zip([-4.5] * self.dimensions,
[ 4.5] * self.dimensions))
self.global_optimum = [3.0, 0.5]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
return (1.5 - x[0] + x[0]*x[1])**2 + (2.25 - x[0] + x[0]*x[1]**2)**2 + (2.625 - x[0] + x[0]*x[1]**3)**2
# -------------------------------------------------------------------------------- #
class Bird(Benchmark):
"""
Bird test objective function.
This class defines the Bird global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Bird}}(\\mathbf{x}) = \\left(x_1 - x_2\\right)^{2} + e^{\left[1 - \\sin\\left(x_1\\right) \\right]^{2}} \\cos\\left(x_2\\right) + e^{\left[1 - \\cos\\left(x_2\\right)\\right]^{2}} \\sin\\left(x_1\\right)
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-2\\pi, 2\\pi]` for :math:`i=1,2`.
.. figure:: figures/Bird.png
:alt: Bird function
:align: center
**Two-dimensional Bird function**
*Global optimum*: :math:`f(x_i) = -106.7645367198034` for :math:`\\mathbf{x} = [4.701055751981055 , 3.152946019601391]` or
:math:`\\mathbf{x} = [-1.582142172055011, -3.130246799635430]`
"""
def __init__(self):
Benchmark.__init__(self, 2)
self.bounds = list(zip([-2.0*pi] * self.dimensions,
[ 2.0*pi] * self.dimensions))
self.global_optimum = ([4.701055751981055 , 3.152946019601391],
[-1.582142172055011, -3.130246799635430])
self.fglob = -106.7645367198034
def evaluator(self, x, *args):
self.fun_evals += 1
return sin(x[0])*exp((1-cos(x[1]))**2) + cos(x[1])*exp((1-sin(x[0]))**2) + (x[0]-x[1])**2
# -------------------------------------------------------------------------------- #
class Bohachevsky(Benchmark):
"""
Bohachevsky test objective function.
This class defines the Bohachevsky global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Bohachevsky}}(\\mathbf{x}) = \\sum_{i=1}^{n-1}\\left[x_i^2 + 2x_{i+1}^2 - 0.3\\cos(3\\pi x_i) - 0.4\\cos(4\\pi x_{i+1}) + 0.7\\right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-15, 15]` for :math:`i=1,...,n`.
.. figure:: figures/Bohachevsky.png
:alt: Bohachevsky function
:align: center
**Two-dimensional Bohachevsky function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-15.0] * self.dimensions,
[ 15.0] * self.dimensions))
self.custom_bounds = [(-2, 2), (-2, 2)]
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
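# pair each x_i with its successor x_{i+1} by shifting the vector one step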
x0 = x[:-1]
x1 = roll(x,-1)[:-1]
return sum(x0**2 + 2*x1**2 - 0.3 * cos(3*pi*x0) - 0.4 * cos(4*pi*x1) + 0.7)
# -------------------------------------------------------------------------------- #
class BoxBetts(Benchmark):
"""
BoxBetts test objective function.
This class defines the Box-Betts global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{BoxBetts}}(\\mathbf{x}) = \\sum_{i=1}^k g(x_i)^2
Where, in this exercise:
.. math:: g(x) = e^{-0.1(i+1)x_1} - e^{-0.1(i+1)x_2} - \\left[e^{-0.1(i+1)} - e^{-(i+1)}\\right] x_3
And :math:`k = 10`.
Here, :math:`n` represents the number of dimensions and :math:`x_1 \\in [0.9, 1.2], x_2 \\in [9, 11.2], x_3 \\in [0.9, 1.2]`.
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [1, 10, 1]`
"""
def __init__(self, dimensions=3):
Benchmark.__init__(self, dimensions)
self.bounds = ([0.9, 1.2], [9.0, 11.2], [0.9, 1.2])
self.global_optimum = [1.0, 10.0, 1.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
y = 0.0
for i in range(1, 11):
y += (exp(-0.1*i*x[0]) - exp(-0.1*i*x[1]) - (exp(-0.1*i) - exp(-1.0*i))*x[2])**2.0
return y
# -------------------------------------------------------------------------------- #
class Branin01(Benchmark):
"""
Branin 1 test objective function.
This class defines the Branin 1 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Branin01}}(\\mathbf{x}) = \\left(- 1.275 \\frac{x_1^{2}}{\pi^{2}} + 5 \\frac{x_1}{\pi} + x_2 -6\\right)^{2} + \\left(10 - \\frac{5}{4 \\pi} \\right) \\cos\\left(x_1\\right) + 10
Here, :math:`n` represents the number of dimensions and :math:`x_1 \\in [-5, 10], x_2 \\in [0, 15]`
.. figure:: figures/Branin01.png
:alt: Branin 1 function
:align: center
**Two-dimensional Branin 1 function**
*Global optimum*: :math:`f(x_i) = 0.39788735772973816` for :math:`\\mathbf{x} = [-\\pi, 12.275]` or
:math:`\\mathbf{x} = [\\pi, 2.275]` or :math:`\\mathbf{x} = [9.42478, 2.475]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = [(-5., 10.), (0., 15.)]
self.global_optimum = [(-pi, 12.275), (pi, 2.275), (9.42478, 2.475)]
self.fglob = 0.39788735772973816
def evaluator(self, x, *args):
self.fun_evals += 1
return (x[1]-(5.1/(4*pi**2))*x[0]**2+5*x[0]/pi-6)**2+10*(1-1/(8*pi))*cos(x[0])+10
# -------------------------------------------------------------------------------- #
class Branin02(Benchmark):
"""
Branin 2 test objective function.
This class defines the Branin 2 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Branin02}}(\\mathbf{x}) = \\left(- 1.275 \\frac{x_1^{2}}{\pi^{2}} + 5 \\frac{x_1}{\pi} + x_2 -6\\right)^{2} + \\left(10 - \\frac{5}{4 \\pi} \\right) \\cos\\left(x_1\\right) \\cos\\left(x_2\\right) + \\log(x_1^2+x_2^2 +1) + 10
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-5, 15]` for :math:`i=1,2`.
.. figure:: figures/Branin02.png
:alt: Branin 2 function
:align: center
**Two-dimensional Branin 2 function**
*Global optimum*: :math:`f(x_i) = 5.559037` for :math:`\\mathbf{x} = [-3.2, 12.53]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = [(-5.0, 15.0), (-5.0, 15.0)]
self.global_optimum = [-3.2, 12.53]
self.fglob = 5.559037
def evaluator(self, x, *args):
self.fun_evals += 1
return (x[1]-(5.1/(4*pi**2))*x[0]**2+5*x[0]/pi-6)**2+10*(1-1/(8*pi))*cos(x[0])*cos(x[1])+log(x[0]**2.0+x[1]**2.0+1.0)+10
# -------------------------------------------------------------------------------- #
class Brent(Benchmark):
"""
Brent test objective function.
This class defines the Brent global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Brent}}(\\mathbf{x}) = (x_1 + 10)^2 + (x_2 + 10)^2 + e^{(-x_1^2-x_2^2)}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/Brent.png
:alt: Brent function
:align: center
**Two-dimensional Brent function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [-10, -10]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.custom_bounds = [(-10, 2), (-10, 2)]
self.global_optimum = [-10.0, -10.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
return (x[0] + 10.0)**2.0 + (x[1] + 10.0)**2.0 + exp(-x[0]**2.0 - x[1]**2.0)
# -------------------------------------------------------------------------------- #
class Brown(Benchmark):
"""
Brown test objective function.
This class defines the Brown global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Brown}}(\\mathbf{x}) = \\sum_{i=1}^{n-1}\\left[ \\left(x_i^2\\right)^{x_{i+1}^2+1} + \\left(x_{i+1}^2\\right)^{x_i^2+1} \\right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-1, 4]` for :math:`i=1,...,n`.
.. figure:: figures/Brown.png
:alt: Brown function
:align: center
**Two-dimensional Brown function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-1.0] * self.dimensions,
[ 4.0] * self.dimensions))
self.custom_bounds = [(-1.0, 1.0), (-1.0, 1.0)]
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
x0 = x[:-1]
x1 = x[1:]
return sum((x0**2.0)**(x1**2.0 + 1.0) + (x1**2.0)**(x0**2.0 + 1.0))
# -------------------------------------------------------------------------------- #
class Bukin02(Benchmark):
"""
Bukin 2 test objective function.
This class defines the Bukin 2 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Bukin02}}(\\mathbf{x}) = 100 (x_2 - 0.01x_1^2 + 1) + 0.01(x_1 + 10)^2
Here, :math:`n` represents the number of dimensions and :math:`x_1 \\in [-15, -5], x_2 \\in [-3, 3]`
.. figure:: figures/Bukin02.png
:alt: Bukin 2 function
:align: center
**Two-dimensional Bukin 2 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [-10, 0]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = [(-15.0, -5.0), (-3.0, 3.0)]
self.global_optimum = [-10.0, 0.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
return 100*(x[1]**2 - 0.01*x[0]**2 + 1.0) + 0.01*(x[0] + 10.0)**2.0
# -------------------------------------------------------------------------------- #
class Bukin04(Benchmark):
"""
Bukin 4 test objective function.
This class defines the Bukin 4 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Bukin04}}(\\mathbf{x}) = 100 x_2^{2} + 0.01 \\lvert{x_1 + 10} \\rvert
Here, :math:`n` represents the number of dimensions and :math:`x_1 \\in [-15, -5], x_2 \\in [-3, 3]`
.. figure:: figures/Bukin04.png
:alt: Bukin 4 function
:align: center
**Two-dimensional Bukin 4 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [-10, 0]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = [(-15.0, -5.0), (-3.0, 3.0)]
self.global_optimum = [-10.0, 0.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
return 100*x[1]**2 + 0.01*abs(x[0] + 10)
# -------------------------------------------------------------------------------- #
class Bukin06(Benchmark):
"""
Bukin 6 test objective function.
This class defines the Bukin 6 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Bukin06}}(\\mathbf{x}) = 100 \\sqrt{ \\lvert{x_2 - 0.01 x_1^{2}} \\rvert} + 0.01 \\lvert{x_1 + 10} \\rvert
Here, :math:`n` represents the number of dimensions and :math:`x_1 \\in [-15, -5], x_2 \\in [-3, 3]`
.. figure:: figures/Bukin06.png
:alt: Bukin 6 function
:align: center
**Two-dimensional Bukin 6 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [-10, 1]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = [(-15.0, -5.0), (-3.0, 3.0)]
self.global_optimum = [-10.0, 1.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
return 100*sqrt(abs(x[1] - 0.01*x[0]**2)) + 0.01*abs(x[0] + 10)
# -------------------------------------------------------------------------------- #
class CarromTable(Benchmark):
"""
CarromTable test objective function.
This class defines the CarromTable global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{CarromTable}}(\\mathbf{x}) = - \\frac{1}{30} e^{2 \\left|{1 - \\frac{\\sqrt{x_{1}^{2} + x_{2}^{2}}}{\pi}}\\right|} \\cos^{2}\\left(x_{1}\\right) \\cos^{2}\\left(x_{2}\\right)
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/CarromTable.png
:alt: CarromTable function
:align: center
**Two-dimensional CarromTable function**
*Global optimum*: :math:`f(x_i) = -24.15681551650653` for :math:`x_i = \\pm 9.646157266348881` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [(9.646157266348881 , 9.646134286497169),
(-9.646157266348881, 9.646134286497169),
(9.646157266348881 , -9.646134286497169),
(-9.646157266348881, -9.646134286497169)]
self.fglob = -24.15681551650653
def evaluator(self, x, *args):
self.fun_evals += 1
return -((cos(x[0])*cos(x[1])*exp(abs(1 - sqrt(x[0]**2 + x[1]**2)/pi)))**2)/30
# -------------------------------------------------------------------------------- #
class Chichinadze(Benchmark):
"""
Chichinadze test objective function.
This class defines the Chichinadze global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Chichinadze}}(\\mathbf{x}) = x_{1}^{2} - 12 x_{1} + 8 \\sin\\left(\\frac{5}{2} \\pi x_{1}\\right) + 10 \\cos\\left(\\frac{1}{2} \\pi x_{1}\\right) + 11 - 0.2 \\frac{\\sqrt{5}}{e^{\\frac{1}{2} \\left(x_{2} -0.5\\right)^{2}}}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-30, 30]` for :math:`i=1,2`.
.. figure:: figures/Chichinadze.png
:alt: Chichinadze function
:align: center
**Two-dimensional Chichinadze function**
*Global optimum*: :math:`f(x_i) = -42.94438701899098` for :math:`\\mathbf{x} = [6.189866586965680, 0.5]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-30.0] * self.dimensions,
[ 30.0] * self.dimensions))
self.custom_bounds = [(-10, 10), (-10, 10)]
self.global_optimum = [6.189866586965680, 0.5]
self.fglob = -42.94438701899098
def evaluator(self, x, *args):
self.fun_evals += 1
return x[0]**2 - 12*x[0] + 11 + 10*cos(pi*x[0]/2) + 8*sin(5*pi*x[0]/2) - 1.0/sqrt(5)*exp(-((x[1] - 0.5)**2)/2)
# -------------------------------------------------------------------------------- #
class Cigar(Benchmark):
"""
Cigar test objective function.
This class defines the Cigar global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Cigar}}(\\mathbf{x}) = x_1^2 + 10^6\\sum_{i=2}^{n} x_i^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,...,n`.
.. figure:: figures/Cigar.png
:alt: Cigar function
:align: center
**Two-dimensional Cigar function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.custom_bounds = [(-5, 5), (-5, 5)]
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return x[0]**2 + 1e6*sum(x[1:]**2)
# -------------------------------------------------------------------------------- #
class Cola(Benchmark):
"""
Cola test objective function.
This class defines the Cola global optimization problem. The 17-dimensional function computes
indirectly the formula :math:`f(n, u)` by setting :math:`x_0 = y_0, x_1 = u_0, x_i = u_{2(i−2)}, y_i = u_{2(i−2)+1}` :
.. math::
f_{\\text{Cola}}(\\mathbf{x}) = \\sum_{i<j}^{n} \\left (r_{i,j} - d_{i,j} \\right )^2
Where :math:`r_{i,j}` is given by:
.. math::
r_{i,j} = \\sqrt{(x_i - x_j)^2 + (y_i - y_j)^2}
And :math:`d` is a symmetric matrix given by:
.. math::
\\mathbf{d} = \\left [ d_{ij} \\right ] = \\begin{pmatrix}
1.27 & & & & & & & & \\\\
1.69 & 1.43 & & & & & & & \\\\
2.04 & 2.35 & 2.43 & & & & & & \\\\
3.09 & 3.18 & 3.26 & 2.85 & & & & & \\\\
3.20 & 3.22 & 3.27 & 2.88 & 1.55 & & & & \\\\
2.86 & 2.56 & 2.58 & 2.59 & 3.12 & 3.06 & & & \\\\
3.17 & 3.18 & 3.18 & 3.12 & 1.31 & 1.64 & 3.00 & \\\\
3.21 & 3.18 & 3.18 & 3.17 & 1.70 & 1.36 & 2.95 & 1.32 & \\\\
2.38 & 2.31 & 2.42 & 1.94 & 2.85 & 2.81 & 2.56 & 2.91 & 2.97
\\end{pmatrix}
This function has bounds :math:`0 \\leq x_0 \\leq 4` and :math:`-4 \\leq x_i \\leq 4` for :math:`i = 1,...,n-1`. It
has a global minimum of 11.7464.
"""
def __init__(self, dimensions=17):
Benchmark.__init__(self, dimensions)
self.bounds = [[0.0, 4.0]] + \
list(zip([-4.0] * (self.dimensions-1),
[ 4.0] * (self.dimensions-1)))
self.global_optimum = [0.651906, 1.30194, 0.099242, -0.883791,
-0.8796, 0.204651, -3.28414, 0.851188,
-3.46245, 2.53245, -0.895246, 1.40992,
-3.07367, 1.96257, -2.97872, -0.807849,
-1.68978]
self.fglob = 11.7464
def evaluator(self, x, *args):
self.fun_evals += 1
d = asarray([[0, 0, 0, 0, 0, 0, 0, 0, 0],
[1.27, 0, 0, 0, 0, 0, 0, 0, 0],
[1.69, 1.43, 0, 0, 0, 0, 0, 0, 0],
[2.04, 2.35, 2.43, 0, 0, 0, 0, 0, 0],
[3.09, 3.18, 3.26, 2.85, 0, 0, 0, 0, 0],
[3.20, 3.22, 3.27, 2.88, 1.55, 0, 0, 0, 0],
[2.86, 2.56, 2.58, 2.59, 3.12, 3.06, 0, 0, 0],
[3.17, 3.18, 3.18, 3.12, 1.31, 1.64, 3.00, 0, 0],
[3.21, 3.18, 3.18, 3.17, 1.70, 1.36, 2.95, 1.32, 0],
[2.38, 2.31, 2.42, 1.94, 2.85, 2.81, 2.56, 2.91, 2.97]])
# WARNING: This doesn't seem to follow guidelines above...
x1 = asarray([0.0, x[0]] + list(x[1::2]))
x2 = asarray([0.0, 0.0] + list(x[2::2]))
y = 0.0
for i in range(1, len(x1)):
y += sum((sqrt((x1[i] - x1[0:i])**2.0 +
(x2[i] - x2[0:i])**2.0)
- d[i, 0:i])**2.0)
return y
# -------------------------------------------------------------------------------- #
class Colville(Benchmark):
"""
Colville test objective function.
This class defines the Colville global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Colville}}(\\mathbf{x}) = \\left(x_{1} -1\\right)^{2} + 100 \\left(x_{1}^{2} - x_{2}\\right)^{2} + 10.1 \\left(x_{2} -1\\right)^{2} + \\left(x_{3} -1\\right)^{2} + 90 \\left(x_{3}^{2} - x_{4}\\right)^{2} + 10.1 \\left(x_{4} -1\\right)^{2} + 19.8 \\frac{x_{4} -1}{x_{2}}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,...,4`.
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 1` for :math:`i=1,...,4`
"""
def __init__(self, dimensions=4):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [1.0] * self.dimensions
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
return 100*(x[0]**2-x[1])**2+(x[0]-1)**2+(x[2]-1)**2+90*(x[2]**2-x[3])**2+ 10.1*((x[1]-1)**2+(x[3]-1)**2)+19.8*(1/x[1])*(x[3]-1)
# -------------------------------------------------------------------------------- #
class Corana(Benchmark):
"""
Corana test objective function.
This class defines the Corana global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Corana}}(\\mathbf{x}) = \\begin{cases} \\sum_{i=1}^n 0.15 d_i [z_i - 0.05\\textrm{sgn}(z_i)]^2 & \\textrm{if}|x_i-z_i| < 0.05 \\\\
d_ix_i^2 & \\textrm{otherwise}\\end{cases}
Where, in this exercise:
.. math::
z_i = 0.2 \\lfloor |x_i/s_i|+0.49999\\rfloor\\textrm{sgn}(x_i), d_i=(1,1000,10,100, ...)
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-5, 5]` for :math:`i=1,...,4`.
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,4`
"""
def __init__(self, dimensions=4):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-5.0] * self.dimensions,
[ 5.0] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
d = [1., 1000., 10., 100.]
r = 0
for j in range(4):
zj = floor(abs(x[j]/0.2) + 0.49999)*sign(x[j]) * 0.2
if abs(x[j]-zj) < 0.05:
r += 0.15 * ((zj - 0.05*sign(zj))**2) * d[j]
else:
r += d[j] * x[j] * x[j]
return r
# -------------------------------------------------------------------------------- #
class CosineMixture(Benchmark):
"""
Cosine Mixture test objective function.
This class defines the Cosine Mixture global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{CosineMixture}}(\\mathbf{x}) = -0.1 \\sum_{i=1}^n \\cos(5 \\pi x_i) - \\sum_{i=1}^n x_i^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-1, 1]` for :math:`i=1,...,N`.
.. figure:: figures/CosineMixture.png
:alt: Cosine Mixture function
:align: center
**Two-dimensional Cosine Mixture function**
*Global optimum*: :math:`f(x_i) = -0.1N` for :math:`x_i = 0` for :math:`i=1,...,N`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.change_dimensionality = True
self.bounds = list(zip([-1.0] * self.dimensions,
[ 1.0] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = -0.1*self.dimensions
def evaluator(self, x, *args):
self.fun_evals += 1
return -0.1*sum(cos(5.0*pi*x)) - sum(x**2.0)
# -------------------------------------------------------------------------------- #
class CrossInTray(Benchmark):
"""
Cross-in-Tray test objective function.
This class defines the Cross-in-Tray global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{CrossInTray}}(\\mathbf{x}) = - 0.0001 \\left(\\left|{e^{\\left|{100 - \\frac{\\sqrt{x_{1}^{2} + x_{2}^{2}}}{\\pi}}\\right|} \\sin\\left(x_{1}\\right) \\sin\\left(x_{2}\\right)}\\right| + 1\\right)^{0.1}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-15, 15]` for :math:`i=1,2`.
.. figure:: figures/CrossInTray.png
:alt: Cross-in-Tray function
:align: center
**Two-dimensional Cross-in-Tray function**
*Global optimum*: :math:`f(x_i) = -2.062611870822739` for :math:`x_i = \\pm 1.349406608602084` for :math:`i=1,2`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [(1.349406685353340 , 1.349406608602084),
(-1.349406685353340, 1.349406608602084),
(1.349406685353340, -1.349406608602084),
(-1.349406685353340, -1.349406608602084)]
self.fglob = -2.062611870822739
def evaluator(self, x, *args):
self.fun_evals += 1
return -0.0001*(abs(sin(x[0])*sin(x[1])*exp(abs(100 - sqrt(x[0]**2 + x[1]**2)/pi))) + 1)**(0.1)
# -------------------------------------------------------------------------------- #
class CrossLegTable(Benchmark):
"""
Cross-Leg-Table test objective function.
This class defines the Cross-Leg-Table global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{CrossLegTable}}(\\mathbf{x}) = - \\frac{1}{\\left(\\left|{e^{\\left|{100 - \\frac{\\sqrt{x_{1}^{2} + x_{2}^{2}}}{\\pi}}\\right|} \\sin\\left(x_{1}\\right) \\sin\\left(x_{2}\\right)}\\right| + 1\\right)^{0.1}}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/CrossLegTable.png
:alt: Cross-Leg-Table function
:align: center
**Two-dimensional Cross-Leg-Table function**
*Global optimum*: :math:`f(x_i) = -1`. The global minimum is found on the planes :math:`x_1 = 0` and :math:`x_2 = 0`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
# WARNING: There was an error here, I added the global optimum
self.global_optimum = [0.0, 2.0]
self.fglob = -1.0
def evaluator(self, x, *args):
self.fun_evals += 1
return -(abs(sin(x[0])*sin(x[1])*exp(abs(100 - sqrt(x[0]**2 + x[1]**2)/pi))) + 1)**(-0.1)
# -------------------------------------------------------------------------------- #
class CrownedCross(Benchmark):
"""
Crowned Cross test objective function.
This class defines the Crowned Cross global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{CrownedCross}}(\\mathbf{x}) = 0.0001 \\left(\\left|{e^{\\left|{100- \\frac{\\sqrt{x_{1}^{2} + x_{2}^{2}}}{\\pi}}\\right|} \\sin\\left(x_{1}\\right) \\sin\\left(x_{2}\\right)}\\right| + 1\\right)^{0.1}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/CrownedCross.png
:alt: Crowned Cross function
:align: center
**Two-dimensional Crowned Cross function**
*Global optimum*: :math:`f(x_i) = 0.0001`. The global minimum is found on the planes :math:`x_1 = 0` and :math:`x_2 = 0`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [0, 0]
self.fglob = 0.0001
def evaluator(self, x, *args):
self.fun_evals += 1
return 0.0001*(abs(sin(x[0])*sin(x[1])*exp(abs(100 - sqrt(x[0]**2 + x[1]**2)/pi))) + 1)**(0.1)
# -------------------------------------------------------------------------------- #
class Csendes(Benchmark):
"""
Csendes test objective function.
This class defines the Csendes global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Csendes}}(\\mathbf{x}) = \\sum_{i=1}^n x_i^6 \\left[ 2 + \\sin \\left( \\frac{1}{x_i} \\right ) \\right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-1, 1]` for :math:`i=1,...,N`.
.. figure:: figures/Csendes.png
:alt: Csendes function
:align: center
**Two-dimensional Csendes function**
*Global optimum*: :math:`f(x_i) = 0.0` for :math:`x_i = 0` for :math:`i=1,...,N`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-1.0] * self.dimensions,
[ 1.0] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return sum((x**6.0)*(2.0 + sin(1.0/x)))
# -------------------------------------------------------------------------------- #
class Cube(Benchmark):
"""
Cube test objective function.
This class defines the Cube global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Cube}}(\\mathbf{x}) = 100(x_2 - x_1^3)^2 + (1 - x_1)^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,...,N`.
.. figure:: figures/Cube.png
:alt: Cube function
:align: center
**Two-dimensional Cube function**
*Global optimum*: :math:`f(x_i) = 0.0` for :math:`\\mathbf{x} = [1, 1]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.custom_bounds = [(0, 2), (0, 2)]
self.global_optimum = [1.0, 1.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
return 100.0*(x[1] - x[0]**3.0)**2.0 + (1.0 - x[0])**2.0
# -------------------------------------------------------------------------------- #
class Damavandi(Benchmark):
"""
Damavandi test objective function.
This class defines the Damavandi global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Damavandi}}(\\mathbf{x}) = \\left[ 1 - \\lvert{\\frac{\\sin[\\pi(x_1-2)]\\sin[\\pi(x2-2)]}{\\pi^2(x_1-2)(x_2-2)}} \\rvert^5 \\right] \\left[2 + (x_1-7)^2 + 2(x_2-7)^2 \\right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 14]` for :math:`i=1,...,n`.
.. figure:: figures/Damavandi.png
:alt: Damavandi function
:align: center
**Two-dimensional Damavandi function**
*Global optimum*: :math:`f(x_i) = 0.0` for :math:`x_i = 2` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 0.0] * self.dimensions,
[14.0] * self.dimensions))
self.global_optimum = [2.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
numerator = sin(pi*(x1 - 2.0))*sin(pi*(x2 - 2.0))
denumerator = (pi**2)*(x1 - 2.0)*(x2 - 2.0)
factor1 = 1.0 - (abs(numerator / denumerator))**5.0
factor2 = 2 + (x1 - 7.0)**2.0 + 2*(x2 - 7.0)**2.0
return factor1*factor2
# -------------------------------------------------------------------------------- #
class Deb01(Benchmark):
"""
Deb 1 test objective function.
This class defines the Deb 1 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Deb01}}(\\mathbf{x}) = - \\frac{1}{N} \\sum_{i=1}^n \\sin^6(5 \\pi x_i)
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-1, 1]` for :math:`i=1,...,n`.
.. figure:: figures/Deb01.png
:alt: Deb 1 function
:align: center
**Two-dimensional Deb 1 function**
*Global optimum*: :math:`f(x_i) = -1.0`. The number of global minima is :math:`5^n` that are evenly spaced
in the function landscape, where :math:`n` represents the dimension of the problem.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-1.0] * self.dimensions,
[ 1.0] * self.dimensions))
self.global_optimum = [0.3, -0.3]
self.fglob = -1.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return -(1.0/self.dimensions)*sum(sin(5*pi*x)**6.0)
# -------------------------------------------------------------------------------- #
class Deb02(Benchmark):
"""
Deb 2 test objective function.
This class defines the Deb 2 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Deb02}}(\\mathbf{x}) = - \\frac{1}{N} \\sum_{i=1}^n \\sin^6 \\left[ 5 \\pi \\left ( x_i^{3/4} - 0.05 \\right) \\right ]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 1]` for :math:`i=1,...,n`.
.. figure:: figures/Deb02.png
:alt: Deb 2 function
:align: center
**Two-dimensional Deb 2 function**
*Global optimum*: :math:`f(x_i) = -1.0`. The number of global minima is :math:`5^n` that are evenly spaced
in the function landscape, where :math:`n` represents the dimension of the problem.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([0.0] * self.dimensions,
[1.0] * self.dimensions))
self.global_optimum = [0.93388314, 0.68141781]
self.fglob = -1.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return -(1.0/self.dimensions)*sum(sin(5*pi*(x**0.75 - 0.05))**6.0)
# -------------------------------------------------------------------------------- #
class Decanomial(Benchmark):
"""
Decanomial test objective function.
This class defines the Decanomial function global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Decanomial}}(\\mathbf{x}) = 0.001 \\left(\\lvert{x_{2}^{4} + 12 x_{2}^{3} + 54 x_{2}^{2} + 108 x_{2} + 81.0}\\rvert + \\lvert{x_{1}^{10} - 20 x_{1}^{9} + 180 x_{1}^{8} - 960 x_{1}^{7} + 3360 x_{1}^{6} - 8064 x_{1}^{5} + 13340 x_{1}^{4} - 15360 x_{1}^{3} + 11520 x_{1}^{2} - 5120 x_{1} + 2624.0}\\rvert\\right)^{2}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/Decanomial.png
:alt: Decanomial function
:align: center
**Two-dimensional Decanomial function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [2, -3]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.custom_bounds = [(0, 2.5), (-2, -4)]
self.global_optimum = [2.0, -3.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
F1 = abs(x[0]**10 - 20*x[0]**9 + 180*x[0]**8 - 960*x[0]**7 + 3360*x[0]**6 - 8064*x[0]**5 + \
13340*x[0]**4 - 15360*x[0]**3 + 11520*x[0]**2 - 5120*x[0] + 2624.0)
F2 = abs(x[1]**4 + 12*x[1]**3 + 54*x[1]**2 + 108*x[1] + 81.0)
return 0.001*(F1 + F2)**2
# -------------------------------------------------------------------------------- #
class Deceptive(Benchmark):
"""
Deceptive test objective function.
This class defines the Deceptive global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Deceptive}}(\\mathbf{x}) = - \\left [\\frac{1}{n} \\sum_{i=1}^{n} g_i(x_i) \\right ]^{\\beta}
Where :math:`\\beta` is a fixed non-linearity factor; in this exercise, :math:`\\beta = 2`. The function :math:`g_i(x_i)`
is given by:
.. math::
g_i(x_i) = \\begin{cases} - \\frac{x}{\\alpha_i} + \\frac{4}{5} & \\textrm{if} \\hspace{5pt} 0 \\leq x_i \\leq \\frac{4}{5} \\alpha_i \\\\
\\frac{5x}{\\alpha_i} -4 & \\textrm{if} \\hspace{5pt} \\frac{4}{5} \\alpha_i \\le x_i \\leq \\alpha_i \\\\
\\frac{5(x - \\alpha_i)}{\\alpha_i-1} + 1 & \\textrm{if} \\hspace{5pt} \\alpha_i \\le x_i \\leq \\frac{1 + 4\\alpha_i}{5} \\\\
\\frac{x - 1}{1 - \\alpha_i} + \\frac{4}{5} & \\textrm{if} \\hspace{5pt} \\frac{1 + 4\\alpha_i}{5} \\le x_i \\leq 1 \\end{cases}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 1]` for :math:`i=1,...,n`.
.. figure:: figures/Deceptive.png
:alt: Deceptive function
:align: center
**Two-dimensional Deceptive function**
*Global optimum*: :math:`f(x_i) = -1` for :math:`x_i = \\alpha_i` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([0.0] * self.dimensions,
[1.0] * self.dimensions))
n = self.dimensions
self.global_optimum = numpy.arange(1.0, n + 1.0)/(n + 1.0)
self.fglob = -1.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
n = self.dimensions
alpha = numpy.arange(1.0, n + 1.0)/(n + 1.0)
beta = 2.0
g = zeros((n, ))
for i in range(n):
if x[i] <= 0.0:
g[i] = x[i]
elif x[i] < 0.8*alpha[i]:
g[i] = -x[i]/alpha[i] + 0.8
elif x[i] < alpha[i]:
g[i] = 5.0*x[i]/alpha[i] - 4.0
elif x[i] < (1.0 + 4*alpha[i])/5.0:
g[i] = 5.0*(x[i] - alpha[i])/(alpha[i] - 1.0) + 1.0
elif x[i] <= 1.0:
g[i] = (x[i] - 1.0)/(1.0 - alpha[i]) + 4.0/5.0
else:
g[i] = x[i] - 1.0
return -((1.0/n)*sum(g))**beta
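# Illustrative spot-check (an addition, not original code): at x = alpha every
# piecewise branch collapses to g_i = 1, so the objective equals -(1)^beta = -1.
def _demo_deceptive():
    import numpy as np
    bench = Deceptive(dimensions=2)
    alpha = np.arange(1.0, bench.dimensions + 1.0) / (bench.dimensions + 1.0)
    return bench.evaluator(alpha)  # expected to be close to -1.0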
# -------------------------------------------------------------------------------- #
class DeckkersAarts(Benchmark):
"""
Deckkers-Aarts test objective function.
This class defines the Deckkers-Aarts global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{DeckkersAarts}}(\\mathbf{x}) = 10^5x_1^2 + x_2^2 - (x_1^2 + x_2^2)^2 + 10^{-5}(x_1^2 + x_2^2)^4
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-20, 20]` for :math:`i=1,2`.
.. figure:: figures/DeckkersAarts.png
:alt: DeckkersAarts function
:align: center
**Two-dimensional Deckkers-Aarts function**
*Global optimum*: :math:`f(x_i) = -24777` for :math:`\\mathbf{x} = [0, \\pm 15]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-20.0] * self.dimensions,
[ 20.0] * self.dimensions))
# WARNING: Custom bounds was a tuple of lists..
self.custom_bounds = [(-1, 1), (14, 16)]
self.global_optimum = [0.0, 15.0]
self.fglob = -24776.0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return 1e5*x1**2.0 + x2**2.0 - (x1**2.0 + x2**2.0)**2.0 + 1e-5*(x1**2.0 + x2**2.0)**4.0
# -------------------------------------------------------------------------------- #
class DeflectedCorrugatedSpring(Benchmark):
"""
DeflectedCorrugatedSpring test objective function.
This class defines the Deflected Corrugated Spring function global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{DeflectedCorrugatedSpring}}(\\mathbf{x}) = 0.1\\sum_{i=1}^n (x_i - \\alpha)^2 - \\cos \\left( K \\sqrt {\\sum_{i=1}^n (x_i - \\alpha)^2} \\right )
Where, in this exercise, :math:`K = 5` and :math:`\\alpha = 5`.
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 2\\alpha]` for :math:`i=1,...,n`.
.. figure:: figures/DeflectedCorrugatedSpring.png
:alt: Deflected Corrugated Spring function
:align: center
**Two-dimensional Deflected Corrugated Spring function**
*Global optimum*: :math:`f(x_i) = -1` for :math:`x_i = \\alpha` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
alpha = 5.0
self.bounds = list(zip([0] * self.dimensions,
[2*alpha] * self.dimensions))
self.global_optimum = [alpha] * self.dimensions
self.fglob = -1.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
K, alpha = 5.0, 5.0
return -cos(K*sqrt(sum((x - alpha)**2))) + 0.1*sum((x - alpha)**2)
# -------------------------------------------------------------------------------- #
class DeVilliersGlasser01(Benchmark):
"""
DeVilliers-Glasser 1 test objective function.
This class defines the DeVilliers-Glasser 1 function global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{DeVilliersGlasser01}}(\\mathbf{x}) = \\sum_{i=1}^{24} \\left[ x_1x_2^{t_i} \\sin(x_3t_i + x_4) - y_i \\right ]^2
Where, in this exercise, :math:`t_i = 0.1(i-1)` and :math:`y_i = 60.137(1.371^{t_i}) \\sin(3.112t_i + 1.761)`.
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [1, 100]` for :math:`i=1,...,n`.
*Global optimum*: :math:`f(\\mathbf{x}) = 0` for :math:`\\mathbf{x} = [60.137, 1.371, 3.112, 1.761]`.
"""
def __init__(self, dimensions=4):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 1.0] * self.dimensions,
[100.0] * self.dimensions))
self.global_optimum = [60.137, 1.371, 3.112, 1.761]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
t_i = 0.1*numpy.arange(24)
y_i = 60.137*(1.371**t_i)*sin(3.112*t_i + 1.761)
x1, x2, x3, x4 = x
return sum((x1*(x2**t_i)*sin(x3*t_i + x4) - y_i)**2.0)
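# Interpretation sketch (added for illustration, not original code): the benchmark is
# a nonlinear least-squares fit of x1 * x2**t * sin(x3*t + x4) against data generated
# from the parameters (60.137, 1.371, 3.112, 1.761), so the residual vanishes there.
def _demo_devilliers_glasser01():
    import numpy as np
    bench = DeVilliersGlasser01()
    return bench.evaluator(np.array([60.137, 1.371, 3.112, 1.761]))  # expected 0.0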
# -------------------------------------------------------------------------------- #
class DeVilliersGlasser02(Benchmark):
"""
DeVilliers-Glasser 2 test objective function.
This class defines the DeVilliers-Glasser 2 function global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{DeVilliersGlasser02}}(\\mathbf{x}) = \\sum_{i=1}^{24} \\left[ x_1x_2^{t_i} \\tanh \\left [x_3t_i + \\sin(x_4t_i) \\right] \\cos(t_ie^{x_5}) - y_i \\right ]^2
Where, in this exercise, :math:`t_i = 0.1(i-1)` and :math:`y_i = 53.81(1.27^{t_i}) \\tanh (3.012t_i + \\sin(2.13t_i)) \\cos(e^{0.507}t_i)`.
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [1, 60]` for :math:`i=1,...,n`.
*Global optimum*: :math:`f(\\mathbf{x}) = 0` for :math:`\\mathbf{x} = [53.81, 1.27, 3.012, 2.13, 0.507]`.
"""
def __init__(self, dimensions=5):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 1.0] * self.dimensions,
[60.0] * self.dimensions))
self.global_optimum = [53.81, 1.27, 3.012, 2.13, 0.507]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
t_i = 0.1*numpy.arange(16)
y_i = 53.81*1.27**t_i*tanh(3.012*t_i + sin(2.13*t_i))*cos(exp(0.507)*t_i)
x1, x2, x3, x4, x5 = x
return sum((x1*(x2**t_i)*tanh(x3*t_i + sin(x4*t_i))*cos(t_i*exp(x5)) - y_i)**2.0)
# -------------------------------------------------------------------------------- #
class DixonPrice(Benchmark):
"""
Dixon and Price test objective function.
This class defines the Dixon and Price global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{DixonPrice}}(\\mathbf{x}) = (x_1 - 1)^2 + \\sum_{i=2}^n i(2x_i^2 - x_{i-1})^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,...,n`.
.. figure:: figures/DixonPrice.png
:alt: Dixon and Price function
:align: center
**Two-dimensional Dixon and Price function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 2^{- \\frac{(2^i-2)}{2^i}}` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.custom_bounds = [(-2, 3), (-2, 3)]
self.global_optimum = [2.0**(-(2.0**i-2.0)/2.0**i)
for i in range(1, self.dimensions+1)]
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
s = 0.0
for i in range(1, self.dimensions):
s += (i + 1)*(2.0*x[i]**2.0 - x[i-1])**2.0
y = s + (x[0] - 1.0)**2.0
return y
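# Worked check of the closed-form minimizer (a sketch, not part of the original
# file): x_i = 2^{-(2^i - 2)/2^i} satisfies 2*x_i^2 = x_{i-1} with x_1 = 1, which
# drives every term of the sum to zero for any dimension.
def _demo_dixon_price(n=5):
    import numpy as np
    bench = DixonPrice(dimensions=n)
    x_star = np.array([2.0 ** (-(2.0 ** i - 2.0) / 2.0 ** i)
                       for i in range(1, n + 1)])
    return bench.evaluator(x_star)  # expected to be close to 0.0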
# -------------------------------------------------------------------------------- #
class Dolan(Benchmark):
"""
Dolan test objective function.
This class defines the Dolan global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Dolan}}(\\mathbf{x}) = \\lvert (x_1 + 1.7x_2)\\sin(x_1) - 1.5x_3 - 0.1x_4\\cos(x_4 + x_5 - x_1) + 0.2x_5^2 - x_2 - 1 \\rvert
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,...,5`.
*Global optimum*: :math:`f(x_i) = 10^{-5}` for :math:`\\mathbf{x} = [8.39045925, 4.81424707, 7.34574133, 68.88246895, 3.85470806]`
"""
def __init__(self, dimensions=5):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.global_optimum = [8.39045925, 4.81424707, 7.34574133,
68.88246895, 3.85470806]
self.fglob = 1e-5
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2, x3, x4, x5 = x
return abs((x1 + 1.7*x2)*sin(x1) - 1.5*x3 - 0.1*x4*cos(x4 + x5 - x1) + 0.2*x5**2.0 - x2 - 1.0)
# -------------------------------------------------------------------------------- #
class DropWave(Benchmark):
"""
DropWave test objective function.
This class defines the DropWave global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{DropWave}}(\\mathbf{x}) = - \\frac{1 + \\cos\\left(12 \\sqrt{\\sum_{i=1}^{n} x_i^{2}}\\right)}{2 + 0.5 \\sum_{i=1}^{n} x_i^{2}}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-5.12, 5.12]` for :math:`i=1,2`.
.. figure:: figures/DropWave.png
:alt: DropWave function
:align: center
**Two-dimensional DropWave function**
*Global optimum*: :math:`f(x_i) = -1` for :math:`x_i = 0` for :math:`i=1,2`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-5.12] * self.dimensions,
[ 5.12] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = -1.0
def evaluator(self, x, *args):
self.fun_evals += 1
norm_x = sum(x**2)
return -(1+cos(12 * sqrt(norm_x)))/(0.5 * norm_x + 2)
# -------------------------------------------------------------------------------- #
class Easom(Benchmark):
"""
Easom test objective function.
This class defines the Easom global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Easom}}(\\mathbf{x}) = a - \\frac{a}{e^{b \\sqrt{\\frac{\\sum_{i=1}^{n} x_i^{2}}{n}}}} + e - e^{\\frac{\\sum_{i=1}^{n} \\cos\\left(c x_i\\right)}{n}}
Where, in this exercise, :math:`a = 20, b = 0.2` and :math:`c = 2\\pi`.
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,2`.
.. figure:: figures/Easom.png
:alt: Easom function
:align: center
**Two-dimensional Easom function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,2`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
a = 20.0
b = 0.2
c = 2*pi
n = self.dimensions
return -a * exp(-b * sqrt(sum(x**2) / n)) - exp(sum(cos(c * x)) / n) + a + exp(1)
# -------------------------------------------------------------------------------- #
class EggCrate(Benchmark):
"""
Egg Crate test objective function.
This class defines the Egg Crate global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{EggCrate}}(\\mathbf{x}) = x_1^2 + x_2^2 + 25 \\left[ \\sin^2(x_1) + \\sin^2(x_2) \\right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-5, 5]` for :math:`i=1,2`.
.. figure:: figures/EggCrate.png
:alt: Egg Crate function
:align: center
**Two-dimensional Egg Crate function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,2`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-5.0] * self.dimensions,
[ 5.0] * self.dimensions))
self.global_optimum = [0.0, 0.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return x1**2.0 + x2**2.0 + 25.0*(sin(x1)**2.0 + sin(x2)**2.0)
# -------------------------------------------------------------------------------- #
class EggHolder(Benchmark):
"""
Egg Holder test objective function.
This class defines the Egg Holder global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{EggHolder}}(\\mathbf{x}) = - x_{1} \\sin\\left(\\sqrt{\\lvert{x_{1} - x_{2} -47}\\rvert}\\right) - \\left(x_{2} + 47\\right) \\sin\\left(\\sqrt{\\left|{\\frac{1}{2} x_{1} + x_{2} + 47}\\right|}\\right)
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-512, 512]` for :math:`i=1,2`.
.. figure:: figures/EggHolder.png
:alt: Egg Holder function
:align: center
**Two-dimensional Egg Holder function**
*Global optimum*: :math:`f(x_i) = -959.640662711` for :math:`\\mathbf{x} = [512, 404.2319]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-512.0] * self.dimensions,
[ 512.0] * self.dimensions))
self.global_optimum = [512.0, 404.2319]
self.fglob = -959.640662711
def evaluator(self, x, *args):
self.fun_evals += 1
return -(x[1]+47)*sin(sqrt(abs(x[1]+x[0]/2+47)))-x[0]*sin(sqrt(abs(x[0]-(x[1]+47))))
# -------------------------------------------------------------------------------- #
class ElAttarVidyasagarDutta(Benchmark):
"""
El-Attar-Vidyasagar-Dutta test objective function.
This class defines the El-Attar-Vidyasagar-Dutta function global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{ElAttarVidyasagarDutta}}(\\mathbf{x}) = (x_1^2 + x_2 - 10)^2 + (x_1 + x_2^2 - 7)^2 + (x_1^2 + x_2^3 - 1)^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,2`.
.. figure:: figures/ElAttarVidyasagarDutta.png
:alt: El-Attar-Vidyasagar-Dutta function
:align: center
**Two-dimensional El-Attar-Vidyasagar-Dutta function**
*Global optimum*: :math:`f(x_i) = 1.712780354` for :math:`\\mathbf{x} = [3.40918683, -2.17143304]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.custom_bounds = [(-4, 4), (-4, 4)]
self.global_optimum = [3.40918683, -2.17143304]
self.fglob = 1.712780354
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return (x1**2.0 + x2 - 10)**2.0 + (x1 + x2**2.0 - 7)**2.0 + (x1**2.0 + x2**3.0 - 1)**2.0
# -------------------------------------------------------------------------------- #
class Exp2(Benchmark):
"""
Exp2 test objective function.
This class defines the Exp2 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Exp2}}(\\mathbf{x}) = \\sum_{i=0}^9 \\left ( e^{-ix_1/10} - 5e^{-10ix_2} -e^{-i/10} + 5e^{-i} \\right )^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 20]` for :math:`i=1,2`.
.. figure:: figures/Exp2.png
:alt: Exp2 function
:align: center
**Two-dimensional Exp2 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = [1, 0.1]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 0.0] * self.dimensions,
[20.0] * self.dimensions))
self.custom_bounds = [(0, 2), (0, 2)]
self.global_optimum = [1.0, 0.1]
self.fglob = 0
def evaluator(self, x, *args):
self.fun_evals += 1
y = 0.0
for i in range(10):
y += (exp(-i*x[0]/10.0) - 5*exp(-i*x[1]*10) - exp(-i/10.0) + 5*exp(-i))**2.0
return y
# -------------------------------------------------------------------------------- #
class Exponential(Benchmark):
"""
Exponential test objective function.
This class defines the Exponential global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Exponential}}(\\mathbf{x}) = -e^{-0.5 \\sum_{i=1}^n x_i^2}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-1, 1]` for :math:`i=1,...,n`.
.. figure:: figures/Exponential.png
:alt: Exponential function
:align: center
**Two-dimensional Exponential function**
*Global optimum*: :math:`f(x_i) = -1` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-1.0] * self.dimensions,
[ 1.0] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = -1.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return -exp(-0.5*sum(x**2.0))
# -------------------------------------------------------------------------------- #
class FreudensteinRoth(Benchmark):
"""
FreudensteinRoth test objective function.
This class defines the Freudenstein & Roth global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{FreudensteinRoth}}(\\mathbf{x}) = \\left\{x_1 - 13 + \\left[(5 - x_2)x_2 - 2 \\right] x_2 \\right\}^2 + \\left \{x_1 - 29 + \\left[(x_2 + 1)x_2 - 14 \\right] x_2 \\right\}^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/FreudensteinRoth.png
:alt: FreudensteinRoth function
:align: center
**Two-dimensional FreudensteinRoth function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [5, 4]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.custom_bounds = [(-3, 3), (-5, 5)]
self.global_optimum = [5.0, 4.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
f1 = (-13.0 + x[0] + ((5.0 - x[1])*x[1] - 2.0)*x[1])**2
f2 = (-29.0 + x[0] + ((x[1] + 1.0)*x[1] - 14.0)*x[1])**2
return f1 + f2
# -------------------------------------------------------------------------------- #
class Gear(Benchmark):
"""
Gear test objective function.
This class defines the Gear global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Gear}}(\\mathbf{x}) = \\left \\{ \\frac{1.0}{6.931} - \\frac{\\lfloor x_1\\rfloor \\lfloor x_2 \\rfloor } {\\lfloor x_3 \\rfloor \\lfloor x_4 \\rfloor } \\right\\}^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [12, 60]` for :math:`i=1,...,4`.
*Global optimum*: :math:`f(x_i) = 2.7 \\cdot 10^{-12}` for :math:`\\mathbf{x} = [16, 19, 43, 49]`, where the various
:math:`x_i` may be permuted.
"""
def __init__(self, dimensions=4):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([12.0] * self.dimensions,
[60.0] * self.dimensions))
self.global_optimum = [16, 19, 43, 49]
self.fglob = 2.7e-12
def evaluator(self, x, *args):
self.fun_evals += 1
return (1.0/6.931 - floor(x[0])*floor(x[1])/(floor(x[2])*floor(x[3])))**2
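# Behaviour sketch (illustrative addition, not original code): only floor(x_i) enters
# the objective, so the function is piecewise constant and any point inside the unit
# cell around the integer gear-tooth counts scores exactly like the integers.
def _demo_gear():
    import numpy as np
    bench = Gear()
    exact = bench.evaluator(np.array([16.0, 19.0, 43.0, 49.0]))
    shifted = bench.evaluator(np.array([16.4, 19.7, 43.2, 49.9]))
    return exact, shifted  # both values are approximately 2.7e-12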
# -------------------------------------------------------------------------------- #
class Giunta(Benchmark):
"""
Giunta test objective function.
This class defines the Giunta global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Giunta}}(\\mathbf{x}) = 0.6 + \\sum_{i=1}^{n} \\left[\\sin^{2}\\left(1 - \\frac{16}{15} x_i\\right) - \\frac{1}{50} \\sin\\left(4 - \\frac{64}{15} x_i\\right) - \\sin\\left(1 - \\frac{16}{15} x_i\\right)\\right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-1, 1]` for :math:`i=1,2`.
.. figure:: figures/Giunta.png
:alt: Giunta function
:align: center
**Two-dimensional Giunta function**
*Global optimum*: :math:`f(x_i) = 0.06447042053690566` for :math:`\\mathbf{x} = [0.4673200277395354, 0.4673200169591304]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-1.0] * self.dimensions,
[ 1.0] * self.dimensions))
self.global_optimum = [0.4673200277395354, 0.4673200169591304]
self.fglob = 0.06447042053690566
def evaluator(self, x, *args):
self.fun_evals += 1
arg = 16*x/15.0 - 1
return 0.6 + sum(sin(arg) + sin(arg)**2 + sin(4*arg)/50)
# -------------------------------------------------------------------------------- #
class GoldsteinPrice(Benchmark):
"""
Goldstein-Price test objective function.
This class defines the Goldstein-Price global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{GoldsteinPrice}}(\\mathbf{x}) = \\left[ 1+(x_1+x_2+1)^2(19-14x_1+3x_1^2-14x_2+6x_1x_2+3x_2^2) \\right] \\left[ 30+(2x_1-3x_2)^2(18-32x_1+12x_1^2+48x_2-36x_1x_2+27x_2^2) \\right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-2, 2]` for :math:`i=1,2`.
.. figure:: figures/GoldsteinPrice.png
:alt: Goldstein-Price function
:align: center
**Two-dimensional Goldstein-Price function**
*Global optimum*: :math:`f(x_i) = 3` for :math:`\\mathbf{x} = [0, -1]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-2.0] * self.dimensions,
[ 2.0] * self.dimensions))
self.global_optimum = [0., -1.]
self.fglob = 3.0
def evaluator(self, x, *args):
self.fun_evals += 1
a = 1+(x[0]+x[1]+1)**2*(19-14*x[0]+3*x[0]**2-14*x[1]+6*x[0]*x[1]+3*x[1]**2)
b = 30+(2*x[0]-3*x[1])**2*(18-32*x[0]+12*x[0]**2+48*x[1]-36*x[0]*x[1]+27*x[1]**2)
return a*b
# -------------------------------------------------------------------------------- #
class Griewank(Benchmark):
"""
Griewank test objective function.
This class defines the Griewank global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Griewank}}(\\mathbf{x}) = \\frac{1}{4000}\\sum_{i=1}^n x_i^2 - \\prod_{i=1}^n\\cos\\left(\\frac{x_i}{\\sqrt{i}}\\right) + 1
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-600, 600]` for :math:`i=1,...,n`.
.. figure:: figures/Griewank.png
:alt: Griewank function
:align: center
**Two-dimensional Griewank function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-600.0] * self.dimensions,
[ 600.0] * self.dimensions))
self.custom_bounds = [(-50, 50), (-50, 50)]
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return sum(x**2)/4000.0 - prod(cos(x/sqrt(1.0+arange(len(x))))) + 1.0
# -------------------------------------------------------------------------------- #
class Gulf(Benchmark):
"""
Gulf test objective function.
This class defines the Gulf global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Gulf}}(\\mathbf{x}) = \\sum_{i=1}^m \\left( e^{-\\frac{\\lvert y_i - x_2 \\rvert^{x_3}}{x_1} } - t_i \\right)^2
Where, in this exercise:
.. math::
t_i = i/100 \\\\
y_i = 25 + [-50 \\log(t_i)]^{2/3}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 60]` for :math:`i=1,2,3`.
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [50, 25, 1.5]`
"""
def __init__(self, dimensions=3):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 0.0] * self.dimensions,
[50.0] * self.dimensions))
self.global_optimum = [50.0, 25.0, 1.5]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2, x3 = x
y = 0.0
for i in range(30):
ti = (i + 1)*0.01
yi = 25.0 + (-50*log(ti))**(2.0/3.0)
ai = yi - x2
y += (exp(-((abs(ai)**x3)/x1)) - ti)**2.0
return y
# -------------------------------------------------------------------------------- #
class Hansen(Benchmark):
"""
Hansen test objective function.
This class defines the Hansen global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Hansen}}(\\mathbf{x}) = \\left[ \\sum_{i=0}^4(i+1)\\cos(ix_1+i+1)\\right ] \\left[\\sum_{j=0}^4(j+1)\\cos[(j+2)x_2+j+1]\\right ]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/Hansen.png
:alt: Hansen function
:align: center
**Two-dimensional Hansen function**
*Global optimum*: :math:`f(x_i) = -176.54` for :math:`\\mathbf{x} = [-7.58989583, -7.70831466]`.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [-7.58989583, -7.70831466]
self.fglob = -176.54
def evaluator(self, x, *args):
self.fun_evals += 1
f1 = f2 = 0.0
for i in range(5):
f1 += (i+1)*cos(i*x[0] + i + 1)
f2 += (i+1)*cos((i+2)*x[1] + i + 1)
return f1*f2
# -------------------------------------------------------------------------------- #
class Hartmann3(Benchmark):
"""
Hartmann3 test objective function.
This class defines the Hartmann3 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Hartmann3}}(\\mathbf{x}) = -\\sum\\limits_{i=1}^{4} c_i e^{-\\sum\\limits_{j=1}^{n}a_{ij}(x_j - p_{ij})^2}
Where, in this exercise:
.. math::
\\begin{array}{l|ccc|c|ccr}
\\hline
i & & a_{ij}& & c_i & & p_{ij} & \\\\
\\hline
1 & 3.0 & 10.0 & 30.0 & 1.0 & 0.3689 & 0.1170 & 0.2673 \\\\
2 & 0.1 & 10.0 & 35.0 & 1.2 & 0.4699 & 0.4387 & 0.7470 \\\\
3 & 3.0 & 10.0 & 30.0 & 3.0 & 0.1091 & 0.8732 & 0.5547 \\\\
4 & 0.1 & 10.0 & 35.0 & 3.2 & 0.0381 & 0.5743 & 0.8828 \\\\
\\hline
\\end{array}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 1]` for :math:`i=1,2,3`.
*Global optimum*: :math:`f(x_i) = -3.86278214782076` for :math:`\\mathbf{x} = [0.1, 0.55592003, 0.85218259]`
"""
def __init__(self, dimensions=3):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([0.0] * self.dimensions,
[1.0] * self.dimensions))
self.global_optimum = [0.1, 0.55592003, 0.85218259]
self.fglob = -3.86278214782076
def evaluator(self, x, *args):
self.fun_evals += 1
a = asarray([[3.0, 0.1, 3.0, 0.1],
[10.0, 10.0, 10.0, 10.0],
[30.0, 35.0, 30.0, 35.0]])
p = asarray([[0.36890, 0.46990, 0.10910, 0.03815],
[0.11700, 0.43870, 0.87320, 0.57430],
[0.26730, 0.74700, 0.55470, 0.88280]])
c = asarray([1.0, 1.2, 3.0, 3.2])
d = zeros_like(c)
for i in range(4):
d[i] = sum(a[:, i]*(x - p[:, i])**2)
return -sum(c*exp(-d))
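# Numerical spot-check (added sketch): evaluating at the stored minimizer should land
# very close to fglob; the a, p and c arrays above are simply the transposed form of
# the coefficient table given in the docstring.
def _demo_hartmann3():
    import numpy as np
    bench = Hartmann3()
    return bench.evaluator(np.asarray(bench.global_optimum))  # close to bench.fglob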
# -------------------------------------------------------------------------------- #
class Hartmann6(Benchmark):
"""
Hartmann6 test objective function.
This class defines the Hartmann6 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Hartmann6}}(\\mathbf{x}) = -\\sum\\limits_{i=1}^{4} c_i e^{-\\sum\\limits_{j=1}^{n}a_{ij}(x_j - p_{ij})^2}
Where, in this exercise:
.. math::
\\begin{array}{l|cccccc|r}
\\hline
i & & & a_{ij} & & & & c_i \\\\
\\hline
1 & 10.0 & 3.0 & 17.0 & 3.50 & 1.70 & 8.00 & 1.0 \\\\
2 & 0.05 & 10.0 & 17.0 & 0.10 & 8.00 & 14.00 & 1.2 \\\\
3 & 3.00 & 3.50 & 1.70 & 10.0 & 17.00 & 8.00 & 3.0 \\\\
4 & 17.00 & 8.00 & 0.05 & 10.00 & 0.10 & 14.00 & 3.2 \\\\
\\hline
\\end{array}
\\newline
\\\\
\\newline
\\begin{array}{l|cccccr}
\\hline
i & & & p_{ij} & & & \\\\
\\hline
1 & 0.1312 & 0.1696 & 0.5569 & 0.0124 & 0.8283 & 0.5886 \\\\
2 & 0.2329 & 0.4135 & 0.8307 & 0.3736 & 0.1004 & 0.9991 \\\\
3 & 0.2348 & 0.1451 & 0.3522 & 0.2883 & 0.3047 & 0.6650 \\\\
4 & 0.4047 & 0.8828 & 0.8732 & 0.5743 & 0.1091 & 0.0381 \\\\
\\hline
\\end{array}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 1]` for :math:`i=1,...,6`.
*Global optimum*: :math:`f(x_i) = -3.32236801141551` for :math:`\\mathbf{x} = [0.20168952, 0.15001069, 0.47687398, 0.27533243, 0.31165162, 0.65730054]`
"""
def __init__(self, dimensions=6):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([0.0] * self.dimensions,
[1.0] * self.dimensions))
self.global_optimum = [0.20168952, 0.15001069, 0.47687398,
0.27533243, 0.31165162, 0.65730054]
self.fglob = -3.32236801141551
def evaluator(self, x, *args):
self.fun_evals += 1
a = asarray([[10.00, 0.05, 3.00, 17.00],
[3.00, 10.00, 3.50, 8.00],
[17.00, 17.00, 1.70, 0.05],
[3.50, 0.10, 10.00, 10.00],
[1.70, 8.00, 17.00, 0.10],
[8.00, 14.00, 8.00, 14.00]])
p = asarray([[0.1312, 0.2329, 0.2348, 0.4047],
[0.1696, 0.4135, 0.1451, 0.8828],
[0.5569, 0.8307, 0.3522, 0.8732],
[0.0124, 0.3736, 0.2883, 0.5743],
[0.8283, 0.1004, 0.3047, 0.1091],
[0.5886, 0.9991, 0.6650, 0.0381]])
c = asarray([1.0, 1.2, 3.0, 3.2])
d = zeros_like(c)
for i in range(4):
d[i] = sum(a[:, i]*(x - p[:, i])**2)
return -sum(c*exp(-d))
# -------------------------------------------------------------------------------- #
class HelicalValley(Benchmark):
"""
HelicalValley test objective function.
This class defines the HelicalValley global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{HelicalValley}}(\\mathbf{x}) = 100\\left[(x_3 - 10\\Psi(x_1, x_2))^2 + \\left(\\sqrt{x_1^2 + x_2^2} - 1\\right)^2\\right] + x_3^2
Where, in this exercise:
.. math::
2\\pi\\Psi(x,y) = \\begin{cases} \\arctan(y/x) & \\textrm{for} x > 0 \\\\
\\pi + \\arctan(y/x) & \\textrm{for} x < 0 \\end{cases}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-\infty, \\infty]` for :math:`i=1,2,3`.
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [1, 0, 0]`
"""
def __init__(self, dimensions=3):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100] * self.dimensions))
self.global_optimum = [1.0, 0.0, 0.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
return 100*((x[2] - 10*arctan2(x[1], x[0])/2/pi)**2 + (sqrt(x[0]**2 + x[1]**2) - 1)**2) + x[2]**2
# -------------------------------------------------------------------------------- #
class HimmelBlau(Benchmark):
"""
HimmelBlau test objective function.
This class defines the HimmelBlau global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{HimmelBlau}}(\\mathbf{x}) = (x_1^2 + x_2 - 11)^2 + (x_1 + x_2^2 -7)^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-6, 6]` for :math:`i=1,2`.
.. figure:: figures/HimmelBlau.png
:alt: HimmelBlau function
:align: center
**Two-dimensional HimmelBlau function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [3, 2]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-6] * self.dimensions,
[ 6] * self.dimensions))
self.global_optimum = [3.0, 2.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
return (x[0] * x[0] + x[1] - 11)**2 + (x[0] + x[1] * x[1] - 7)**2
# -------------------------------------------------------------------------------- #
class HolderTable(Benchmark):
"""
HolderTable test objective function.
This class defines the HolderTable global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{HolderTable}}(\\mathbf{x}) = - \\left|{e^{\\left|{1 - \\frac{\\sqrt{x_{1}^{2} + x_{2}^{2}}}{\\pi} }\\right|} \\sin\\left(x_{1}\\right) \\cos\\left(x_{2}\\right)}\\right|
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/HolderTable.png
:alt: HolderTable function
:align: center
**Two-dimensional HolderTable function**
*Global optimum*: :math:`f(x_i) = -19.20850256788675` for :math:`x_i = \\pm 9.664590028909654` for :math:`i=1,2`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [(8.055023472141116 , 9.664590028909654),
(-8.055023472141116, 9.664590028909654),
(8.055023472141116 , -9.664590028909654),
(-8.055023472141116, -9.664590028909654)]
self.fglob = -19.20850256788675
def evaluator(self, x, *args):
self.fun_evals += 1
return -abs(sin(x[0])*cos(x[1])*exp(abs(1 - sqrt(x[0]**2 + x[1]**2)/pi)))
# -------------------------------------------------------------------------------- #
class Holzman(Benchmark):
"""
Holzman test objective function.
This class defines the Holzman global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Holzman}}(\\mathbf{x}) = \\sum_{i=0}^{99} \\left [ e^{\\frac{1}{x_1} (u_i-x_2)^{x_3}} -0.1(i+1) \\right ]
Where, in this exercise:
.. math::
u_i = 25 + (-50 \\log{[0.01(i+1)]})^{2/3}
Here, :math:`n` represents the number of dimensions and :math:`x_1 \\in [0, 100], x_2 \\in [0, 25.6], x_3 \\in [0, 5]`.
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [50, 25, 1.5]`
"""
def __init__(self, dimensions=3):
Benchmark.__init__(self, dimensions)
self.bounds = ([0.0, 100.0], [0.0, 25.6], [0.0, 5.0])
self.global_optimum = [50.0, 25.0, 1.5]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
y = 0.0
for i in range(100):
ui = 25.0 + (-50.0*log(0.01*(i+1)))**(2.0/3.0)
y += -0.1*(i+1) + exp(1.0/x[0]*(ui-x[1])**x[2])
return y
# -------------------------------------------------------------------------------- #
class Hosaki(Benchmark):
"""
Hosaki test objective function.
This class defines the Hosaki global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Hosaki}}(\\mathbf{x}) = \\left ( 1 - 8x_1 + 7x_1^2 - \\frac{7}{3}x_1^3 + \\frac{1}{4}x_1^4 \\right )x_2^2e^{-x_2}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 10]` for :math:`i=1,2`.
.. figure:: figures/Hosaki.png
:alt: Hosaki function
:align: center
**Two-dimensional Hosaki function**
*Global optimum*: :math:`f(x_i) = -2.3458` for :math:`\\mathbf{x} = [4, 2]`.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 0.0] * self.dimensions,
[10.0] * self.dimensions))
self.custom_bounds = [(0, 5), (0, 5)]
self.global_optimum = [4, 2]
self.fglob = -2.3458
def evaluator(self, x, *args):
self.fun_evals += 1
return (1 + x[0]*(-8 + x[0]*(7 + x[0]*(-7.0/3.0 + x[0] *1.0/4.0))))*x[1]*x[1] * exp(-x[1])
# -------------------------------------------------------------------------------- #
class Infinity(Benchmark):
"""
Infinity test objective function.
This class defines the Infinity global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Infinity}}(\\mathbf{x}) = \\sum_{i=1}^{n} x_i^{6} \\left [ \\sin\\left ( \\frac{1}{x_i} \\right )+2 \\right ]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-1, 1]` for :math:`i=1,...,n`.
.. figure:: figures/Infinity.png
:alt: Infinity function
:align: center
**Two-dimensional Infinity function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-1.0] * self.dimensions,
[ 1.0] * self.dimensions))
self.global_optimum = [1e-16] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return sum(x**6.0*(sin(1.0/x) + 2.0))
# -------------------------------------------------------------------------------- #
class JennrichSampson(Benchmark):
"""
Jennrich-Sampson test objective function.
This class defines the Jennrich-Sampson global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{JennrichSampson}}(\\mathbf{x}) = \\sum_{i=1}^{10} \\left [2 + 2i - (e^{ix_1} + e^{ix_2}) \\right ]^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-1, 1]` for :math:`i=1,2`.
.. figure:: figures/JennrichSampson.png
:alt: Jennrich-Sampson function
:align: center
**Two-dimensional Jennrich-Sampson function**
*Global optimum*: :math:`f(x_i) = 124.3621824` for :math:`\\mathbf{x} = [0.257825, 0.257825]`.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-1.0] * self.dimensions,
[ 1.0] * self.dimensions))
self.custom_bounds = [(-1, 0.34), (-1, 0.34)]
self.global_optimum = [0.257825, 0.257825]
self.fglob = 124.3621824
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
rng = numpy.arange(1.0, 11.0)
return sum((2.0 + 2.0*rng - (exp(rng*x1) + exp(rng*x2)))**2.0)
# -------------------------------------------------------------------------------- #
class Judge(Benchmark):
"""
Judge test objective function.
This class defines the Judge global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Judge}}(\\mathbf{x}) = \\sum_{i=1}^{20} \\left [ \\left (x_1 + B_i x_2 + C_i x_2^2 \\right ) - A_i \\right ]^2
Where, in this exercise:
.. math::
\\begin{cases} A = [4.284, 4.149, 3.877, 0.533, 2.211, 2.389, 2.145, 3.231, 1.998, 1.379, 2.106, 1.428, 1.011, 2.179, 2.858, 1.388, 1.651, 1.593, 1.046, 2.152] \\\\
B = [0.286, 0.973, 0.384, 0.276, 0.973, 0.543, 0.957, 0.948, 0.543, 0.797, 0.936, 0.889, 0.006, 0.828, 0.399, 0.617, 0.939, 0.784, 0.072, 0.889] \\\\
C = [0.645, 0.585, 0.310, 0.058, 0.455, 0.779, 0.259, 0.202, 0.028, 0.099, 0.142, 0.296, 0.175, 0.180, 0.842, 0.039, 0.103, 0.620, 0.158, 0.704] \\end{cases}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/Judge.png
:alt: Judge function
:align: center
**Two-dimensional Judge function**
*Global optimum*: :math:`f(x_i) = 16.0817307` for :math:`\\mathbf{x} = [0.86479, 1.2357]`.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.custom_bounds = [(-2.0, 2.0), (-2.0, 2.0)]
self.global_optimum = [0.86479, 1.2357]
self.fglob = 16.0817307
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
Y = asarray([4.284, 4.149, 3.877, 0.533, 2.211, 2.389, 2.145,
3.231, 1.998, 1.379, 2.106, 1.428, 1.011, 2.179,
2.858, 1.388, 1.651, 1.593, 1.046, 2.152])
X2 = asarray([0.286, 0.973, 0.384, 0.276, 0.973, 0.543, 0.957,
0.948, 0.543, 0.797, 0.936, 0.889, 0.006, 0.828,
0.399, 0.617, 0.939, 0.784, 0.072, 0.889])
X3 = asarray([0.645, 0.585, 0.310, 0.058, 0.455, 0.779, 0.259,
0.202, 0.028, 0.099, 0.142, 0.296, 0.175, 0.180,
0.842, 0.039, 0.103, 0.620, 0.158, 0.704])
return sum(((x1 + x2*X2 + (x2**2.0)*X3) - Y)**2.0)
# -------------------------------------------------------------------------------- #
class Katsuura(Benchmark):
"""
Katsuura test objective function.
This class defines the Katsuura global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Katsuura}}(\\mathbf{x}) = \\prod_{i=0}^{n-1} \\left [ 1 + (i+1) \\sum_{k=1}^{d} \\lfloor (2^k x_i) \\rfloor 2^{-k} \\right ]
Where, in this exercise, :math:`d = 32`.
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 10]` for :math:`i=1,...,n`.
.. figure:: figures/Katsuura.png
:alt: Katsuura function
:align: center
**Two-dimensional Katsuura function**
*Global optimum*: :math:`f(x_i) = 1` for :math:`x_i = 0` for :math:`i=1,...,n`.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 0.0] * self.dimensions,
[10.0] * self.dimensions))
self.custom_bounds = [(0, 1), (0, 1)]
self.global_optimum = [0.0] * self.dimensions
self.fglob = 1.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
d = 32
prod = 1.0
for i in range(self.dimensions):
s = 0.0
for k in range(1, d+1):
pow2 = 2.0**k
s += round(pow2*x[i])/pow2
prod = prod*(1.0 + (i+1.0)*s)
return prod
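# Quick check (illustrative addition): at the origin every inner sum is zero, so the
# product of (1 + (i+1)*0) factors is exactly 1, matching fglob.
def _demo_katsuura():
    import numpy as np
    bench = Katsuura(dimensions=2)
    return bench.evaluator(np.zeros(bench.dimensions))  # expected to be 1.0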
# -------------------------------------------------------------------------------- #
class Keane(Benchmark):
"""
Keane test objective function.
This class defines the Keane global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Keane}}(\\mathbf{x}) = \\frac{\\sin^2(x_1 - x_2)\\sin^2(x_1 + x_2)}{\\sqrt{x_1^2 + x_2^2}}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 10]` for :math:`i=1,2`.
.. figure:: figures/Keane.png
:alt: Keane function
:align: center
**Two-dimensional Keane function**
*Global optimum*: :math:`f(x_i) = 0.673668` for :math:`\\mathbf{x} = [0.0, 1.39325]`.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 0.0] * self.dimensions,
[10.0] * self.dimensions))
self.custom_bounds = [(-1, 0.34), (-1, 0.34)]
self.global_optimum = [0.0, 1.39325]
self.fglob = 0.673668
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return (sin(x1 - x2)**2.0*sin(x1 + x2)**2.0)/sqrt(x1**2.0 + x2**2.0)
# -------------------------------------------------------------------------------- #
class Kowalik(Benchmark):
"""
Kowalik test objective function.
This class defines the Kowalik global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Kowalik}}(\\mathbf{x}) = \\sum_{i=0}^{10} \\left [ a_i - \\frac{x_1(b_i^2+b_ix_2)}{b_i^2 + b_ix_3 + x_4} \\right ]^2
Where:
.. math::
\\mathbf{a} = [0.1957, 0.1947, 0.1735, 0.1600, 0.0844, 0.0627, 0.0456, 0.0342, 0.0323, 0.0235, 0.0246] \\\\
\\mathbf{b} = [4, 2, 1, 1/2, 1/4, 1/6, 1/8, 1/10, 1/12, 1/14, 1/16]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-5, 5]` for :math:`i=1,...,4`.
*Global optimum*: :math:`f(x_i) = 0.00030748610` for :math:`\\mathbf{x} = [0.192833, 0.190836, 0.123117, 0.135766]`.
"""
def __init__(self, dimensions=4):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-5.0] * self.dimensions,
[ 5.0] * self.dimensions))
self.global_optimum = [0.192833, 0.190836, 0.123117, 0.135766]
self.fglob = 0.00030748610
def evaluator(self, x, *args):
self.fun_evals += 1
b = asarray([4.0, 2.0, 1.0, 1/2.0, 1/4.0, 1/6.0, 1/8.0,
1/10.0, 1/12.0, 1/14.0, 1/16.0])
a = asarray([0.1957, 0.1947, 0.1735, 0.1600, 0.0844, 0.0627,
0.0456, 0.0342, 0.0323, 0.0235, 0.0246])
y = 0.0
for i in range(11):
bb = b[i]*b[i]
t = a[i] - (x[0]*(bb + b[i]*x[1])/(bb + b[i]*x[2]+x[3]))
y += t*t
return y
# -------------------------------------------------------------------------------- #
class Langermann(Benchmark):
"""
Langermann test objective function.
This class defines the Langermann global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Langermann}}(\\mathbf{x}) = - \\sum_{i=1}^{5} \\frac{c_i \\cos\\left\{\\pi \\left[\\left(x_{1}- a_i\\right)^{2} + \\left(x_{2} - b_i \\right)^{2}\\right]\\right\}}{e^{\\frac{\\left( x_{1} - a_i\\right)^{2} + \\left( x_{2} - b_i\\right)^{2}}{\\pi}}}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 10]` for :math:`i=1,2`.
.. figure:: figures/Langermann.png
:alt: Langermann function
:align: center
**Two-dimensional Langermann function**
*Global optimum*: :math:`f(x_i) = -5.1621259` for :math:`\\mathbf{x} = [2.00299219, 1.006096]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 0.0] * self.dimensions,
[10.0] * self.dimensions))
self.global_optimum = [2.00299219, 1.006096]
self.fglob = -5.1621259
def evaluator(self, x, *args):
self.fun_evals += 1
a = [3,5,2,1,7]
b = [5,2,1,4,9]
c = [1,2,5,2,3]
return -sum(c*exp(-(1/pi)*((x[0]-a)**2 + (x[1]-b)**2))*cos(pi*((x[0]-a)**2 + (x[1]-b)**2)))
# -------------------------------------------------------------------------------- #
class LennardJones(Benchmark):
"""
LennardJones test objective function.
This class defines the Lennard-Jones global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{LennardJones}}(\\mathbf{x}) = \\sum_{i=0}^{n-2}\\sum_{j=i+1}^{n-1}\\left(\\frac{1}{r_{ij}^{12}} - \\frac{2}{r_{ij}^{6}}\\right)
Where, in this exercise:
.. math::
r_{ij} = \\sqrt{(x_{3i}-x_{3j})^2 + (x_{3i+1}-x_{3j+1})^2 + (x_{3i+2}-x_{3j+2})^2}
Valid for any dimension, :math:`n = 3*k, k=2,3,4,...,20`. :math:`k` is the number of atoms in 3-D space
constraints: unconstrained type: multi-modal with one global minimum; non-separable
Value-to-reach: :math:`minima[k-2] + 0.0001`. See array of minima below; additional minima available at
the Cambridge cluster database:
http://www-wales.ch.cam.ac.uk/~jon/structures/LJ/tables.150.html
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-4, 4]` for :math:`i=1,...,n`.
*Global optimum*:
.. math::
minima = [-1.,-3.,-6.,-9.103852,-12.712062,-16.505384,-19.821489,-24.113360, \\\\
-28.422532,-32.765970,-37.967600,-44.326801,-47.845157,-52.322627, \\\\
-56.815742,-61.317995, -66.530949,-72.659782,-77.1777043]
"""
def __init__(self, dimensions=6):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-4.0] * self.dimensions,
[ 4.0] * self.dimensions))
minima = [-1.0, -3.0, -6.0, -9.103852, -12.712062, -16.505384,
-19.821489, -24.113360, -28.422532, -32.765970,
-37.967600, -44.326801, -47.845157, -52.322627,
-56.815742, -61.317995, -66.530949, -72.659782,
-77.1777043]
# WARNING: No global optimum for this function? Strange
self.global_optimum = []
k = dimensions//3
self.fglob = minima[k-2]
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
k = self.dimensions//3
s = 0.0
for i in range(k-1):
for j in range(i+1, k):
a = 3*i
b = 3*j
xd = x[a] - x[b]
yd = x[a+1] - x[b+1]
zd = x[a+2] - x[b+2]
ed = xd*xd + yd*yd + zd*zd
ud = ed*ed*ed
if ed > 0.0:
s += (1.0/ud-2.0)/ud
return s
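# Layout sketch (an assumption spelled out for clarity, not original code): the
# decision vector packs k = n/3 atoms as consecutive (x, y, z) triples. Two atoms
# exactly one unit apart realise the pairwise minimum of -1.0, i.e. minima[0].
def _demo_lennard_jones():
    import numpy as np
    bench = LennardJones(dimensions=6)
    atoms = np.array([0.0, 0.0, 0.0,   # atom 1 at the origin
                      1.0, 0.0, 0.0])  # atom 2 at unit distance along x
    return bench.evaluator(atoms)      # expected to be -1.0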
# -------------------------------------------------------------------------------- #
class Leon(Benchmark):
"""
Leon test objective function.
This class defines the Leon global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Leon}}(\\mathbf{x}) = \\left(1 - x_{1}\\right)^{2} + 100 \\left(x_{2} - x_{1}^{2} \\right)^{2}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-1.2, 1.2]` for :math:`i=1,2`.
.. figure:: figures/Leon.png
:alt: Leon function
:align: center
**Two-dimensional Leon function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 1` for :math:`i=1,2`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-1.2] * self.dimensions,
[ 1.2] * self.dimensions))
self.global_optimum = [1.0] * self.dimensions
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
return 100*(x[1] - x[0]**2.0)**2.0 + (1 - x[0])**2.0
# -------------------------------------------------------------------------------- #
class Levy03(Benchmark):
"""
Levy 3 test objective function.
This class defines the Levy 3 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Levy03}}(\\mathbf{x}) = \\sin^2(\\pi y_1)+\\sum_{i=1}^{n-1}(y_i-1)^2[1+10\\sin^2(\\pi y_i+1)]+(y_n-1)^2[1+\\sin^2(2\\pi y_n)]
Where, in this exercise:
.. math::
y_i=1+\\frac{x_i-1}{4}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,...,n`.
.. figure:: figures/Levy03.png
:alt: Levy 3 function
:align: center
**Two-dimensional Levy 3 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 1` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.custom_bounds = [(-5, 5), (-5, 5)]
self.global_optimum = [1.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
n = len(x)
z = zeros_like(x)
for i in range(n):
z[i] = 1+(x[i]-1)/4
s = sin(pi*z[0])**2
for i in range(n-1):
s = s + (z[i]-1)**2*(1+10*(sin(pi*z[i]+1))**2)
y = s+(z[n-1]-1)**2*(1+(sin(2*pi*z[n-1]))**2)
return y
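# Equivalent vectorised form (a sketch assuming numpy broadcasting is acceptable
# here); it mirrors the loop in Levy03.evaluator term by term and may help when the
# per-call Python loop becomes a bottleneck.
def _levy03_vectorised(x):
    import numpy as np
    x = np.asarray(x, dtype=float)
    z = 1.0 + (x - 1.0) / 4.0
    return (np.sin(np.pi * z[0]) ** 2
            + np.sum((z[:-1] - 1.0) ** 2 * (1.0 + 10.0 * np.sin(np.pi * z[:-1] + 1.0) ** 2))
            + (z[-1] - 1.0) ** 2 * (1.0 + np.sin(2.0 * np.pi * z[-1]) ** 2))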
# -------------------------------------------------------------------------------- #
class Levy05(Benchmark):
"""
Levy 5 test objective function.
This class defines the Levy 5 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Levy05}}(\\mathbf{x}) = \\sum_{i=1}^{5} i \\cos \\left[(i-1)x_1 + i \\right] \\times \\sum_{j=1}^{5} j \\cos \\left[(j+1)x_2 + j \\right] + (x_1 + 1.42513)^2 + (x_2 + 0.80032)^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,...,n`.
.. figure:: figures/Levy05.png
:alt: Levy 5 function
:align: center
**Two-dimensional Levy 5 function**
*Global optimum*: :math:`f(x_i) = -176.1375` for :math:`\\mathbf{x} = [-1.3068, -1.4248]`.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.custom_bounds = [(-2.0, 2.0), (-2.0, 2.0)]
self.global_optimum = [-1.30685, -1.42485]
self.fglob = -176.1375
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
rng = numpy.arange(1.0, 6.0)
return sum(rng*cos((rng-1.0)*x1 + rng))*sum(rng*cos((rng+1.0)*x2 + rng)) + (x1 + 1.42513)**2.0 + (x2 + 0.80032)**2.0
# -------------------------------------------------------------------------------- #
class Levy13(Benchmark):
"""
Levy13 test objective function.
This class defines the Levy13 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Levy13}}(\\mathbf{x}) = \\left(x_{1} -1\\right)^{2} \\left[\sin^{2}\\left(3 \\pi x_{2}\\right) + 1\\right] + \\left(x_{2} -1\\right)^{2} \\left[\\sin^{2}\\left(2 \\pi x_{2}\\right) + 1\\right] + \\sin^{2}\\left(3 \\pi x_{1}\\right)
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/Levy13.png
:alt: Levy13 function
:align: center
**Two-dimensional Levy13 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 1` for :math:`i=1,2`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.custom_bounds = [(-5, 5), (-5, 5)]
self.global_optimum = [1.0] * self.dimensions
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
return (sin(3*pi*x[0]))**2 + ((x[0]-1)**2)*(1 + (sin(3*pi*x[1]))**2) + ((x[1]-1)**2)*(1 + (sin(2*pi*x[1]))**2)
# -------------------------------------------------------------------------------- #
class Matyas(Benchmark):
"""
Matyas test objective function.
This class defines the Matyas global optimization problem. This
    is a unimodal minimization problem defined as follows:
.. math::
f_{\\text{Matyas}}(\\mathbf{x}) = 0.26(x_1^2 + x_2^2) - 0.48x_1x_2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/Matyas.png
:alt: Matyas function
:align: center
**Two-dimensional Matyas function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,2`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
return 0.26*(x[0]**2 + x[1]**2) - 0.48*x[0]*x[1]
# -------------------------------------------------------------------------------- #
class McCormick(Benchmark):
"""
McCormick test objective function.
This class defines the McCormick global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
        f_{\\text{McCormick}}(\\mathbf{x}) = \\sin\\left(x_{1} + x_{2}\\right) + \\left(x_{1} - x_{2}\\right)^{2} - 1.5 x_{1} + 2.5 x_{2} + 1
Here, :math:`n` represents the number of dimensions and :math:`x_1 \\in [-1.5, 4]`, :math:`x_2 \\in [-3, 4]`.
.. figure:: figures/McCormick.png
:alt: McCormick function
:align: center
**Two-dimensional McCormick function**
*Global optimum*: :math:`f(x_i) = -1.913222954981037` for :math:`\\mathbf{x} = [-0.5471975602214493, -1.547197559268372]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = [(-1.5, 4.0), (-3.0, 4.0)]
self.global_optimum = [-0.5471975602214493, -1.547197559268372]
self.fglob = -1.913222954981037
def evaluator(self, x, *args):
self.fun_evals += 1
return sin(x[0] + x[1]) + (x[0] - x[1])**2 - 1.5*x[0] + 2.5*x[1] + 1
# -------------------------------------------------------------------------------- #
class Michalewicz(Benchmark):
"""
Michalewicz test objective function.
This class defines the Michalewicz global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Michalewicz}}(\\mathbf{x}) = - \\sum_{i=1}^{2} \\sin\\left(x_i\\right) \\sin^{2 m}\\left(\\frac{i x_i^{2}}{\\pi}\\right)
Where, in this exercise, :math:`m = 10`.
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, \\pi]` for :math:`i=1,2`.
.. figure:: figures/Michalewicz.png
:alt: Michalewicz function
:align: center
**Two-dimensional Michalewicz function**
    *Global optimum*: :math:`f(x_i) = -1.8013` for :math:`\\mathbf{x} \\approx [2.20290552, 1.57079633]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([0.0] * self.dimensions,
[pi] * self.dimensions))
        self.global_optimum = [2.20290552, 1.57079633]  # 2-D minimiser
self.fglob = -1.8013
def evaluator(self, x, *args):
self.fun_evals += 1
m = 10.0
i = arange(1, self.dimensions+1)
return -sum(sin(x) * (sin(i*x**2/pi))**(2*m))
# -------------------------------------------------------------------------------- #
class MieleCantrell(Benchmark):
"""
Miele-Cantrell test objective function.
This class defines the Miele-Cantrell global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{MieleCantrell}}(\\mathbf{x}) = (e^{-x_1} - x_2)^4 + 100(x_2 - x_3)^6 + \\tan^4(x_3 - x_4) + x_1^8
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-1, 1]` for :math:`i=1,...,4`.
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [0, 1, 1, 1]`
"""
def __init__(self, dimensions=4):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-1.0] * self.dimensions,
[ 1.0] * self.dimensions))
self.global_optimum = [0.0, 1.0, 1.0, 1.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2, x3, x4 = x
return (exp(-x1) - x2)**4.0 + 100.0*(x2 - x3)**6.0 + (tan(x3 - x4))**4.0 + x1**8.0
# -------------------------------------------------------------------------------- #
class Mishra01(Benchmark):
"""
Mishra 1 test objective function.
This class defines the Mishra 1 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Mishra01}}(\\mathbf{x}) = (1 + x_n)^{x_n} \\hspace{10pt} ; \\hspace{10pt} x_n = n - \\sum_{i=1}^{n-1} x_i
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 1]` for :math:`i=1,...,n`.
.. figure:: figures/Mishra01.png
:alt: Mishra 1 function
:align: center
**Two-dimensional Mishra 1 function**
*Global optimum*: :math:`f(x_i) = 2` for :math:`x_i = 1` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([0.0] * self.dimensions,
[1.0 + 1e-9] * self.dimensions))
self.global_optimum = [1.0] * self.dimensions
self.fglob = 2.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
n = self.dimensions
xn = n - sum(x[0:-1])
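        # Worked check: with x_i = 1 for every i, xn = n - (n - 1) = 1, so the
        # return value is (1 + 1)**1 = 2, matching the documented fglob.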
return (1 + xn)**xn
# -------------------------------------------------------------------------------- #
class Mishra02(Benchmark):
"""
Mishra 2 test objective function.
This class defines the Mishra 2 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Mishra02}}(\\mathbf{x}) = (1 + x_n)^{x_n} \\hspace{10pt} ; \\hspace{10pt} x_n = n - \\sum_{i=1}^{n-1} \\frac{(x_i + x_{i+1})}{2}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 1]` for :math:`i=1,...,n`.
.. figure:: figures/Mishra02.png
:alt: Mishra 2 function
:align: center
**Two-dimensional Mishra 2 function**
*Global optimum*: :math:`f(x_i) = 2` for :math:`x_i = 1` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([0.0] * self.dimensions,
[1.0 + 1e-9] * self.dimensions))
self.global_optimum = [1.0] * self.dimensions
self.fglob = 2.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
n = self.dimensions
xn = n - sum((x[0:-1] + x[1:])/2.0)
return (1 + xn)**xn
# -------------------------------------------------------------------------------- #
class Mishra03(Benchmark):
"""
Mishra 3 test objective function.
This class defines the Mishra 3 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Mishra03}}(\\mathbf{x}) = \\sqrt{\\lvert \\cos{\\sqrt{\\lvert x_1^2 + x_2^2 \\rvert}} \\rvert} + 0.01(x_1 + x_2)
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/Mishra03.png
:alt: Mishra 3 function
:align: center
**Two-dimensional Mishra 3 function**
*Global optimum*: :math:`f(x_i) = -0.18467` for :math:`x_i = -10` for :math:`i=1,2`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [-10.0, -10.0]
self.fglob = -0.18467
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return sqrt(abs(cos(sqrt(abs(x1**2.0 + x2**2.0))))) + 0.01*(x1 + x2)
# -------------------------------------------------------------------------------- #
class Mishra04(Benchmark):
"""
Mishra 4 test objective function.
This class defines the Mishra 4 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Mishra04}}(\\mathbf{x}) = \\sqrt{\\lvert \\sin{\\sqrt{\\lvert x_1^2 + x_2^2 \\rvert}} \\rvert} + 0.01(x_1 + x_2)
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/Mishra04.png
:alt: Mishra 4 function
:align: center
**Two-dimensional Mishra 4 function**
*Global optimum*: :math:`f(x_i) = -0.199409` for :math:`x_i = -10` for :math:`i=1,2`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [-10.0, -10.0]
self.fglob = -0.199409
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return sqrt(abs(sin(sqrt(abs(x1**2.0 + x2**2.0))))) + 0.01*(x1 + x2)
# -------------------------------------------------------------------------------- #
class Mishra05(Benchmark):
"""
Mishra 5 test objective function.
This class defines the Mishra 5 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Mishra05}}(\\mathbf{x}) = \\left [ \\sin^2 ((\\cos(x_1) + \\cos(x_2))^2) + \\cos^2 ((\\sin(x_1) + \\sin(x_2))^2) + x_1 \\right ]^2 + 0.01(x_1 + x_2)
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/Mishra05.png
:alt: Mishra 5 function
:align: center
**Two-dimensional Mishra 5 function**
*Global optimum*: :math:`f(x_i) = -0.119829` for :math:`\\mathbf{x} = [-1.98682, -10]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [-1.98682, -10.0]
self.fglob = -0.119829
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return ((sin((cos(x1) + cos(x2))**2.0)**2.0) + (cos((sin(x1) + sin(x2))**2.0)**2.0) + x1)**2.0 + 0.01*(x1 + x2)
# -------------------------------------------------------------------------------- #
class Mishra06(Benchmark):
"""
Mishra 6 test objective function.
This class defines the Mishra 6 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Mishra06}}(\\mathbf{x}) = -\\log{\\left [ \\sin^2 ((\\cos(x_1) + \\cos(x_2))^2) - \\cos^2 ((\\sin(x_1) + \\sin(x_2))^2) + x_1 \\right ]^2} + 0.01 \\left[(x_1 -1)^2 + (x_2 - 1)^2 \\right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/Mishra06.png
:alt: Mishra 6 function
:align: center
**Two-dimensional Mishra 6 function**
*Global optimum*: :math:`f(x_i) = -2.28395` for :math:`\\mathbf{x} = [2.88631, 1.82326]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [2.88631, 1.82326]
self.fglob = -2.28395
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return -log(((sin((cos(x1) + cos(x2))**2.0)**2.0) - (cos((sin(x1) + sin(x2))**2.0)**2.0) + x1)**2.0) + 0.1*((x1 - 1.0)**2.0 + (x2 - 1.0)**2.0)
# -------------------------------------------------------------------------------- #
class Mishra07(Benchmark):
"""
Mishra 7 test objective function.
This class defines the Mishra 7 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Mishra07}}(\\mathbf{x}) = \\left [\\prod_{i=1}^{n} x_i - n! \\right]^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,...,n`.
.. figure:: figures/Mishra07.png
:alt: Mishra 7 function
:align: center
**Two-dimensional Mishra 7 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = \\sqrt{n}` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.custom_bounds = [(-2, 2), (-2, 2)]
self.global_optimum = [sqrt(self.dimensions)] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return (prod(x) - factorial(self.dimensions))**2.0
# -------------------------------------------------------------------------------- #
class Mishra08(Benchmark):
"""
Mishra 8 test objective function.
This class defines the Mishra 8 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Mishra08}}(\\mathbf{x}) = 0.001 \\left[\\lvert x_1^{10} - 20x_1^9 + 180x_1^8 - 960 x_1^7 + 3360x_1^6 - 8064x_1^5 + 13340x_1^4 - 15360x_1^3 + 11520x_1^2 - 5120x_1 + 2624 \\rvert \\lvert x_2^4 + 12x_2^3 + 54x_2^2 + 108x_2 + 81 \\rvert \\right]^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/Mishra08.png
:alt: Mishra 8 function
:align: center
**Two-dimensional Mishra 8 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [2, -3]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.custom_bounds = [(1.0, 2.0), (-4.0, 1.0)]
self.global_optimum = [2.0, -3.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
F1 = abs(x[0]**10-20*x[0]**9+180*x[0]**8-960*x[0]**7+3360*x[0]**6-8064*x[0]**5+13340*x[0]**4-15360*x[0]**3+11520*x[0]**2-5120*x[0]+2624)
F2 = abs(x[1]**4+12*x[1]**3+54*x[1]**2+108*x[1]+81.0)
return 0.001*(F1+F2)**2
# -------------------------------------------------------------------------------- #
class Mishra09(Benchmark):
"""
Mishra 9 test objective function.
This class defines the Mishra 9 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Mishra09}}(\\mathbf{x}) = \\left[ ab^2c + abc^2 + b^2 + (x_1 + x_2 - x_3)^2 \\right]^2
Where, in this exercise:
.. math::
        \\begin{cases} a = 2x_1^3 + 5x_1x_2 + 4x_3 - 2x_1^2x_3 - 18 \\\\
        b = x_1 + x_2^3 + x_1x_2^2 + x_1x_3^2 - 22 \\\\
        c = 8x_1^2 + 2x_2x_3 + 2x_2^2 + 3x_2^3 - 52 \\end{cases}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2,3`.
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [1, 2, 3]`
"""
def __init__(self, dimensions=3):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [1.0, 2.0, 3.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2, x3 = x
F1 = 2*x1**3+5*x1*x2+4*x3-2*x1**2*x3-18.0
F2 = x1+x2**3+x1*x2**2+x1*x3**2-22.0
F3 = 8*x1**2+2*x2*x3+2*x2**2+3*x2**3-52.0
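        # Worked check: at the documented optimum x = [1, 2, 3],
        # F1 = 2 + 10 + 12 - 6 - 18 = 0, F2 = 1 + 8 + 4 + 9 - 22 = 0 and
        # F3 = 8 + 12 + 8 + 24 - 52 = 0, so the evaluator returns 0 = fglob.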
return (F1*F3*F2**2+F1*F2*F3**2+F2**2+(x1+x2-x3)**2)**2
# -------------------------------------------------------------------------------- #
class Mishra10(Benchmark):
"""
Mishra 10 test objective function.
This class defines the Mishra 10 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
        f_{\\text{Mishra10}}(\\mathbf{x}) = \\left[ \\lfloor x_1 \\rfloor + \\lfloor x_2 \\rfloor - \\lfloor x_1 \\rfloor \\lfloor x_2 \\rfloor \\right]^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/Mishra10.png
:alt: Mishra 10 function
:align: center
**Two-dimensional Mishra 10 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [2, 2]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[10.0] * self.dimensions))
self.global_optimum = [2.0, 2.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = int(x[0]), int(x[1])
f1 = x1 + x2
f2 = x1*x2
return (f1 - f2)**2.0
# -------------------------------------------------------------------------------- #
class Mishra11(Benchmark):
"""
Mishra 11 test objective function.
This class defines the Mishra 11 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Mishra11}}(\\mathbf{x}) = \\left [ \\frac{1}{n} \\sum_{i=1}^{n} \\lvert x_i \\rvert - \\left(\\prod_{i=1}^{n} \\lvert x_i \\rvert \\right )^{\\frac{1}{n}} \\right]^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,...,n`.
.. figure:: figures/Mishra11.png
:alt: Mishra 11 function
:align: center
**Two-dimensional Mishra 11 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.custom_bounds = [(-3, 3), (-3, 3)]
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
n = self.dimensions
        return ((1.0/n)*sum(abs(x)) - (prod(abs(x)))**(1.0/n))**2.0
# -------------------------------------------------------------------------------- #
class MultiModal(Benchmark):
"""
MultiModal test objective function.
This class defines the MultiModal global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{MultiModal}}(\\mathbf{x}) = \\left( \\sum_{i=1}^n \\lvert x_i \\rvert \\right) \\left( \\prod_{i=1}^n \\lvert x_i \\rvert \\right)
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,...,n`.
.. figure:: figures/MultiModal.png
:alt: MultiModal function
:align: center
**Two-dimensional MultiModal function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.custom_bounds = [(-5, 5), (-5, 5)]
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return sum(abs(x))*prod(abs(x))
# -------------------------------------------------------------------------------- #
class NeedleEye(Benchmark):
"""
NeedleEye test objective function.
This class defines the Needle-Eye global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{NeedleEye}}(\\mathbf{x}) = \\begin{cases} 1 & \\textrm{if} \\hspace{5pt} \\lvert x_i \\rvert < eye \\hspace{5pt} \\forall i \\\\
\\sum_{i=1}^n (100 + \\lvert x_i \\rvert) & \\textrm{if} \\hspace{5pt} \\lvert x_i \\rvert > eye \\\\
0 & \\textrm{otherwise} \\end{cases}
Where, in this exercise, :math:`eye = 0.0001`.
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,...,n`.
.. figure:: figures/NeedleEye.png
:alt: NeedleEye function
:align: center
**Two-dimensional NeedleEye function**
    *Global optimum*: :math:`f(x_i) = 1` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
# WARNING: Adding global optimum, not declared before
        self.global_optimum = [0.0] * self.dimensions
self.fglob = 1.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
f = 0.0
fp = False
eye = 0.0001
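        # Worked example (n = 2): f([0, 0]) = (1 + 1)/2 = 1 since every entry
        # lies inside the eye, whereas f([1, 1]) = (100 + 1) + (100 + 1) = 202.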
for i in range(self.dimensions):
# WARNING: Changing this code, ambiguous variable "fp"
if abs(x[i]) >= eye:
fp = True
f += 100.0 + abs(x[i])
else:
f += 1.0
if not fp:
f = f/self.dimensions
return f
# -------------------------------------------------------------------------------- #
class NewFunction01(Benchmark):
"""
NewFunction01 test objective function.
This class defines the NewFunction01 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{NewFunction01}}(\\mathbf{x}) = \\left | {\\cos\\left(\\sqrt{\\left|{x_{1}^{2} + x_{2}}\\right|}\\right)} \\right |^{0.5} + (x_{1} + x_{2})/100
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/NewFunction01.png
:alt: NewFunction01 function
:align: center
**Two-dimensional NewFunction01 function**
*Global optimum*: :math:`f(x_i) = -0.17894509347721144` for :math:`\\mathbf{x} = [-8.4666, -9.9988]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [-8.4666, -9.9988]
self.fglob = -0.17894509347721144
def evaluator(self, x, *args):
self.fun_evals += 1
return abs(cos(sqrt(abs(x[0]**2 + x[1]))))**0.5 + 0.01*x[0] + 0.01*x[1]
# -------------------------------------------------------------------------------- #
class NewFunction02(Benchmark):
"""
NewFunction02 test objective function.
This class defines the NewFunction02 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{NewFunction02}}(\\mathbf{x}) = \\left | {\\sin\\left(\\sqrt{\\lvert{x_{1}^{2} + x_{2}}\\rvert}\\right)} \\right |^{0.5} + (x_{1} + x_{2})/100
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/NewFunction02.png
:alt: NewFunction02 function
:align: center
**Two-dimensional NewFunction02 function**
*Global optimum*: :math:`f(x_i) = -0.1971881059905` for :math:`\\mathbf{x} = [-9.94112, -9.99952]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [-9.94112, -9.99952]
self.fglob = -0.1971881059905
def evaluator(self, x, *args):
self.fun_evals += 1
return abs(sin(sqrt(abs(x[0]**2 + x[1]))))**0.5 + 0.01*x[0] + 0.01*x[1]
# -------------------------------------------------------------------------------- #
class NewFunction03(Benchmark):
"""
NewFunction03 test objective function.
This class defines the NewFunction03 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{NewFunction03}}(\\mathbf{x}) = 0.01 x_{1} + 0.1 x_{2} + \\left\{x_{1} + \\sin^{2}\\left[\\left(\\cos\\left(x_{1}\\right) + \\cos\\left(x_{2}\\right)\\right)^{2}\\right] + \\cos^{2}\\left[\\left(\\sin\\left(x_{1}\\right) + \\sin\\left(x_{2}\\right)\\right)^{2}\\right]\\right\}^{2}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/NewFunction03.png
:alt: NewFunction03 function
:align: center
**Two-dimensional NewFunction03 function**
*Global optimum*: :math:`f(x_i) = -1.019829` for :math:`\\mathbf{x} = [-1.98682, -10]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [-1.98682, -10.0]
self.fglob = -1.019829
def evaluator(self, x, *args):
self.fun_evals += 1
f1 = sin((cos(x[0]) + cos(x[1]))**2)**2
f2 = cos((sin(x[0]) + sin(x[1]))**2)**2
f = (f1 + f2 + x[0])**2
f = f + 0.01*x[0] + 0.1*x[1]
return f
# -------------------------------------------------------------------------------- #
class OddSquare(Benchmark):
"""
Odd Square test objective function.
This class defines the Odd Square global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{OddSquare}}(\\mathbf{x}) = -e^{-\\frac{d}{2\\pi}} \\cos(\\pi d) \\left( 1 + \\frac{0.02h}{d + 0.01} \\right )
Where, in this exercise:
.. math::
\\begin{cases} d = n \\cdot \\smash{\\displaystyle\\max_{1 \leq i \leq n}} \\left[ (x_i - b_i)^2 \\right ] \\\\
\\\\
h = \\sum_{i=1}^{n} (x_i - b_i)^2 \\end{cases}
And :math:`\\mathbf{b} = [1, 1.3, 0.8, -0.4, -1.3, 1.6, -0.2, -0.6, 0.5, 1.4, 1, 1.3, 0.8, -0.4, -1.3, 1.6, -0.2, -0.6, 0.5, 1.4]`
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-5 \\pi, 5 \\pi]` for :math:`i=1,...,n` and :math:`n \\leq 20`.
.. figure:: figures/OddSquare.png
:alt: Odd Square function
:align: center
**Two-dimensional Odd Square function**
*Global optimum*: :math:`f(x_i) = -1.0084` for :math:`\\mathbf{x} \\approx b`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-5.0*pi] * self.dimensions,
[ 5.0*pi] * self.dimensions))
self.custom_bounds = [(-2.0, 4.0), (-2.0, 4.0)]
# WARNING: Max dimensions of 2 * 10 is implicit
self.a = asarray([1, 1.3, 0.8, -0.4, -1.3,
1.6, -0.2, -0.6, 0.5, 1.4]*2)
self.global_optimum = self.a[0:self.dimensions]
self.fglob = -1.0084
        if self.dimensions > len(self.a):
print("WARNING: Too many dimensions to calculate global"+
" optimum for function: OddSquare")
def evaluator(self, x, *args):
self.fun_evals += 1
c = 0.02
b = self.a[0:self.dimensions]
d = self.dimensions*max((x - b)**2.0)
h = sum((x - b)**2.0)
        return -exp(-d/(2.0*pi))*cos(pi*d)*(1.0 + c*h/(d + 0.01))
# -------------------------------------------------------------------------------- #
class Parsopoulos(Benchmark):
"""
Parsopoulos test objective function.
This class defines the Parsopoulos global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Parsopoulos}}(\\mathbf{x}) = \\cos(x_1)^2 + \\sin(x_2)^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-5, 5]` for :math:`i=1,2`.
.. figure:: figures/Parsopoulos.png
:alt: Parsopoulos function
:align: center
**Two-dimensional Parsopoulos function**
    *Global optimum*: This function has an infinite number of global minima in :math:`\\mathbb{R}^2`, at points :math:`\\left(k\\frac{\\pi}{2}, \\lambda \\pi \\right)`,
    where :math:`k = \\pm1, \\pm3, ...` and :math:`\\lambda = 0, \\pm1, \\pm2, ...`
    Within the given domain, the function has 12 global minima, all equal to zero.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-5.0] * self.dimensions,
[ 5.0] * self.dimensions))
self.global_optimum = [pi/2.0, pi]
self.fglob = 0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return cos(x1)**2.0 + sin(x2)**2.0
# -------------------------------------------------------------------------------- #
class Pathological(Benchmark):
"""
Pathological test objective function.
This class defines the Pathological global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Pathological}}(\\mathbf{x}) = \\sum_{i=1}^{n -1} \\frac{\\sin^{2}\\left(\\sqrt{100 x_{i+1}^{2} + x_{i}^{2}}\\right) -0.5}{0.001 \\left(x_{i} - x_{i+1}\\right)^{4} + 0.50}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,2`.
.. figure:: figures/Pathological.png
:alt: Pathological function
:align: center
**Two-dimensional Pathological function**
*Global optimum*: :math:`f(x_i) = -1.99600798403` for :math:`x_i = 0` for :math:`i=1,2`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = -1.99600798403
def evaluator(self, x, *args):
self.fun_evals += 1
x_ = roll(x, -1)
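        # NOTE: roll(x, -1) makes the sum wrap around (x_{n+1} = x_1), so there
        # are n terms rather than the n - 1 of the docstring; at the origin each
        # term is -0.5/0.501, which gives the fglob of -1.99600798403 above.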
return sum((sin(sqrt(x_**2 + 100*x**2))**2 - 0.5) / (0.001 * ((x_ - x)**4 + 1.0) + 0.5))
# -------------------------------------------------------------------------------- #
class Paviani(Benchmark):
"""
Paviani test objective function.
This class defines the Paviani global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
        f_{\\text{Paviani}}(\\mathbf{x}) = \\sum_{i=1}^{10} \\left[\\log^{2}\\left(10 - x_i\\right) + \\log^{2}\\left(x_i -2\\right)\\right] - \\left(\\prod_{i=1}^{10} x_i \\right)^{0.2}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [2.001, 9.999]` for :math:`i=1,...,n`.
*Global optimum*: :math:`f(x_i) = -45.7784684040686` for :math:`x_i = 9.350266` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=10):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([2.001] * self.dimensions,
[9.999] * self.dimensions))
self.global_optimum = [9.350266] * self.dimensions
self.fglob = -45.7784684040686
def evaluator(self, x, *args):
self.fun_evals += 1
return sum(log(x-2)**2.0 + log(10.0 - x)**2.0) - prod(x)**0.2
# -------------------------------------------------------------------------------- #
class Penalty01(Benchmark):
"""
Penalty 1 test objective function.
This class defines the Penalty 1 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Penalty01}}(\\mathbf{x}) = \\frac{\\pi}{30} \\left\\{10 \\sin^2(\\pi y_1) + \\sum_{i=1}^{n-1} (y_i - 1)^2 \\left[1 + 10 \\sin^2(\\pi y_{i+1}) \\right ] + (y_n - 1)^2 \\right \\} + \\sum_{i=1}^n u(x_i, 10, 100, 4)
Where, in this exercise:
.. math::
y_i = 1 + \\frac{1}{4}(x_i + 1)
And:
.. math::
u(x_i, a, k, m) = \\begin{cases} k(x_i - a)^m & \\textrm{if} \\hspace{5pt} x_i > a \\\\
0 & \\textrm{if} \\hspace{5pt} -a \\leq x_i \\leq a \\\\
k(-x_i - a)^m & \\textrm{if} \\hspace{5pt} x_i < -a \\end{cases}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-50, 50]` for :math:`i=1,...,n`.
.. figure:: figures/Penalty01.png
:alt: Penalty 1 function
:align: center
**Two-dimensional Penalty 1 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = -1` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-50.0] * self.dimensions,
[ 50.0] * self.dimensions))
self.custom_bounds = [(-5.0, 5.0), (-5.0, 5.0)]
self.global_optimum = [-1.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
a, b, c = 10.0, 100.0, 4.0
xx = abs(x)
u = where(xx > a, b*(xx - a)**c, 0.0)
y = 1.0 + (x + 1.0)/4.0
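        # Worked check: at x_i = -1 every y_i = 1, so the sine and quadratic
        # terms vanish, and |x_i| = 1 <= a = 10 makes the penalty u zero,
        # giving f = 0 = fglob.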
return sum(u) + (pi/30.0)*(10.0*sin(pi*y[0])**2.0 + sum((y[0:-1] - 1.0)**2.0 *(1.0 + 10.0*sin(pi*y[1:])**2.0)) + (y[-1] - 1)**2.0)
# -------------------------------------------------------------------------------- #
class Penalty02(Benchmark):
"""
Penalty 2 test objective function.
This class defines the Penalty 2 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Penalty02}}(\\mathbf{x}) = 0.1 \\left\\{\\sin^2(3\\pi x_1) + \\sum_{i=1}^{n-1} (x_i - 1)^2 \\left[1 + \\sin^2(3\\pi x_{i+1}) \\right ] + (x_n - 1)^2 \\left [1 + \\sin^2(2 \\pi x_n) \\right ]\\right \\} + \\sum_{i=1}^n u(x_i, 5, 100, 4)
Where, in this exercise:
.. math::
u(x_i, a, k, m) = \\begin{cases} k(x_i - a)^m & \\textrm{if} \\hspace{5pt} x_i > a \\\\
0 & \\textrm{if} \\hspace{5pt} -a \\leq x_i \\leq a \\\\
k(-x_i - a)^m & \\textrm{if} \\hspace{5pt} x_i < -a \\end{cases}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-50, 50]` for :math:`i=1,...,n`.
.. figure:: figures/Penalty02.png
:alt: Penalty 2 function
:align: center
**Two-dimensional Penalty 2 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 1` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-50.0] * self.dimensions,
[ 50.0] * self.dimensions))
self.custom_bounds = [(-4.0, 4.0), (-4.0, 4.0)]
self.global_optimum = [1.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
a, b, c = 5.0, 100.0, 4.0
xx = abs(x)
u = where(xx > a, b*(xx - a)**c, 0.0)
        return sum(u) + 0.1*(sin(3.0*pi*x[0])**2.0 + sum((x[0:-1] - 1.0)**2.0*(1.0 + sin(3.0*pi*x[1:])**2.0)) + (x[-1] - 1)**2.0*(1 + sin(2*pi*x[-1])**2.0))
# -------------------------------------------------------------------------------- #
class PenHolder(Benchmark):
"""
PenHolder test objective function.
This class defines the PenHolder global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{PenHolder}}(\\mathbf{x}) = -e^{\\left|{e^{\\left|{- \\frac{\\sqrt{x_{1}^{2} + x_{2}^{2}}}{\\pi} + 1}\\right|} \\cos\\left(x_{1}\\right) \\cos\\left(x_{2}\\right)}\\right|^{-1}}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-11, 11]` for :math:`i=1,2`.
.. figure:: figures/PenHolder.png
:alt: PenHolder function
:align: center
**Two-dimensional PenHolder function**
*Global optimum*: :math:`f(x_i) = -0.9635348327265058` for :math:`x_i = \\pm 9.646167671043401` for :math:`i=1,2`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-11.0] * self.dimensions,
[ 11.0] * self.dimensions))
self.global_optimum = [-9.646167708023526, 9.646167671043401]
self.fglob = -0.9635348327265058
def evaluator(self, x, *args):
self.fun_evals += 1
return -exp(-(abs(cos(x[0])*cos(x[1])*exp(abs(1 - sqrt(x[0]**2 + x[1]**2)/pi))))**(-1))
# -------------------------------------------------------------------------------- #
class PermFunction01(Benchmark):
"""
PermFunction 1 test objective function.
This class defines the Perm Function 1 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{PermFunction01}}(\\mathbf{x}) = \\sum_{k=1}^n \\left\\{ \\sum_{j=1}^n (j^k + \\beta) \\left[ \\left(\\frac{x_j}{j}\\right)^k - 1 \\right] \\right\\}^2
    Here, :math:`\\beta = 0.5`, :math:`n` represents the number of dimensions and :math:`x_i \\in [-n, n+1]` for :math:`i=1,...,n`.
.. figure:: figures/PermFunction01.png
:alt: PermFunction 1 function
:align: center
**Two-dimensional PermFunction 1 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = i` for :math:`i=1,...,n`
"""
# WARNING: Definition of "Beta" was not included in documentation
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-self.dimensions] * self.dimensions,
[self.dimensions+1] * self.dimensions))
        self.global_optimum = list(range(1, self.dimensions+1))
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
b = 0.5
s_out = 0.0
for k in range(1, self.dimensions+1):
s_in = 0.0
for j in range(1, self.dimensions+1):
s_in += (j**k + b)*((x[j-1]/j)**k - 1)
s_out += s_in**2
return s_out
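# Illustration only: a vectorised sketch equivalent to the double loop of
# PermFunction01.evaluator above (assumes `numpy`, as used elsewhere in this
# module); beta = 0.5 mirrors the constant b in the method.
def _perm01_vectorised(x, beta=0.5):
    x = numpy.asarray(x, dtype=float)
    n = len(x)
    j = numpy.arange(1.0, n + 1.0)          # row of indices j = 1..n
    k = j.reshape(-1, 1)                    # column of powers k = 1..n
    inner = numpy.sum((j**k + beta)*((x/j)**k - 1.0), axis=1)
    return numpy.sum(inner**2.0)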
# -------------------------------------------------------------------------------- #
class PermFunction02(Benchmark):
"""
PermFunction 2 test objective function.
This class defines the Perm Function 2 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
        f_{\\text{PermFunction02}}(\\mathbf{x}) = \\sum_{k=1}^n \\left\\{ \\sum_{j=1}^n (j + \\beta) \\left[ x_j^k - \\frac{1}{j^k} \\right] \\right\\}^2
    Here, :math:`\\beta = 10`, :math:`n` represents the number of dimensions and :math:`x_i \\in [-n, n+1]` for :math:`i=1,...,n`.
.. figure:: figures/PermFunction02.png
:alt: PermFunction 2 function
:align: center
**Two-dimensional PermFunction 2 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = \\frac{1}{i}` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-self.dimensions] * self.dimensions,
[ self.dimensions+1] * self.dimensions))
self.custom_bounds = [(0, 1.5), (0, 1.0)]
        self.global_optimum = [1.0/i for i in range(1, self.dimensions+1)]
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
b = 10.0
s_out = 0.0
for k in range(1, self.dimensions+1):
s_in = 0.0
for j in range(1, self.dimensions+1):
s_in += (j + b)*(x[j-1]**k - (1.0/j)**k)
s_out += s_in**2
return s_out
# -------------------------------------------------------------------------------- #
class Pinter(Benchmark):
"""
Pinter test objective function.
This class defines the Pinter global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Pinter}}(\\mathbf{x}) = \\sum_{i=1}^n ix_i^2 + \\sum_{i=1}^n 20i \\sin^2 A + \\sum_{i=1}^n i \\log_{10} (1 + iB^2)
Where, in this exercise:
.. math::
\\begin{cases} A = x_{i-1} \\sin x_i + \\sin x_{i+1} \\\\
B = x_{i-1}^2 - 2x_i + 3x_{i+1} - \\cos x_i + 1 \\end{cases}
Where :math:`x_0 = x_n` and :math:`x_{n+1} = x_1`.
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,...,n`.
.. figure:: figures/Pinter.png
:alt: Pinter function
:align: center
**Two-dimensional Pinter function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
n = self.dimensions
f = 0.0
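        # Neighbour indices wrap around as in the docstring: x_mi is x_{i-1}
        # with x_0 = x_n, and x_pi is x_{i+1} with x_{n+1} = x_1.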
for i in range(n):
x_i = x[i]
if i == 0:
x_mi = x[-1]
x_pi = x[i+1]
elif i == n - 1:
x_mi = x[i-1]
x_pi = x[0]
else:
x_mi = x[i-1]
x_pi = x[i+1]
A = x_mi*sin(x_i) + sin(x_pi)
B = x_mi**2.0 - 2*x_i + 3*x_pi - cos(x_i) + 1.0
f += (i + 1.0)*x_i**2.0 + 20.0*(i + 1.0)*sin(A)**2.0 + (i + 1.0)*log10(1.0 + (i + 1.0)*B**2.0)
return f
# -------------------------------------------------------------------------------- #
class Plateau(Benchmark):
"""
Plateau test objective function.
This class defines the Plateau global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
        f_{\\text{Plateau}}(\\mathbf{x}) = 30 + \\sum_{i=1}^n \\lfloor \\lvert x_i \\rvert \\rfloor
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-5.12, 5.12]` for :math:`i=1,...,n`.
.. figure:: figures/Plateau.png
:alt: Plateau function
:align: center
**Two-dimensional Plateau function**
*Global optimum*: :math:`f(x_i) = 30` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-5.12] * self.dimensions,
[ 5.12] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = 30.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return 30.0 + sum(floor(abs(x)))
# -------------------------------------------------------------------------------- #
class Powell(Benchmark):
"""
Powell test objective function.
This class defines the Powell global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
        f_{\\text{Powell}}(\\mathbf{x}) = (x_1 + 10x_2)^2 + 5(x_3 - x_4)^2 + (x_2 - 2x_3)^4 + 10(x_1 - x_4)^4
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-4, 5]` for :math:`i=1,...,4`.
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,4`
"""
def __init__(self, dimensions=4):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-4.0] * self.dimensions,
[ 5.0] * self.dimensions))
self.global_optimum = [0, 0, 0, 0]
self.fglob = 0
def evaluator(self, x, *args):
self.fun_evals += 1
return (x[0] + 10*x[1])**2 + 5*(x[2] - x[3])**2 + (x[1] - 2*x[2])**4 + 10*(x[0] - x[3])**4
# -------------------------------------------------------------------------------- #
class PowerSum(Benchmark):
"""
Power sum test objective function.
This class defines the Power Sum global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{PowerSum}}(\\mathbf{x}) = \\sum_{k=1}^n\\left[\\left(\\sum_{i=1}^n x_i^k \\right) - b_k \\right]^2
Where, in this exercise, :math:`\\mathbf{b} = [8, 18, 44, 114]`
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 4]` for :math:`i=1,...,4`.
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [1, 2, 2, 3]`
"""
def __init__(self, dimensions=4):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([0.0] * self.dimensions,
[float(self.dimensions)] * self.dimensions))
self.global_optimum = [1.0, 2.0, 2.0, 3.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
b = [8.0, 18.0, 44.0, 114.0]
y = 0.0
for k in range(1, self.dimensions+1):
s_in = 0.0
for i in range(self.dimensions):
s_in = s_in + x[i]**k
y = y + (s_in - b[k-1])**2.0
return y
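# Illustration only: a vectorised sketch of the same Power Sum computation for
# the four-dimensional case (assumes `numpy`, as used elsewhere in this module,
# and that len(b) equals the number of variables).
def _power_sum_vectorised(x, b=(8.0, 18.0, 44.0, 114.0)):
    x = numpy.asarray(x, dtype=float)
    k = numpy.arange(1.0, len(x) + 1.0).reshape(-1, 1)
    inner = numpy.sum(x**k, axis=1)         # inner sum for each power k
    return numpy.sum((inner - numpy.asarray(b))**2.0)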
# -------------------------------------------------------------------------------- #
class Price01(Benchmark):
"""
Price 1 test objective function.
This class defines the Price 1 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Price01}}(\\mathbf{x}) = (\\lvert x_1 \\rvert - 5)^2 + (\\lvert x_2 \\rvert - 5)^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-500, 500]` for :math:`i=1,2`.
.. figure:: figures/Price01.png
:alt: Price 1 function
:align: center
**Two-dimensional Price 1 function**
*Global optimum*: :math:`f(x_i) = 0.0` for :math:`\\mathbf{x} = [5, 5]` or :math:`\\mathbf{x} = [5, -5]`
or :math:`\\mathbf{x} = [-5, 5]` or :math:`\\mathbf{x} = [-5, -5]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-500.0] * self.dimensions,
[ 500.0] * self.dimensions))
self.custom_bounds = [(-10.0, 10.0), (-10.0, 10.0)]
self.global_optimum = [5.0, 5.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return (abs(x1) - 5.0)**2.0 + (abs(x2) - 5.0)**2.0
# -------------------------------------------------------------------------------- #
class Price02(Benchmark):
"""
Price 2 test objective function.
This class defines the Price 2 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Price02}}(\\mathbf{x}) = 1 + \\sin^2(x_1) + \\sin^2(x_2) - 0.1e^{(-x_1^2 - x_2^2)}
    Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/Price02.png
:alt: Price 2 function
:align: center
**Two-dimensional Price 2 function**
*Global optimum*: :math:`f(x_i) = 0.9` for :math:`x_i = 0` for :math:`i=1,2`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[10.0] * self.dimensions))
self.global_optimum = [0.0, 0.0]
self.fglob = 0.9
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return 1.0 + sin(x1)**2.0 + sin(x2)**2.0 - 0.1*exp(-x1**2.0 - x2**2.0)
# -------------------------------------------------------------------------------- #
class Price03(Benchmark):
"""
Price 3 test objective function.
This class defines the Price 3 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Price03}}(\\mathbf{x}) = 100(x_2 - x_1^2)^2 + \\left[6.4(x_2 - 0.5)^2 - x_1 - 0.6 \\right]^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-50, 50]` for :math:`i=1,2`.
.. figure:: figures/Price03.png
:alt: Price 3 function
:align: center
**Two-dimensional Price 3 function**
    *Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [1, 1]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-50.0] * self.dimensions,
[ 50.0] * self.dimensions))
self.custom_bounds = [(0, 2), (0, 2)]
self.global_optimum = [1.0, 1.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return 100.0*(x2 - x1**2.0)**2.0 + (6.4*(x2 - 0.5)**2.0 - x1 - 0.6)**2.0
# -------------------------------------------------------------------------------- #
class Price04(Benchmark):
"""
Price 4 test objective function.
This class defines the Price 4 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Price04}}(\\mathbf{x}) = (2x_1^3x_2 - x_2^3)^2 + (6x_1 - x_2^2 + x_2)^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-50, 50]` for :math:`i=1,2`.
.. figure:: figures/Price04.png
:alt: Price 4 function
:align: center
**Two-dimensional Price 4 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [0, 0]`, :math:`\\mathbf{x} = [2, 4]` and
:math:`\\mathbf{x} = [1.464, -2.506]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-50.0] * self.dimensions,
[ 50.0] * self.dimensions))
self.custom_bounds = [(0, 2), (0, 2)]
self.global_optimum = [2.0, 4.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return (2.0*x2*x1**3.0 - x2**3.0)**2.0 + (6.0*x1 - x2**2.0 + x2)**2.0
# -------------------------------------------------------------------------------- #
class Qing(Benchmark):
"""
Qing test objective function.
This class defines the Qing global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Qing}}(\\mathbf{x}) = \\sum_{i=1}^{n} (x_i^2 - i)^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-500, 500]` for :math:`i=1,...,n`.
.. figure:: figures/Qing.png
:alt: Qing function
:align: center
**Two-dimensional Qing function**
    *Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = \\pm \\sqrt{i}` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-500.0] * self.dimensions,
[ 500.0] * self.dimensions))
self.custom_bounds = [(-2, 2), (-2, 2)]
self.global_optimum = [sqrt(i) for i in range(1, self.dimensions+1)]
self.fglob = 0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
rng = numpy.arange(1, self.dimensions+1)
return sum((x**2.0 - rng)**2.0)
# -------------------------------------------------------------------------------- #
class Quadratic(Benchmark):
"""
Quadratic test objective function.
This class defines the Quadratic global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Quadratic}}(\\mathbf{x}) = -3803.84 - 138.08x_1 - 232.92x_2 + 128.08x_1^2 + 203.64x_2^2 + 182.25x_1x_2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/Quadratic.png
:alt: Quadratic function
:align: center
**Two-dimensional Quadratic function**
*Global optimum*: :math:`f(x_i) = -3873.72418` for :math:`\\mathbf{x} = [0.19388, 0.48513]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.custom_bounds = [(0, 1), (0, 1)]
self.global_optimum = [0.19388, 0.48513]
self.fglob = -3873.72418
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return -3803.84 - 138.08*x1 - 232.92*x2 + 128.08*x1**2.0 + 203.64*x2**2.0 + 182.25*x1*x2
# -------------------------------------------------------------------------------- #
class Quintic(Benchmark):
"""
Quintic test objective function.
This class defines the Quintic global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Quintic}}(\\mathbf{x}) = \\sum_{i=1}^{n} \\left|{x_{i}^{5} - 3 x_{i}^{4} + 4 x_{i}^{3} + 2 x_{i}^{2} - 10 x_{i} -4}\\right|
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,...,n`.
.. figure:: figures/Quintic.png
:alt: Quintic function
:align: center
**Two-dimensional Quintic function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = -1` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.custom_bounds = [(-2, 2), (-2, 2)]
self.global_optimum = [-1.0] * self.dimensions
self.fglob = 0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return sum(abs(x**5 - 3*x**4 + 4*x**3 + 2*x**2 - 10*x - 4))
# -------------------------------------------------------------------------------- #
class Rana(Benchmark):
"""
Rana test objective function.
This class defines the Rana global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Rana}}(\\mathbf{x}) = \\sum_{i=1}^{n} \\left[x_{i} \\sin\\left(\\sqrt{\\lvert{x_{1} - x_{i} + 1}\\rvert}\\right) \\cos\\left(\\sqrt{\\lvert{x_{1} + x_{i} + 1}\\rvert}\\right) + \\left(x_{1} + 1\\right) \\sin\\left(\\sqrt{\\lvert{x_{1} + x_{i} + 1}\\rvert}\\right) \\cos\\left(\\sqrt{\\lvert{x_{1} - x_{i} + 1}\\rvert}\\right)\\right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-500.000001, 500.000001]` for :math:`i=1,...,n`.
.. figure:: figures/Rana.png
:alt: Rana function
:align: center
**Two-dimensional Rana function**
*Global optimum*: :math:`f(x_i) = -928.5478` for :math:`x_i = -500` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-500.000001] * self.dimensions,
[ 500.000001] * self.dimensions))
self.global_optimum = [-500.0] * self.dimensions
self.fglob = -928.5478
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
E = x + 1
return sum(E*cos(sqrt(abs(E-x)))*sin(sqrt(abs(E+x))) + x*cos(sqrt(abs(E+x)))*sin(sqrt(abs(E-x))))
# -------------------------------------------------------------------------------- #
class Rastrigin(Benchmark):
"""
Rastrigin test objective function.
This class defines the Rastrigin global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
        f_{\\text{Rastrigin}}(\\mathbf{x}) = 10n + \\sum_{i=1}^n \\left[ x_i^2 - 10 \\cos(2\\pi x_i) \\right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-5.12, 5.12]` for :math:`i=1,...,n`.
.. figure:: figures/Rastrigin.png
:alt: Rastrigin function
:align: center
**Two-dimensional Rastrigin function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-5.12] * self.dimensions,
[ 5.12] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return 10.0*self.dimensions + sum(x**2.0 - 10.0*cos(2.0*pi*x))
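# Usage sketch (illustrative): every Benchmark subclass in this file follows
# the same pattern, e.g.
#
#     bench = Rastrigin(dimensions=5)
#     x0 = numpy.asarray(bench.global_optimum)
#     assert abs(bench.evaluator(x0) - bench.fglob) < 1e-9
#
# For benchmarks whose optimum is only quoted to a few decimals the tolerance
# has to be loosened accordingly.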
# -------------------------------------------------------------------------------- #
class Ripple01(Benchmark):
"""
Ripple 1 test objective function.
This class defines the Ripple 1 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Ripple01}}(\\mathbf{x}) = \\sum_{i=1}^2 -e^{-2 \\log 2 (\\frac{x_i-0.1}{0.8})^2} \\left[\\sin^6(5 \\pi x_i) + 0.1\\cos^2(500 \\pi x_i) \\right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 1]` for :math:`i=1,...,n`.
.. figure:: figures/Ripple01.png
:alt: Ripple 1 function
:align: center
**Two-dimensional Ripple 1 function**
*Global optimum*: :math:`f(x_i) = -2.2` for :math:`x_i = 0.1` for :math:`i=1,2`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([0.0] * self.dimensions,
[1.0] * self.dimensions))
self.global_optimum = [0.1] * self.dimensions
self.fglob = -2.2
def evaluator(self, x, *args):
self.fun_evals += 1
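        # Worked check: at x_i = 0.1 the Gaussian envelope is exp(0) = 1,
        # sin(0.5*pi)**6 = 1 and cos(50*pi)**2 = 1, so each coordinate
        # contributes -(1 + 0.1) = -1.1 and the two-dimensional sum is -2.2.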
return sum(-exp(-2.0*log(2.0)*((x - 0.1)/0.8)**2.0)*(sin(5.0*pi*x)**6.0 + 0.1*cos(500.0*pi*x)**2.0))
# -------------------------------------------------------------------------------- #
class Ripple25(Benchmark):
"""
Ripple 25 test objective function.
This class defines the Ripple 25 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Ripple25}}(\\mathbf{x}) = \\sum_{i=1}^2 -e^{-2 \\log 2 (\\frac{x_i-0.1}{0.8})^2} \\left[\\sin^6(5 \\pi x_i) \\right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 1]` for :math:`i=1,...,n`.
.. figure:: figures/Ripple25.png
:alt: Ripple 25 function
:align: center
**Two-dimensional Ripple 25 function**
*Global optimum*: :math:`f(x_i) = -2` for :math:`x_i = 0.1` for :math:`i=1,2`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([0.0] * self.dimensions,
[1.0] * self.dimensions))
self.global_optimum = [0.1] * self.dimensions
self.fglob = -2.0
def evaluator(self, x, *args):
self.fun_evals += 1
return sum(-exp(-2.0*log(2.0)*((x - 0.1)/0.8)**2.0)*(sin(5.0*pi*x)**6.0))
# -------------------------------------------------------------------------------- #
class Rosenbrock(Benchmark):
"""
Rosenbrock test objective function.
This class defines the Rosenbrock global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Rosenbrock}}(\\mathbf{x}) = \\sum_{i=1}^{n-1} [100(x_i^2 - x_{i+1})^2 + (x_i - 1)^2]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-5, 10]` for :math:`i=1,...,n`.
.. figure:: figures/Rosenbrock.png
:alt: Rosenbrock function
:align: center
**Two-dimensional Rosenbrock function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 1` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-5.0] * self.dimensions,
[10.0] * self.dimensions))
self.custom_bounds = [(-2, 2), (-2, 2)]
self.global_optimum = [1.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
        return sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0)
# -------------------------------------------------------------------------------- #
class RosenbrockModified(Benchmark):
"""
Modified Rosenbrock test objective function.
This class defines the Modified Rosenbrock global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{RosenbrockModified}}(\\mathbf{x}) = 74 + 100(x_2 - x_1^2)^2 + (1 - x_1)^2 - 400 e^{-\\frac{(x_1+1)^2 + (x_2 + 1)^2}{0.1}}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-2, 2]` for :math:`i=1,2`.
.. figure:: figures/RosenbrockModified.png
:alt: Modified Rosenbrock function
:align: center
**Two-dimensional Modified Rosenbrock function**
*Global optimum*: :math:`f(x_i) = 34.37` for :math:`\\mathbf{x} = [-0.9, -0.95]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-2.0] * self.dimensions,
[ 2.0] * self.dimensions))
self.custom_bounds = [(-1.0, 0.5), (-1.0, 1.0)]
self.global_optimum = [-0.9, -0.95]
self.fglob = 34.37
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return 74.0 + 100.0*(x2 - x1**2.0)**2.0 + (1.0 - x1)**2.0 - 400.0*exp(-((x1 + 1.0)**2.0 + (x2 + 1.0)**2.0)/0.1)
# -------------------------------------------------------------------------------- #
class RotatedEllipse01(Benchmark):
"""
Rotated Ellipse 1 test objective function.
This class defines the Rotated Ellipse 1 global optimization problem. This
is a unimodal minimization problem defined as follows:
.. math::
f_{\\text{RotatedEllipse01}}(\\mathbf{x}) = 7x_1^2 - 6 \\sqrt{3} x_1x_2 + 13x_2^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-500, 500]` for :math:`i=1,2`.
.. figure:: figures/RotatedEllipse01.png
:alt: Rotated Ellipse 1 function
:align: center
**Two-dimensional Rotated Ellipse 1 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [0, 0]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-500.0] * self.dimensions,
[500.0] * self.dimensions))
self.custom_bounds = [(-2.0, 2.0), (-2.0, 2.0)]
self.global_optimum = [0.0, 0.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return 7.0*x1**2.0 - 6.0*sqrt(3)*x1*x2 + 13*x2**2.0
# -------------------------------------------------------------------------------- #
class RotatedEllipse02(Benchmark):
"""
Rotated Ellipse 2 test objective function.
This class defines the Rotated Ellipse 2 global optimization problem. This
is a unimodal minimization problem defined as follows:
.. math::
f_{\\text{RotatedEllipse02}}(\\mathbf{x}) = x_1^2 - x_1x_2 + x_2^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-500, 500]` for :math:`i=1,2`.
.. figure:: figures/RotatedEllipse02.png
:alt: Rotated Ellipse 2 function
:align: center
**Two-dimensional Rotated Ellipse 2 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [0, 0]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-500.0] * self.dimensions,
[500.0] * self.dimensions))
self.custom_bounds = [(-2.0, 2.0), (-2.0, 2.0)]
self.global_optimum = [0.0, 0.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return x1**2.0 - x1*x2 + x2**2.0
# -------------------------------------------------------------------------------- #
class Salomon(Benchmark):
"""
Salomon test objective function.
This class defines the Salomon global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Salomon}}(\\mathbf{x}) = 1 - \\cos \\left (2 \\pi \\sqrt{\\sum_{i=1}^{n} x_i^2} \\right) + 0.1 \\sqrt{\\sum_{i=1}^n x_i^2}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,...,n`.
.. figure:: figures/Salomon.png
:alt: Salomon function
:align: center
**Two-dimensional Salomon function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.custom_bounds = [(-50, 50), (-50, 50)]
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return 1.0 - cos(2.0*pi*sqrt(sum(x**2.0))) + 0.1*sqrt(sum(x**2.0))
# -------------------------------------------------------------------------------- #
class Sargan(Benchmark):
"""
Sargan test objective function.
This class defines the Sargan global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Sargan}}(\\mathbf{x}) = \\sum_{i=1}^{n} n \\left (x_i^2 + 0.4 \\sum_{i \\neq j}^{n} x_ix_j \\right)
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,...,n`.
.. figure:: figures/Sargan.png
:alt: Sargan function
:align: center
**Two-dimensional Sargan function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.custom_bounds = [(-5, 5), (-5, 5)]
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
x0 = x[:-1]
x1 = roll(x,-1)[:-1]
return sum(self.dimensions*(x**2 + 0.4*sum(x0*x1)))
# -------------------------------------------------------------------------------- #
class Schaffer01(Benchmark):
"""
Schaffer 1 test objective function.
This class defines the Schaffer 1 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Schaffer01}}(\\mathbf{x}) = 0.5 + \\frac{\\sin^2 (x_1^2 + x_2^2)^2 - 0.5}{1 + 0.001(x_1^2 + x_2^2)^2}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,2`.
.. figure:: figures/Schaffer01.png
:alt: Schaffer 1 function
:align: center
**Two-dimensional Schaffer 1 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,2`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.custom_bounds = [(-10, 10), (-10, 10)]
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return 0.5 + (sin(x1**2.0 + x2**2.0)**2.0 - 0.5)/(1 + 0.001*(x1**2.0 + x2**2.0)**2.0)
# -------------------------------------------------------------------------------- #
class Schaffer02(Benchmark):
"""
Schaffer 2 test objective function.
This class defines the Schaffer 2 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Schaffer02}}(\\mathbf{x}) = 0.5 + \\frac{\\sin^2 (x_1^2 - x_2^2)^2 - 0.5}{1 + 0.001(x_1^2 + x_2^2)^2}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,2`.
.. figure:: figures/Schaffer02.png
:alt: Schaffer 2 function
:align: center
**Two-dimensional Schaffer 2 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,2`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.custom_bounds = [(-10, 10), (-10, 10)]
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return 0.5 + (sin(x1**2.0 - x2**2.0)**2.0 - 0.5)/(1 + 0.001*(x1**2.0 + x2**2.0)**2.0)
# -------------------------------------------------------------------------------- #
class Schaffer03(Benchmark):
"""
Schaffer 3 test objective function.
This class defines the Schaffer 3 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Schaffer03}}(\\mathbf{x}) = 0.5 + \\frac{\\sin^2 \\left( \\cos \\lvert x_1^2 - x_2^2 \\rvert \\right ) - 0.5}{1 + 0.001(x_1^2 + x_2^2)^2}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,2`.
.. figure:: figures/Schaffer03.png
:alt: Schaffer 3 function
:align: center
**Two-dimensional Schaffer 3 function**
*Global optimum*: :math:`f(x_i) = 0.00156685` for :math:`\\mathbf{x} = [0, 1.253115]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.custom_bounds = [(-10, 10), (-10, 10)]
self.global_optimum = [0.0, 1.253115]
self.fglob = 0.00156685
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return 0.5 + (sin(cos(abs(x1**2.0 - x2**2.0)))**2.0 - 0.5)/(1 + 0.001*(x1**2.0 + x2**2.0)**2.0)
# -------------------------------------------------------------------------------- #
class Schaffer04(Benchmark):
"""
Schaffer 4 test objective function.
This class defines the Schaffer 4 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Schaffer04}}(\\mathbf{x}) = 0.5 + \\frac{\\cos^2 \\left( \\sin(x_1^2 - x_2^2) \\right ) - 0.5}{1 + 0.001(x_1^2 + x_2^2)^2}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,2`.
.. figure:: figures/Schaffer04.png
:alt: Schaffer 4 function
:align: center
**Two-dimensional Schaffer 4 function**
*Global optimum*: :math:`f(x_i) = 0.292579` for :math:`\\mathbf{x} = [0, 1.253115]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.custom_bounds = [(-10, 10), (-10, 10)]
self.global_optimum = [0.0, 1.253115]
self.fglob = 0.292579
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return 0.5 + (cos(sin(x1**2.0 - x2**2.0))**2.0 - 0.5)/(1 + 0.001*(x1**2.0 + x2**2.0)**2.0)
# -------------------------------------------------------------------------------- #
class SchmidtVetters(Benchmark):
"""
Schmidt-Vetters test objective function.
This class defines the Schmidt-Vetters global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{SchmidtVetters}}(\\mathbf{x}) = \\frac{1}{1 + (x_1 - x_2)^2} + \\sin \\left(\\frac{\\pi x_2 + x_3}{2} \\right) + e^{\\left(\\frac{x_1+x_2}{x_2} - 2\\right)^2}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 10]` for :math:`i=1,2,3`.
*Global optimum*: :math:`f(x_i) = 3` for :math:`x_i = 0.78547` for :math:`i=1,2,3`
"""
def __init__(self, dimensions=3):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 0.0] * self.dimensions,
[10.0] * self.dimensions))
self.global_optimum = [0.78547] * self.dimensions
self.fglob = 3.0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2, x3 = x
return 1.0/(1.0 + (x1 - x2)**2.0) + sin((pi*x2 + x3)/2.0) + exp(((x1 + x2)/x2 - 2)**2.0)
# -------------------------------------------------------------------------------- #
class Schwefel01(Benchmark):
"""
Schwefel 1 test objective function.
This class defines the Schwefel 1 global optimization problem. This
is a unimodal minimization problem defined as follows:
.. math::
f_{\\text{Schwefel01}}(\\mathbf{x}) = \\left(\\sum_{i=1}^n x_i^2 \\right)^{\\alpha}
Where, in this exercise, :math:`\\alpha = \\sqrt{\\pi}`.
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,...,n`.
.. figure:: figures/Schwefel01.png
:alt: Schwefel 1 function
:align: center
**Two-dimensional Schwefel 1 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.custom_bounds = [(-4.0, 4.0), (-4.0, 4.0)]
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
alpha = sqrt(pi)
return (sum(x**2.0))**alpha
# -------------------------------------------------------------------------------- #
class Schwefel02(Benchmark):
"""
Schwefel 2 test objective function.
This class defines the Schwefel 2 global optimization problem. This
is a unimodal minimization problem defined as follows:
.. math::
f_{\\text{Schwefel02}}(\\mathbf{x}) = \\sum_{i=1}^n \\left(\\sum_{j=1}^i x_i \\right)^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,...,n`.
.. figure:: figures/Schwefel02.png
:alt: Schwefel 2 function
:align: center
**Two-dimensional Schwefel 2 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.custom_bounds = [(-4.0, 4.0), (-4.0, 4.0)]
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
s = 0.0
for i in range(self.dimensions):
temp = 0.0
for j in range(i + 1):  # inner sum runs over x_1..x_i, as in the docstring formula
temp += x[j]
s += temp**2.0
return s
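# A fully vectorised equivalent of the double loop above (a sketch, assuming
# numpy's cumsum is reachable through this module's imports):
#
#     return sum(cumsum(x)**2.0)
#
# cumsum(x)[i] is exactly the prefix sum x_1 + ... + x_{i+1}.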
# -------------------------------------------------------------------------------- #
class Schwefel04(Benchmark):
"""
Schwefel 4 test objective function.
This class defines the Schwefel 4 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Schwefel04}}(\\mathbf{x}) = \\sum_{i=1}^n \\left[(x_i - 1)^2 + (x_1 - x_i^2)^2 \\right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 10]` for :math:`i=1,...,n`.
.. figure:: figures/Schwefel04.png
:alt: Schwefel 4 function
:align: center
**Two-dimensional Schwefel 4 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 1` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 0.0] * self.dimensions,
[10.0] * self.dimensions))
self.custom_bounds = [(0.0, 2.0), (0.0, 2.0)]
self.global_optimum = [1.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return sum((x - 1.0)**2.0 + (x[0] - x**2.0)**2.0)
# -------------------------------------------------------------------------------- #
class Schwefel06(Benchmark):
"""
Schwefel 6 test objective function.
This class defines the Schwefel 6 global optimization problem. This
is a unimodal minimization problem defined as follows:
.. math::
f_{\\text{Schwefel06}}(\\mathbf{x}) = \\max(\\lvert x_1 + 2x_2 - 7 \\rvert, \\lvert 2x_1 + x_2 - 5 \\rvert)
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,2`.
.. figure:: figures/Schwefel06.png
:alt: Schwefel 6 function
:align: center
**Two-dimensional Schwefel 6 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [1, 3]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.custom_bounds = [(-10.0, 10.0), (-10.0, 10.0)]
self.global_optimum = [1.0, 3.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
vector = [abs(x1 + 2*x2 - 7), abs(2*x1 + x2 - 5)]
return max(vector)
# -------------------------------------------------------------------------------- #
class Schwefel20(Benchmark):
"""
Schwefel 20 test objective function.
This class defines the Schwefel 20 global optimization problem. This
is a unimodal minimization problem defined as follows:
.. math::
f_{\\text{Schwefel20}}(\\mathbf{x}) = \\sum_{i=1}^n \\lvert x_i \\rvert
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,...,n`.
.. figure:: figures/Schwefel20.png
:alt: Schwefel 20 function
:align: center
**Two-dimensional Schwefel 20 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return sum(abs(x))
# -------------------------------------------------------------------------------- #
class Schwefel21(Benchmark):
"""
Schwefel 21 test objective function.
This class defines the Schwefel 21 global optimization problem. This
is a unimodal minimization problem defined as follows:
.. math::
f_{\\text{Schwefel21}}(\\mathbf{x}) = \\smash{\\displaystyle\\max_{1 \\leq i \\leq n}} \\lvert x_i \\rvert
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,...,n`.
.. figure:: figures/Schwefel21.png
:alt: Schwefel 21 function
:align: center
**Two-dimensional Schwefel 21 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return max(abs(x))
# -------------------------------------------------------------------------------- #
class Schwefel22(Benchmark):
"""
Schwefel 22 test objective function.
This class defines the Schwefel 22 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Schwefel22}}(\\mathbf{x}) = \\sum_{i=1}^n \\lvert x_i \\rvert + \\prod_{i=1}^n \\lvert x_i \\rvert
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,...,n`.
.. figure:: figures/Schwefel22.png
:alt: Schwefel 22 function
:align: center
**Two-dimensional Schwefel 22 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.custom_bounds = [(-10.0, 10.0), (-10.0, 10.0)]
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return sum(abs(x)) + prod(abs(x))
# -------------------------------------------------------------------------------- #
class Schwefel26(Benchmark):
"""
Schwefel 26 test objective function.
This class defines the Schwefel 26 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Schwefel26}}(\\mathbf{x}) = 418.9829n - \\sum_{i=1}^n x_i \\sin(\\sqrt{|x_i|})
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-512, 512]` for :math:`i=1,...,n`.
.. figure:: figures/Schwefel26.png
:alt: Schwefel 26 function
:align: center
**Two-dimensional Schwefel 26 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 420.968746` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-512.0] * self.dimensions,
[ 512.0] * self.dimensions))
self.global_optimum = [420.968746] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return 418.982887 * self.dimensions - sum(x * sin(sqrt(abs(x))))
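# Rough check: at the optimum x_i = 420.968746, x_i*sin(sqrt(x_i)) is about
# 418.98 per dimension, which cancels the leading constant and leaves ~0.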
# -------------------------------------------------------------------------------- #
class Schwefel36(Benchmark):
"""
Schwefel 36 test objective function.
This class defines the Schwefel 36 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Schwefel36}}(\\mathbf{x}) = -x_1x_2(72 - 2x_1 - 2x_2)
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 500]` for :math:`i=1,2`.
.. figure:: figures/Schwefel36.png
:alt: Schwefel 36 function
:align: center
**Two-dimensional Schwefel 36 function**
*Global optimum*: :math:`f(x_i) = -3456` for :math:`\\mathbf{x} = [12, 12]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 0.0] * self.dimensions,
[500.0] * self.dimensions))
self.custom_bounds = [(0.0, 20.0), (0.0, 20.0)]
self.global_optimum = [12.0, 12.0]
self.fglob = -3456.0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return -x1*x2*(72.0 - 2.0*x1 - 2.0*x2)
# -------------------------------------------------------------------------------- #
class Shekel05(Benchmark):
"""
Shekel 5 test objective function.
This class defines the Shekel 5 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Shekel05}}(\\mathbf{x}) = \\sum_{i=1}^{m} \\frac{1}{c_{i} + \\sum_{j=1}^{n} (x_{j} - a_{ij})^2 }
Where, in this exercise:
.. math::
\\mathbf{a} = \\begin{bmatrix} 4.0 & 4.0 & 4.0 & 4.0 \\\\ 1.0 & 1.0 & 1.0 & 1.0 \\\\ 8.0 & 8.0 & 8.0 & 8.0 \\\\ 6.0 & 6.0 & 6.0 & 6.0 \\\\ 3.0 & 7.0 & 3.0 & 7.0 \\end{bmatrix}
.. math::
\\mathbf{c} = \\begin{bmatrix} 0.1 \\\\ 0.2 \\\\ 0.2 \\\\ 0.4 \\\\ 0.6 \\end{bmatrix}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 10]` for :math:`i=1,...,4`.
*Global optimum*: :math:`f(x_i) = -10.1527` for :math:`x_i = 4` for :math:`i=1,...,4`
"""
def __init__(self, dimensions=4):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 0.0] * self.dimensions,
[10.0] * self.dimensions))
self.global_optimum = [4.0] * self.dimensions
self.fglob = -10.1527
def evaluator(self, x, *args):
self.fun_evals += 1
m = 5
A = asarray([[4.0, 4.0, 4.0, 4.0],
[1.0, 1.0, 1.0, 1.0],
[8.0, 8.0, 8.0, 8.0],
[6.0, 6.0, 6.0, 6.0],
[3.0, 7.0, 3.0, 7.0]])
C = asarray([0.1, 0.2, 0.2, 0.4, 0.6])
return -sum(1.0/(dot(x-a, x-a)+c) for a, c in zip(A, C))
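# A quick check of the reported optimum (a sketch; asarray is assumed to be
# in scope via this module's numpy imports):
#
#     p = Shekel05()
#     print(p.evaluator(asarray([4.0, 4.0, 4.0, 4.0])))   # ~ -10.1527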
# -------------------------------------------------------------------------------- #
class Shekel07(Benchmark):
"""
Shekel 7 test objective function.
This class defines the Shekel 7 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Shekel07}}(\\mathbf{x}) = \\sum_{i=1}^{m} \\frac{1}{c_{i} + \\sum_{j=1}^{n} (x_{j} - a_{ij})^2 }
Where, in this exercise:
.. math::
\\mathbf{a} = \\begin{bmatrix} 4.0 & 4.0 & 4.0 & 4.0 \\\\ 1.0 & 1.0 & 1.0 & 1.0 \\\\ 8.0 & 8.0 & 8.0 & 8.0 \\\\
6.0 & 6.0 & 6.0 & 6.0 \\\\ 3.0 & 7.0 & 3.0 & 7.0 \\\\ 2.0 & 9.0 & 2.0 & 9.0 \\\\ 5.0 & 5.0 & 3.0 & 3.0 \\end{bmatrix}
.. math::
\\mathbf{c} = \\begin{bmatrix} 0.1 \\\\ 0.2 \\\\ 0.2 \\\\ 0.4 \\\\ 0.4 \\\\ 0.6 \\\\ 0.3 \\end{bmatrix}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 10]` for :math:`i=1,...,4`.
*Global optimum*: :math:`f(x_i) = -10.3999` for :math:`x_i = 4` for :math:`i=1,...,4`
"""
def __init__(self, dimensions=4):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 0.0] * self.dimensions,
[10.0] * self.dimensions))
self.global_optimum = [4.0] * self.dimensions
self.fglob = -10.3999
def evaluator(self, x, *args):
self.fun_evals += 1
m = 7
A = asarray([[4.0, 4.0, 4.0, 4.0],
[1.0, 1.0, 1.0, 1.0],
[8.0, 8.0, 8.0, 8.0],
[6.0, 6.0, 6.0, 6.0],
[3.0, 7.0, 3.0, 7.0],
[2.0, 9.0, 2.0, 9.0],
[5.0, 5.0, 3.0, 3.0]])
C = asarray([0.1, 0.2, 0.2, 0.4, 0.4, 0.6, 0.3])
return -sum(1.0/(dot(x-a, x-a)+c) for a, c in zip(A, C))
# -------------------------------------------------------------------------------- #
class Shekel10(Benchmark):
"""
Shekel 10 test objective function.
This class defines the Shekel 10 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Shekel10}}(\\mathbf{x}) = \\sum_{i=1}^{m} \\frac{1}{c_{i} + \\sum_{j=1}^{n} (x_{j} - a_{ij})^2 }
Where, in this exercise:
.. math::
\\mathbf{a} = \\begin{bmatrix} 4.0 & 4.0 & 4.0 & 4.0 \\\\ 1.0 & 1.0 & 1.0 & 1.0 \\\\ 8.0 & 8.0 & 8.0 & 8.0 \\\\
6.0 & 6.0 & 6.0 & 6.0 \\\\ 3.0 & 7.0 & 3.0 & 7.0 \\\\ 2.0 & 9.0 & 2.0 & 9.0 \\\\ 5.0 & 5.0 & 3.0 & 3.0 \\\\
8.0 & 1.0 & 8.0 & 1.0 \\\\ 6.0 & 2.0 & 6.0 & 2.0 \\\\ 7.0 & 3.6 & 7.0 & 3.6 \\end{bmatrix}
.. math::
\\mathbf{c} = \\begin{bmatrix} 0.1 \\\\ 0.2 \\\\ 0.2 \\\\ 0.4 \\\\ 0.4 \\\\ 0.6 \\\\ 0.3 \\\\ 0.7 \\\\ 0.5 \\\\ 0.5 \\end{bmatrix}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 10]` for :math:`i=1,...,4`.
*Global optimum*: :math:`f(x_i) = -10.5319` for :math:`x_i = 4` for :math:`i=1,...,4`
"""
def __init__(self, dimensions=4):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 0.0] * self.dimensions,
[10.0] * self.dimensions))
self.global_optimum = [4.0] * self.dimensions
self.fglob = -10.5319
def evaluator(self, x, *args):
self.fun_evals += 1
m = 10
A = asarray([[4.0, 4.0, 4.0, 4.0],
[1.0, 1.0, 1.0, 1.0],
[8.0, 8.0, 8.0, 8.0],
[6.0, 6.0, 6.0, 6.0],
[3.0, 7.0, 3.0, 7.0],
[2.0, 9.0, 2.0, 9.0],
[5.0, 5.0, 3.0, 3.0],
[8.0, 1.0, 8.0, 1.0],
[6.0, 2.0, 6.0, 2.0],
[7.0, 3.6, 7.0, 3.6]])
C = asarray([0.1, 0.2, 0.2, 0.4, 0.4, 0.6, 0.3, 0.7, 0.5, 0.5])
return -sum(1.0/(dot(x-a, x-a)+c) for a, c in zip(A, C))
# -------------------------------------------------------------------------------- #
class Shubert01(Benchmark):
"""
Shubert 1 test objective function.
This class defines the Shubert 1 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Shubert01}}(\\mathbf{x}) = \\left( \\sum\\limits_{i=1}^{5} i\\cos[(i+1)x_1 + i] \\right) \\left( \\sum\\limits_{i=1}^{5} i\\cos[(i+1)x_2 + i] \\right)
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,...,n`.
.. figure:: figures/Shubert01.png
:alt: Shubert 1 function
:align: center
**Two-dimensional Shubert 1 function**
*Global optimum*: :math:`f(x_i) = -186.7309` for :math:`\\mathbf{x} = [-7.0835, 4.8580]` (and many others).
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [-7.0835, 4.8580]
self.fglob = -186.7309
def evaluator(self, x, *args):
self.fun_evals += 1
s1 = s2 = 0.0
for i in range(1, 6):
s1 = s1+i*cos((i+1)*x[0]+i)
s2 = s2+i*cos((i+1)*x[1]+i)
y = s1*s2
return y
# -------------------------------------------------------------------------------- #
class Shubert03(Benchmark):
"""
Shubert 3 test objective function.
This class defines the Shubert 3 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Shubert03}}(\\mathbf{x}) = \\sum_{i=1}^n \\sum_{j=1}^5 j \\sin \\left[(j+1)x_i \\right] + j
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,...,n`.
.. figure:: figures/Shubert03.png
:alt: Shubert 3 function
:align: center
**Two-dimensional Shubert 3 function**
*Global optimum*: :math:`f(x_i) = -24.062499` for :math:`\\mathbf{x} = [5.791794, 5.791794]` (and many others).
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [5.791794, 5.791794]
self.fglob = -24.062499
def evaluator(self, x, *args):
self.fun_evals += 1
return -sin(2.0*x[0]+1.0) - 2.0*sin(3.0*x[0]+2.0) - 3.0*sin(4.0*x[0]+3.0) - 4.0*sin(5.0*x[0]+4.0) \
-5.0*sin(6.0*x[0]+5.0) - sin(2.0*x[1]+1.0) - 2.0*sin(3.0*x[1]+2.0) - 3.0*sin(4.0*x[1]+3.0) \
-4.0*sin(5.0*x[1]+4.0) - 5.0*sin(6.0*x[1]+5.0)
# -------------------------------------------------------------------------------- #
class Shubert04(Benchmark):
"""
Shubert 4 test objective function.
This class defines the Shubert 4 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Shubert04}}(\\mathbf{x}) = \\sum_{i=1}^n \\sum_{j=1}^5 j \\cos \\left[(j+1)x_i \\right] + j
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,...,n`.
.. figure:: figures/Shubert04.png
:alt: Shubert 4 function
:align: center
**Two-dimensional Shubert 4 function**
*Global optimum*: :math:`f(x_i) = -29.016015` for :math:`\\mathbf{x} = [-0.80032121, -7.08350592]` (and many others).
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [-0.80032121, -7.08350592]
self.fglob = -29.016015
def evaluator(self, x, *args):
self.fun_evals += 1
return -cos(2.0*x[0]+1.0) - 2.0*cos(3.0*x[0]+2.0) - 3.0*cos(4.0*x[0]+3.0) - 4.0*cos(5.0*x[0]+4.0) \
-5.0*cos(6.0*x[0]+5.0) - cos(2.0*x[1]+1.0) - 2.0*cos(3.0*x[1]+2.0) - 3.0*cos(4.0*x[1]+3.0) \
-4.0*cos(5.0*x[1]+4.0) - 5.0*cos(6.0*x[1]+5.0)
# -------------------------------------------------------------------------------- #
class SineEnvelope(Benchmark):
"""
SineEnvelope test objective function.
This class defines the SineEnvelope global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{SineEnvelope}}(\\mathbf{x}) = -\\sum_{i=1}^{n-1}\\left[\\frac{\\sin^2(\\sqrt{x_{i+1}^2+x_{i}^2}-0.5)}{(0.001(x_{i+1}^2+x_{i}^2)+1)^2}+0.5\\right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,...,n`.
.. figure:: figures/SineEnvelope.png
:alt: SineEnvelope function
:align: center
**Two-dimensional SineEnvelope function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.custom_bounds = [(-20, 20), (-20, 20)]
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
X1 = x[0:-1]
X2 = x[1:]
X12X22 = X1**2 + X2**2
return sum((sin(sqrt(X12X22))**2 - 0.5)/(1 + 0.001*X12X22)**2 + 0.5)
# -------------------------------------------------------------------------------- #
class SixHumpCamel(Benchmark):
"""
Six Hump Camel test objective function.
This class defines the Six Hump Camel global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{SixHumpCamel}}(\\mathbf{x}) = 4x_1^2+x_1x_2-4x_2^2-2.1x_1^4+4x_2^4+\\frac{1}{3}x_1^6
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-5, 5]` for :math:`i=1,2`.
.. figure:: figures/SixHumpCamel.png
:alt: Six Hump Camel function
:align: center
**Two-dimensional Six Hump Camel function**
*Global optimum*: :math:`f(x_i) = -1.031628453489877` for :math:`\\mathbf{x} = [0.08984201368301331 , -0.7126564032704135]`
or :math:`\\mathbf{x} = [-0.08984201368301331, 0.7126564032704135]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-5.0] * self.dimensions,
[ 5.0] * self.dimensions))
self.custom_bounds = [(-2, 2), (-1.5, 1.5)]
self.global_optimum = [(0.08984201368301331 , -0.7126564032704135),
(-0.08984201368301331, 0.7126564032704135)]
self.fglob = -1.031628
def evaluator(self, x, *args):
self.fun_evals += 1
return (4 - 2.1*x[0]**2 + x[0]**4/3)*x[0]**2 + x[0]*x[1] + (4*x[1]**2 - 4)*x[1]**2
# -------------------------------------------------------------------------------- #
class Sodp(Benchmark):
"""
Sodp test objective function.
This class defines the Sum Of Different Powers global optimization problem. This
is a unimodal minimization problem defined as follows:
.. math::
f_{\\text{Sodp}}(\\mathbf{x}) = \\sum_{i=1}^{n} \\lvert{x_{i}}\\rvert^{i + 1}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-1, 1]` for :math:`i=1,...,n`.
.. figure:: figures/Sodp.png
:alt: Sodp function
:align: center
**Two-dimensional Sum Of Different Powers function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-1.0] * self.dimensions,
[ 1.0] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
i = arange(1, self.dimensions+1)
return sum(abs(x) ** (i+1))
# -------------------------------------------------------------------------------- #
class Sphere(Benchmark):
"""
Sphere test objective function.
This class defines the Sphere global optimization problem. This
is a unimodal minimization problem defined as follows:
.. math::
f_{\\text{Sphere}}(\\mathbf{x}) = \\sum_{i=1}^{n} x_i^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-5.12, 5.12]` for :math:`i=1,...,n`.
.. figure:: figures/Sphere.png
:alt: Sphere function
:align: center
**Two-dimensional Sphere function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-5.12] * self.dimensions,
[ 5.12] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return sum(x**2)
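# A minimal usage sketch for any of the Benchmark subclasses in this module
# (a sketch only; asarray is assumed to come from the module's numpy imports,
# and nothing here is executed at import time):
#
#     problem = Sphere(dimensions=3)
#     x0 = asarray(problem.global_optimum)
#     assert abs(problem.evaluator(x0) - problem.fglob) < 1e-8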
# -------------------------------------------------------------------------------- #
class Step(Benchmark):
"""
Step test objective function.
This class defines the Step global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Step}}(\\mathbf{x}) = \\sum_{i=1}^{n} \\left ( \\lfloor x_i \\rfloor + 0.5 \\right )^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,...,n`.
.. figure:: figures/Step.png
:alt: Step function
:align: center
**Two-dimensional Step function**
*Global optimum*: :math:`f(x_i) = 0.5` for :math:`x_i = 0.5` for :math:`i=1,...,n` (in the default two-dimensional case)
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.custom_bounds = [(-5, 5), (-5, 5)]
self.global_optimum = [0.5] * self.dimensions
self.fglob = 0.5
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return sum((floor(x) + 0.5)**2.0)
# -------------------------------------------------------------------------------- #
class Stochastic(Benchmark):
"""
Stochastic test objective function.
This class defines a Stochastic global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Stochastic}}(\\mathbf{x}) = \\sum_{i=1}^{n} \\epsilon_i \\left | {x_i - \\frac{1}{i}} \\right |
The variable :math:`\\epsilon_i, (i=1,...,n)` is a random variable uniformly distributed in :math:`[0, 1]`.
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-5, 5]` for :math:`i=1,...,n`.
.. figure:: figures/Stochastic.png
:alt: Stochastic function
:align: center
**Two-dimensional Stochastic function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 1/i` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-5.0] * self.dimensions,
[ 5.0] * self.dimensions))
self.global_optimum = [1.0/i for i in range(1, self.dimensions+1)]
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
rnd = uniform(0.0, 1.0, size=(self.dimensions, ))
rng = arange(1, self.dimensions+1)
return sum(rnd*abs(x - 1.0/rng))
# -------------------------------------------------------------------------------- #
class StretchedV(Benchmark):
"""
StretchedV test objective function.
This class defines the Stretched V global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{StretchedV}}(\\mathbf{x}) = \\sum_{i=1}^{n-1} t^{1/4} \\left[\\sin (50t^{0.1}) + 1 \\right]^2
Where, in this exercise:
.. math::
t = x_{i+1}^2 + x_i^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,...,n`.
.. figure:: figures/StretchedV.png
:alt: StretchedV function
:align: center
**Two-dimensional StretchedV function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [-9.38723188, 9.34026753]` when :math:`n = 2`.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10] * self.dimensions,
[ 10] * self.dimensions))
self.global_optimum = [-9.38723188, 9.34026753]
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
s = 0.0
for i in range(self.dimensions-1):
t = x[i+1]*x[i+1] + x[i]*x[i]
s += t**0.25 * (sin(50.0*t**0.1 + 1.0))**2.0
return s
# -------------------------------------------------------------------------------- #
class StyblinskiTang(Benchmark):
"""
StyblinskiTang test objective function.
This class defines the Styblinski-Tang global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{StyblinskiTang}}(\\mathbf{x}) = \\frac{1}{2} \\sum_{i=1}^{n} \\left(x_i^4 - 16x_i^2 + 5x_i \\right)
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-5, 5]` for :math:`i=1,...,n`.
.. figure:: figures/StyblinskiTang.png
:alt: StyblinskiTang function
:align: center
**Two-dimensional Styblinski-Tang function**
*Global optimum*: :math:`f(x_i) = -39.16616570377142n` for :math:`x_i = -2.903534018185960` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-5.0] * self.dimensions,
[ 5.0] * self.dimensions))
self.global_optimum = [-2.903534018185960] * self.dimensions
self.fglob = -39.16616570377142*self.dimensions
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return sum(x**4 - 16*x**2 + 5*x)/2
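# Worked check for a single dimension: with x = -2.903534018185960,
# (x**4 - 16*x**2 + 5*x)/2 ~= (71.07 - 134.89 - 14.52)/2 ~= -39.166,
# which matches fglob / n.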
# -------------------------------------------------------------------------------- #
class TestTubeHolder(Benchmark):
"""
TestTubeHolder test objective function.
This class defines the TestTubeHolder global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{TestTubeHolder}}(\\mathbf{x}) = - 4 \\left | {e^{\\left|{\\cos\\left(\\frac{1}{200} x_{1}^{2} + \\frac{1}{200} x_{2}^{2}\\right)}\\right|} \\sin\\left(x_{1}\\right) \\cos\\left(x_{2}\\right)}\\right |
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/TestTubeHolder.png
:alt: TestTubeHolder function
:align: center
**Two-dimensional TestTubeHolder function**
*Global optimum*: :math:`f(x_i) = -10.872299901558` for :math:`\\mathbf{x} = [-\\pi/2, 0]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [-pi/2, 0.0]
self.fglob = -10.87229990155800
def evaluator(self, x, *args):
self.fun_evals += 1
return -4*abs(sin(x[0])*cos(x[1])*exp(abs(cos((x[0]**2 + x[1]**2)/200))))
# -------------------------------------------------------------------------------- #
class Treccani(Benchmark):
"""
Treccani test objective function.
This class defines the Treccani global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Treccani}}(\\mathbf{x}) = x_1^4 + 4x_1^3 + 4x_1^2 + x_2^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-5, 5]` for :math:`i=1,2`.
.. figure:: figures/Treccani.png
:alt: Treccani function
:align: center
**Two-dimensional Treccani function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [-2, 0]` or :math:`\\mathbf{x} = [0, 0]`.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-5.0] * self.dimensions,
[ 5.0] * self.dimensions))
self.custom_bounds = [(-2, 2), (-2, 2)]
self.global_optimum = [-2.0, 0.0]
self.fglob = 0
def evaluator(self, x, *args):
self.fun_evals += 1
return x[0]**4 + 4.0*x[0]**3 + 4.0*x[0]**2 + x[1]**2
# -------------------------------------------------------------------------------- #
class Trefethen(Benchmark):
"""
Trefethen test objective function.
This class defines the Trefethen global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Trefethen}}(\\mathbf{x}) = 0.25 x_{1}^{2} + 0.25 x_{2}^{2} + e^{\\sin\\left(50 x_{1}\\right)} - \\sin\\left(10 x_{1} + 10 x_{2}\\right) + \\sin\\left(60 e^{x_{2}}\\right) + \\sin\\left[70 \\sin\\left(x_{1}\\right)\\right] + \\sin\\left[\\sin\\left(80 x_{2}\\right)\\right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/Trefethen.png
:alt: Trefethen function
:align: center
**Two-dimensional Trefethen function**
*Global optimum*: :math:`f(x_i) = -3.3068686474` for :math:`\\mathbf{x} = [-0.02440307923, 0.2106124261]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.custom_bounds = [(-5, 5), (-5, 5)]
self.global_optimum = [-0.02440307923, 0.2106124261]
self.fglob = -3.3068686474
def evaluator(self, x, *args):
self.fun_evals += 1
F = exp(sin(50*x[0])) + sin(60*exp(x[1])) + sin(70*sin(x[0])) + \
sin(sin(80*x[1])) - sin(10*(x[0]+x[1])) + 1.0/4*(x[0]**2 + x[1]**2)
return F
# -------------------------------------------------------------------------------- #
class ThreeHumpCamel(Benchmark):
"""
Three Hump Camel test objective function.
This class defines the Three Hump Camel global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{ThreeHumpCamel}}(\\mathbf{x}) = 2x_1^2 - 1.05x_1^4 + \\frac{x_1^6}{6} + x_1x_2 + x_2^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-5, 5]` for :math:`i=1,2`.
.. figure:: figures/ThreeHumpCamel.png
:alt: Three Hump Camel function
:align: center
**Two-dimensional Three Hump Camel function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [0, 0]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-5.0] * self.dimensions,
[ 5.0] * self.dimensions))
self.custom_bounds = [(-2, 2), (-1.5, 1.5)]
self.global_optimum = [0.0, 0.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return 2.0*x1**2.0 - 1.05*x1**4.0 + x1**6/6.0 + x1*x2 + x2**2.0
# -------------------------------------------------------------------------------- #
class Trid(Benchmark):
"""
Trid test objective function.
This class defines the Trid global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Trid}}(\\mathbf{x}) = \\sum_{i=1}^{n}(x_i - 1)^2 - \\sum_{i=2}^{n} x_ix_{i-1}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 20]` for :math:`i=1,...,6`.
*Global optimum*: :math:`f(x_i) = -50` for :math:`\\mathbf{x} = [6, 10, 12, 12, 10, 6]`
"""
def __init__(self, dimensions=6):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 0.0] * self.dimensions,
[20.0] * self.dimensions))
self.global_optimum = [6, 10, 12, 12, 10, 6]
self.fglob = -50.0
def evaluator(self, x, *args):
self.fun_evals += 1
return sum((x - 1.0)**2.0) - sum(x[1:]*x[0:-1])
# -------------------------------------------------------------------------------- #
class Trigonometric01(Benchmark):
"""
Trigonometric 1 test objective function.
This class defines the Trigonometric 1 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Trigonometric01}}(\\mathbf{x}) = \\sum_{i=1}^{n} \\left [n - \\sum_{j=1}^{n} \\cos(x_j) + i \\left(1 - \\cos(x_i) - \\sin(x_i) \\right ) \\right]^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, \\pi]` for :math:`i=1,...,n`.
.. figure:: figures/Trigonometric01.png
:alt: Trigonometric 1 function
:align: center
**Two-dimensional Trigonometric 1 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([0.0] * self.dimensions,
[ pi] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
rng = arange(1.0, self.dimensions+1)
# Outer sum runs over i; the inner sum of cos(x_j) is shared by every term.
return sum((self.dimensions - sum(cos(x)) + rng*(1.0 - cos(x) - sin(x)))**2.0)
# -------------------------------------------------------------------------------- #
class Trigonometric02(Benchmark):
"""
Trigonometric 2 test objective function.
This class defines the Trigonometric 2 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Trigonometric2}}(\\mathbf{x}) = 1 + \\sum_{i=1}^{n} 8 \\sin^2 \\left[7(x_i - 0.9)^2 \\right] + 6 \\sin^2 \\left[14(x_i - 0.9)^2 \\right] + (x_i - 0.9)^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-500, 500]` for :math:`i=1,...,n`.
.. figure:: figures/Trigonometric02.png
:alt: Trigonometric 2 function
:align: center
**Two-dimensional Trigonometric 2 function**
*Global optimum*: :math:`f(x_i) = 1` for :math:`x_i = 0.9` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-500.0] * self.dimensions,
[ 500.0] * self.dimensions))
self.custom_bounds = [(0, 2), (0, 2)]
self.global_optimum = [0.9] * self.dimensions
self.fglob = 1.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return 1.0 + sum(8.0*(sin(7.0*(x - 0.9)**2.0)**2.0) + 6.0*(sin(14.0*(x - 0.9)**2.0)**2.0) + (x - 0.9)**2.0)
# -------------------------------------------------------------------------------- #
class Tripod(Benchmark):
"""
Tripod test objective function.
This class defines the Tripod global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Tripod}}(\\mathbf{x}) = p(x_2) \\left[1 + p(x_1) \\right] + \\lvert x_1 + 50p(x_2) \\left[1 - 2p(x_1) \\right] \\rvert + \\lvert x_2 + 50\\left[1 - 2p(x_2)\\right] \\rvert
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,2`.
.. figure:: figures/Tripod.png
:alt: Tripod function
:align: center
**Two-dimensional Tripod function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [0, -50]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.global_optimum = [0.0, -50.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
p1 = float(x1 >= 0)
p2 = float(x2 >= 0)
return p2*(1.0 + p1) + abs(x1 + 50.0*p2*(1.0-2.0*p1)) + abs(x2 + 50.0*(1.0-2.0*p2))
# -------------------------------------------------------------------------------- #
class Ursem01(Benchmark):
"""
Ursem 1 test objective function.
This class defines the Ursem 1 global optimization problem. This
is a unimodal minimization problem defined as follows:
.. math::
f_{\\text{Ursem01}}(\\mathbf{x}) = - \\sin(2x_1 - 0.5 \\pi) - 3 \\cos(x_2) - 0.5x_1
Here, :math:`n` represents the number of dimensions and :math:`x_1 \\in [-2.5, 3]`, :math:`x_2 \\in [-2, 2]`.
.. figure:: figures/Ursem01.png
:alt: Ursem 1 function
:align: center
**Two-dimensional Ursem 1 function**
*Global optimum*: :math:`f(x_i) = -4.8168` for :math:`\\mathbf{x} = [1.69714, 0.0]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = [(-2.5, 3.0), (-2.0, 2.0)]
self.global_optimum = [1.69714, 0.0]
self.fglob = -4.8168
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return -sin(2*x1 - 0.5*pi) - 3.0*cos(x2) - 0.5*x1
# -------------------------------------------------------------------------------- #
from __future__ import print_function, division, absolute_import
import time
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import skimage.morphology
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
from imgaug import dtypes as iadt
from imgaug.testutils import array_equal_lists, keypoints_equal, reseed
def main():
time_start = time.time()
test_Affine()
test_AffineCv2()
test_PiecewiseAffine()
test_PerspectiveTransform()
test_ElasticTransformation()
test_Rot90()
time_end = time.time()
print("<%s> Finished without errors in %.4fs." % (__file__, time_end - time_start,))
def test_Affine():
reseed()
base_img = np.array([[0, 0, 0],
[0, 255, 0],
[0, 0, 0]], dtype=np.uint8)
base_img = base_img[:, :, np.newaxis]
images = np.array([base_img])
images_list = [base_img]
outer_pixels = ([], [])
for i in sm.xrange(base_img.shape[0]):
for j in sm.xrange(base_img.shape[1]):
if i != j:
outer_pixels[0].append(i)
outer_pixels[1].append(j)
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
# no translation/scale/rotate/shear, shouldn't change anything
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
# ---------------------
# scale
# ---------------------
# zoom in
aug = iaa.Affine(scale=1.75, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
observed = aug_det.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
observed = aug.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
observed = aug_det.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
observed = aug.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x < 0
assert observed[0].keypoints[0].y < 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x > 2
assert observed[0].keypoints[2].y > 2
observed = aug_det.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x < 0
assert observed[0].keypoints[0].y < 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x > 2
assert observed[0].keypoints[2].y > 2
# zoom in only on x axis
aug = iaa.Affine(scale={"x": 1.75, "y": 1.0}, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
observed = aug_det.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
observed = aug.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
observed = aug_det.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
observed = aug.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x < 0
assert observed[0].keypoints[0].y == 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x > 2
assert observed[0].keypoints[2].y == 2
observed = aug_det.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x < 0
assert observed[0].keypoints[0].y == 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x > 2
assert observed[0].keypoints[2].y == 2
# zoom in only on y axis
aug = iaa.Affine(scale={"x": 1.0, "y": 1.75}, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
observed = aug_det.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
observed = aug.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
observed = aug_det.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
observed = aug.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x == 0
assert observed[0].keypoints[0].y < 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x == 2
assert observed[0].keypoints[2].y > 2
observed = aug_det.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x == 0
assert observed[0].keypoints[0].y < 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x == 2
assert observed[0].keypoints[2].y > 2
# zoom out
# this one uses a 4x4 area of all 255, which is zoomed out to a 4x4 area
# in which the center 2x2 area is 255
# zoom in should probably be adapted to this style
# no separate tests here for x/y axis, should work fine if zoom in works with that
aug = iaa.Affine(scale=0.49, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.ones((4, 4, 1), dtype=np.uint8) * 255
images = np.array([image])
images_list = [image]
outer_pixels = ([], [])
for y in sm.xrange(4):
xs = sm.xrange(4) if y in [0, 3] else [0, 3]
for x in xs:
outer_pixels[0].append(y)
outer_pixels[1].append(x)
inner_pixels = ([1, 1, 2, 2], [1, 2, 1, 2])
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=3, y=0),
ia.Keypoint(x=0, y=3), ia.Keypoint(x=3, y=3)],
shape=image.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=0.765, y=0.765), ia.Keypoint(x=2.235, y=0.765),
ia.Keypoint(x=0.765, y=2.235), ia.Keypoint(x=2.235, y=2.235)],
shape=image.shape)]
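# Where the expected coordinates come from (assuming scaling about the image
# centre at (1.5, 1.5)): 1.5 + 0.49*(0 - 1.5) = 0.765 and
# 1.5 + 0.49*(3 - 1.5) = 2.235.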
observed = aug.augment_images(images)
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
observed = aug_det.augment_images(images)
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
observed = aug.augment_images(images_list)
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
observed = aug_det.augment_images(images_list)
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# varying scales
aug = iaa.Affine(scale={"x": (0.5, 1.5), "y": (0.5, 1.5)}, translate_px=0,
rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 2, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]], dtype=np.uint8) * 100
image = image[:, :, np.newaxis]
images = np.array([image])
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
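# The stochastic augmenter should produce a different image on most
# consecutive calls, while its deterministic copy must never change.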
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.8)
assert nb_changed_aug_det == 0
aug = iaa.Affine(scale=iap.Uniform(0.7, 0.9))
assert isinstance(aug.scale, iap.Uniform)
assert isinstance(aug.scale.a, iap.Deterministic)
assert isinstance(aug.scale.b, iap.Deterministic)
assert 0.7 - 1e-8 < aug.scale.a.value < 0.7 + 1e-8
assert 0.9 - 1e-8 < aug.scale.b.value < 0.9 + 1e-8
# ---------------------
# translate
# ---------------------
# move one pixel to the right
aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, 1] = 255
image_aug[1, 2] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)], shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=2, y=1)], shape=base_img.shape)]
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# move one pixel to the right
# with backend = skimage
aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0, shear=0, backend="skimage")
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
# move one pixel to the right
# with backend = skimage, order=ALL
aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0, shear=0, backend="skimage", order=ia.ALL)
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
# move one pixel to the right
# with backend = skimage, order=list
aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0, shear=0, backend="skimage", order=[0, 1, 3])
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
# move one pixel to the right
# with backend = cv2, order=list
aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0, shear=0, backend="cv2", order=[0, 1, 3])
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
# move one pixel to the right
# with backend = cv2, order=StochasticParameter
aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0, shear=0, backend="cv2", order=iap.Choice([0, 1, 3]))
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
# move one pixel to the bottom
aug = iaa.Affine(scale=1.0, translate_px={"x": 0, "y": 1}, rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, 1] = 255
image_aug[2, 1] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)], shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=2)], shape=base_img.shape)]
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# move 33% (one pixel) to the right
aug = iaa.Affine(scale=1.0, translate_percent={"x": 0.3333, "y": 0}, rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, 1] = 255
image_aug[1, 2] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)], shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=2, y=1)], shape=base_img.shape)]
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
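# note on the 0.3333 above (rough arithmetic): on a 3px wide image, translate_percent x=0.3333
# corresponds to 0.3333 * 3, i.e. about 1.0 px, which is presumably rounded to a full 1px shift,
# hence the same expected output as the translate_px={"x": 1} test further up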
# move 33% (one pixel) to the bottom
aug = iaa.Affine(scale=1.0, translate_percent={"x": 0, "y": 0.3333}, rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, 1] = 255
image_aug[2, 1] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)], shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=2)], shape=base_img.shape)]
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# 0-1px to left/right and 0-1px to top/bottom
aug = iaa.Affine(scale=1.0, translate_px={"x": (-1, 1), "y": (-1, 1)}, rotate=0, shear=0)
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
centers_aug = np.copy(image).astype(np.int32) * 0
centers_aug_det = np.copy(image).astype(np.int32) * 0
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert len(observed_aug[0].nonzero()[0]) == 1
assert len(observed_aug_det[0].nonzero()[0]) == 1
centers_aug += (observed_aug[0] > 0)
centers_aug_det += (observed_aug_det[0] > 0)
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
assert (centers_aug > int(nb_iterations * (1/9 * 0.6))).all()
assert (centers_aug < int(nb_iterations * (1/9 * 1.4))).all()
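# rough expectation behind the two bounds above: the x and y offsets are each sampled from
# {-1, 0, 1}, giving 9 (presumably roughly equally likely) positions for the single white pixel,
# so each position should be hit about 1000/9, i.e. ~111 times; the asserts allow roughly
# 0.6 * 111 ~ 66 to 1.4 * 111 ~ 155 hits per position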
aug = iaa.Affine(translate_percent=iap.Uniform(0.7, 0.9))
assert isinstance(aug.translate, iap.Uniform)
assert isinstance(aug.translate.a, iap.Deterministic)
assert isinstance(aug.translate.b, iap.Deterministic)
assert 0.7 - 1e-8 < aug.translate.a.value < 0.7 + 1e-8
assert 0.9 - 1e-8 < aug.translate.b.value < 0.9 + 1e-8
aug = iaa.Affine(translate_px=iap.DiscreteUniform(1, 10))
assert isinstance(aug.translate, iap.DiscreteUniform)
assert isinstance(aug.translate.a, iap.Deterministic)
assert isinstance(aug.translate.b, iap.Deterministic)
assert aug.translate.a.value == 1
assert aug.translate.b.value == 10
# ---------------------
# translate heatmaps
# ---------------------
heatmaps = ia.HeatmapsOnImage(
np.float32([
[0.0, 0.5, 0.75],
[0.0, 0.5, 0.75],
[0.75, 0.75, 0.75],
]),
shape=(3, 3, 3)
)
arr_expected_1px_right = np.float32([
[0.0, 0.0, 0.5],
[0.0, 0.0, 0.5],
[0.0, 0.75, 0.75],
])
aug = iaa.Affine(translate_px={"x": 1})
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == heatmaps.shape
assert heatmaps.min_value - 1e-6 < observed.min_value < heatmaps.min_value + 1e-6
assert heatmaps.max_value - 1e-6 < observed.max_value < heatmaps.max_value + 1e-6
assert np.array_equal(observed.get_arr(), arr_expected_1px_right)
# should still use mode=constant cval=0 even when other settings chosen
aug = iaa.Affine(translate_px={"x": 1}, cval=255)
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == heatmaps.shape
assert heatmaps.min_value - 1e-6 < observed.min_value < heatmaps.min_value + 1e-6
assert heatmaps.max_value - 1e-6 < observed.max_value < heatmaps.max_value + 1e-6
assert np.array_equal(observed.get_arr(), arr_expected_1px_right)
aug = iaa.Affine(translate_px={"x": 1}, mode="edge", cval=255)
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == heatmaps.shape
assert heatmaps.min_value - 1e-6 < observed.min_value < heatmaps.min_value + 1e-6
assert heatmaps.max_value - 1e-6 < observed.max_value < heatmaps.max_value + 1e-6
assert np.array_equal(observed.get_arr(), arr_expected_1px_right)
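# rationale for the three heatmap tests above (presumed): heatmaps are value maps in [0.0, 1.0],
# so filling newly exposed border regions with the image cval (e.g. 255) or via a mode such as
# "edge" would create heat that was never there; the augmenter is therefore expected to always
# use a constant fill of 0.0 for heatmaps, independent of cval/mode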
# ---------------------
# rotate
# ---------------------
# rotate by 90 degrees
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=90, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, :] = 255
image_aug[0, 1] = 255
image_aug[1, 1] = 255
image_aug[2, 1] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=1), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=1)], shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=1, y=2)], shape=base_img.shape)]
observed = aug.augment_images(images)
observed[observed >= 100] = 255
observed[observed < 100] = 0
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
observed[observed >= 100] = 255
observed[observed < 100] = 0
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
observed[0][observed[0] >= 100] = 255
observed[0][observed[0] < 100] = 0
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
observed[0][observed[0] >= 100] = 255
observed[0][observed[0] < 100] = 0
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# rotate by StochasticParameter
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=iap.Uniform(10, 20), shear=0)
assert isinstance(aug.rotate, iap.Uniform)
assert isinstance(aug.rotate.a, iap.Deterministic)
assert aug.rotate.a.value == 10
assert isinstance(aug.rotate.b, iap.Deterministic)
assert aug.rotate.b.value == 20
# random rotation 0-364 degrees
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=(0, 364), shear=0)
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
pixels_sums_aug = np.copy(image).astype(np.int32) * 0
pixels_sums_aug_det = np.copy(image).astype(np.int32) * 0
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
pixels_sums_aug += (observed_aug[0] > 100)
pixels_sums_aug_det += (observed_aug_det[0] > 100)
assert nb_changed_aug >= int(nb_iterations * 0.9)
assert nb_changed_aug_det == 0
# center pixel, should always be white when rotating line around center
assert pixels_sums_aug[1, 1] > (nb_iterations * 0.98)
assert pixels_sums_aug[1, 1] < (nb_iterations * 1.02)
# outer pixels, should sometimes be white
# the thresholds here had to be set quite tolerantly; the middle pixels at the top/left/bottom/right edges
# get more activation than expected
outer_pixels = ([0, 0, 0, 1, 1, 2, 2, 2], [0, 1, 2, 0, 2, 0, 1, 2])
assert (pixels_sums_aug[outer_pixels] > int(nb_iterations * (2/8 * 0.4))).all()
assert (pixels_sums_aug[outer_pixels] < int(nb_iterations * (2/8 * 2.0))).all()
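# rough arithmetic behind the 2/8 factors above: the white 3px line always covers the center
# pixel and, per random rotation, roughly 2 of its 3 pixels land somewhere on the ring of
# 8 outer pixels, so each outer pixel is expected to be white in about 2/8 of the iterations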
for backend in ["auto", "cv2", "skimage"]:
# measure alignment between images and heatmaps when rotating
aug = iaa.Affine(rotate=45, backend=backend)
image = np.zeros((7, 6), dtype=np.uint8)
image[:, 2:3+1] = 255
hm = ia.HeatmapsOnImage(image.astype(np.float32)/255, shape=(7, 6))
img_aug = aug.augment_image(image)
hm_aug = aug.augment_heatmaps([hm])[0]
assert hm_aug.shape == (7, 6)
assert hm_aug.arr_0to1.shape == (7, 6, 1)
img_aug_mask = img_aug > 255*0.1
hm_aug_mask = hm_aug.arr_0to1 > 0.1
same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])
assert (same / img_aug_mask.size) >= 0.99
# measure alignment between images and heatmaps when rotating
# here with smaller heatmaps
aug = iaa.Affine(rotate=45, backend=backend)
image = np.zeros((56, 48), dtype=np.uint8)
image[:, 16:24+1] = 255
hm = ia.HeatmapsOnImage(
ia.imresize_single_image(image, (28, 24), interpolation="cubic").astype(np.float32)/255,
shape=(56, 48)
)
img_aug = aug.augment_image(image)
hm_aug = aug.augment_heatmaps([hm])[0]
assert hm_aug.shape == (56, 48)
assert hm_aug.arr_0to1.shape == (28, 24, 1)
img_aug_mask = img_aug > 255*0.1
hm_aug_mask = ia.imresize_single_image(hm_aug.arr_0to1, img_aug.shape[0:2], interpolation="cubic") > 0.1
same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])
assert (same / img_aug_mask.size) >= 0.9
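# note on the alignment metric used in this loop: image and heatmap are both binarized at 10%
# of their value range and compared pixel-wise; the threshold is relaxed from 0.99 to 0.9 for
# the second case because the heatmap is stored at half resolution and the cubic resizing
# blurs the mask borders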
# ---------------------
# shear
# ---------------------
# TODO
# shear by StochasticParameter
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=0, shear=iap.Uniform(10, 20))
assert isinstance(aug.shear, iap.Uniform)
assert isinstance(aug.shear.a, iap.Deterministic)
assert aug.shear.a.value == 10
assert isinstance(aug.shear.b, iap.Deterministic)
assert aug.shear.b.value == 20
# ---------------------
# cval
# ---------------------
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0, cval=128)
aug_det = aug.to_deterministic()
image = np.ones((3, 3, 1), dtype=np.uint8) * 255
images = np.array([image])
images_list = [image]
observed = aug.augment_images(images)
assert (observed[0] > 128 - 30).all()
assert (observed[0] < 128 + 30).all()
observed = aug_det.augment_images(images)
assert (observed[0] > 128 - 30).all()
assert (observed[0] < 128 + 30).all()
observed = aug.augment_images(images_list)
assert (observed[0] > 128 - 30).all()
assert (observed[0] < 128 + 30).all()
observed = aug_det.augment_images(images_list)
assert (observed[0] > 128 - 30).all()
assert (observed[0] < 128 + 30).all()
# random cvals
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0, cval=(0, 255))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
averages = []
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
averages.append(int(np.average(observed_aug)))
assert nb_changed_aug >= int(nb_iterations * 0.9)
assert nb_changed_aug_det == 0
assert len(set(averages)) > 200
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0, cval=ia.ALL)
assert isinstance(aug.cval, iap.Uniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 0
assert aug.cval.b.value == 255
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0, cval=iap.DiscreteUniform(1, 5))
assert isinstance(aug.cval, iap.DiscreteUniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 1
assert aug.cval.b.value == 5
# ------------
# mode
# ------------
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0, cval=0, mode=ia.ALL)
assert isinstance(aug.mode, iap.Choice)
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0, cval=0, mode="edge")
assert isinstance(aug.mode, iap.Deterministic)
assert aug.mode.value == "edge"
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0, cval=0, mode=["constant", "edge"])
assert isinstance(aug.mode, iap.Choice)
assert len(aug.mode.a) == 2 and "constant" in aug.mode.a and "edge" in aug.mode.a
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0, cval=0, mode=iap.Choice(["constant", "edge"]))
assert isinstance(aug.mode, iap.Choice)
assert len(aug.mode.a) == 2 and "constant" in aug.mode.a and "edge" in aug.mode.a
# ------------
# fit_output
# ------------
for backend in ["auto", "cv2", "skimage"]:
aug = iaa.Affine(scale=1.0, translate_px=100, fit_output=True, backend=backend)
assert aug.fit_output is True
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug.augment_heatmaps([heatmaps])[0]
expected = heatmaps
assert np.allclose(observed.arr_0to1, expected.arr_0to1)
# fit_output with rotation
aug = iaa.Affine(rotate=45, fit_output=True, backend=backend)
img = np.zeros((10, 10), dtype=np.uint8)
img[0:2, 0:2] = 255
img[-2:, 0:2] = 255
img[0:2, -2:] = 255
img[-2:, -2:] = 255
hm = ia.HeatmapsOnImage(img.astype(np.float32)/255, shape=(10, 10))
img_aug = aug.augment_image(img)
hm_aug = aug.augment_heatmaps([hm])[0]
_labels, nb_labels = skimage.morphology.label(img_aug > 240, return_num=True, connectivity=2)
assert nb_labels == 4
_labels, nb_labels = skimage.morphology.label(hm_aug.arr_0to1 > 240/255, return_num=True, connectivity=2)
assert nb_labels == 4
# fit_output with differently sized heatmaps
aug = iaa.Affine(rotate=45, fit_output=True, backend=backend)
img = np.zeros((80, 80), dtype=np.uint8)
img[0:5, 0:5] = 255
img[-5:, 0:5] = 255
img[0:5, -5:] = 255
img[-5:, -5:] = 255
hm = ia.HeatmapsOnImage(
ia.imresize_single_image(img, (40, 40), interpolation="cubic").astype(np.float32)/255,
shape=(80, 80)
)
img_aug = aug.augment_image(img)
hm_aug = aug.augment_heatmaps([hm])[0]
# these asserts are deactivated because the image size can change under fit_output=True
# assert hm_aug.shape == (80, 80)
# assert hm_aug.arr_0to1.shape == (40, 40, 1)
_labels, nb_labels = skimage.morphology.label(img_aug > 240, return_num=True, connectivity=2)
assert nb_labels == 4
_labels, nb_labels = skimage.morphology.label(hm_aug.arr_0to1 > 200/255, return_num=True, connectivity=2)
assert nb_labels == 4
img_aug_mask = img_aug > 255*0.1
hm_aug_mask = ia.imresize_single_image(hm_aug.arr_0to1, img_aug.shape[0:2], interpolation="cubic") > 0.1
same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])
assert (same / img_aug_mask.size) >= 0.95
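# rough sketch of what fit_output does in the 45 degree cases above: the output canvas is grown
# so that the rotated content is not cut off; for an 80x80 image rotated by 45 degrees the
# bounding box of the rotated square is about 80*sqrt(2) ~ 113px per side, which is why the
# fixed-shape asserts are deactivated and only the four bright corner blobs are counted via
# connected components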
# ------------
# exceptions for bad inputs
# ------------
# scale
got_exception = False
try:
_ = iaa.Affine(scale=False)
except Exception:
got_exception = True
assert got_exception
# translate_px
got_exception = False
try:
_ = iaa.Affine(translate_px=False)
except Exception:
got_exception = True
assert got_exception
# translate_percent
got_exception = False
try:
_ = iaa.Affine(translate_percent=False)
except Exception:
got_exception = True
assert got_exception
# rotate
got_exception = False
try:
_ = iaa.Affine(scale=1.0, translate_px=0, rotate=False, shear=0, cval=0)
except Exception:
got_exception = True
assert got_exception
# shear
got_exception = False
try:
_ = iaa.Affine(scale=1.0, translate_px=0, rotate=0, shear=False, cval=0)
except Exception:
got_exception = True
assert got_exception
# cval
got_exception = False
try:
_ = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0, cval=None)
except Exception:
got_exception = True
assert got_exception
# mode
got_exception = False
try:
_ = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0, cval=0, mode=False)
except Exception:
got_exception = True
assert got_exception
# non-existent order in case of backend=cv2
got_exception = False
try:
_ = iaa.Affine(backend="cv2", order=-1)
except Exception:
got_exception = True
assert got_exception
# bad order datatype in case of backend=cv2
got_exception = False
try:
_ = iaa.Affine(backend="cv2", order="test")
except Exception:
got_exception = True
assert got_exception
# ----------
# get_parameters
# ----------
aug = iaa.Affine(scale=1, translate_px=2, rotate=3, shear=4, order=1, cval=0, mode="constant", backend="cv2",
fit_output=True)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic) # scale
assert isinstance(params[1], iap.Deterministic) # translate
assert isinstance(params[2], iap.Deterministic) # rotate
assert isinstance(params[3], iap.Deterministic) # shear
assert params[0].value == 1 # scale
assert params[1].value == 2 # translate
assert params[2].value == 3 # rotate
assert params[3].value == 4 # shear
assert params[4].value == 1 # order
assert params[5].value == 0 # cval
assert params[6].value == "constant" # mode
assert params[7] == "cv2" # backend
assert params[8] is True # fit_output
###################
# test other dtypes
###################
# skimage
aug = iaa.Affine(translate_px={"x": 1}, order=0, mode="constant", backend="skimage")
mask = np.zeros((3, 3), dtype=bool)
mask[1, 2] = True
# bool
image = np.zeros((3, 3), dtype=bool)
image[1, 1] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype == image.dtype
assert np.all(image_aug[~mask] == 0)
assert np.all(image_aug[mask] == 1)
# uint, int
for dtype in [np.uint8, np.uint16, np.uint32, np.int8, np.int16, np.int32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if np.dtype(dtype).kind == "i":
values = [1, 5, 10, 100, int(0.1 * max_value), int(0.2 * max_value),
int(0.5 * max_value), max_value - 100, max_value]
values = values + [(-1) * value for value in values]
else:
values = [1, 5, 10, 100, int(center_value), int(0.1 * max_value), int(0.2 * max_value),
int(0.5 * max_value), max_value - 100, max_value]
for value in values:
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype == np.dtype(dtype)
assert np.all(image_aug[~mask] == 0)
assert np.all(image_aug[mask] == value)
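# why exact equality can be asserted above: with order=0 (nearest neighbour), mode="constant"
# and a pure 1px translation there is no interpolation, so the shifted pixel keeps its exact
# value even for extreme uint/int values and the fill is exactly 0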
# float
for dtype in [np.float16, np.float32, np.float64]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
def _isclose(a, b):
atol = 1e-4 if dtype == np.float16 else 1e-8
return np.isclose(a, b, atol=atol, rtol=0)
isize = np.dtype(dtype).itemsize
values = [0.01, 1.0, 10.0, 100.0, 500 ** (isize - 1), 1000 ** (isize - 1)]
values = values + [(-1) * value for value in values]
values = values + [min_value, max_value]
for value in values:
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype == np.dtype(dtype)
assert np.all(_isclose(image_aug[~mask], 0))
assert np.all(_isclose(image_aug[mask], np.float128(value)))
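# the comparison above casts the expected value to np.float128 (where available), presumably so
# that values near the float64 min/max are not distorted by the comparison itself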
#
# skimage, order in [1, 3, 4, 5] and rotate=180
#
for order in [1, 3, 4, 5]:
aug = iaa.Affine(rotate=180, order=order, mode="constant", backend="skimage")
aug_flip = iaa.Sequential([iaa.Flipud(1.0), iaa.Fliplr(1.0)])
image = np.zeros((17, 17), dtype=bool)
image[2:15, 5:13] = True
mask_inner = aug_flip.augment_image(image) == 1
mask_outer = aug_flip.augment_image(image) == 0
assert np.any(mask_inner) and np.any(mask_outer)
thresh_inner = 0.9
thresh_outer = 0.9
thresh_inner_float = 0.85 if order == 1 else 0.7
thresh_outer_float = 0.85 if order == 1 else 0.4
# bool
image = np.zeros((17, 17), dtype=bool)
image[2:15, 5:13] = True
image_aug = aug.augment_image(image)
image_exp = aug_flip.augment_image(image)
assert image_aug.dtype == image.dtype
assert (np.sum(image_aug == image_exp)/image.size) > thresh_inner
# uint, int
for dtype in [np.uint8, np.uint16, np.uint32, np.int8, np.int16, np.int32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
def _compute_matching(image_aug, image_exp, mask):
return np.sum(
np.isclose(image_aug[mask], image_exp[mask], rtol=0, atol=1.001)
) / np.sum(mask)
if np.dtype(dtype).kind == "i":
values = [1, 5, 10, 100, int(0.1 * max_value), int(0.2 * max_value),
int(0.5 * max_value), max_value - 100, max_value]
values = values + [(-1) * value for value in values]
else:
values = [1, 5, 10, 100, int(center_value), int(0.1 * max_value),
int(0.2 * max_value), int(0.5 * max_value), max_value - 100, max_value]
for value in values:
image = np.zeros((17, 17), dtype=dtype)
image[2:15, 5:13] = value
image_aug = aug.augment_image(image)
image_exp = aug_flip.augment_image(image)
assert image_aug.dtype == np.dtype(dtype)
assert _compute_matching(image_aug, image_exp, mask_inner) > thresh_inner
assert _compute_matching(image_aug, image_exp, mask_outer) > thresh_outer
# float
dts = [np.float16, np.float32, np.float64]
if order == 5:
# float64 caused too many interpolation inaccuracies for order=5, not wrong but harder to test
dts = [np.float16, np.float32]
for dtype in dts:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
def _isclose(a, b):
atol = 1e-4 if dtype == np.float16 else 1e-8
if order not in [0, 1]:
atol = 1e-2
return np.isclose(a, b, atol=atol, rtol=0)
def _compute_matching(image_aug, image_exp, mask):
return np.sum(
_isclose(image_aug[mask], image_exp[mask])
) / np.sum(mask)
isize = np.dtype(dtype).itemsize
values = [0.01, 1.0, 10.0, 100.0, 500 ** (isize - 1), 1000 ** (isize - 1)]
values = values + [(-1) * value for value in values]
if order not in [3, 4]: # results in NaNs otherwise
values = values + [min_value, max_value]
for value in values:
image = np.zeros((17, 17), dtype=dtype)
image[2:15, 5:13] = value
image_aug = aug.augment_image(image)
image_exp = aug_flip.augment_image(image)
np.set_printoptions(linewidth=250)
assert image_aug.dtype == np.dtype(dtype)
assert _compute_matching(image_aug, image_exp, mask_inner) > thresh_inner_float
assert _compute_matching(image_aug, image_exp, mask_outer) > thresh_outer_float
# cv2
aug = iaa.Affine(translate_px={"x": 1}, order=0, mode="constant", backend="cv2")
mask = np.zeros((3, 3), dtype=bool)
mask[1, 2] = True
# bool
image = np.zeros((3, 3), dtype=bool)
image[1, 1] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype == image.dtype
assert np.all(image_aug[~mask] == 0)
assert np.all(image_aug[mask] == 1)
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16, np.int32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if np.dtype(dtype).kind == "i":
values = [1, 5, 10, 100, int(0.1 * max_value), int(0.2 * max_value),
int(0.5 * max_value), max_value - 100, max_value]
values = values + [(-1) * value for value in values]
else:
values = [1, 5, 10, 100, int(center_value), int(0.1 * max_value), int(0.2 * max_value),
int(0.5 * max_value), max_value - 100, max_value]
for value in values:
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype == np.dtype(dtype)
assert np.all(image_aug[~mask] == 0)
assert np.all(image_aug[mask] == value)
# float
for dtype in [np.float16, np.float32, np.float64]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
def _isclose(a, b):
atol = 1e-4 if dtype == np.float16 else 1e-8
return np.isclose(a, b, atol=atol, rtol=0)
isize = np.dtype(dtype).itemsize
values = [0.01, 1.0, 10.0, 100.0, 500 ** (isize - 1), 1000 ** (isize - 1)]
values = values + [(-1) * value for value in values]
values = values + [min_value, max_value]
for value in values:
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype == np.dtype(dtype)
assert np.all(_isclose(image_aug[~mask], 0))
assert np.all(_isclose(image_aug[mask], np.float128(value)))
#
# cv2, order in [1, 3] and rotate=180
#
for order in [1, 3]:
aug = iaa.Affine(rotate=180, order=order, mode="constant", backend="cv2")
aug_flip = iaa.Sequential([iaa.Flipud(1.0), iaa.Fliplr(1.0)])
# bool
image = np.zeros((17, 17), dtype=bool)
image[2:15, 5:13] = True
image_aug = aug.augment_image(image)
image_exp = aug_flip.augment_image(image)
assert image_aug.dtype == image.dtype
assert (np.sum(image_aug == image_exp) / image.size) > 0.9
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if np.dtype(dtype).kind == "i":
values = [1, 5, 10, 100, int(0.1 * max_value), int(0.2 * max_value),
int(0.5 * max_value), max_value - 100, max_value]
values = values + [(-1) * value for value in values]
else:
values = [1, 5, 10, 100, int(center_value), int(0.1 * max_value),
int(0.2 * max_value), int(0.5 * max_value), max_value - 100, max_value]
for value in values:
image = np.zeros((17, 17), dtype=dtype)
image[2:15, 5:13] = value
image_aug = aug.augment_image(image)
image_exp = aug_flip.augment_image(image)
assert image_aug.dtype == np.dtype(dtype)
assert (np.sum(image_aug == image_exp) / image.size) > 0.9
# float
for dtype in [np.float16, np.float32, np.float64]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
def _isclose(a, b):
atol = 1e-4 if dtype == np.float16 else 1e-8
return np.isclose(a, b, atol=atol, rtol=0)
isize = np.dtype(dtype).itemsize
values = [0.01, 1.0, 10.0, 100.0, 500 ** (isize - 1), 1000 ** (isize - 1)]
values = values + [(-1) * value for value in values]
values = values + [min_value, max_value]
for value in values:
image = np.zeros((17, 17), dtype=dtype)
image[2:15, 5:13] = value
image_aug = aug.augment_image(image)
image_exp = aug_flip.augment_image(image)
assert image_aug.dtype == np.dtype(dtype)
assert (np.sum(_isclose(image_aug, image_exp)) / image.size) > 0.9
def test_AffineCv2():
reseed()
base_img = np.array([[0, 0, 0],
[0, 255, 0],
[0, 0, 0]], dtype=np.uint8)
base_img = base_img[:, :, np.newaxis]
images = np.array([base_img])
images_list = [base_img]
outer_pixels = ([], [])
for i in sm.xrange(base_img.shape[0]):
for j in sm.xrange(base_img.shape[1]):
if i != j:
outer_pixels[0].append(i)
outer_pixels[1].append(j)
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
# no translation/scale/rotate/shear, should not change anything
aug = iaa.AffineCv2(scale=1.0, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
# ---------------------
# scale
# ---------------------
# zoom in
aug = iaa.AffineCv2(scale=1.75, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
observed = aug_det.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
observed = aug.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
observed = aug_det.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
observed = aug.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x < 0
assert observed[0].keypoints[0].y < 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x > 2
assert observed[0].keypoints[2].y > 2
observed = aug_det.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x < 0
assert observed[0].keypoints[0].y < 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x > 2
assert observed[0].keypoints[2].y > 2
# zoom in only on x axis
aug = iaa.AffineCv2(scale={"x": 1.75, "y": 1.0}, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
observed = aug_det.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
observed = aug.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
observed = aug_det.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
observed = aug.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x < 0
assert observed[0].keypoints[0].y == 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x > 2
assert observed[0].keypoints[2].y == 2
observed = aug_det.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x < 0
assert observed[0].keypoints[0].y == 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x > 2
assert observed[0].keypoints[2].y == 2
# zoom in only on y axis
aug = iaa.AffineCv2(scale={"x": 1.0, "y": 1.75}, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
observed = aug_det.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
observed = aug.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
observed = aug_det.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
observed = aug.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x == 0
assert observed[0].keypoints[0].y < 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x == 2
assert observed[0].keypoints[2].y > 2
observed = aug_det.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x == 0
assert observed[0].keypoints[0].y < 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x == 2
assert observed[0].keypoints[2].y > 2
# zoom out
# this one uses a 4x4 image of all 255, which is zoomed out so that only the center 2x2 area remains 255
# the zoom-in test should probably be adapted to this style
# no separate tests for the x/y axes here; this should work fine if zooming in on a single axis works
aug = iaa.AffineCv2(scale=0.49, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.ones((4, 4, 1), dtype=np.uint8) * 255
images = np.array([image])
images_list = [image]
outer_pixels = ([], [])
for y in sm.xrange(4):
xs = sm.xrange(4) if y in [0, 3] else [0, 3]
for x in xs:
outer_pixels[0].append(y)
outer_pixels[1].append(x)
inner_pixels = ([1, 1, 2, 2], [1, 2, 1, 2])
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=3, y=0),
ia.Keypoint(x=0, y=3), ia.Keypoint(x=3, y=3)],
shape=image.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=0.765, y=0.765), ia.Keypoint(x=2.235, y=0.765),
ia.Keypoint(x=0.765, y=2.235), ia.Keypoint(x=2.235, y=2.235)],
shape=image.shape)]
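# rough arithmetic behind the expected keypoint coordinates above (assuming the scaling happens
# around the image center (1.5, 1.5)): x maps to 1.5 + 0.49*(x - 1.5), so x=0 -> 0.765 and
# x=3 -> 2.235, and the same holds for y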
observed = aug.augment_images(images)
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
observed = aug_det.augment_images(images)
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
observed = aug.augment_images(images_list)
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
observed = aug_det.augment_images(images_list)
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# varying scales
aug = iaa.AffineCv2(scale={"x": (0.5, 1.5), "y": (0.5, 1.5)}, translate_px=0,
rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 2, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]], dtype=np.uint8) * 100
image = image[:, :, np.newaxis]
images = np.array([image])
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.8)
assert nb_changed_aug_det == 0
aug = iaa.AffineCv2(scale=iap.Uniform(0.7, 0.9))
assert isinstance(aug.scale, iap.Uniform)
assert isinstance(aug.scale.a, iap.Deterministic)
assert isinstance(aug.scale.b, iap.Deterministic)
assert 0.7 - 1e-8 < aug.scale.a.value < 0.7 + 1e-8
assert 0.9 - 1e-8 < aug.scale.b.value < 0.9 + 1e-8
# ---------------------
# translate
# ---------------------
# move one pixel to the right
aug = iaa.AffineCv2(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, 1] = 255
image_aug[1, 2] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)], shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=2, y=1)], shape=base_img.shape)]
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# move one pixel to the right
aug = iaa.AffineCv2(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0, shear=0)
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
# move one pixel to the right
# with order=ALL
aug = iaa.AffineCv2(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0, shear=0, order=ia.ALL)
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
# move one pixel to the right
# with order=list
aug = iaa.AffineCv2(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0, shear=0, order=[0, 1, 2])
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
# move one pixel to the right
# with order=StochasticParameter
aug = iaa.AffineCv2(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0, shear=0, order=iap.Choice([0, 1, 2]))
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
# move one pixel to the bottom
aug = iaa.AffineCv2(scale=1.0, translate_px={"x": 0, "y": 1}, rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, 1] = 255
image_aug[2, 1] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)], shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=2)], shape=base_img.shape)]
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# move 33% (one pixel) to the right
aug = iaa.AffineCv2(scale=1.0, translate_percent={"x": 0.3333, "y": 0}, rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, 1] = 255
image_aug[1, 2] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)], shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=2, y=1)], shape=base_img.shape)]
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# move 33% (one pixel) to the bottom
aug = iaa.AffineCv2(scale=1.0, translate_percent={"x": 0, "y": 0.3333}, rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, 1] = 255
image_aug[2, 1] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)], shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=2)], shape=base_img.shape)]
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# 0-1px to left/right and 0-1px to top/bottom
aug = iaa.AffineCv2(scale=1.0, translate_px={"x": (-1, 1), "y": (-1, 1)}, rotate=0, shear=0)
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
centers_aug = np.copy(image).astype(np.int32) * 0
centers_aug_det = np.copy(image).astype(np.int32) * 0
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert len(observed_aug[0].nonzero()[0]) == 1
assert len(observed_aug_det[0].nonzero()[0]) == 1
centers_aug += (observed_aug[0] > 0)
centers_aug_det += (observed_aug_det[0] > 0)
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
assert (centers_aug > int(nb_iterations * (1/9 * 0.6))).all()
assert (centers_aug < int(nb_iterations * (1/9 * 1.4))).all()
aug = iaa.AffineCv2(translate_percent=iap.Uniform(0.7, 0.9))
assert isinstance(aug.translate, iap.Uniform)
assert isinstance(aug.translate.a, iap.Deterministic)
assert isinstance(aug.translate.b, iap.Deterministic)
assert 0.7 - 1e-8 < aug.translate.a.value < 0.7 + 1e-8
assert 0.9 - 1e-8 < aug.translate.b.value < 0.9 + 1e-8
aug = iaa.AffineCv2(translate_px=iap.DiscreteUniform(1, 10))
assert isinstance(aug.translate, iap.DiscreteUniform)
assert isinstance(aug.translate.a, iap.Deterministic)
assert isinstance(aug.translate.b, iap.Deterministic)
assert aug.translate.a.value == 1
assert aug.translate.b.value == 10
# ---------------------
# translate heatmaps
# ---------------------
heatmaps = ia.HeatmapsOnImage(
np.float32([
[0.0, 0.5, 0.75],
[0.0, 0.5, 0.75],
[0.75, 0.75, 0.75],
]),
shape=(3, 3, 3)
)
arr_expected_1px_right = np.float32([
[0.0, 0.0, 0.5],
[0.0, 0.0, 0.5],
[0.0, 0.75, 0.75],
])
aug = iaa.AffineCv2(translate_px={"x": 1})
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == heatmaps.shape
assert heatmaps.min_value - 1e-6 < observed.min_value < heatmaps.min_value + 1e-6
assert heatmaps.max_value - 1e-6 < observed.max_value < heatmaps.max_value + 1e-6
assert np.array_equal(observed.get_arr(), arr_expected_1px_right)
# should still use mode=constant cval=0 even when other settings chosen
aug = iaa.AffineCv2(translate_px={"x": 1}, cval=255)
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == heatmaps.shape
assert heatmaps.min_value - 1e-6 < observed.min_value < heatmaps.min_value + 1e-6
assert heatmaps.max_value - 1e-6 < observed.max_value < heatmaps.max_value + 1e-6
assert np.array_equal(observed.get_arr(), arr_expected_1px_right)
aug = iaa.AffineCv2(translate_px={"x": 1}, mode="replicate", cval=255)
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == heatmaps.shape
assert heatmaps.min_value - 1e-6 < observed.min_value < heatmaps.min_value + 1e-6
assert heatmaps.max_value - 1e-6 < observed.max_value < heatmaps.max_value + 1e-6
assert np.array_equal(observed.get_arr(), arr_expected_1px_right)
# ---------------------
# rotate
# ---------------------
# rotate by 90 degrees
aug = iaa.AffineCv2(scale=1.0, translate_px=0, rotate=90, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, :] = 255
image_aug[0, 1] = 255
image_aug[1, 1] = 255
image_aug[2, 1] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=1), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=1)], shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=1, y=2)], shape=base_img.shape)]
observed = aug.augment_images(images)
observed[observed >= 100] = 255
observed[observed < 100] = 0
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
observed[observed >= 100] = 255
observed[observed < 100] = 0
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
observed[0][observed[0] >= 100] = 255
observed[0][observed[0] < 100] = 0
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
observed[0][observed[0] >= 100] = 255
observed[0][observed[0] < 100] = 0
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# rotate by StochasticParameter
aug = iaa.AffineCv2(scale=1.0, translate_px=0, rotate=iap.Uniform(10, 20), shear=0)
assert isinstance(aug.rotate, iap.Uniform)
assert isinstance(aug.rotate.a, iap.Deterministic)
assert aug.rotate.a.value == 10
assert isinstance(aug.rotate.b, iap.Deterministic)
assert aug.rotate.b.value == 20
# random rotation 0-364 degrees
aug = iaa.AffineCv2(scale=1.0, translate_px=0, rotate=(0, 364), shear=0)
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
pixels_sums_aug = np.copy(image).astype(np.int32) * 0
pixels_sums_aug_det = np.copy(image).astype(np.int32) * 0
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
pixels_sums_aug += (observed_aug[0] > 100)
pixels_sums_aug_det += (observed_aug_det[0] > 100)
assert nb_changed_aug >= int(nb_iterations * 0.9)
assert nb_changed_aug_det == 0
# center pixel, should always be white when rotating line around center
assert pixels_sums_aug[1, 1] > (nb_iterations * 0.98)
assert pixels_sums_aug[1, 1] < (nb_iterations * 1.02)
# outer pixels, should sometimes be white
# the thresholds here had to be set quite tolerantly; the middle pixels at the top/left/bottom/right edges
# get more activation than expected
outer_pixels = ([0, 0, 0, 1, 1, 2, 2, 2], [0, 1, 2, 0, 2, 0, 1, 2])
assert (pixels_sums_aug[outer_pixels] > int(nb_iterations * (2/8 * 0.4))).all()
assert (pixels_sums_aug[outer_pixels] < int(nb_iterations * (2/8 * 2.0))).all()
# ---------------------
# shear
# ---------------------
# TODO
# shear by StochasticParameter
aug = iaa.AffineCv2(scale=1.0, translate_px=0, rotate=0, shear=iap.Uniform(10, 20))
assert isinstance(aug.shear, iap.Uniform)
assert isinstance(aug.shear.a, iap.Deterministic)
assert aug.shear.a.value == 10
assert isinstance(aug.shear.b, iap.Deterministic)
assert aug.shear.b.value == 20
# ---------------------
# cval
# ---------------------
aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0, cval=128)
aug_det = aug.to_deterministic()
image = np.ones((3, 3, 1), dtype=np.uint8) * 255
image_aug = np.copy(image)
images = np.array([image])
images_list = [image]
observed = aug.augment_images(images)
assert (observed[0] > 128 - 30).all()
assert (observed[0] < 128 + 30).all()
observed = aug_det.augment_images(images)
assert (observed[0] > 128 - 30).all()
assert (observed[0] < 128 + 30).all()
observed = aug.augment_images(images_list)
assert (observed[0] > 128 - 30).all()
assert (observed[0] < 128 + 30).all()
observed = aug_det.augment_images(images_list)
assert (observed[0] > 128 - 30).all()
assert (observed[0] < 128 + 30).all()
# random cvals
aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0, cval=(0, 255))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
averages = []
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
averages.append(int(np.average(observed_aug)))
assert nb_changed_aug >= int(nb_iterations * 0.9)
assert nb_changed_aug_det == 0
assert len(set(averages)) > 200
aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0, cval=ia.ALL)
assert isinstance(aug.cval, iap.DiscreteUniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 0
assert aug.cval.b.value == 255
aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0, cval=iap.DiscreteUniform(1, 5))
assert isinstance(aug.cval, iap.DiscreteUniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 1
assert aug.cval.b.value == 5
# ------------
# mode
# ------------
aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0, cval=0, mode=ia.ALL)
assert isinstance(aug.mode, iap.Choice)
aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0, cval=0, mode="replicate")
assert isinstance(aug.mode, iap.Deterministic)
assert aug.mode.value == "replicate"
aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0, cval=0, mode=["replicate", "reflect"])
assert isinstance(aug.mode, iap.Choice)
assert len(aug.mode.a) == 2 and "replicate" in aug.mode.a and "reflect" in aug.mode.a
aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0, cval=0,
mode=iap.Choice(["replicate", "reflect"]))
assert isinstance(aug.mode, iap.Choice)
assert len(aug.mode.a) == 2 and "replicate" in aug.mode.a and "reflect" in aug.mode.a
# ------------
# exceptions for bad inputs
# ------------
# scale
got_exception = False
try:
_ = iaa.AffineCv2(scale=False)
except Exception:
got_exception = True
assert got_exception
# translate_px
got_exception = False
try:
_ = iaa.AffineCv2(translate_px=False)
except Exception:
got_exception = True
assert got_exception
# translate_percent
got_exception = False
try:
_ = iaa.AffineCv2(translate_percent=False)
except Exception:
got_exception = True
assert got_exception
# rotate
got_exception = False
try:
_ = iaa.AffineCv2(scale=1.0, translate_px=0, rotate=False, shear=0, cval=0)
except Exception:
got_exception = True
assert got_exception
# shear
got_exception = False
try:
_ = iaa.AffineCv2(scale=1.0, translate_px=0, rotate=0, shear=False, cval=0)
except Exception:
got_exception = True
assert got_exception
# cval
got_exception = False
try:
_ = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0, cval=None)
except Exception:
got_exception = True
assert got_exception
# mode
got_exception = False
try:
_ = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0, cval=0, mode=False)
except Exception:
got_exception = True
assert got_exception
# non-existent order
got_exception = False
try:
_ = iaa.AffineCv2(order=-1)
except Exception:
got_exception = True
assert got_exception
# bad order datatype
got_exception = False
try:
_ = iaa.AffineCv2(order="test")
except Exception:
got_exception = True
assert got_exception
# ----------
# get_parameters
# ----------
aug = iaa.AffineCv2(scale=1, translate_px=2, rotate=3, shear=4, order=1, cval=0, mode="constant")
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic) # scale
assert isinstance(params[1], iap.Deterministic) # translate
assert isinstance(params[2], iap.Deterministic) # rotate
assert isinstance(params[3], iap.Deterministic) # shear
assert params[0].value == 1 # scale
assert params[1].value == 2 # translate
assert params[2].value == 3 # rotate
assert params[3].value == 4 # shear
assert params[4].value == 1 # order
assert params[5].value == 0 # cval
assert params[6].value == "constant" # mode
def test_PiecewiseAffine():
reseed()
img = np.zeros((60, 80), dtype=np.uint8)
img[:, 9:11+1] = 255
img[:, 69:71+1] = 255
mask = img > 0
heatmaps = ia.HeatmapsOnImage((img / 255.0).astype(np.float32), shape=(60, 80, 3))
heatmaps_arr = heatmaps.get_arr()
# -----
# scale
# -----
# basic test
aug = iaa.PiecewiseAffine(scale=0.01, nb_rows=12, nb_cols=4)
observed = aug.augment_image(img)
assert 100.0 < np.average(observed[mask]) < np.average(img[mask])
assert 75.0 > np.average(observed[~mask]) > np.average(img[~mask])
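# intuition for the two asserts above: the local warps move some white bar pixels off the bar
# mask (so the average inside the mask drops below 255 but stays well above 100) and onto
# previously black pixels (so the average outside the mask rises above 0 but stays below 75
# for this small scale)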
# basic test, heatmaps
aug = iaa.PiecewiseAffine(scale=0.01, nb_rows=12, nb_cols=4)
observed = aug.augment_heatmaps([heatmaps])[0]
observed_arr = observed.get_arr()
assert observed.shape == heatmaps.shape
assert heatmaps.min_value - 1e-6 < observed.min_value < heatmaps.min_value + 1e-6
assert heatmaps.max_value - 1e-6 < observed.max_value < heatmaps.max_value + 1e-6
assert 100.0/255.0 < np.average(observed_arr[mask]) < np.average(heatmaps_arr[mask])
assert 75.0/255.0 > np.average(observed_arr[~mask]) > np.average(heatmaps_arr[~mask])
# scale 0
aug = iaa.PiecewiseAffine(scale=0, nb_rows=12, nb_cols=4)
observed = aug.augment_image(img)
assert np.array_equal(observed, img)
# scale 0, heatmaps
aug = iaa.PiecewiseAffine(scale=0, nb_rows=12, nb_cols=4)
observed = aug.augment_heatmaps([heatmaps])[0]
observed_arr = observed.get_arr()
assert observed.shape == heatmaps.shape
assert heatmaps.min_value - 1e-6 < observed.min_value < heatmaps.min_value + 1e-6
assert heatmaps.max_value - 1e-6 < observed.max_value < heatmaps.max_value + 1e-6
assert np.array_equal(observed_arr, heatmaps_arr)
# scale 0, keypoints
aug = iaa.PiecewiseAffine(scale=0, nb_rows=12, nb_cols=4)
kpsoi = ia.KeypointsOnImage([ia.Keypoint(x=5, y=3), ia.Keypoint(x=3, y=8)], shape=(14, 14, 3))
kpsoi_aug = aug.augment_keypoints([kpsoi])[0]
assert kpsoi_aug.shape == (14, 14, 3)
assert np.allclose(kpsoi_aug.keypoints[0].x, 5)
assert np.allclose(kpsoi_aug.keypoints[0].y, 3)
assert np.allclose(kpsoi_aug.keypoints[1].x, 3)
assert np.allclose(kpsoi_aug.keypoints[1].y, 8)
# stronger scale should lead to stronger changes
aug1 = iaa.PiecewiseAffine(scale=0.01, nb_rows=12, nb_cols=4)
aug2 = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)
observed1 = aug1.augment_image(img)
observed2 = aug2.augment_image(img)
assert np.average(observed1[~mask]) < np.average(observed2[~mask])
# stronger scale should lead to stronger changes, heatmaps
aug1 = iaa.PiecewiseAffine(scale=0.01, nb_rows=12, nb_cols=4)
aug2 = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)
observed1 = aug1.augment_heatmaps([heatmaps])[0]
observed1_arr = observed1.get_arr()
observed2 = aug2.augment_heatmaps([heatmaps])[0]
observed2_arr = observed2.get_arr()
assert observed1.shape == heatmaps.shape
assert heatmaps.min_value - 1e-6 < observed1.min_value < heatmaps.min_value + 1e-6
assert heatmaps.max_value - 1e-6 < observed1.max_value < heatmaps.max_value + 1e-6
assert observed2.shape == heatmaps.shape
assert heatmaps.min_value - 1e-6 < observed2.min_value < heatmaps.min_value + 1e-6
assert heatmaps.max_value - 1e-6 < observed2.max_value < heatmaps.max_value + 1e-6
assert np.average(observed1_arr[~mask]) < np.average(observed2_arr[~mask])
# strong scale, measure alignment between images and heatmaps
aug = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)
aug_det = aug.to_deterministic()
img_aug = aug_det.augment_image(img)
hm_aug = aug_det.augment_heatmaps([heatmaps])[0]
assert hm_aug.shape == (60, 80, 3)
assert heatmaps.min_value - 1e-6 < hm_aug.min_value < heatmaps.min_value + 1e-6
assert heatmaps.max_value - 1e-6 < hm_aug.max_value < heatmaps.max_value + 1e-6
img_aug_mask = img_aug > 255*0.1
hm_aug_mask = hm_aug.arr_0to1 > 0.1
same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])
assert (same / img_aug_mask.size) >= 0.98
# strong scale, measure alignment between images and heatmaps
# heatmaps here smaller than image
aug_det = aug.to_deterministic()
heatmaps_small = ia.HeatmapsOnImage(
(ia.imresize_single_image(img, (30, 40+10), interpolation="cubic") / 255.0).astype(np.float32),
shape=(60, 80, 3)
)
img_aug = aug_det.augment_image(img)
hm_aug = aug_det.augment_heatmaps([heatmaps_small])[0]
assert hm_aug.shape == (60, 80, 3)
assert hm_aug.arr_0to1.shape == (30, 40+10, 1)
img_aug_mask = img_aug > 255*0.1
hm_aug_mask = ia.imresize_single_image(hm_aug.arr_0to1, (60, 80), interpolation="cubic") > 0.1
same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])
assert (same / img_aug_mask.size) >= 0.9 # seems to be 0.948 actually
# strong scale, measure alignment between images and keypoints
aug = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)
aug_det = aug.to_deterministic()
kpsoi = ia.KeypointsOnImage([ia.Keypoint(x=5, y=15), ia.Keypoint(x=17, y=12)], shape=(24, 30, 3))
img_kps = np.zeros((24, 30, 3), dtype=np.uint8)
img_kps = kpsoi.draw_on_image(img_kps, color=[255, 255, 255])
img_kps_aug = aug_det.augment_image(img_kps)
kpsoi_aug = aug_det.augment_keypoints([kpsoi])[0]
assert kpsoi_aug.shape == (24, 30, 3)
bb1 = ia.BoundingBox(x1=kpsoi_aug.keypoints[0].x-1, y1=kpsoi_aug.keypoints[0].y-1,
x2=kpsoi_aug.keypoints[0].x+1, y2=kpsoi_aug.keypoints[0].y+1)
bb2 = ia.BoundingBox(x1=kpsoi_aug.keypoints[1].x-1, y1=kpsoi_aug.keypoints[1].y-1,
x2=kpsoi_aug.keypoints[1].x+1, y2=kpsoi_aug.keypoints[1].y+1)
patch1 = bb1.extract_from_image(img_kps_aug)
patch2 = bb2.extract_from_image(img_kps_aug)
assert np.max(patch1) > 150
assert np.max(patch2) > 150
assert np.average(img_kps_aug) < 40
# scale as list
aug1 = iaa.PiecewiseAffine(scale=0.01, nb_rows=12, nb_cols=4)
aug2 = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)
aug = iaa.PiecewiseAffine(scale=[0.01, 0.10], nb_rows=12, nb_cols=4)
assert isinstance(aug.scale, iap.Choice)
assert 0.01 - 1e-8 < aug.scale.a[0] < 0.01 + 1e-8
assert 0.10 - 1e-8 < aug.scale.a[1] < 0.10 + 1e-8
avg1 = np.average([np.average(aug1.augment_image(img) * (~mask).astype(np.float32)) for _ in sm.xrange(3)])
avg2 = np.average([np.average(aug2.augment_image(img) * (~mask).astype(np.float32)) for _ in sm.xrange(3)])
seen = [0, 0]
for _ in sm.xrange(15):
observed = aug.augment_image(img)
avg = np.average(observed * (~mask).astype(np.float32))
diff1 = abs(avg - avg1)
diff2 = abs(avg - avg2)
if diff1 < diff2:
seen[0] += 1
else:
seen[1] += 1
assert seen[0] > 0
assert seen[1] > 0
# scale as tuple
aug = iaa.PiecewiseAffine(scale=(0.01, 0.10), nb_rows=12, nb_cols=4)
assert isinstance(aug.jitter.scale, iap.Uniform)
assert isinstance(aug.jitter.scale.a, iap.Deterministic)
assert isinstance(aug.jitter.scale.b, iap.Deterministic)
assert 0.01 - 1e-8 < aug.jitter.scale.a.value < 0.01 + 1e-8
assert 0.10 - 1e-8 < aug.jitter.scale.b.value < 0.10 + 1e-8
# scale as StochasticParameter
aug = iaa.PiecewiseAffine(scale=iap.Uniform(0.01, 0.10), nb_rows=12, nb_cols=4)
assert isinstance(aug.jitter.scale, iap.Uniform)
assert isinstance(aug.jitter.scale.a, iap.Deterministic)
assert isinstance(aug.jitter.scale.b, iap.Deterministic)
assert 0.01 - 1e-8 < aug.jitter.scale.a.value < 0.01 + 1e-8
assert 0.10 - 1e-8 < aug.jitter.scale.b.value < 0.10 + 1e-8
# bad datatype for scale
got_exception = False
try:
_ = iaa.PiecewiseAffine(scale=False, nb_rows=12, nb_cols=4)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# -----
# rows and cols
# -----
# verify effects of rows/cols
aug1 = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=4)
aug2 = iaa.PiecewiseAffine(scale=0.05, nb_rows=30, nb_cols=4)
std1 = []
std2 = []
for _ in sm.xrange(3):
observed1 = aug1.augment_image(img)
observed2 = aug2.augment_image(img)
grad_vert1 = observed1[1:, :].astype(np.float32) - observed1[:-1, :].astype(np.float32)
grad_vert2 = observed2[1:, :].astype(np.float32) - observed2[:-1, :].astype(np.float32)
grad_vert1 = grad_vert1 * (~mask[1:, :]).astype(np.float32)
grad_vert2 = grad_vert2 * (~mask[1:, :]).astype(np.float32)
std1.append(np.std(grad_vert1))
std2.append(np.std(grad_vert2))
std1 = np.average(std1)
std2 = np.average(std2)
assert std1 < std2
# -----
# rows
# -----
# rows as list
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=[4, 20], nb_cols=4)
assert isinstance(aug.nb_rows, iap.Choice)
assert aug.nb_rows.a[0] == 4
assert aug.nb_rows.a[1] == 20
seen = [0, 0]
for _ in sm.xrange(20):
observed = aug.augment_image(img)
grad_vert = observed[1:, :].astype(np.float32) - observed[:-1, :].astype(np.float32)
grad_vert = grad_vert * (~mask[1:, :]).astype(np.float32)
std = np.std(grad_vert)
diff1 = abs(std - std1)
diff2 = abs(std - std2)
if diff1 < diff2:
seen[0] += 1
else:
seen[1] += 1
assert seen[0] > 0
assert seen[1] > 0
# rows as tuple
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=(4, 20), nb_cols=4)
assert isinstance(aug.nb_rows, iap.DiscreteUniform)
assert isinstance(aug.nb_rows.a, iap.Deterministic)
assert isinstance(aug.nb_rows.b, iap.Deterministic)
assert aug.nb_rows.a.value == 4
assert aug.nb_rows.b.value == 20
# rows as StochasticParameter
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=iap.DiscreteUniform(4, 20), nb_cols=4)
assert isinstance(aug.nb_rows, iap.DiscreteUniform)
assert isinstance(aug.nb_rows.a, iap.Deterministic)
assert isinstance(aug.nb_rows.b, iap.Deterministic)
assert aug.nb_rows.a.value == 4
assert aug.nb_rows.b.value == 20
# bad datatype for rows
got_exception = False
try:
_ = iaa.PiecewiseAffine(scale=0.05, nb_rows=False, nb_cols=4)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# -----
# nb_cols
# -----
# cols as list
img_cols = img.T
mask_cols = mask.T
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=[4, 20])
assert isinstance(aug.nb_cols, iap.Choice)
assert aug.nb_cols.a[0] == 4
assert aug.nb_cols.a[1] == 20
aug1 = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=4)
aug2 = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=20)
std1 = []
std2 = []
for _ in sm.xrange(3):
observed1 = aug1.augment_image(img_cols)
observed2 = aug2.augment_image(img_cols)
grad_hori1 = observed1[:, 1:].astype(np.float32) - observed1[:, :-1].astype(np.float32)
grad_hori2 = observed2[:, 1:].astype(np.float32) - observed2[:, :-1].astype(np.float32)
grad_hori1 = grad_hori1 * (~mask_cols[:, 1:]).astype(np.float32)
grad_hori2 = grad_hori2 * (~mask_cols[:, 1:]).astype(np.float32)
std1.append(np.std(grad_hori1))
std2.append(np.std(grad_hori2))
std1 = np.average(std1)
std2 = np.average(std2)
seen = [0, 0]
for _ in sm.xrange(15):
observed = aug.augment_image(img_cols)
grad_hori = observed[:, 1:].astype(np.float32) - observed[:, :-1].astype(np.float32)
grad_hori = grad_hori * (~mask_cols[:, 1:]).astype(np.float32)
std = np.std(grad_hori)
diff1 = abs(std - std1)
diff2 = abs(std - std2)
if diff1 < diff2:
seen[0] += 1
else:
seen[1] += 1
assert seen[0] > 0
assert seen[1] > 0
# cols as tuple
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=(4, 20))
assert isinstance(aug.nb_cols, iap.DiscreteUniform)
assert isinstance(aug.nb_cols.a, iap.Deterministic)
assert isinstance(aug.nb_cols.b, iap.Deterministic)
assert aug.nb_cols.a.value == 4
assert aug.nb_cols.b.value == 20
# cols as StochasticParameter
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=iap.DiscreteUniform(4, 20))
assert isinstance(aug.nb_cols, iap.DiscreteUniform)
assert isinstance(aug.nb_cols.a, iap.Deterministic)
assert isinstance(aug.nb_cols.b, iap.Deterministic)
assert aug.nb_cols.a.value == 4
assert aug.nb_cols.b.value == 20
# bad datatype for cols
got_exception = False
try:
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# -----
# order
# -----
# single int for order
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8, order=0)
assert isinstance(aug.order, iap.Deterministic)
assert aug.order.value == 0
# list for order
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8, order=[0, 1, 3])
assert isinstance(aug.order, iap.Choice)
assert all([v in aug.order.a for v in [0, 1, 3]])
# StochasticParameter for order
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8, order=iap.Choice([0, 1, 3]))
assert isinstance(aug.order, iap.Choice)
assert all([v in aug.order.a for v in [0, 1, 3]])
# ALL for order
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8, order=ia.ALL)
assert isinstance(aug.order, iap.Choice)
assert all([v in aug.order.a for v in [0, 1, 3, 4, 5]])
# bad datatype for order
got_exception = False
try:
_ = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8, order=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# -----
# cval
# -----
# cval as deterministic
img = np.zeros((50, 50, 3), dtype=np.uint8) + 255
aug = iaa.PiecewiseAffine(scale=0.7, nb_rows=10, nb_cols=10, mode="constant", cval=0)
observed = aug.augment_image(img)
assert np.sum([observed[:, :] == [0, 0, 0]]) > 0
# cval as deterministic, heatmaps should always use cval=0
heatmaps = ia.HeatmapsOnImage(np.zeros((50, 50, 1), dtype=np.float32), shape=(50, 50, 3))
aug = iaa.PiecewiseAffine(scale=0.7, nb_rows=10, nb_cols=10, mode="constant", cval=255)
observed = aug.augment_heatmaps([heatmaps])[0]
assert np.sum([observed.get_arr()[:, :] >= 0.01]) == 0
# cval as list
img = np.zeros((20, 20), dtype=np.uint8) + 255
aug = iaa.PiecewiseAffine(scale=0.7, nb_rows=5, nb_cols=5, mode="constant", cval=[0, 10])
assert isinstance(aug.cval, iap.Choice)
assert aug.cval.a[0] == 0
assert aug.cval.a[1] == 10
seen = [0, 0, 0]
for _ in sm.xrange(30):
observed = aug.augment_image(img)
nb_0 = np.sum([observed[:, :] == 0])
nb_10 = np.sum([observed[:, :] == 10])
if nb_0 > 0:
seen[0] += 1
elif nb_10 > 0:
seen[1] += 1
else:
seen[2] += 1
assert seen[0] > 5
assert seen[1] > 5
assert seen[2] <= 4
# cval as tuple
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8, mode="constant", cval=(0, 10))
assert isinstance(aug.cval, iap.Uniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 0
assert aug.cval.b.value == 10
# cval as StochasticParameter
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8, mode="constant", cval=iap.DiscreteUniform(0, 10))
assert isinstance(aug.cval, iap.DiscreteUniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 0
assert aug.cval.b.value == 10
# ALL as cval
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8, mode="constant", cval=ia.ALL)
assert isinstance(aug.cval, iap.Uniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 0
assert aug.cval.b.value == 255
# bad datatype for cval
got_exception = False
try:
_ = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8, cval=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# -----
# mode
# -----
# single string for mode
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8, mode="nearest")
assert isinstance(aug.mode, iap.Deterministic)
assert aug.mode.value == "nearest"
# list for mode
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8, mode=["nearest", "edge", "symmetric"])
assert isinstance(aug.mode, iap.Choice)
assert all([v in aug.mode.a for v in ["nearest", "edge", "symmetric"]])
# StochasticParameter for mode
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8, mode=iap.Choice(["nearest", "edge", "symmetric"]))
assert isinstance(aug.mode, iap.Choice)
assert all([v in aug.mode.a for v in ["nearest", "edge", "symmetric"]])
# ALL for mode
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8, mode=ia.ALL)
assert isinstance(aug.mode, iap.Choice)
assert all([v in aug.mode.a for v in ["constant", "edge", "symmetric", "reflect", "wrap"]])
# bad datatype for mode
got_exception = False
try:
_ = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8, mode=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# ---------
# keypoints
# ---------
# basic test
img = np.zeros((100, 80), dtype=np.uint8)
img[:, 9:11+1] = 255
img[:, 69:71+1] = 255
mask = img > 0
kps = [ia.Keypoint(x=10, y=20), ia.Keypoint(x=10, y=40),
ia.Keypoint(x=70, y=20), ia.Keypoint(x=70, y=40)]
kpsoi = ia.KeypointsOnImage(kps, shape=img.shape)
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=10, nb_cols=10)
aug_det = aug.to_deterministic()
observed_img = aug_det.augment_image(img)
observed_kpsoi = aug_det.augment_keypoints([kpsoi])
assert not keypoints_equal([kpsoi], observed_kpsoi)
for kp in observed_kpsoi[0].keypoints:
assert observed_img[int(kp.y), int(kp.x)] > 0
# scale 0
aug = iaa.PiecewiseAffine(scale=0, nb_rows=10, nb_cols=10)
observed = aug.augment_keypoints([kpsoi])
assert keypoints_equal([kpsoi], observed)
# keypoints outside of image
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=10, nb_cols=10)
kps = [ia.Keypoint(x=-10, y=-20)]
kpsoi = ia.KeypointsOnImage(kps, shape=img.shape)
observed = aug.augment_keypoints([kpsoi])
assert keypoints_equal([kpsoi], observed)
# ---------
# get_parameters
# ---------
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=10, order=1, cval=2, mode="constant", absolute_scale=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert isinstance(params[2], iap.Deterministic)
assert isinstance(params[3], iap.Deterministic)
assert isinstance(params[4], iap.Deterministic)
assert isinstance(params[5], iap.Deterministic)
assert params[6] is False
assert 0.1 - 1e-8 < params[0].value < 0.1 + 1e-8
assert params[1].value == 8
assert params[2].value == 10
assert params[3].value == 1
assert params[4].value == 2
assert params[5].value == "constant"
###################
# test other dtypes
###################
aug = iaa.PiecewiseAffine(scale=0.2, nb_rows=8, nb_cols=4, order=0, mode="constant")
mask = np.zeros((21, 21), dtype=bool)
mask[:, 7:13] = True
# bool
image = np.zeros((21, 21), dtype=bool)
image[mask] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype == image.dtype
assert not np.all(image_aug == 1)
assert np.any(image_aug[~mask] == 1)
# uint, int
for dtype in [np.uint8, np.uint16, np.uint32, np.int8, np.int16, np.int32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if np.dtype(dtype).kind == "i":
values = [1, 5, 10, 100, int(0.1 * max_value), int(0.2 * max_value),
int(0.5 * max_value), max_value-100, max_value]
values = values + [(-1)*value for value in values]
else:
values = [1, 5, 10, 100, int(center_value), int(0.1 * max_value), int(0.2 * max_value),
int(0.5 * max_value), max_value-100, max_value]
for value in values:
image = np.zeros((21, 21), dtype=dtype)
image[:, 7:13] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype == np.dtype(dtype)
assert not np.all(image_aug == value)
assert np.any(image_aug[~mask] == value)
# float
for dtype in [np.float16, np.float32, np.float64]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
def _isclose(a, b):
atol = 1e-4 if dtype == np.float16 else 1e-8
return np.isclose(a, b, atol=atol, rtol=0)
isize = np.dtype(dtype).itemsize
values = [0.01, 1.0, 10.0, 100.0, 500 ** (isize - 1), 1000 ** (isize - 1)]
values = values + [(-1) * value for value in values]
values = values + [min_value, max_value]
for value in values:
image = np.zeros((21, 21), dtype=dtype)
image[:, 7:13] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype == np.dtype(dtype)
# TODO switch all other tests from float(...) to np.float128(...) pattern, seems
# to be more accurate for 128bit floats
assert not np.all(_isclose(image_aug, np.float128(value)))
assert np.any(_isclose(image_aug[~mask], np.float128(value)))
def test_PerspectiveTransform():
reseed()
img = np.zeros((30, 30), dtype=np.uint8)
img[10:20, 10:20] = 255
heatmaps = ia.HeatmapsOnImage((img / 255.0).astype(np.float32), shape=img.shape)
# without keep_size
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)
aug.jitter = iap.Deterministic(0.2)
observed = aug.augment_image(img)
y1 = int(30*0.2)
y2 = int(30*0.8)
x1 = int(30*0.2)
x2 = int(30*0.8)
expected = img[y1:y2, x1:x2]
assert all([abs(s1-s2) <= 1 for s1, s2 in zip(observed.shape, expected.shape)])
if observed.shape != expected.shape:
observed = ia.imresize_single_image(observed, expected.shape[0:2], interpolation="cubic")
# differences seem to mainly appear around the border of the inner rectangle, possibly
# due to interpolation
assert np.average(np.abs(observed.astype(np.int32) - expected.astype(np.int32))) < 30.0
hm = ia.HeatmapsOnImage(img.astype(np.float32)/255.0, shape=(30, 30))
hm_aug = aug.augment_heatmaps([hm])[0]
expected = (y2 - y1, x2 - x1)
assert all([abs(s1-s2) <= 1 for s1, s2 in zip(hm_aug.shape, expected)])
assert all([abs(s1-s2) <= 1 for s1, s2 in zip(hm_aug.arr_0to1.shape, expected + (1,))])
img_aug_mask = observed > 255*0.1
hm_aug_mask = hm_aug.arr_0to1 > 0.1
same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])
assert (same / img_aug_mask.size) >= 0.99
# without keep_size, different heatmap size
img_small = ia.imresize_single_image(img, (20, 25), interpolation="cubic")
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)
aug.jitter = iap.Deterministic(0.2)
img_aug = aug.augment_image(img)
y1 = int(30*0.2)
y2 = int(30*0.8)
x1 = int(30*0.2)
x2 = int(30*0.8)
x1_small = int(25*0.2)
x2_small = int(25*0.8)
y1_small = int(20*0.2)
y2_small = int(20*0.8)
hm = ia.HeatmapsOnImage(img_small.astype(np.float32)/255.0, shape=(30, 30))
hm_aug = aug.augment_heatmaps([hm])[0]
expected = (y2 - y1, x2 - x1)
expected_small = (y2_small - y1_small, x2_small - x1_small, 1)
assert all([abs(s1-s2) <= 1 for s1, s2 in zip(hm_aug.shape, expected)])
assert all([abs(s1-s2) <= 1 for s1, s2 in zip(hm_aug.arr_0to1.shape, expected_small)])
img_aug_mask = img_aug > 255*0.1
hm_aug_mask = ia.imresize_single_image(hm_aug.arr_0to1, img_aug.shape[0:2], interpolation="cubic") > 0.1
same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])
assert (same / img_aug_mask.size) >= 0.96
# with keep_size
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)
aug.jitter = iap.Deterministic(0.2)
observed = aug.augment_image(img)
expected = img[int(30*0.2):int(30*0.8), int(30*0.2):int(30*0.8)]
expected = ia.imresize_single_image(expected, img.shape[0:2], interpolation="cubic")
assert observed.shape == img.shape
# differences seem to mainly appear around the border of the inner rectangle, possibly
# due to interpolation
assert np.average(np.abs(observed.astype(np.int32) - expected.astype(np.int32))) < 30.0
# with keep_size, heatmaps
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)
aug.jitter = iap.Deterministic(0.2)
observed = aug.augment_heatmaps([heatmaps])[0]
expected = heatmaps.get_arr()[int(30*0.2):int(30*0.8), int(30*0.2):int(30*0.8)]
expected = ia.imresize_single_image((expected*255).astype(np.uint8), img.shape[0:2], interpolation="cubic")
expected = (expected / 255.0).astype(np.float32)
assert observed.shape == heatmaps.shape
assert heatmaps.min_value - 1e-6 < observed.min_value < heatmaps.min_value + 1e-6
assert heatmaps.max_value - 1e-6 < observed.max_value < heatmaps.max_value + 1e-6
# differences seem to mainly appear around the border of the inner rectangle, possibly
# due to interpolation
assert np.average(np.abs(observed.get_arr() - expected)) < 30.0
# with keep_size, RGB images
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)
aug.jitter = iap.Deterministic(0.2)
imgs = np.tile(img[np.newaxis, :, :, np.newaxis], (2, 1, 1, 3))
observed = aug.augment_images(imgs)
for img_idx in sm.xrange(2):
for c in sm.xrange(3):
observed_i = observed[img_idx, :, :, c]
expected = imgs[img_idx, int(30*0.2):int(30*0.8), int(30*0.2):int(30*0.8), c]
expected = ia.imresize_single_image(expected, imgs.shape[1:3], interpolation="cubic")
assert observed_i.shape == imgs.shape[1:3]
# differences seem to mainly appear around the border of the inner rectangle, possibly
# due to interpolation
assert np.average(np.abs(observed_i.astype(np.int32) - expected.astype(np.int32))) < 30.0
# tuple for scale
aug = iaa.PerspectiveTransform(scale=(0.1, 0.2))
assert isinstance(aug.jitter.scale, iap.Uniform)
assert isinstance(aug.jitter.scale.a, iap.Deterministic)
assert isinstance(aug.jitter.scale.b, iap.Deterministic)
assert 0.1 - 1e-8 < aug.jitter.scale.a.value < 0.1 + 1e-8
assert 0.2 - 1e-8 < aug.jitter.scale.b.value < 0.2 + 1e-8
# list for scale
aug = iaa.PerspectiveTransform(scale=[0.1, 0.2, 0.3])
assert isinstance(aug.jitter.scale, iap.Choice)
assert len(aug.jitter.scale.a) == 3
assert 0.1 - 1e-8 < aug.jitter.scale.a[0] < 0.1 + 1e-8
assert 0.2 - 1e-8 < aug.jitter.scale.a[1] < 0.2 + 1e-8
assert 0.3 - 1e-8 < aug.jitter.scale.a[2] < 0.3 + 1e-8
# StochasticParameter for scale
aug = iaa.PerspectiveTransform(scale=iap.Choice([0.1, 0.2, 0.3]))
assert isinstance(aug.jitter.scale, iap.Choice)
assert len(aug.jitter.scale.a) == 3
assert 0.1 - 1e-8 < aug.jitter.scale.a[0] < 0.1 + 1e-8
assert 0.2 - 1e-8 < aug.jitter.scale.a[1] < 0.2 + 1e-8
assert 0.3 - 1e-8 < aug.jitter.scale.a[2] < 0.3 + 1e-8
# bad datatype for scale
got_exception = False
try:
_ = iaa.PerspectiveTransform(scale=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# keypoint augmentation without keep_size
# TODO deviations of around 0.4-0.7 in this and the next test (between expected and observed
# coordinates) -- why?
kps = [ia.Keypoint(x=10, y=10), ia.Keypoint(x=14, y=11)]
kpsoi = ia.KeypointsOnImage(kps, shape=img.shape)
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)
aug.jitter = iap.Deterministic(0.2)
observed = aug.augment_keypoints([kpsoi])
kps_expected = [
ia.Keypoint(x=10-0.2*30, y=10-0.2*30),
ia.Keypoint(x=14-0.2*30, y=11-0.2*30)
]
for kp_observed, kp_expected in zip(observed[0].keypoints, kps_expected):
assert kp_expected.x - 1.5 < kp_observed.x < kp_expected.x + 1.5
assert kp_expected.y - 1.5 < kp_observed.y < kp_expected.y + 1.5
# keypoint augmentation with keep_size
kps = [ia.Keypoint(x=10, y=10), ia.Keypoint(x=14, y=11)]
kpsoi = ia.KeypointsOnImage(kps, shape=img.shape)
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)
aug.jitter = iap.Deterministic(0.2)
observed = aug.augment_keypoints([kpsoi])
kps_expected = [
ia.Keypoint(x=((10-0.2*30)/(30*0.6))*30, y=((10-0.2*30)/(30*0.6))*30),
ia.Keypoint(x=((14-0.2*30)/(30*0.6))*30, y=((11-0.2*30)/(30*0.6))*30)
]
for kp_observed, kp_expected in zip(observed[0].keypoints, kps_expected):
assert kp_expected.x - 1.5 < kp_observed.x < kp_expected.x + 1.5
assert kp_expected.y - 1.5 < kp_observed.y < kp_expected.y + 1.5
# get_parameters
aug = iaa.PerspectiveTransform(scale=0.1, keep_size=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Normal)
assert isinstance(params[0].scale, iap.Deterministic)
assert 0.1 - 1e-8 < params[0].scale.value < 0.1 + 1e-8
assert params[1] is False
###################
# test other dtypes
###################
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)
aug.jitter = iap.Deterministic(0.2)
y1 = int(30 * 0.2)
y2 = int(30 * 0.8)
x1 = int(30 * 0.2)
x2 = int(30 * 0.8)
# bool
image = np.zeros((30, 30), dtype=bool)
image[12:18, :] = True
image[:, 12:18] = True
expected = image[y1:y2, x1:x2]
image_aug = aug.augment_image(image)
assert image_aug.dtype == image.dtype
assert image_aug.shape == expected.shape
assert (np.sum(image_aug == expected) / expected.size) > 0.9
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if np.dtype(dtype).kind == "i":
values = [0, 1, 5, 10, 100, int(0.1 * max_value), int(0.2 * max_value),
int(0.5 * max_value), max_value-100, max_value]
values = values + [(-1)*value for value in values]
else:
values = [0, 1, 5, 10, 100, int(center_value), int(0.1 * max_value), int(0.2 * max_value),
int(0.5 * max_value), max_value-100, max_value]
for value in values:
image = np.zeros((30, 30), dtype=dtype)
image[12:18, :] = value
image[:, 12:18] = value
expected = image[y1:y2, x1:x2]
image_aug = aug.augment_image(image)
assert image_aug.dtype == image.dtype
assert image_aug.shape == expected.shape
# rather high tolerance of 0.7 here because of interpolation
assert (np.sum(image_aug == expected) / expected.size) > 0.7
# float
for dtype in [np.float16, np.float32, np.float64]:
def _isclose(a, b):
atol = 1e-4 if dtype == np.float16 else 1e-8
return np.isclose(a, b, atol=atol, rtol=0)
isize = np.dtype(dtype).itemsize
values = [0.01, 1.0, 10.0, 100.0, 500 ** (isize - 1), 1000 ** (isize - 1)]
values = values + [(-1) * value for value in values]
for value in values:
image = np.zeros((30, 30), dtype=dtype)
image[12:18, :] = value
image[:, 12:18] = value
expected = image[y1:y2, x1:x2]
image_aug = aug.augment_image(image)
assert image_aug.dtype == image.dtype
assert image_aug.shape == expected.shape
# rather high tolerance of 0.7 here because of interpolation
assert (np.sum(_isclose(image_aug, expected)) / expected.size) > 0.7
def test_ElasticTransformation():
reseed()
img = np.zeros((50, 50), dtype=np.uint8) + 255
img = np.pad(img, ((100, 100), (100, 100)), mode="constant", constant_values=0)
mask = img > 0
heatmaps = ia.HeatmapsOnImage((img / 255.0).astype(np.float32), shape=img.shape)
img_nonsquare = np.zeros((50, 100), dtype=np.uint8) + 255
img_nonsquare = np.pad(img_nonsquare, ((100, 100), (100, 100)), mode="constant", constant_values=0)
mask_nonsquare = img_nonsquare > 0
# test basic functionality
aug = iaa.ElasticTransformation(alpha=0.5, sigma=0.25)
observed = aug.augment_image(img)
# assume that some white/255 pixels have been moved away from the center and replaced by black/0 pixels
assert np.sum(observed[mask]) < np.sum(img[mask])
# assume that some black/0 pixels have been moved away from the outer area and replaced by white/255 pixels
assert np.sum(observed[~mask]) > np.sum(img[~mask])
# test basic functionality with non-square images
aug = iaa.ElasticTransformation(alpha=0.5, sigma=0.25)
observed = aug.augment_image(img_nonsquare)
assert np.sum(observed[mask_nonsquare]) < np.sum(img_nonsquare[mask_nonsquare])
assert np.sum(observed[~mask_nonsquare]) > np.sum(img_nonsquare[~mask_nonsquare])
# test basic functionality, heatmaps
aug = iaa.ElasticTransformation(alpha=0.5, sigma=0.25)
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == heatmaps.shape
assert heatmaps.min_value - 1e-6 < observed.min_value < heatmaps.min_value + 1e-6
assert heatmaps.max_value - 1e-6 < observed.max_value < heatmaps.max_value + 1e-6
assert np.sum(observed.get_arr()[mask]) < np.sum(heatmaps.get_arr()[mask])
assert np.sum(observed.get_arr()[~mask]) > np.sum(heatmaps.get_arr()[~mask])
# test effects of increased alpha strength
aug1 = iaa.ElasticTransformation(alpha=0.1, sigma=0.25)
aug2 = iaa.ElasticTransformation(alpha=5.0, sigma=0.25)
observed1 = aug1.augment_image(img)
observed2 = aug2.augment_image(img)
# assume that the inner area has become more black-ish when using high alphas (more white pixels were moved out of
# the inner area)
assert np.sum(observed1[mask]) > np.sum(observed2[mask])
# assume that the outer area has become more white-ish when using high alphas (more white pixels were moved into
# the outer area)
assert np.sum(observed1[~mask]) < np.sum(observed2[~mask])
# test effects of increased alpha strength, heatmaps
aug1 = iaa.ElasticTransformation(alpha=0.1, sigma=0.25)
aug2 = iaa.ElasticTransformation(alpha=5.0, sigma=0.25)
observed1 = aug1.augment_heatmaps([heatmaps])[0]
observed2 = aug2.augment_heatmaps([heatmaps])[0]
assert observed1.shape == heatmaps.shape
assert observed2.shape == heatmaps.shape
assert heatmaps.min_value - 1e-6 < observed1.min_value < heatmaps.min_value + 1e-6
assert heatmaps.max_value - 1e-6 < observed1.max_value < heatmaps.max_value + 1e-6
assert heatmaps.min_value - 1e-6 < observed2.min_value < heatmaps.min_value + 1e-6
assert heatmaps.max_value - 1e-6 < observed2.max_value < heatmaps.max_value + 1e-6
assert np.sum(observed1.get_arr()[mask]) > np.sum(observed2.get_arr()[mask])
assert np.sum(observed1.get_arr()[~mask]) < np.sum(observed2.get_arr()[~mask])
# test effects of increased sigmas
aug1 = iaa.ElasticTransformation(alpha=3.0, sigma=0.1)
aug2 = iaa.ElasticTransformation(alpha=3.0, sigma=3.0)
observed1 = aug1.augment_image(img)
observed2 = aug2.augment_image(img)
observed1_std_hori = np.std(observed1.astype(np.float32)[:, 1:] - observed1.astype(np.float32)[:, :-1])
observed2_std_hori = np.std(observed2.astype(np.float32)[:, 1:] - observed2.astype(np.float32)[:, :-1])
observed1_std_vert = np.std(observed1.astype(np.float32)[1:, :] - observed1.astype(np.float32)[:-1, :])
observed2_std_vert = np.std(observed2.astype(np.float32)[1:, :] - observed2.astype(np.float32)[:-1, :])
observed1_std = (observed1_std_hori + observed1_std_vert) / 2
observed2_std = (observed2_std_hori + observed2_std_vert) / 2
assert observed1_std > observed2_std
# test alpha being iap.Choice
aug = iaa.ElasticTransformation(alpha=iap.Choice([0.001, 5.0]), sigma=0.25)
seen = [0, 0]
for _ in sm.xrange(100):
observed = aug.augment_image(img)
diff = np.average(np.abs(img.astype(np.float32) - observed.astype(np.float32)))
if diff < 1.0:
seen[0] += 1
else:
seen[1] += 1
assert seen[0] > 10
assert seen[1] > 10
# test alpha being tuple
aug = iaa.ElasticTransformation(alpha=(1.0, 2.0), sigma=0.25)
assert isinstance(aug.alpha, iap.Uniform)
assert isinstance(aug.alpha.a, iap.Deterministic)
assert isinstance(aug.alpha.b, iap.Deterministic)
assert 1.0 - 1e-8 < aug.alpha.a.value < 1.0 + 1e-8
assert 2.0 - 1e-8 < aug.alpha.b.value < 2.0 + 1e-8
# test unusual channels numbers
aug = iaa.ElasticTransformation(alpha=5, sigma=0.5)
for nb_channels in [1, 2, 4, 5, 7, 10, 11]:
img_c = np.tile(img[..., np.newaxis], (1, 1, nb_channels))
assert img_c.shape == (250, 250, nb_channels)
observed = aug.augment_image(img_c)
assert observed.shape == (250, 250, nb_channels)
for c in sm.xrange(1, nb_channels):
assert np.array_equal(observed[..., c], observed[..., 0])
# test alpha having bad datatype
got_exception = False
try:
_ = iaa.ElasticTransformation(alpha=False, sigma=0.25)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# test sigma being iap.Choice
aug = iaa.ElasticTransformation(alpha=3.0, sigma=iap.Choice([0.01, 5.0]))
seen = [0, 0]
for _ in sm.xrange(100):
observed = aug.augment_image(img)
observed_std_hori = np.std(observed.astype(np.float32)[:, 1:] - observed.astype(np.float32)[:, :-1])
observed_std_vert = np.std(observed.astype(np.float32)[1:, :] - observed.astype(np.float32)[:-1, :])
observed_std = (observed_std_hori + observed_std_vert) / 2
if observed_std > 10.0:
seen[0] += 1
else:
seen[1] += 1
assert seen[0] > 10
assert seen[1] > 10
# test sigma being tuple
aug = iaa.ElasticTransformation(alpha=0.25, sigma=(1.0, 2.0))
assert isinstance(aug.sigma, iap.Uniform)
assert isinstance(aug.sigma.a, iap.Deterministic)
assert isinstance(aug.sigma.b, iap.Deterministic)
assert 1.0 - 1e-8 < aug.sigma.a.value < 1.0 + 1e-8
assert 2.0 - 1e-8 < aug.sigma.b.value < 2.0 + 1e-8
# test sigma having bad datatype
got_exception = False
try:
_ = iaa.ElasticTransformation(alpha=0.25, sigma=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# order
# no proper tests here, because unclear how to test
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, order=ia.ALL)
assert isinstance(aug.order, iap.Choice)
assert all([order in aug.order.a for order in [0, 1, 2, 3, 4, 5]])
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, order=1)
assert isinstance(aug.order, iap.Deterministic)
assert aug.order.value == 1
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, order=[0, 1, 2])
assert isinstance(aug.order, iap.Choice)
assert all([order in aug.order.a for order in [0, 1, 2]])
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, order=iap.Choice([0, 1, 2, 3]))
assert isinstance(aug.order, iap.Choice)
assert all([order in aug.order.a for order in [0, 1, 2, 3]])
got_exception = False
try:
_ = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, order=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# cval
# few proper tests here, because unclear how to test
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, cval=ia.ALL)
assert isinstance(aug.cval, iap.DiscreteUniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 0
assert aug.cval.b.value == 255
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, cval=128)
assert isinstance(aug.cval, iap.Deterministic)
assert aug.cval.value == 128
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, cval=(128, 255))
assert isinstance(aug.cval, iap.DiscreteUniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 128
assert aug.cval.b.value == 255
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, cval=[16, 32, 64])
assert isinstance(aug.cval, iap.Choice)
assert all([cval in aug.cval.a for cval in [16, 32, 64]])
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, cval=iap.Choice([16, 32, 64]))
assert isinstance(aug.cval, iap.Choice)
assert all([cval in aug.cval.a for cval in [16, 32, 64]])
aug = iaa.ElasticTransformation(alpha=30.0, sigma=3.0, mode="constant", cval=255, order=0)
img = np.zeros((100, 100), dtype=np.uint8)
observed = aug.augment_image(img)
assert np.sum(observed == 255) > 0
assert np.sum(np.logical_and(0 < observed, observed < 255)) == 0
aug = iaa.ElasticTransformation(alpha=3.0, sigma=3.0, mode="constant", cval=255, order=2)
img = np.zeros((100, 100), dtype=np.uint8)
observed = aug.augment_image(img)
assert np.sum(np.logical_and(0 < observed, observed < 255)) > 0
aug = iaa.ElasticTransformation(alpha=3.0, sigma=3.0, mode="constant", cval=0, order=0)
img = np.zeros((100, 100), dtype=np.uint8)
observed = aug.augment_image(img)
assert np.sum(observed == 255) == 0
got_exception = False
try:
_ = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, cval=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# cval with heatmaps
heatmaps = ia.HeatmapsOnImage(np.zeros((32, 32, 1), dtype=np.float32), shape=(32, 32, 3))
aug = iaa.ElasticTransformation(alpha=3.0, sigma=3.0, mode="constant", cval=255)
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == heatmaps.shape
assert heatmaps.min_value - 1e-6 < observed.min_value < heatmaps.min_value + 1e-6
assert heatmaps.max_value - 1e-6 < observed.max_value < heatmaps.max_value + 1e-6
assert np.sum(observed.get_arr() > 0.01) == 0
# mode
# no proper tests here, because unclear how to test
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, mode=ia.ALL)
assert isinstance(aug.mode, iap.Choice)
assert all([mode in aug.mode.a for mode in ["constant", "nearest", "reflect", "wrap"]])
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, mode="nearest")
assert isinstance(aug.mode, iap.Deterministic)
assert aug.mode.value == "nearest"
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, mode=["constant", "nearest"])
assert isinstance(aug.mode, iap.Choice)
assert all([mode in aug.mode.a for mode in ["constant", "nearest"]])
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, mode=iap.Choice(["constant", "nearest"]))
assert isinstance(aug.mode, iap.Choice)
assert all([mode in aug.mode.a for mode in ["constant", "nearest"]])
got_exception = False
try:
_ = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, mode=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# keypoints
# for small alpha, should not move if below threshold
alpha_thresh_orig = iaa.ElasticTransformation.KEYPOINT_AUG_ALPHA_THRESH
sigma_thresh_orig = iaa.ElasticTransformation.KEYPOINT_AUG_SIGMA_THRESH
iaa.ElasticTransformation.KEYPOINT_AUG_ALPHA_THRESH = 1.0
iaa.ElasticTransformation.KEYPOINT_AUG_SIGMA_THRESH = 0
kps = [ia.Keypoint(x=1, y=1), ia.Keypoint(x=15, y=25), ia.Keypoint(x=5, y=5),
ia.Keypoint(x=7, y=4), ia.Keypoint(x=48, y=5), ia.Keypoint(x=21, y=37),
ia.Keypoint(x=32, y=39), ia.Keypoint(x=6, y=8), ia.Keypoint(x=12, y=21),
ia.Keypoint(x=3, y=45), ia.Keypoint(x=45, y=3), ia.Keypoint(x=7, y=48)]
kpsoi = ia.KeypointsOnImage(kps, shape=(50, 50))
aug = iaa.ElasticTransformation(alpha=0.001, sigma=1.0)
observed = aug.augment_keypoints([kpsoi])[0]
d = kpsoi.get_coords_array() - observed.get_coords_array()
d[:, 0] = d[:, 0] ** 2
d[:, 1] = d[:, 1] ** 2
d = np.sum(d, axis=1)
d = np.average(d, axis=0)
assert d < 1e-8
iaa.ElasticTransformation.KEYPOINT_AUG_ALPHA_THRESH = alpha_thresh_orig
iaa.ElasticTransformation.KEYPOINT_AUG_SIGMA_THRESH = sigma_thresh_orig
# for small sigma, should not move if below threshold
alpha_thresh_orig = iaa.ElasticTransformation.KEYPOINT_AUG_ALPHA_THRESH
sigma_thresh_orig = iaa.ElasticTransformation.KEYPOINT_AUG_SIGMA_THRESH
iaa.ElasticTransformation.KEYPOINT_AUG_ALPHA_THRESH = 0.0
iaa.ElasticTransformation.KEYPOINT_AUG_SIGMA_THRESH = 1.0
kps = [ia.Keypoint(x=1, y=1), ia.Keypoint(x=15, y=25), ia.Keypoint(x=5, y=5),
ia.Keypoint(x=7, y=4), ia.Keypoint(x=48, y=5), ia.Keypoint(x=21, y=37),
ia.Keypoint(x=32, y=39), ia.Keypoint(x=6, y=8), ia.Keypoint(x=12, y=21),
ia.Keypoint(x=3, y=45), ia.Keypoint(x=45, y=3), ia.Keypoint(x=7, y=48)]
kpsoi = ia.KeypointsOnImage(kps, shape=(50, 50))
aug = iaa.ElasticTransformation(alpha=1.0, sigma=0.001)
observed = aug.augment_keypoints([kpsoi])[0]
d = kpsoi.get_coords_array() - observed.get_coords_array()
d[:, 0] = d[:, 0] ** 2
d[:, 1] = d[:, 1] ** 2
d = np.sum(d, axis=1)
d = np.average(d, axis=0)
assert d < 1e-8
iaa.ElasticTransformation.KEYPOINT_AUG_ALPHA_THRESH = alpha_thresh_orig
iaa.ElasticTransformation.KEYPOINT_AUG_SIGMA_THRESH = sigma_thresh_orig
# for small alpha (at sigma 1.0), should barely move
alpha_thresh_orig = iaa.ElasticTransformation.KEYPOINT_AUG_ALPHA_THRESH
sigma_thresh_orig = iaa.ElasticTransformation.KEYPOINT_AUG_SIGMA_THRESH
iaa.ElasticTransformation.KEYPOINT_AUG_ALPHA_THRESH = 0
iaa.ElasticTransformation.KEYPOINT_AUG_SIGMA_THRESH = 0
kps = [ia.Keypoint(x=1, y=1), ia.Keypoint(x=15, y=25), ia.Keypoint(x=5, y=5),
ia.Keypoint(x=7, y=4), ia.Keypoint(x=48, y=5), ia.Keypoint(x=21, y=37),
ia.Keypoint(x=32, y=39), ia.Keypoint(x=6, y=8), ia.Keypoint(x=12, y=21),
ia.Keypoint(x=3, y=45), ia.Keypoint(x=45, y=3), ia.Keypoint(x=7, y=48)]
kpsoi = ia.KeypointsOnImage(kps, shape=(50, 50))
aug = iaa.ElasticTransformation(alpha=0.001, sigma=1.0)
observed = aug.augment_keypoints([kpsoi])[0]
d = kpsoi.get_coords_array() - observed.get_coords_array()
d[:, 0] = d[:, 0] ** 2
d[:, 1] = d[:, 1] ** 2
d = np.sum(d, axis=1)
d = np.average(d, axis=0)
assert d < 0.5
iaa.ElasticTransformation.KEYPOINT_AUG_ALPHA_THRESH = alpha_thresh_orig
iaa.ElasticTransformation.KEYPOINT_AUG_SIGMA_THRESH = sigma_thresh_orig
# test alignment between images and heatmaps
img = np.zeros((80, 80), dtype=np.uint8)
img[:, 30:50] = 255
img[30:50, :] = 255
hm = ia.HeatmapsOnImage(img.astype(np.float32)/255.0, shape=(80, 80))
aug = iaa.ElasticTransformation(alpha=60.0, sigma=4.0, mode="constant", cval=0)
aug_det = aug.to_deterministic()
img_aug = aug_det.augment_image(img)
hm_aug = aug_det.augment_heatmaps([hm])[0]
assert hm_aug.shape == (80, 80)
assert hm_aug.arr_0to1.shape == (80, 80, 1)
img_aug_mask = img_aug > 255*0.1
hm_aug_mask = hm_aug.arr_0to1 > 0.1
same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])
assert (same / img_aug_mask.size) >= 0.99
# test alignment between images and heatmaps
# here with heatmaps that are smaller than the image
img = np.zeros((80, 80), dtype=np.uint8)
img[:, 30:50] = 255
img[30:50, :] = 255
img_small = ia.imresize_single_image(img, (40, 40), interpolation="nearest")
hm = ia.HeatmapsOnImage(img_small.astype(np.float32)/255.0, shape=(80, 80))
aug = iaa.ElasticTransformation(alpha=60.0, sigma=4.0, mode="constant", cval=0)
aug_det = aug.to_deterministic()
img_aug = aug_det.augment_image(img)
hm_aug = aug_det.augment_heatmaps([hm])[0]
assert hm_aug.shape == (80, 80)
assert hm_aug.arr_0to1.shape == (40, 40, 1)
img_aug_mask = img_aug > 255*0.1
hm_aug_mask = ia.imresize_single_image(hm_aug.arr_0to1, (80, 80), interpolation="nearest") > 0.1
same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])
assert (same / img_aug_mask.size) >= 0.94
# get_parameters()
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, order=2, cval=10, mode="constant")
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert isinstance(params[2], iap.Deterministic)
assert isinstance(params[3], iap.Deterministic)
assert isinstance(params[4], iap.Deterministic)
assert 0.25 - 1e-8 < params[0].value < 0.25 + 1e-8
assert 1.0 - 1e-8 < params[1].value < 1.0 + 1e-8
assert params[2].value == 2
assert params[3].value == 10
assert params[4].value == "constant"
###################
# test other dtypes
###################
aug = iaa.ElasticTransformation(sigma=0.5, alpha=5, order=0)
mask = np.zeros((21, 21), dtype=bool)
mask[7:13, 7:13] = True
# bool
image = np.zeros((21, 21), dtype=bool)
image[mask] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == image.dtype.name
assert not np.all(image_aug == 1)
assert np.any(image_aug[~mask] == 1)
# uint, int
for dtype in [np.uint8, np.uint16, np.uint32, np.int8, np.int16, np.int32]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image =
|
np.zeros((21, 21), dtype=dtype)
|
numpy.zeros
|
# Copyright 2018 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains some basic model components"""
import math
import numpy as np
import tensorflow as tf
from tensorflow.python.ops.rnn_cell import DropoutWrapper
from tensorflow.python.ops import rnn_cell
from tensorflow.python.framework import function
from tensor2tensor.layers.common_layers import layer_norm
from tensor2tensor.layers.common_attention import add_timing_signal_1d, multihead_self_attention_memory_efficient
initializer_relu = lambda: tf.contrib.layers.variance_scaling_initializer(factor=2.0, mode='FAN_IN',
uniform=False, dtype=tf.float32)
class RNNEncoder(object):
"""
General-purpose module to encode a sequence using an RNN.
It feeds the input through an RNN and returns all the hidden states.
Note: In lecture 8, we talked about how you might use an RNN as an "encoder"
to get a single, fixed size vector representation of a sequence
(e.g. by taking element-wise max of hidden states).
Here, we're using the RNN as an "encoder" but we're not taking max;
we're just returning all the hidden states. The terminology "encoder"
still applies because we're getting a different "encoding" of each
position in the sequence, and we'll use the encodings downstream in the model.
"""
def __init__(self, hidden_size, keep_prob, cell_type, name='RNNEncoder'):
"""
Inputs:
hidden_size: int. Hidden size of the RNN
keep_prob: Tensor containing a single scalar that is the keep probability (for dropout)
"""
self.name = name
if cell_type == 'rnn_lstm':
cell = rnn_cell.LSTMCell
elif cell_type == 'rnn_gru':
cell = rnn_cell.GRUCell
else:
cell = rnn_cell.LSTMCell
self.hidden_size = hidden_size
self.keep_prob = keep_prob
self.rnn_cell_fw = cell(self.hidden_size)
self.rnn_cell_fw = DropoutWrapper(self.rnn_cell_fw, input_keep_prob=self.keep_prob)
self.rnn_cell_bw = cell(self.hidden_size)
self.rnn_cell_bw = DropoutWrapper(self.rnn_cell_bw, input_keep_prob=self.keep_prob)
def build_graph(self, inputs, masks):
"""
Inputs:
inputs: Tensor shape (batch_size, seq_len, input_size)
masks: Tensor shape (batch_size, seq_len).
Has 1s where there is real input, 0s where there's padding.
This is used to make sure tf.nn.bidirectional_dynamic_rnn doesn't iterate through masked steps.
Returns:
out: Tensor shape (batch_size, seq_len, hidden_size*2).
This is all hidden states (fw and bw hidden states are concatenated).
"""
with tf.variable_scope(self.name):
input_lens = tf.reduce_sum(masks, reduction_indices=1) # shape (batch_size)
# Note: fw_out and bw_out are the hidden states for every timestep.
# Each is shape (batch_size, seq_len, hidden_size).
(fw_out, bw_out), _ = tf.nn.bidirectional_dynamic_rnn(self.rnn_cell_fw, self.rnn_cell_bw, inputs, input_lens, dtype=tf.float32)
# Concatenate the forward and backward hidden states
out = tf.concat([fw_out, bw_out], 2)
# Apply dropout
out = tf.nn.dropout(out, self.keep_prob)
return out
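# Example usage (illustrative sketch; the placeholder names below are not defined in this file):
#   encoder = RNNEncoder(hidden_size=200, keep_prob=keep_prob_ph, cell_type='rnn_gru')
#   context_hiddens = encoder.build_graph(context_embs, context_mask)  # (batch_size, context_len, hidden_size*2)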
class QAEncoder(object):
'''
Encoder block in QANet from https://arxiv.org/abs/1804.09541
'''
def __init__(self, num_blocks, num_layers, num_heads, filters, kernel_size, \
keep_prob, input_mapping=False, name='QAEncoder'):
self.name = name
self.num_blocks = num_blocks
self.num_layers = num_layers
self.num_heads = num_heads
self.filters = filters
self.kernel_size = kernel_size
self.keep_prob = keep_prob
self.input_mapping = input_mapping
def build_graph(self, inputs, masks):
with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):
if self.input_mapping:
inputs = tf.layers.conv1d(inputs, filters=self.filters, \
kernel_size=1, padding='SAME', name='input_mapping')
outputs = inputs
for i in range(self.num_blocks):
with tf.variable_scope('block{}'.format(i + 1)):
outputs = add_timing_signal_1d(outputs)
for j in range(self.num_layers):
with tf.variable_scope('conv{}'.format(j + 1)):
def fn(x):
output = tf.layers.separable_conv1d(layer_norm(x, name='ln1_{}'.format(j + 1)), filters=self.filters, kernel_size=self.kernel_size, padding='SAME', name='conv{}'.format(j + 1))
if j % 2 == 0:
output = tf.nn.dropout(output, self.keep_prob)
return output
outputs = layer_dropout(
x=outputs, fn=fn,
keep_prob=1 - (j + 1) / self.num_layers * (1 - self.keep_prob))
outputs = tf.nn.dropout(outputs + multihead_self_attention(layer_norm(outputs, name='ln2_{}'.format(i)), masks, self.num_heads), self.keep_prob)
res = outputs
outputs = layer_norm(outputs, name='ln3_{}'.format(i + 1))
#outputs = tf.layers.conv1d(outputs, filters=self.filters, kernel_size=1, padding='SAME', kernel_initializer=initializer_relu(), name='ffn1')
outputs = tf.nn.relu(tf.layers.conv1d(outputs, filters=self.filters, kernel_size=1, padding='SAME', kernel_initializer=initializer_relu(), name='ffn1'))
outputs = tf.layers.conv1d(outputs, filters=self.filters, kernel_size=1, padding='SAME', name='ffn2')
outputs = tf.nn.dropout(res + outputs, self.keep_prob)
return outputs
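# Note: each encoder block above follows the QANet layout: positional encoding (add_timing_signal_1d),
# a stack of depthwise-separable convolutions each wrapped in layer norm and stochastic layer dropout,
# a multi-head self-attention sublayer, and a two-layer position-wise feed-forward network, with
# residual connections throughout. layer_dropout and multihead_self_attention are assumed to be
# helpers defined elsewhere in this module; they are not defined in the code shown here.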
class SimpleSoftmaxLayer(object):
"""
Module to take set of hidden states, (e.g. one for each context location),
and return probability distribution over those states.
"""
def __init__(self, name="SimpleSoftmaxLayer"):
self.name = name
def build_graph(self, inputs, masks):
"""
Applies one linear downprojection layer, then softmax.
Inputs:
inputs: Tensor shape (batch_size, seq_len, hidden_size)
masks: Tensor shape (batch_size, seq_len)
Has 1s where there is real input, 0s where there's padding.
Outputs:
logits: Tensor shape (batch_size, seq_len)
logits is the result of the downprojection layer, but it has -1e30
(i.e. very large negative number) in the padded locations
prob_dist: Tensor shape (batch_size, seq_len)
The result of taking softmax over logits.
This should have 0 in the padded locations, and the rest should sum to 1.
"""
with tf.variable_scope(self.name):
# Linear downprojection layer
logits = tf.contrib.layers.fully_connected(inputs, num_outputs=1, activation_fn=None) # shape (batch_size, seq_len, 1)
logits = tf.squeeze(logits, axis=[2]) # shape (batch_size, seq_len)
# Take softmax over sequence
masked_logits, prob_dist = masked_softmax(logits, masks, 1)
return masked_logits, prob_dist
class BasicAttn(object):
"""Module for basic attention.
Note: in this module we use the terminology of "keys" and "values" (see lectures).
In the terminology of "X attends to Y", "keys attend to values".
In the baseline model, the keys are the context hidden states
and the values are the question hidden states.
We choose to use general terminology of keys and values in this module
(rather than context and question) to avoid confusion if you reuse this
module with other inputs.
"""
def __init__(self, keep_prob, key_vec_size, value_vec_size, name="BasicAttn"):
"""
Inputs:
keep_prob: tensor containing a single scalar that is the keep probability (for dropout)
key_vec_size: size of the key vectors. int
value_vec_size: size of the value vectors. int
"""
self.name = name
self.keep_prob = keep_prob
self.key_vec_size = key_vec_size
self.value_vec_size = value_vec_size
def build_graph(self, values, values_mask, keys):
"""
Keys attend to values.
For each key, return an attention distribution and an attention output vector.
Inputs:
values: Tensor shape (batch_size, num_values, value_vec_size).
values_mask: Tensor shape (batch_size, num_values).
1s where there's real input, 0s where there's padding
keys: Tensor shape (batch_size, num_keys, value_vec_size)
Outputs:
attn_dist: Tensor shape (batch_size, num_keys, num_values).
For each key, the distribution should sum to 1,
and should be 0 in the value locations that correspond to padding.
output: Tensor shape (batch_size, num_keys, hidden_size).
This is the attention output; the weighted sum of the values
(using the attention distribution as weights).
"""
with tf.variable_scope(self.name):
# Calculate attention distribution
values_t = tf.transpose(values, perm=[0, 2, 1]) # (batch_size, value_vec_size, num_values)
attn_logits = tf.matmul(keys, values_t) # shape (batch_size, num_keys, num_values)
attn_logits_mask = tf.expand_dims(values_mask, 1) # shape (batch_size, 1, num_values)
_, attn_dist = masked_softmax(attn_logits, attn_logits_mask, 2) # shape (batch_size, num_keys, num_values). take softmax over values
# Use attention distribution to take weighted sum of values
output = tf.matmul(attn_dist, values) # shape (batch_size, num_keys, value_vec_size)
# Apply dropout
output = tf.nn.dropout(output, self.keep_prob)
return attn_dist, output
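# Example usage (illustrative sketch; variable names below are placeholders, not from this file):
#   attn_layer = BasicAttn(keep_prob_ph, key_vec_size=hidden_size*2, value_vec_size=hidden_size*2)
#   _, attn_output = attn_layer.build_graph(question_hiddens, qn_mask, context_hiddens)
#   blended_reps = tf.concat([context_hiddens, attn_output], axis=2)  # (batch_size, context_len, hidden_size*4)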
class BiDAFAttn(object):
'''Module for BiDAF attention cell from https://arxiv.org/abs/1611.01603
'''
def __init__(self, keep_prob=1.0, name='BiDAFAttn'):
self.name = name
self.keep_prob = keep_prob
def build_graph(self, c, c_mask, q, q_mask):
with tf.variable_scope(self.name):
S = trilinear_similarity(c, q)
_, alpha = masked_softmax(S, tf.expand_dims(q_mask, 1), 2)
a = tf.matmul(alpha, q)
m = tf.reduce_max(S, axis=2)
_, beta = masked_softmax(m, c_mask, 1)
c_attn = tf.matmul(tf.expand_dims(beta, 1), c)
output = tf.concat([c, a, tf.multiply(c, a), tf.multiply(c, c_attn)], -1)
return output
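# Note on BiDAFAttn: S is the trilinear similarity matrix of shape (batch_size, c_len, q_len);
# alpha gives context-to-question attention and a its weighted sum over q, while beta is the
# question-to-context attention taken over the row-wise max of S. The returned tensor
# concatenates [c, a, c*a, c*c_attn], so its last dimension is 4x that of c.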
def masked_softmax(logits, mask, axis=-1):
"""
Takes masked softmax over given dimension of logits.
Inputs:
logits: Tensor. We want to take softmax over dimension axis.
mask: Tensor of same shape as logits.
Has 1s where there's real data in logits, 0 where there's padding
axis: int. dimension over which to take softmax
Returns:
masked_logits: Tensor same shape as logits.
This is the same as logits, but with 1e30 subtracted
(i.e. very large negative number) in the padding locations.
prob_dist: Tensor same shape as logits.
The result of taking softmax over masked_logits in given dimension.
Should be 0 in padding locations.
Should sum to 1 over given dimension.
"""
exp_mask = (1 - tf.cast(mask, 'float')) * (-1e30) # -large where there's padding, 0 elsewhere
masked_logits = tf.add(logits, exp_mask) # where there's padding, set logits to -large
prob_dist = tf.nn.softmax(masked_logits, axis)
return masked_logits, prob_dist
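# Worked example (illustrative): for logits [2.0, 1.0, 3.0] with mask [1, 1, 0], exp_mask is
# [0, 0, -1e30], so masked_logits is roughly [2.0, 1.0, -1e30] and the softmax assigns
# approximately [0.73, 0.27, 0.0] -- the padded position gets essentially zero probability.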
def trilinear_similarity(c, q, name='trilinear_similarity'):
'''
Calculate trilinear similarity matrix from https://arxiv.org/abs/1611.01603
'''
c_shape, q_shape = c.get_shape().as_list(), q.get_shape().as_list()
c_len, q_len, h = c_shape[1], q_shape[1], c_shape[2]
with tf.variable_scope(name):
w_c = tf.get_variable('w_c', [h, 1], dtype=tf.float32)
w_q = tf.get_variable('w_q', [h, 1], dtype=tf.float32)
w_cq = tf.get_variable('w_cq', [1, 1, h], dtype=tf.float32)
bias = tf.get_variable('bias', [1, 1, 1], dtype=tf.float32, initializer=tf.zeros_initializer())
S = tf.reshape(tf.matmul(tf.reshape(c, [-1, h]), w_c), [-1, c_len, 1]) \
+ tf.reshape(tf.matmul(tf.reshape(q, [-1, h]), w_q), [-1, 1, q_len]) \
+ tf.matmul(tf.multiply(c, w_cq), tf.transpose(q, perm=[0, 2, 1]))
S = tf.add(S, bias)
return S
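# Note: this computes the BiDAF score w^T [c_i; q_j; c_i * q_j] without materializing the
# concatenation: S[b, i, j] = w_c . c_i + w_q . q_j + w_cq . (c_i * q_j) + bias, yielding a
# similarity matrix S of shape (batch_size, c_len, q_len).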
def max_product_span(start_dist, end_dist):
'''
Find the answer span with maximum probability
'''
batch_size, input_len = start_dist.shape
i = np.zeros(batch_size, dtype=np.int32)
start_pos = np.zeros(batch_size, dtype=np.int32)
end_pos = np.zeros(batch_size, dtype=np.int32)
start_argmax = np.zeros(batch_size, dtype=np.int32)
max_start_prob = np.zeros(batch_size, dtype=np.float32)
max_product = np.zeros(batch_size, dtype=np.float32)
while np.any(i < input_len):
start_prob = start_dist[np.arange(batch_size), i]
start_argmax = np.where(start_prob > max_start_prob, i, start_argmax)
max_start_prob = np.where(start_prob > max_start_prob, start_prob, max_start_prob)
end_prob = end_dist[np.arange(batch_size), i]
new_product = max_start_prob * end_prob
start_pos = np.where(new_product > max_product, start_argmax, start_pos)
end_pos = np.where(new_product > max_product, i, end_pos)
max_product =
|
np.where(new_product > max_product, new_product, max_product)
|
numpy.where
|
from __future__ import print_function
import numpy as np
import Spectrum
import csv
import sys
from scipy import signal
from scipy import stats
from scipy.ndimage.filters import median_filter
import handythread
import multiprocessing
from functools import partial
import dm3_lib as DM3
#from ncempy.io import dm
import numbers
def make_dict_from_tags(iterables):
d = {}
for ii in iterables:
splitted = ii.split(' = ')
keys = list(splitted[:-1][0].split('.'))
value = splitted[-1]
tempD = d
for tt in keys[:-1]:
tempD = tempD.setdefault(tt, {})
if keys[-1] in tempD:
# duplicate tag
print('You have two tags in your DM3 file which are the same! ' + keys[-1], file=sys.stderr)
else:
tempD[keys[-1]] = value
return d
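# Example (illustrative): make_dict_from_tags(['ImageList.ImageData.Scale = 0.5'])
# returns {'ImageList': {'ImageData': {'Scale': '0.5'}}}; note that values stay strings.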
def import_EELS_dm3(filename):
data = DM3.DM3(filename)
# data = dm.dmReader(filename)
tags = make_dict_from_tags(data._storedTags)
imagedata = np.transpose(data.imagedata, axes=(1, 2, 0))
return imagedata, tags
class SpectrumImage(object):
"""Class for spectrum image data set, must be 3d numpy array
Axis0, Axis1: spatial dimensions, Axis2: spectrum dimension,
spectrum_units: units along the spectral axis,
calibration = spatial calibration (m/pixel)"""
def __init__(self, SI, spectrum_units, calibration=0):
if len(np.shape(SI)) != 3:
raise ValueError('That was not a 3D spectrum image!')
self.data = np.ma.array(SI.astype(float))
self.size = np.shape(SI)
self.calibration = calibration
self.spectrum_units = spectrum_units
## Add calibration for x, y, E/wavelength
class CLSpectrumImage(SpectrumImage):
def __init__(self, SI, WavelengthRange, spectrum_units='nm', calibration=0):
super(CLSpectrumImage, self).__init__(SI, spectrum_units, calibration)
self.SpectrumRange = WavelengthRange
self.spectrum_unit_label = 'Wavelength'
self.secondary_units = 'eV'
self.secondary_unit_label = 'Energy'
# self.spectrum_secondary_units = 'eV'
# self.spectrum_secondary_unit_label = 'Energy'
self.dispersion = self.SpectrumRange[1] - self.SpectrumRange[0]
def ExtractSpectrum(self, mask3D):
extractedspectrum = Spectrum.CLSpectrum(
np.mean(np.mean(
np.ma.masked_array(self.data, mask3D),
axis = 0), axis = 0), self.SpectrumRange,
units = self.spectrum_units)
return extractedspectrum
def SpikeRemoval(self, threshold):
# median = np.median(self.data, axis=2, keepdims=True)
# d = np.abs(self.data - median)
# median_d = np.median(d)
# s = d/median_d if median_d else 0.
# i = np.where(s>20)
# print(i)
# self.data[i] = np.mean([self.data[(i[0]-1, i[1], i[2])],
# self.data[(i[0]+1, i[1], i[2])],
# self.data[(i[0], i[1]-1, i[2])],
# self.data[(i[0], i[1]+1, i[2])]], axis=0)
grad = np.abs(np.gradient(self.data.astype(float)))
mask0 = np.zeros(np.shape(grad[0]))
mask0[grad[0] > threshold] = 1.
mask1 = np.zeros(np.shape(grad[1]))
mask1[grad[1] > threshold] = 1.
mask2 = np.zeros(np.shape(grad[2]))
mask2[grad[2] > threshold] = 1.
mask = np.logical_or(mask0, mask1).astype(float)
convolutionmask = np.array([[[0,0,0],[0,1,0],[0,0,0]],[[0,1,0],[1,0,1],[0,1,0]],[[0,0,0],[0,1,0],[0,0,0]]])
convolved = signal.convolve(mask, convolutionmask, mode='same')
filtermask = np.reshape(np.array([[0,1,0],[1,0,1],[0,1,0]]), (3,3,1))
filtermaskcorner = np.reshape(np.array([[1,1,1],[1,1,1],[1,1,1]]), (3,3,1))
filtercopy = median_filter(np.copy(self.data), footprint=filtermask)
corners = [(0, 0), (0, -1), (-1, -1), (-1, 0)]
for cc in corners:
filtercopy[cc][convolved[cc] >= 2] = median_filter(np.copy(self.data), footprint=filtermaskcorner)[cc][convolved[cc] >= 2]
convolved[cc][convolved[cc] >= 2] = 4
spike_free = filtercopy*(convolved >= 3) + self.data*(convolved < 3)
spike_free = CLSpectrumImage(spike_free, self.SpectrumRange, self.spectrum_units, self.calibration)
i = np.where(convolved > 3)
return spike_free
class EELSSpectrumImage(SpectrumImage):
def __init__(self, SI, SpectrumRange=None, channel_eV=None, dispersion=0.005, ZLP=False, spectrum_units='eV', calibration=0, metadata=None):
super(EELSSpectrumImage, self).__init__(SI, spectrum_units, calibration)
'''intensity: 3D array
SpectrumRange: 1D array of same length as energy axis
channel_eV: 2 element array [channel #, eV value]
dispersion: real number, width of each channel, must be provided if SpectrumRange is not, default is 5meV
ZLP: Boolean - True=ZLP is present
units: string, for plot axis
'''
if ZLP:
if not isinstance(dispersion, numbers.Real):
raise ValueError('Dispersion needs to be a real number!')
if SpectrumRange is not None:
raise ValueError("You don't need to define a SpectrumRange and ZLP/dispersion!")
self.ZLP = self.FindZLP(self.data)
self.dispersion = dispersion
self.SpectrumRange = np.arange(0 - self.ZLP, self.size[2] - self.ZLP) * self.dispersion
elif SpectrumRange is not None:
if len(SpectrumRange) != self.size[2]:
raise ValueError("Your SpectrumRange is not the same size as your energy axis!")
self.SpectrumRange = SpectrumRange
self.dispersion = SpectrumRange[1] - SpectrumRange[0]
if np.min(SpectrumRange) < 0 and np.max(SpectrumRange) > 0:
self.ZLP = self.FindZLP(self.data)
elif channel_eV:
if len(channel_eV) != 2:
raise ValueError('channel_eV must have length 2!')
if not isinstance(dispersion, numbers.Real):
raise ValueError('Dispersion needs to be a real number!')
eV0 = channel_eV[1] - channel_eV[0] * dispersion
self.SpectrumRange = np.linspace(
eV0,
eV0 + (self.size[2] - 1) * dispersion,
self.size[2]
)
self.dispersion = dispersion
else:
raise ValueError('You need to input an energy calibration!')
self.metadata = metadata
self.unit_label = 'Energy'
self.secondary_units = 'nm'
self.secondary_unit_label = 'Wavelength'
self.spectrum_unit_label = 'Energy'
# self.spectrum_secondary_units = 'nm'
# self.spectrum_secondary_unit_label = 'Wavelength'
@classmethod
def LoadFromDM3(cls, filename, spectrum_calibrated = True):
SI, metadata = import_EELS_dm3(filename)
dispersion = float(metadata['root']['ImageList']['1']['ImageData']['Calibrations']['Dimension']['2']['Scale'])
# drifttube = float(metadata['root']['ImageList']['1']['ImageTags']['EELS']['Acquisition']['Spectrometer']['Energy loss (eV)'])
zero = float(metadata['root']['ImageList']['1']['ImageData']['Calibrations']['Dimension']['2']['Origin'])
if zero >= 0:
ZLP = True
else:
ZLP = False
if spectrum_calibrated is True:
channel_eV = [0, -zero * dispersion]
else:
channel_eV = None
return cls(SI = SI, dispersion = dispersion, ZLP = ZLP, channel_eV = channel_eV, metadata = metadata)
@staticmethod
def FindZLP(data):
ZLP = int(stats.mode(np.argmax(data, axis = -1), axis=None)[0])
return ZLP
def FindZLPArray(self):
ZLParray = np.argmax(self.data, axis=2)
return ZLParray
def AlignZLP(self):
aligned = self.AlignChannel(self.FindZLPArray())
return aligned
def ExtractSpectrum(self, mask3D):
extractedspectrum = Spectrum.EELSSpectrum(
np.mean(np.mean(
np.ma.masked_array(self.data, mask3D),
axis = 0), axis = 0),
SpectrumRange = self.SpectrumRange,
units = self.spectrum_units)
return extractedspectrum
def Threshold(self, threshold):
'''To mask out pixels with very little signal'''
thrmask = np.less(self.data.data[:, :, self.ZLP], threshold)
# self.thrmask = np.reshape(thrmask, (np.append(np.shape(thrmask), 1))) * np.ones(np.shape(self.data))
self.data.mask = np.reshape(thrmask, (np.append(np.shape(thrmask), 1))) * np.ones(np.shape(self.data))
def InvertThreshold(self):
self.data.mask = np.invert(self.data.mask)
def Normalize(self):
'''Normalize data to integral'''
self.normfactor = np.sum(self.data, axis=2, keepdims=True)
data_norm = self.data/self.normfactor
return data_norm
def AlignChannel(self, indices):
''' Input: indices=a 2D array of the same shape as the image dimensions
containing the indices of where the desired channel is currently in the SI'''
channelmax = np.max(np.max(indices))
channelmin = np.min(np.min(indices))
index1, index2 = np.meshgrid(range(self.size[0]), range(self.size[1]))
index1 = np.expand_dims(np.transpose(index1), axis=2)
index2 = np.expand_dims(np.transpose(index2), axis=2)
index3 = np.expand_dims(np.expand_dims(range(self.size[2]) + channelmax,
axis=0), axis=0) -
|
np.expand_dims(indices, axis=2)
|
numpy.expand_dims
|
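# Editor's note: a standalone NumPy sketch (not from the original module) of the
# channel_eV energy calibration used in EELSSpectrumImage.__init__ above: one channel
# is pinned to a known energy and the rest of the axis follows from the dispersion.
# The channel count and calibration values below are hypothetical.
import numpy as np
n_channels, dispersion = 1024, 0.005          # 5 meV per channel
channel_eV = [100, 0.0]                       # channel 100 sits at 0 eV
eV0 = channel_eV[1] - channel_eV[0] * dispersion
spectrum_range = np.linspace(eV0, eV0 + (n_channels - 1) * dispersion, n_channels)
# spectrum_range runs from -0.5 eV to 4.615 eV and spectrum_range[100] is exactly 0 eV,
# which is what EELSSpectrumImage(SI, channel_eV=[100, 0.0], dispersion=0.005) builds.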
'''
check the heatmap for ant multi
'''
import joblib
import os
from os import path as osp
import numpy as np
from rlkit.core.vistools import plot_2dhistogram
def plot_expert_heatmap(data_path, target0, num_targets, num_bins, title, save_path, rel_pos_version=True, ax_lims=None):
assert rel_pos_version
d = joblib.load(data_path)
mean, std = d['obs_mean'][:,-2*num_targets:-2*num_targets+2], d['obs_std'][:,-2*num_targets:-2*num_targets+2]
buffer = d['train']
print(buffer._size)
xy_pos = buffer._observations[:buffer._size][:,-2*num_targets:-2*num_targets+2]
xy_pos = xy_pos * std + mean
xy_pos = target0 - xy_pos
plot_2dhistogram(xy_pos[:,0], xy_pos[:,1], num_bins, title, save_path, ax_lims=ax_lims)
if __name__ == '__main__':
save_dir = 'plots/junk_vis/heat_maps_for_ant_multi/'
# data_path = '/scratch/hdd001/home/kamyar/expert_demos/norm_rel_pos_obs_ant_multi_4_directions_4_distance_32_det_demos_per_task_no_sub_path_len_75'
data_path = '/scratch/hdd001/home/kamyar/expert_demos/norm_rel_pos_obs_ant_multi_4_directions_4_distance_32_det_demos_per_task_no_sub_path_terminates_within_0p5_of_target'
data_path = osp.join(data_path, 'extra_data.pkl')
plot_expert_heatmap(
data_path,
|
np.array([4.0, 0.0])
|
numpy.array
|
import numpy as np
import matplotlib.pyplot as plt
import stan_utils as stan
from mpl_utils import (mpl_style, common_limits)
plt.style.use(mpl_style)
def generate_data(N, M, D, scales=None, seed=None):
"""
Generate some toy data to play with. Here we assume all :math:`N` stars have
been observed by all :math:`M` surveys.
:param N:
The number of stars observed.
:param M:
The number of surveys.
:param D:
The dimensionality of the label space.
:param scales: [optional]
Optional values to provide for the relative scales on the latent factors.
:param seed: [optional]
An optional seed to provide to the random number generator.
:returns:
A two-length tuple containing the data :math:`y` and a dictionary with
the true values.
"""
if seed is not None:
np.random.seed(seed)
if scales is None:
scales = np.abs(np.random.normal(0, 1, size=D))
else:
scales = np.array(scales)
assert len(scales) == D
X = np.random.normal(
np.zeros(D),
scales,
size=(N, D))
# TODO: Better way to randomly generate positive semi-definite covariance
# matrices that are *very* close to an identity matrix.
# Use decomposition to ensure the resulting covariance matrix is positive
# semi-definite.
L = np.random.randn(M, D, D)
L[:, np.arange(D), np.arange(D)] = np.exp(L[:, np.arange(D), np.arange(D)])
i, j = np.triu_indices_from(L[0], 1)
L[:, i, j] = 0.0
# TODO: use matrix multiplication you idiot
theta = np.array([np.dot(L[i], L[i].T) for i in range(M)])
y = np.dot(X, theta)
# add noise.
phi = np.abs(np.random.normal(0, 0.1, size=(M, D)))
rank = np.random.normal(0, 1, size=y.shape)
noise = scales * rank * phi
y += noise
truths = dict(X=X, theta=theta, phi=phi, scales=scales, L=L, noise=noise)
return (y, truths)
N, M, D = (250, 10, 5)
scales =
|
np.ones(D)
|
numpy.ones
|
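# Editor's note: a hypothetical usage check (not from the original script) for
# generate_data defined above. With N stars, M surveys and D labels, y has shape
# (N, M, D) because np.dot contracts the (N, D) latent factors against the
# (M, D, D) per-survey transformations over the label dimension.
y_demo, truths_demo = generate_data(N=50, M=3, D=2, seed=0)
assert y_demo.shape == (50, 3, 2)
assert truths_demo["X"].shape == (50, 2)
assert truths_demo["theta"].shape == (3, 2, 2)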
"""Methods related to the {2, 2} Danilov distribution envelope model.
References
----------
[1] https://doi.org/10.1103/PhysRevSTAB.6.094202
[2] https://doi.org/10.1103/PhysRevAccelBeams.24.044201
"""
from __future__ import print_function
import time
import copy
import numpy as np
import numpy.linalg as la
import scipy.optimize as opt
from tqdm import trange
from tqdm import tqdm
from bunch import Bunch
from orbit.twiss import twiss
from orbit.twiss import bogacz_lebedev as bl
from orbit.utils import helper_funcs as hf
from orbit.utils.consts import mass_proton
from orbit.utils.consts import pi
from orbit.utils.general import tprint
# Define bounds on 4D Twiss parameters.
pad = 1e-5
alpha_min, alpha_max = -np.inf, np.inf
beta_min, beta_max = pad, np.inf
nu_min, nu_max = pad, pi - pad
u_min, u_max = pad, 1 - pad
TWISS4D_LB = (alpha_min, alpha_min, beta_min, beta_min, u_min, nu_min)
TWISS4D_UB = (alpha_max, alpha_max, beta_max, beta_max, u_max, nu_max)
def moment_vector(Sigma):
"""Return array of 10 unique elements of covariance matrix."""
return Sigma[np.triu_indices(4)]
class DanilovEnvelope:
"""Class for the beam envelope of the Danilov distribution.
Attributes
----------
params : ndarray, shape (8,)
The envelope parameters [a, b, a', b', e, f, e', f']. The coordinates
of a particle on the beam envelope are parameterized as
x = a*cos(psi) + b*sin(psi), x' = a'*cos(psi) + b'*sin(psi),
y = e*cos(psi) + f*sin(psi), y' = e'*cos(psi) + f'*sin(psi),
where 0 <= psi <= 2pi.
eps_l : float
The nonzero rms intrinsic emittance of the beam (eps_1 or eps_2) [m*rad].
mode : int
Whether to choose eps_2 = 0 (mode 1) or eps_1 = 0 (mode 2). This amounts
to choosing the sign of the transverse angular momentum.
eps_x_frac : float
ex = eps_x_frac * eps
mass : float
Mass per particle [GeV/c^2].
kin_energy : float
Kinetic energy per particle [GeV].
intensity : float
Number of particles in the bunch represented by the envelope.
length : float
Bunch length [m].
perveance : float
Dimensionless beam perveance.
"""
def __init__(self, eps_l=1., mode=1, eps_x_frac=0.5, mass=mass_proton,
kin_energy=1.0, length=1.0, intensity=0.0, params=None):
self.eps_l = eps_l
self.mode = mode
self.eps_x_frac, self.ey_frac = eps_x_frac, 1.0 - eps_x_frac
self.mass = mass
self.kin_energy = kin_energy
self.length = length
self.set_intensity(intensity)
if params is None:
eps_x = eps_x_frac * eps_l
eps_y = (1.0 - eps_x_frac) * eps_l
rx, ry = np.sqrt(4 * eps_x), np.sqrt(4 * eps_y)
if mode == 1:
self.params = np.array([rx, 0, 0, rx, 0, -ry, +ry, 0])
elif mode == 2:
self.params = np.array([rx, 0, 0, rx, 0, +ry, -ry, 0])
else:
self.params = np.array(params)
eps_x, eps_y = self.apparent_emittances()
self.eps_l = eps_x + eps_y
self.eps_x_frac = eps_x / self.eps_l
def copy(self):
"""Produced a deep copy of the envelope."""
return copy.deepcopy(self)
def set_intensity(self, intensity):
"""Set beam intensity and re-calculate perveance."""
self.intensity = intensity
self.line_density = intensity / self.length
self.perveance = hf.get_perveance(self.mass, self.kin_energy,
self.line_density)
def set_length(self, length):
"""Set beam length and re-calculate perveance."""
self.length = length
self.set_intensity(self.intensity)
def get_params_for_dim(self, dim='x'):
"""Return envelope parameters associated with the given dimension."""
a, b, ap, bp, e, f, ep, fp = self.params
return {'x':(a, b), 'y':(e, f), 'xp':(ap, bp), 'yp': (ep, fp)}[dim]
def matrix(self):
"""Create the envelope matrix P from the envelope parameters.
The matrix is defined by x = P.c, where x = [x, x', y, y']^T,
c = [cos(psi), sin(psi)], and '.' means matrix multiplication, with
0 <= psi <= 2pi. This is useful because any transformation to the
particle coordinate vector x also done to P. For example, if x -> M x,
then P -> M P.
"""
a, b, ap, bp, e, f, ep, fp = self.params
return np.array([[a, b], [ap, bp], [e, f], [ep, fp]])
def to_vec(self, P):
"""Return list of envelope parameters from envelope matrix."""
return P.ravel()
def get_norm_mat_2D(self, inv=False):
"""Return normalization matrix V (x-x' and y-y' become circles)."""
V = twiss.V_matrix_4x4_uncoupled(*self.twiss2D())
return la.inv(V) if inv else V
def norm2D(self, scale=False):
"""Normalize x-x' and y-y' ellipses and return the envelope parameters.
The x-x' and y-y' ellipses will be circles of radius sqrt(eps_x) and
sqrt(eps_y), where eps_x and eps_y are the rms apparent emittances. If
`scale` is True, they will be unit circles.
"""
self.transform(self.get_norm_mat_2D(inv=True))
if scale:
ex, ey = 4 * self.apparent_emittances()
self.params[:4] /= np.sqrt(ex)
self.params[4:] /= np.sqrt(ey)
return self.params
def normed_params_2D(self):
"""Return the normalized envelope parameters in the 2D sense without
actually changing the envelope."""
true_params = np.copy(self.params)
normed_params = self.norm2D()
self.params = true_params
return normed_params
def norm4D(self):
"""Normalize the envelope parameters in the 4D sense.
In the transformed coordinates the covariance matrix is diagonal, and the
x-x' and y-y' emittances are the intrinsic emittances.
"""
r_n = np.sqrt(4 * self.eps_l)
if self.mode == 1:
self.params = np.array([r_n, 0, 0, r_n, 0, 0, 0, 0])
elif self.mode == 2:
self.params = np.array([0, 0, 0, 0, 0, r_n, r_n, 0])
def transform(self, M):
"""Apply matrix M to the coordinates."""
self.params = np.matmul(M, self.matrix()).ravel()
def norm_transform(self, M):
"""Normalize, then apply M to the coordinates."""
self.norm4D()
self.transform(M)
def advance_phase(self, mux=0., muy=0.):
"""Advance the x{y} phase by mux{muy} degrees.
It is equivalent to tracking through an uncoupled lattice which the
envelope is matched to.
"""
mux, muy = np.radians([mux, muy])
V = self.get_norm_mat_2D()
M = la.multi_dot([V, twiss.phase_adv_matrix(mux, muy), la.inv(V)])
self.transform(M)
def rotate(self, phi):
"""Apply clockwise rotation by phi degrees in x-y space."""
self.transform(twiss.rotation_matrix_4D(np.radians(phi)))
def tilt_angle(self, x1='x', x2='y'):
"""Return ccw angle of ellipse in x1-x2 plane."""
a, b = self.get_params_for_dim(x1)
e, f = self.get_params_for_dim(x2)
return 0.5 * np.arctan2(2*(a*e + b*f), a**2 + b**2 - e**2 - f**2)
def radii(self, x1='x', x2='y'):
"""Return semi-major and semi-minor axes of ellipse in x1-x2 plane."""
a, b = self.get_params_for_dim(x1)
e, f = self.get_params_for_dim(x2)
phi = self.tilt_angle(x1, x2)
sin, cos = np.sin(phi),
|
np.cos(phi)
|
numpy.cos
|
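# Editor's note: a standalone NumPy sketch (not part of the original module) of the
# envelope parameterization documented in DanilovEnvelope above: a point on the
# envelope is x = a*cos(psi) + b*sin(psi), y = e*cos(psi) + f*sin(psi), etc., which is
# the matrix product P . c evaluated below. The parameter values are hypothetical.
import numpy as np
a, b, ap, bp, e, f, ep, fp = 1.0, 0.0, 0.0, 1.0, 0.0, -0.5, 0.5, 0.0
P = np.array([[a, b], [ap, bp], [e, f], [ep, fp]])        # same layout as DanilovEnvelope.matrix()
psi = np.linspace(0.0, 2.0 * np.pi, 100)
coords = P.dot(np.vstack([np.cos(psi), np.sin(psi)]))      # rows are x, x', y, y'
# coords[0] traces the x extent of the ellipse and coords[2] the y extent; applying a
# transfer matrix M to the particles is equivalent to replacing P with M . P.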
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 20 20:50:15 2021
@author: dv516
"""
import numpy as np
import pickle
import matplotlib.pyplot as plt
import pyro
pyro.enable_validation(True) # can help with debugging
pyro.set_rng_seed(1)
from algorithms.PyBobyqa_wrapped.Wrapper_for_pybobyqa import PyBobyqaWrapper
from algorithms.Bayesian_opt_Pyro.utilities_full import BayesOpt
from algorithms.nesterov_random.nesterov_random import nesterov_random
from algorithms.simplex.simplex_method import simplex_method
from algorithms.CUATRO.CUATRO import CUATRO
from algorithms.Finite_differences.Finite_differences import finite_Diff_Newton
from algorithms.Finite_differences.Finite_differences import Adam_optimizer
from algorithms.Finite_differences.Finite_differences import BFGS_optimizer
from algorithms.SQSnobfit_wrapped.Wrapper_for_SQSnobfit import SQSnobFitWrapper
from algorithms.DIRECT_wrapped.Wrapper_for_Direct import DIRECTWrapper
from case_studies.RTO.systems import *
def trust_fig(X, Y, Z, g1, g2):
fig = plt.figure()
ax = fig.add_subplot()
ax.contour(X, Y, Z, 50)
ax.contour(X, Y, g1, levels = [0], colors = 'black')
ax.contour(X, Y, g2, levels = [0], colors = 'black')
return ax, fig
def average_from_list(solutions_list):
N = len(solutions_list)
f_best_all = np.zeros((N, 100))
for i in range(N):
f_best = np.array(solutions_list[i]['f_best_so_far'])
x_ind = np.array(solutions_list[i]['samples_at_iteration'])
for j in range(100):
ind = np.where(x_ind <= j+1)
if len(ind[0]) == 0:
f_best_all[i, j] = f_best[0]
else:
f_best_all[i, j] = f_best[ind][-1]
f_median = np.median(f_best_all, axis = 0)
# f_av = np.average(f_best_all, axis = 0)
# f_std = np.std(f_best_all, axis = 0)
f_min = np.min(f_best_all, axis = 0)
f_max = np.max(f_best_all, axis = 0)
return f_best_all, f_median, f_min, f_max
def fix_starting_points(complete_list, x0, init_out, only_starting_point = False):
if only_starting_point:
for i in range(len(complete_list)):
dict_out = complete_list[i]
f_arr = dict_out['f_best_so_far']
N_eval = len(f_arr)
g_arr = dict_out['g_best_so_far']
dict_out['x_best_so_far'][0] = np.array(x0)
dict_out['f_best_so_far'][0] = init_out[0]
dict_out['g_best_so_far'][0] = np.array(init_out[1])
complete_list[i] = dict_out
else:
for i in range(len(complete_list)):
dict_out = complete_list[i]
f_arr = dict_out['f_best_so_far']
N_eval = len(f_arr)
g_arr = dict_out['g_best_so_far']
dict_out['x_best_so_far'][0] = np.array(x0)
dict_out['f_best_so_far'][0] = init_out[0]
dict_out['g_best_so_far'][0] = np.array(init_out[1])
for j in range(1, N_eval):
if (g_arr[j] > 1e-3).any() or (init_out[0] < f_arr[j]):
dict_out['x_best_so_far'][j] = np.array(x0)
dict_out['f_best_so_far'][j] = init_out[0]
dict_out['g_best_so_far'][j] = np.array(init_out[1])
complete_list[i] = dict_out
return complete_list
def medianx_from_list(solutions_list, x0):
N = len(solutions_list)
_, N_x = np.array(solutions_list[0]['x_best_so_far']).shape
f_best_all =
|
np.zeros((N, 100))
|
numpy.zeros
|
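# Editor's note: a hypothetical worked example (not from the original script) of the
# best-so-far bookkeeping in average_from_list above. f_best holds the best objective
# after each iteration and x_ind the cumulative number of function evaluations; for
# every evaluation budget j+1 the curve reports the last recorded best value within
# that budget, falling back to the first value before any iteration has finished.
import numpy as np
f_best = np.array([5.0, 3.0, 2.5])
x_ind = np.array([4, 10, 30])
curve = np.empty(100)
for j in range(100):
    ind = np.where(x_ind <= j + 1)[0]
    curve[j] = f_best[0] if len(ind) == 0 else f_best[ind[-1]]
# curve[0] == 5.0 (budget 1, nothing finished yet), curve[9] == 3.0 (budget 10),
# and curve[29:] == 2.5 (budget 30 onwards).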
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
"""
MKL based backend interface and tensor data structure.
"""
from __future__ import division
from builtins import zip
import logging
from neon.backends import layer_mkl
from neon.backends.util.check_mkl import get_mkl_lib
from neon.backends.nervanacpu import CPUTensor, NervanaCPU, CustomNumpy
import ctypes
from cffi import FFI
from ctypes import c_longlong, c_float, c_double, c_int
import numpy as np
from neon.backends import math_cpu
from neon.backends.backend import OpTreeNode
import os
import sys
_none_slice = slice(None, None, None)
logger = logging.getLogger(__name__)
# TODO: enable this flag to find numerical problems
# np.seterr(all='raise')
class MKLTensor(CPUTensor):
"""
MKLTensor, special for MKL Backend
"""
_tensor = None
def __init__(self,
backend,
shape=None,
dtype=np.float32,
ary=None,
name=None,
persist_values=True,
base=None):
super(MKLTensor, self).__init__(backend, shape, dtype, ary, name,
persist_values, base)
# add 4 address for cpu layout and buffer, mkl layout and buffer
self.primitive = np.zeros((4), dtype=np.uint64)
if ary is not None:
self.primitive[1] = self._tensor.ctypes.data
self.shape5D = None
def get_prim(self):
return c_longlong(self.primitive.ctypes.data)
def clean_mkl(self):
self.primitive[2] = 0
self.primitive[3] = 0
def set_mkl(self, tensor):
self.primitive[0] = tensor.primitive[0]
self.primitive[2] = tensor.primitive[2]
self.primitive[3] = tensor.primitive[3]
self.shape5D = tensor.shape5D
def __str__(self):
"""
Returns a string representation of this Tensor.
Returns:
str: the representation.
"""
if self._tensor.base is not None:
base_id = id(self._tensor.base)
else:
base_id = id(self._tensor)
return ("MKLTensor(base 0x%x) name:%s shape:%s dtype:%s strides:%s"
" is_c_contiguous:%s" % (base_id, self.name, self.shape,
self.dtype, self._tensor.strides,
self._tensor.flags.c_contiguous))
def get(self):
"""
Return the array.
"""
self.backend.convert(self)
return self._tensor.copy()
def reshape(self, *shape):
"""
Return a reshaped view.
"""
newTensor = super(MKLTensor, self).reshape(*shape)
newTensor.set_mkl(self)
return newTensor
def _assign_right_to_left(left, right):
math_cpu.blas_copy(left, right)
# how to overload numpy_call_dict?
numpy_call_dict_mkl = {
# assign
"assign": _assign_right_to_left,
# zero_operand ops
# unary ops
"neg": lambda left: math_cpu.neg(left),
"abs": lambda left: np.abs(left),
"sgn": lambda left: np.sign(left),
"sqrt": lambda left: math_cpu.sqrt(left),
"sqr": lambda left: math_cpu.square(left),
"exp": lambda left: math_cpu.exp(left),
"log": lambda left: math_cpu.log(left),
"safelog": lambda left: math_cpu.safelog(left),
"exp2": lambda left: np.exp2(left),
"log2": lambda left: np.log2(left),
"sig": lambda left: 1. / (1. + np.exp(-left)),
"sig2": lambda left: 1. / (1. + np.exp2(-left)),
"tanh": lambda left: np.tanh(left),
"tanh2": lambda left: (np.exp2(2. * left) - 1.) / (
|
np.exp2(2. * left)
|
numpy.exp2
|
"""
Modules for numpy.where to make it more IDL-like.
"""
from __future__ import print_function
import numpy as np
def andwhere(data, val1, test1, val2=None, test2=None, return_indices=False):
""" Performs an 'and' where search, i.e., where(input > 3 and input < 4).
Using the parameter names, where(data test1 val1 and data test2 val2).
Can also do a single test, i.e., where(input > 3).
Example
-------
To check data > 1.1 and data < 2.5,
> out = where(data=[1,2,3], val1=1.1, test1='>', val2='2.5', test2='<')
> print(out)
[1] # index
> print(data[out])
[2] # value at index 1
Parameters
----------
data : list or array
val1 : int, float, or str
First value you wish to check 'data' against.
test1 : str
Either '<', '>', '<=', '>=', or '=='.
val2 : int, float, or str, optional
Second value you wish to check 'data' against.
test2 : str
Either '<', '>', '<=', '>=', or '=='.
return_indices : {True, False}
If True, returns only the indices of valid 'data' entries. If
False, returns only the values of 'data' corresponding to those
entries.
Returns
-------
If 'return_indices' is False,
data_cut<1/2> : array
The 'data' array cut down by the testing parameters.
If 'return_indices' is True:
The index array of 'data' corresponding to items cut down by the
testing parameters.
"""
# Transform the list to numpy array.
data = np.array(data)
# Transform the first equality tests.
if test1 == '<':
indices1 = np.where(data < val1)[0]
elif test1 == '>':
indices1 = np.where(data > val1)[0]
elif test1 == '<=':
indices1 = np.where(data <= val1)[0]
elif test1 == '>=':
indices1 = np.where(data >= val1)[0]
elif test1 == '==':
indices1 = np.where(data == val1)[0]
else:
print("Invalid equality check, {}".format(test1))
data_cut1 = data[indices1]
# If only one equality check entered, finish.
if val2 is None and test2 is None:
if return_indices:
return indices1
else:
return data_cut1
# If a second check was entered, continue
# Transform the second equality tests.
if test2 == '<':
indices2 = np.where(data_cut1 < val2)[0]
elif test2 == '>':
indices2 =
|
np.where(data_cut1 > val2)
|
numpy.where
|
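# Editor's note: a hypothetical side-by-side (not from the original module) showing the
# plain boolean-mask equivalent of the docstring example above; andwhere is a thin
# IDL-style wrapper around exactly this kind of chained numpy.where query.
import numpy as np
data = np.array([1, 2, 3])
idx = np.where((data > 1.1) & (data < 2.5))[0]
# idx == array([1]) and data[idx] == array([2]), the same index/value pair as in the
# andwhere docstring example above.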
"""
desisim.templates
=================
Functions to simulate spectral templates for DESI.
"""
from __future__ import division, print_function
import os
import sys
import numpy as np
import multiprocessing
from desiutil.log import get_logger, DEBUG
from desisim.io import empty_metatable
try:
from scipy import constants
C_LIGHT = constants.c/1000.0
except TypeError: # This can happen during documentation builds.
C_LIGHT = 299792458.0/1000.0
def _check_input_meta(input_meta, ignore_templateid=False):
log = get_logger()
cols = input_meta.colnames
if ignore_templateid:
required_cols = ('SEED', 'REDSHIFT', 'MAG', 'MAGFILTER')
else:
required_cols = ('TEMPLATEID', 'SEED', 'REDSHIFT', 'MAG', 'MAGFILTER')
if not np.all(np.in1d(required_cols, cols)):
log.warning('Input metadata table (input_meta) is missing one or more required columns {}'.format(
required_cols))
raise ValueError
def _check_star_properties(star_properties, WD=False):
log = get_logger()
cols = star_properties.colnames
required_cols = ['REDSHIFT', 'MAG', 'MAGFILTER', 'TEFF', 'LOGG']
if not WD:
required_cols = required_cols + ['FEH']
if not np.all(np.in1d(required_cols, cols)):
log.warning('Input star_properties is missing one or more required columns {}'.format(
required_cols))
raise ValueError
class EMSpectrum(object):
"""Construct a complete nebular emission-line spectrum.
Read the requisite external data files and initialize the output wavelength array.
The desired output wavelength array can either by passed directly using LOG10WAVE
(note: must be a log-base10, i.e., constant-velocity pixel array!) or via the MINWAVE,
MAXWAVE, and CDELT_KMS arguments.
In addition, three data files are required: ${DESISIM}/data/recombination_lines.ecsv,
${DESISIM}/data/forbidden_lines.ecsv, and ${DESISIM}/data/forbidden_mogs.fits.
TODO (@moustakas): Incorporate AGN-like emission-line ratios.
TODO (@moustakas): Think about how to best include dust attenuation in the lines.
Args:
minwave (float, optional): Minimum value of the output wavelength
array [Angstrom, default 3650].
maxwave (float, optional): Maximum value of the output wavelength
array [Angstrom, default 7075].
cdelt_kms (float, optional): Spacing of the output wavelength array
[km/s, default 20].
log10wave (numpy.ndarray, optional): Input/output wavelength array
(log10-Angstrom, default None).
include_mgii (bool, optional): Include Mg II in emission (default False).
Attributes:
log10wave (numpy.ndarray): Wavelength array constructed from the input arguments.
line (astropy.Table): Table containing the laboratory (vacuum) wavelengths and nominal
line-ratios for several dozen forbidden and recombination nebular emission lines.
forbidmog (GaussianMixtureModel): Table containing the mixture of Gaussian parameters
encoding the forbidden emission-line priors.
oiiidoublet (float32): Intrinsic [OIII] 5007/4959 doublet ratio (set by atomic physics).
niidoublet (float32): Intrinsic [NII] 6584/6548 doublet ratio (set by atomic physics).
Raises:
IOError: If the required data files are not found.
"""
def __init__(self, minwave=3650.0, maxwave=7075.0, cdelt_kms=20.0, log10wave=None,
include_mgii=False):
from pkg_resources import resource_filename
from astropy.table import Table, Column, vstack
from desiutil.sklearn import GaussianMixtureModel
log = get_logger()
# Build a wavelength array if one is not given.
if log10wave is None:
cdelt = cdelt_kms/C_LIGHT/np.log(10) # pixel size [log-10 A]
npix = int(round((np.log10(maxwave)-np.log10(minwave))/cdelt))+1
self.log10wave = np.linspace(np.log10(minwave), np.log10(maxwave), npix)
else:
self.log10wave = log10wave
# Read the files which contain the recombination and forbidden lines.
recombfile = resource_filename('desisim', 'data/recombination_lines.ecsv')
forbidfile = resource_filename('desisim', 'data/forbidden_lines.ecsv')
forbidmogfile = resource_filename('desisim','data/forbidden_mogs.fits')
if not os.path.isfile(recombfile):
log.fatal('Required data file {} not found!'.format(recombfile))
raise IOError
if not os.path.isfile(forbidfile):
log.fatal('Required data file {} not found!'.format(forbidfile))
raise IOError
if not os.path.isfile(forbidmogfile):
log.fatal('Required data file {} not found!'.format(forbidmogfile))
raise IOError
recombdata = Table.read(recombfile, format='ascii.ecsv', guess=False)
forbiddata = Table.read(forbidfile, format='ascii.ecsv', guess=False)
self.include_mgii = include_mgii
if not self.include_mgii:
forbiddata.remove_rows(np.where(forbiddata['name'] == 'MgII_2800a')[0])
forbiddata.remove_rows(np.where(forbiddata['name'] == 'MgII_2800b')[0])
line = vstack([recombdata,forbiddata], metadata_conflicts='silent')
nline = len(line)
line['flux'] = Column(np.ones(nline), dtype='f8') # integrated line-flux
line['amp'] = Column(np.ones(nline), dtype='f8') # amplitude
self.line = line[np.argsort(line['wave'])]
self.forbidmog = GaussianMixtureModel.load(forbidmogfile)
self.oiiidoublet = 2.8875 # [OIII] 5007/4959
self.niidoublet = 2.93579 # [NII] 6584/6548
self.oidoublet = 3.03502 # [OI] 6300/6363
self.siiidoublet = 2.4686 # [SIII] 9532/9069
self.ariiidoublet = 4.16988 # [ArIII] 7135/7751
self.mgiidoublet = 1.0 # MgII 2803/2796
def spectrum(self, oiiihbeta=None, oiihbeta=None, niihbeta=None,
siihbeta=None, oiidoublet=0.73, siidoublet=1.3,
linesigma=75.0, zshift=0.0, oiiflux=None, hbetaflux=None,
seed=None):
"""Build the actual emission-line spectrum.
Building the emission-line spectrum involves three main steps. First,
the oiiihbeta, oiihbeta, and niihbeta emission-line ratios are either
drawn from the empirical mixture of Gaussians (recommended!) or input
values are used to construct the line-ratios of the strongest optical
forbidden lines relative to H-beta.
Note that all three of oiiihbeta, oiihbeta, and niihbeta must be
specified simultaneously in order for them to be used.
Second, the requested [OII] 3726,29 and [SII] 6716,31 doublet
ratios are imposed.
And finally the full emission-line spectrum is self-consistently
normalized to *either* an integrated [OII] 3726,29 line-flux
*or* an integrated H-beta line-flux. Generally an ELG or LRG
spectrum will be normalized using [OII] while a BGS spectrum
will be normalized using H-beta. Note that the H-beta normalization
trumps the [OII] normalization (in the case that both are given).
TODO (@moustakas): Add a suitably scaled nebular continuum spectrum.
TODO (@moustakas): Add more emission lines (e.g., [NeIII] 3869).
Args:
oiiihbeta (float, optional): Desired logarithmic [OIII] 5007/H-beta
line-ratio (default -0.2). A sensible range is [-0.5,0.2].
oiihbeta (float, optional): Desired logarithmic [OII] 3726,29/H-beta
line-ratio (default 0.1). A sensible range is [0.0,0.4].
niihbeta (float, optional): Desired logarithmic [NII] 6584/H-beta
line-ratio (default -0.2). A sensible range is [-0.6,0.0].
siihbeta (float, optional): Desired logarithmic [SII] 6716/H-beta
line-ratio (default -0.3). A sensible range is [-0.5,0.2].
oiidoublet (float, optional): Desired [OII] 3726/3729 doublet ratio
(default 0.73).
siidoublet (float, optional): Desired [SII] 6716/6731 doublet ratio
(default 1.3).
linesigma (float, optional): Intrinsic emission-line velocity width/sigma
(default 75 km/s). A sensible range is [30-150].
zshift (float, optional): Perturb the emission lines from their laboratory
(rest) wavelengths by a factor 1+ZSHIFT (default 0.0). Use with caution!
oiiflux (float, optional): Normalize the emission-line spectrum to this
integrated [OII] emission-line flux (default None).
hbetaflux (float, optional): Normalize the emission-line spectrum to this
integrated H-beta emission-line flux (default None).
seed (int, optional): input seed for the random numbers.
Returns:
Tuple of (emspec, wave, line), where
emspec is an Array [npix] of flux values [erg/s/cm2/A];
wave is an Array [npix] of vacuum wavelengths corresponding to
FLUX [Angstrom, linear spacing];
line is a Table of emission-line parameters used to generate
the emission-line spectrum.
"""
from astropy.table import Table
rand = np.random.RandomState(seed)
line = self.line.copy()
nline = len(line)
# Convenience variables.
is4959 = np.where(line['name'] == '[OIII]_4959')[0]
is5007 = np.where(line['name'] == '[OIII]_5007')[0]
is6548 = np.where(line['name'] == '[NII]_6548')[0]
is6584 = np.where(line['name'] == '[NII]_6584')[0]
is6716 = np.where(line['name'] == '[SII]_6716')[0]
is6731 = np.where(line['name'] == '[SII]_6731')[0]
#is3869 = np.where(line['name'] == '[NeIII]_3869')[0]
is3726 = np.where(line['name'] == '[OII]_3726')[0]
is3729 = np.where(line['name'] == '[OII]_3729')[0]
is6300 = np.where(line['name'] == '[OI]_6300')[0]
is6363 = np.where(line['name'] == '[OI]_6363')[0]
is9532 = np.where(line['name'] == '[SIII]_9532')[0]
is9069 = np.where(line['name'] == '[SIII]_9069')[0]
is7135 = np.where(line['name'] == '[ArIII]_7135')[0]
is7751 = np.where(line['name'] == '[ArIII]_7751')[0]
# Draw from the MoGs for forbidden lines.
if oiiihbeta is None or oiihbeta is None or niihbeta is None or siihbeta is None:
oiiihbeta, oiihbeta, niihbeta, siihbeta = \
self.forbidmog.sample(random_state=rand)[0]
# Normalize [OIII] 4959, 5007.
line['ratio'][is5007] = 10**oiiihbeta # [OIII]/Hbeta
line['ratio'][is4959] = line['ratio'][is5007]/self.oiiidoublet
# Normalize [NII] 6548,6584.
line['ratio'][is6584] = 10**niihbeta # [NII]/Hbeta
line['ratio'][is6548] = line['ratio'][is6584]/self.niidoublet
# Normalize [SII] 6716,6731.
line['ratio'][is6716] = 10**siihbeta # [SII]/Hbeta
line['ratio'][is6731] = line['ratio'][is6716]/siidoublet
# Hack! For the following lines use constant ratios relative to H-beta--
# Normalize [OI]
line['ratio'][is6300] = 0.1 # [OI]6300/Hbeta
line['ratio'][is6363] = line['ratio'][is6300]/self.oidoublet
# Normalize [SIII]
line['ratio'][is9532] = 0.75 # [SIII]9532/Hbeta
line['ratio'][is9069] = line['ratio'][is9532]/self.siiidoublet
# Normalize [ArIII]
line['ratio'][is7135] = 0.04 # [ArIII]7135/Hbeta
line['ratio'][is7751] = line['ratio'][is7135]/self.ariiidoublet
# Normalize MgII
if self.include_mgii:
is2800a = np.where(line['name'] == 'MgII_2800a')[0]
is2800b = np.where(line['name'] == 'MgII_2800b')[0]
line['ratio'][is2800a] = 0.3 # MgII2796/Hbeta
line['ratio'][is2800b] = line['ratio'][is2800a]/self.mgiidoublet
## Normalize [NeIII] 3869.
#coeff = np.asarray([1.0876,-1.1647])
#disp = 0.1 # dex
#line['ratio'][is3869] = 10**(np.polyval(coeff,np.log10(oiiihbeta))+
# rand.normal(0.0,disp))
# Normalize [OII] 3727, split into [OII] 3726,3729.
factor1 = oiidoublet / (1.0+oiidoublet) # convert 3727-->3726
factor2 = 1.0/(1.0+oiidoublet) # convert 3727-->3729
line['ratio'][is3726] = factor1*10**oiihbeta
line['ratio'][is3729] = factor2*10**oiihbeta
# Normalize the full spectrum to the desired integrated [OII] 3727 or
# H-beta flux (but not both!)
if (oiiflux is None) and (hbetaflux is None):
line['flux'] = line['ratio']
if (hbetaflux is None) and (oiiflux is not None):
for ii in range(nline):
line['ratio'][ii] /= line['ratio'][is3729]
line['flux'][ii] = oiiflux*factor2*line['ratio'][ii]
if (hbetaflux is not None) and (oiiflux is None):
for ii in range(nline):
line['flux'][ii] = hbetaflux*line['ratio'][ii]
if (hbetaflux is not None) and (oiiflux is not None):
log.warning('Both hbetaflux and oiiflux were given; using hbetaflux.')
for ii in range(nline):
line['flux'][ii] = hbetaflux*line['ratio'][ii]
# Finally build the emission-line spectrum
log10sigma = linesigma /C_LIGHT / np.log(10) # line-width [log-10 Angstrom]
emspec = np.zeros_like(self.log10wave)
loglinewave = np.log10(line['wave'])
these = np.where( (loglinewave > self.log10wave.min()) *
(loglinewave < self.log10wave.max()) )[0]
if len(these) > 0:
theseline = line[these]
for ii in range(len(theseline)):
amp = theseline['flux'][ii] / theseline['wave'][ii] / np.log(10) # line-amplitude [erg/s/cm2/A]
thislinewave = np.log10(theseline['wave'][ii] * (1.0 + zshift))
theseline['amp'][ii] = amp / (np.sqrt(2.0 * np.pi) * log10sigma) # [erg/s/A]
# Construct the spectrum [erg/s/cm2/A, rest]
jj = np.abs( self.log10wave - thislinewave ) < 6 * log10sigma
emspec[jj] += amp * np.exp(-0.5 * (self.log10wave[jj]-thislinewave)**2 / log10sigma**2) \
/ (np.sqrt(2.0 * np.pi) * log10sigma)
else:
theseline = Table()
return emspec, 10**self.log10wave, theseline
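# Editor's note: a standalone NumPy check (not part of desisim) of the line-profile
# construction used in EMSpectrum.spectrum above: a Gaussian in log10-wavelength with
# amplitude flux / (wave * ln 10) integrates back to the requested line flux. The
# wavelength grid and line parameters below are hypothetical.
def _demo_line_profile_flux():
    log10wave = np.linspace(np.log10(3600.0), np.log10(10000.0), 200000)
    linewave, lineflux, linesigma = 5007.0, 1e-16, 75.0   # Angstrom, erg/s/cm2, km/s
    c_light = 299792.458                                  # km/s
    log10sigma = linesigma / c_light / np.log(10)
    amp = lineflux / linewave / np.log(10)
    profile = amp * np.exp(-0.5 * (log10wave - np.log10(linewave))**2 / log10sigma**2) \
        / (np.sqrt(2.0 * np.pi) * log10sigma)
    return np.trapz(profile, 10**log10wave)               # ~1e-16, i.e. lineflux is recovered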
class GALAXY(object):
"""Base class for generating Monte Carlo spectra of the various flavors of
galaxies (ELG, BGS, and LRG).
"""
def __init__(self, objtype='ELG', minwave=3600.0, maxwave=10000.0, cdelt=0.2, wave=None,
transient=None, tr_fluxratio=(0.01, 1.), tr_epoch=(-10,10),
include_mgii=False, colorcuts_function=None,
normfilter_north='BASS-r', normfilter_south='decam2014-r',
normline='OII', fracvdisp=(0.1, 40),
baseflux=None, basewave=None, basemeta=None):
"""Read the appropriate basis continuum templates, filter profiles and
initialize the output wavelength array.
Note:
Only a linearly-spaced output wavelength array is currently supported.
TODO (@moustakas): Incorporate size and morphological properties.
Args:
objtype (str): object type (default 'ELG')
minwave (float, optional): minimum value of the output wavelength
array (default 3600 Angstrom).
maxwave (float, optional): maximum value of the output wavelength
array (default 10000 Angstrom).
cdelt (float, optional): spacing of the output wavelength array
(default 0.2 Angstrom/pixel).
wave (numpy.ndarray): Input/output observed-frame wavelength array,
overriding the minwave, maxwave, and cdelt arguments (Angstrom).
colorcuts_function (function name): Function to use to select
templates that pass the color-cuts for the specified objtype Note
that this argument can also be a tuple of more than one selection
function to apply (e.g., desitarget.cuts.isBGS_faint and
desitarget.cuts.isBGS_bright) which will be applied in sequence
(default None).
normfilter_north (str): normalization filter for simulated "north"
templates. Each spectrum is normalized to the magnitude in this
filter bandpass (default 'BASS-r').
normfilter_south (str): corresponding normalization filter for "south"
(default 'decam2014-r').
normline (str): normalize the emission-line spectrum to the flux in
this emission line. The options are 'OII' (for ELGs, the default),
'HBETA' (for BGS), or None (for LRGs).
fracvdisp (tuple): two-element array which gives the fraction and
absolute number of unique velocity dispersion values. For example,
the default (0.1, 40) means there will be either int(0.1*nmodel) or
40 unique values, where nmodel is defined in
GALAXY.make_galaxy_templates, below.
transient (Transient, None): optional Transient object to integrate
into the spectrum (default None).
tr_fluxratio (tuple): optional flux ratio range for transient
and host spectrum. Default is (0.01, 1).
tr_epoch (tuple): optional epoch range for uniformly sampling a
transient spectrum, in days. Default is (-10, 10).
include_mgii (bool, optional): Include Mg II in emission (default False).
Attributes:
wave (numpy.ndarray): Output wavelength array (Angstrom).
baseflux (numpy.ndarray): Array [nbase,npix] of the base rest-frame
continuum spectra (erg/s/cm2/A).
basewave (numpy.ndarray): Array [npix] of rest-frame wavelengths
corresponding to BASEFLUX (Angstrom).
basemeta (astropy.Table): Table of meta-data [nbase] for each base template.
pixbound (numpy.ndarray): Pixel boundaries of BASEWAVE (Angstrom).
normfilt_north (speclite.filters instance): FilterSequence of
self.normfilter_north.
normfilt_south (speclite.filters instance): FilterSequence of
self.normfilter_south.
decamwise (speclite.filters instance): DECam2014-[g,r,z] and WISE2010-[W1,W2]
FilterSequence.
bassmzlswise (speclite.filters instance): BASS-[g,r], MzLS-z and
WISE2010-[W1,W2] FilterSequence.
Optional Attributes:
sne_baseflux (numpy.ndarray): Array [sne_nbase,sne_npix] of the base
rest-frame SNeIa spectra interpolated onto BASEWAVE [erg/s/cm2/A].
sne_basemeta (astropy.Table): Table of meta-data for each base SNeIa
spectra [sne_nbase].
rfilt_north (speclite.filters instance): BASS r-band FilterSequence.
rfilt_south (speclite.filters instance): DECam2014 r-band FilterSequence.
"""
from speclite import filters
from desisim import pixelsplines as pxs
self.objtype = objtype.upper()
self.colorcuts_function = colorcuts_function
self.normfilter_north = normfilter_north
self.normfilter_south = normfilter_south
self.normline = normline
# Initialize the output wavelength array (linear spacing) unless it is
# already provided.
if wave is None:
npix = int(round((maxwave-minwave) / cdelt))+1
wave = np.linspace(minwave, maxwave, npix)
self.wave = wave
# Read the rest-frame continuum basis spectra, if not specified.
if baseflux is None or basewave is None or basemeta is None:
from desisim.io import read_basis_templates
baseflux, basewave, basemeta = read_basis_templates(objtype=self.objtype)
self.baseflux = baseflux
self.basewave = basewave
self.basemeta = basemeta
# Initialize the EMSpectrum object with the same wavelength array as
# the "base" (continuum) templates so that we don't have to resample.
if self.normline is not None:
if self.normline.upper() not in ('OII', 'HBETA'):
log = get_logger()
log.warning('Unrecognized normline input {}; setting to None.'.format(self.normline))
self.normline = None
self.EM = EMSpectrum(log10wave=np.log10(self.basewave), include_mgii=include_mgii)
# Optionally access a transient model.
self.transient = transient
self.trans_fluxratiorange = tr_fluxratio
self.trans_epochrange = tr_epoch
if self.transient is not None:
self.rfilt_north = filters.load_filters('BASS-r')
self.rfilt_south = filters.load_filters('decam2014-r')
# Pixel boundaries
self.pixbound = pxs.cen2bound(basewave)
self.fracvdisp = fracvdisp
# Initialize the filter profiles.
self.normfilt_north = filters.load_filters(self.normfilter_north)
self.normfilt_south = filters.load_filters(self.normfilter_south)
self.decamwise = filters.load_filters('decam2014-g', 'decam2014-r', 'decam2014-z',
'wise2010-W1', 'wise2010-W2')
self.bassmzlswise = filters.load_filters('BASS-g', 'BASS-r', 'MzLS-z',
'wise2010-W1', 'wise2010-W2')
def _blurmatrix(self, vdisp, log=None):
"""Pre-compute the blur_matrix as a dictionary keyed by each unique value of
vdisp.
"""
from desisim import pixelsplines as pxs
uvdisp = list(set(vdisp))
log.debug('Populating blur matrix with {} unique velocity dispersion values.'.format(len(uvdisp)))
if len(uvdisp) > self.fracvdisp[1]:
log.warning('Slow code ahead! Consider reducing the number of input velocity dispersion values from {}.'.format(
len(uvdisp)))
blurmatrix = dict()
for uvv in uvdisp:
sigma = 1.0 + (self.basewave * uvv / C_LIGHT)
blurmatrix[uvv] = pxs.gauss_blur_matrix(self.pixbound, sigma).astype('f4')
return blurmatrix
def lineratios(self, nobj, oiiihbrange=(-0.5, 0.2), oiidoublet_meansig=(0.73, 0.05),
agnlike=False, rand=None):
"""Get the correct number and distribution of the forbidden and [OII] 3726/3729
doublet emission-line ratios. Note that the agnlike option is not yet
supported.
Supporting oiiihbrange needs a different (fast) approach. Suppressing
the code below for now until it's needed.
"""
if agnlike:
raise NotImplementedError('AGNLIKE option not yet implemented')
if rand is None:
rand = np.random.RandomState()
if oiidoublet_meansig[1] > 0:
oiidoublet = rand.normal(oiidoublet_meansig[0], oiidoublet_meansig[1], nobj)
else:
oiidoublet = np.repeat(oiidoublet_meansig[0], nobj)
# Sample from the MoG. This is not strictly correct because it ignores
# the prior on [OIII]/Hbeta, but let's revisit that later.
samp = self.EM.forbidmog.sample(nobj, random_state=rand)
oiiihbeta = samp[:, 0]
oiihbeta = samp[:, 1]
niihbeta = samp[:, 2]
siihbeta = samp[:, 3]
return oiidoublet, oiihbeta, niihbeta, siihbeta, oiiihbeta
def make_galaxy_templates(self, nmodel=100, zrange=(0.6, 1.6), magrange=(20.0, 22.0),
oiiihbrange=(-0.5, 0.2), logvdisp_meansig=(1.9, 0.15),
minlineflux=0.0, trans_filter='decam2014-r',
seed=None, redshift=None, mag=None, vdisp=None,
input_meta=None, nocolorcuts=False,
nocontinuum=False, agnlike=False, novdisp=False, south=True,
restframe=False, verbose=False):
"""Build Monte Carlo galaxy spectra/templates.
This function chooses random subsets of the basis continuum spectra (for
the given galaxy spectral type), constructs an emission-line spectrum
(if desired), redshifts, convolves by the intrinsic velocity dispersion,
and then finally normalizes each spectrum to a (generated or input)
apparent magnitude.
In detail, each (output) model gets randomly assigned a continuum
(basis) template; however, if that template doesn't pass the (spectral)
class-specific color cuts (at the specified redshift), then we iterate
through the rest of the templates until we find one that *does* pass the
color-cuts.
The user also (optionally) has a lot of flexibility over the
inputs/outputs and can specify any combination of the redshift, velocity
dispersion, and apparent magnitude (in the normalization filter
specified in the GALAXY.__init__ method) inputs. Alternatively, the
user can pass a complete metadata table, in order to easily regenerate
spectra on-the-fly (see the documentation for the input_meta argument,
below).
Note:
The default inputs are generally set to values which are appropriate
for ELGs, so be sure to alter them when generating templates for other
spectral classes.
Args:
nmodel (int, optional): Number of models to generate (default 100).
zrange (float, optional): Minimum and maximum redshift range. Defaults
to a uniform distribution between (0.6, 1.6).
magrange (float, optional): Minimum and maximum magnitude in the
bandpass specified by self.normfilter_south (if south=True) or
self.normfilter_north (if south=False). Defaults to a uniform
distribution between (20.0, 22.0).
oiiihbrange (float, optional): Minimum and maximum logarithmic
[OIII] 5007/H-beta line-ratio. Defaults to a uniform distribution
between (-0.5, 0.2).
logvdisp_meansig (float, optional): Logarithmic mean and sigma values
for the (Gaussian) stellar velocity dispersion distribution.
Defaults to log10-sigma=1.9+/-0.15 km/s.
minlineflux (float, optional): Minimum emission-line flux in the line
specified by self.normline (default 0 erg/s/cm2).
trans_filter (str): filter corresponding to TRANS_FLUXRATIORANGE (default
'decam2014-r').
seed (int, optional): Input seed for the random numbers.
redshift (float, optional): Input/output template redshifts. Array
size must equal nmodel. Ignores zrange input.
mag (float, optional): Input/output template magnitudes in the
bandpass specified by self.normfilter_south (if south=True) or
self.normfilter_north (if south=False). Array size must equal
nmodel. Ignores magrange input.
vdisp (float, optional): Input/output velocity dispersions in km/s.
Array size must equal nmodel.
input_meta (astropy.Table): *Input* metadata table with the following
required columns: TEMPLATEID, SEED, REDSHIFT, MAG, and MAGFILTER
(see desisim.io.empty_metatable for the expected data types). In
addition, in order to faithfully reproduce a previous set of
spectra, then VDISP must also be passed (normally returned in the
OBJMETA table). If present, then all other optional inputs (nmodel,
redshift, mag, zrange, logvdisp_meansig, etc.) are ignored.
nocolorcuts (bool, optional): Do not apply the color-cuts specified by
the self.colorcuts_function function (default False).
nocontinuum (bool, optional): Do not include the stellar continuum in
the output spectrum (useful for testing; default False). Note that
this option automatically sets nocolorcuts to True and transient to
False.
novdisp (bool, optional): Do not velocity-blur the spectrum (default
False).
agnlike (bool, optional): Adopt AGN-like emission-line ratios (e.g.,
for the LRGs and some BGS galaxies) (default False, meaning we adopt
star-formation-like line-ratios). Option not yet supported.
south (bool, optional): Apply "south" color-cuts using the DECaLS
filter system, otherwise apply the "north" (MzLS+BASS) color-cuts.
Defaults to True.
restframe (bool, optional): If True, return full resolution restframe
templates instead of resampled observer frame.
verbose (bool, optional): Be verbose!
Returns (outflux, wave, meta, objmeta) tuple where:
* outflux (numpy.ndarray): Array [nmodel, npix] of observed-frame
spectra (1e-17 erg/s/cm2/A).
* wave (numpy.ndarray): Observed-frame [npix] wavelength array (Angstrom).
* meta (astropy.Table): Table of meta-data [nmodel] for each output spectrum.
* objmeta (astropy.Table): Additional objtype-specific table data
[nmodel] for each spectrum.
Raises:
ValueError
"""
from speclite import filters
from desispec.interpolation import resample_flux
from astropy.table import Column
from astropy import units
if verbose:
log = get_logger(DEBUG)
else:
log = get_logger()
# Basic error checking and some preliminaries.
if nocontinuum:
log.warning('Forcing nocolorcuts=True, transient=None since nocontinuum=True.')
nocolorcuts = True
self.transient = None
npix = len(self.basewave)
nbase = len(self.basemeta)
# Optionally unpack a metadata table.
if input_meta is not None:
_check_input_meta(input_meta)
templateseed = input_meta['SEED'].data
rand = np.random.RandomState(templateseed[0])
redshift = input_meta['REDSHIFT'].data
mag = input_meta['MAG'].data
magfilter = np.char.strip(input_meta['MAGFILTER'].data)
nchunk = 1
nmodel = len(input_meta)
alltemplateid_chunk = [input_meta['TEMPLATEID'].data.reshape(nmodel, 1)]
meta, objmeta = empty_metatable(nmodel=nmodel, objtype=self.objtype)
else:
meta, objmeta = empty_metatable(nmodel=nmodel, objtype=self.objtype)
# Initialize the random seed.
rand = np.random.RandomState(seed)
templateseed = rand.randint(2**32, size=nmodel)
# Shuffle the basis templates and then split them into ~equal
# chunks, so we can speed up the calculations below.
chunksize = np.min((nbase, 50))
nchunk = int(np.ceil(nbase / chunksize))
alltemplateid = np.tile(np.arange(nbase), (nmodel, 1))
for tempid in alltemplateid:
rand.shuffle(tempid)
alltemplateid_chunk = np.array_split(alltemplateid, nchunk, axis=1)
# Assign redshift, magnitude, and velocity dispersion priors.
if redshift is None:
redshift = rand.uniform(zrange[0], zrange[1], nmodel)
if mag is None:
mag = rand.uniform(magrange[0], magrange[1], nmodel).astype('f4')
if south:
magfilter = np.repeat(self.normfilter_south, nmodel)
else:
magfilter = np.repeat(self.normfilter_north, nmodel)
if vdisp is None:
# Limit the number of unique velocity dispersion values.
nvdisp = int(np.max( ( np.min(
( np.round(nmodel * self.fracvdisp[0]), self.fracvdisp[1] ) ), 1 ) ))
if logvdisp_meansig[1] > 0:
vvdisp = 10**rand.normal(logvdisp_meansig[0], logvdisp_meansig[1], nvdisp)
else:
vvdisp = 10**np.repeat(logvdisp_meansig[0], nvdisp)
vdisp = rand.choice(vvdisp, nmodel)
if redshift is not None:
if len(redshift) != nmodel:
log.fatal('Redshift must be an nmodel-length array')
raise ValueError
if mag is not None:
if len(mag) != nmodel:
log.fatal('Mag must be an nmodel-length array')
raise ValueError
if vdisp is not None:
if len(vdisp) != nmodel:
log.fatal('Vdisp must be an nmodel-length array')
raise ValueError
vzero = np.where(vdisp <= 0)[0]
if len(vzero) > 0:
log.fatal('Velocity dispersion is zero or negative!')
raise ValueError
# Generate the (optional) distribution of transient model brightness
# and epoch priors or read them from the input table.
if self.transient is not None:
trans_rfluxratio = rand.uniform(self.trans_fluxratiorange[0], self.trans_fluxratiorange[1], nmodel)
log.debug('Flux ratio range: {:g} to {:g}'.format(self.trans_fluxratiorange[0], self.trans_fluxratiorange[1]))
log.debug('Generated ratios: {}'.format(trans_rfluxratio))
tmin = self.trans_epochrange[0]
if tmin < self.transient.mintime().to('day').value:
tmin = self.transient.mintime().to('day').value
tmin = int(tmin)
tmax = self.trans_epochrange[1]
if tmax > self.transient.maxtime().to('day').value:
tmax = self.transient.maxtime().to('day').value
tmax = int(tmax)
trans_epoch = rand.randint(tmin, tmax, nmodel)
log.debug('Epoch range: {:d} d to {:d} d'.format(tmin, tmax))
log.debug('Generated epochs: {}'.format(trans_epoch))
# Populate the object metadata table.
objmeta['TRANSIENT_MODEL'][:] = np.full(nmodel, self.transient.model)
objmeta['TRANSIENT_TYPE'][:] = np.full(nmodel, self.transient.type)
objmeta['TRANSIENT_EPOCH'][:] = trans_epoch
objmeta['TRANSIENT_RFLUXRATIO'][:] = trans_rfluxratio
# Precompute the velocity dispersion convolution matrix for each unique
# value of vdisp.
if nocontinuum or novdisp:
pass
else:
blurmatrix = self._blurmatrix(vdisp, log=log)
# Populate some of the metadata table.
objmeta['VDISP'][:] = vdisp
for key, value in zip(('REDSHIFT', 'MAG', 'MAGFILTER', 'SEED'),
(redshift, mag, magfilter, templateseed)):
meta[key][:] = value
# Load the unique set of MAGFILTERs. We could check against
# self.decamwise.names and self.bassmzlswise to see if the filters have
# already been loaded, but speed should not be an issue.
normfilt = dict()
for mfilter in np.unique(magfilter):
normfilt[mfilter] = filters.load_filters(mfilter)
# Optionally initialize the emission-line objects and line-ratios.
d4000 = self.basemeta['D4000']
# Build each spectrum in turn.
if restframe:
outflux = np.zeros([nmodel, len(self.basewave)])
else:
outflux = np.zeros([nmodel, len(self.wave)]) # [erg/s/cm2/A]
for ii in range(nmodel):
templaterand = np.random.RandomState(templateseed[ii])
zwave = self.basewave.astype(float) * (1.0 + redshift[ii])
# Optionally generate the emission-line spectrum for this model.
if self.normline is None:
emflux = np.zeros(npix)
normlineflux = np.zeros(nbase)
else:
# For speed, build just a single emission-line spectrum for all
# continuum templates. In detail the line-ratios should
# correlate with D(4000) or something else.
oiidoublet, oiihbeta, niihbeta, siihbeta, oiiihbeta = \
self.lineratios(nobj=1, oiiihbrange=oiiihbrange,
rand=templaterand, agnlike=agnlike)
for key, value in zip(('OIIIHBETA', 'OIIHBETA', 'NIIHBETA', 'SIIHBETA', 'OIIDOUBLET'),
(oiiihbeta, oiihbeta, niihbeta, siihbeta, oiidoublet)):
objmeta[key][ii] = value
if self.normline.upper() == 'OII':
ewoii = 10.0**(np.polyval(self.ewoiicoeff, d4000) + # rest-frame EW([OII]), Angstrom
templaterand.normal(0.0, 0.3, nbase))
normlineflux = self.basemeta['OII_CONTINUUM'].data * ewoii
emflux, emwave, emline = self.EM.spectrum(linesigma=vdisp[ii], seed=templateseed[ii],
oiidoublet=oiidoublet, oiiihbeta=oiiihbeta,
oiihbeta=oiihbeta, niihbeta=niihbeta,
siihbeta=siihbeta, oiiflux=1.0)
elif self.normline.upper() == 'HBETA':
ewhbeta = 10.0**(np.polyval(self.ewhbetacoeff, d4000) + \
templaterand.normal(0.0, 0.2, nbase)) * \
(self.basemeta['HBETA_LIMIT'].data == 0) # rest-frame H-beta, Angstrom
normlineflux = self.basemeta['HBETA_CONTINUUM'].data * ewhbeta
emflux, emwave, emline = self.EM.spectrum(linesigma=vdisp[ii], seed=templateseed[ii],
oiidoublet=oiidoublet, oiiihbeta=oiiihbeta,
oiihbeta=oiihbeta, niihbeta=niihbeta,
siihbeta=siihbeta, hbetaflux=1.0)
emflux /= (1+redshift[ii]) # [erg/s/cm2/A, @redshift[ii]]
# Optionally get the transient spectrum and normalization factor.
if self.transient is not None:
# Evaluate the flux where the model has defined wavelengths.
# Zero-pad all other wavelength values.
trans_restflux = np.zeros_like(self.basewave, dtype=float)
minw = self.transient.minwave().to('Angstrom').value
maxw = self.transient.maxwave().to('Angstrom').value
j = np.argwhere(self.basewave >= minw)[0,0]
k = np.argwhere(self.basewave <= maxw)[-1,0]
trans_restflux[j:k] = self.transient.flux(trans_epoch[ii], self.basewave[j:k]*units.Angstrom)
trans_norm = normfilt[magfilter[ii]].get_ab_maggies(trans_restflux, zwave)
for ichunk in range(nchunk):
if ii % 100 == 0 and ii > 0:
log.debug('Simulating {} template {}/{} in chunk {}/{}.'. \
format(self.objtype, ii, nmodel, ichunk+1, nchunk))
templateid = alltemplateid_chunk[ichunk][ii, :]
nbasechunk = len(templateid)
if nocontinuum:
restflux = np.tile(emflux, (nbasechunk, 1)) * \
np.tile(normlineflux[templateid], (npix, 1)).T
else:
restflux = self.baseflux[templateid, :] + np.tile(emflux, (nbasechunk, 1)) * \
np.tile(normlineflux[templateid], (npix, 1)).T
# Optionally add in the transient spectrum.
if self.transient is not None:
galnorm = normfilt[magfilter[ii]].get_ab_maggies(restflux, zwave)
trans_factor = galnorm[magfilter[ii]].data * trans_rfluxratio[ii]/trans_norm[magfilter[ii]].data
restflux += np.tile(trans_restflux, (nbasechunk, 1)) * np.tile(trans_factor, (npix, 1)).T
# Synthesize photometry to determine which models will pass the
# color-cuts.
if south:
maggies = self.decamwise.get_ab_maggies(restflux, zwave, mask_invalid=True)
else:
maggies = self.bassmzlswise.get_ab_maggies(restflux, zwave, mask_invalid=True)
if nocontinuum:
magnorm = np.repeat(10**(-0.4*mag[ii]), nbasechunk)
else:
normmaggies = np.array(normfilt[magfilter[ii]].get_ab_maggies(
restflux, zwave, mask_invalid=True)[magfilter[ii]])
magnorm = 10**(-0.4*mag[ii]) / normmaggies
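                # magnorm is the per-template scale factor that makes the synthesized
                # magnitude in the normalization filter match the requested mag[ii]
                # (requested maggies divided by the synthesized maggies).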
synthnano = dict()
for key in maggies.columns:
synthnano[key] = 1E9 * maggies[key] * magnorm # nanomaggies
zlineflux = normlineflux[templateid] * magnorm
if south:
gflux, rflux, zflux, w1flux, w2flux = synthnano['decam2014-g'], \
synthnano['decam2014-r'], synthnano['decam2014-z'], \
synthnano['wise2010-W1'], synthnano['wise2010-W2']
else:
gflux, rflux, zflux, w1flux, w2flux = synthnano['BASS-g'], \
synthnano['BASS-r'], synthnano['MzLS-z'], \
synthnano['wise2010-W1'], synthnano['wise2010-W2']
if nocolorcuts or self.colorcuts_function is None:
colormask = np.repeat(1, nbasechunk)
else:
if self.objtype == 'BGS':
_colormask = []
for targtype in ('bright', 'faint', 'wise'):
_colormask.append(self.colorcuts_function(
gflux=gflux, rflux=rflux, zflux=zflux,
w1flux=w1flux, w2flux=w2flux, south=south,
targtype=targtype))
colormask = np.any( np.ma.getdata(np.vstack(_colormask)), axis=0 )
else:
colormask = self.colorcuts_function(gflux=gflux, rflux=rflux, zflux=zflux,
w1flux=w1flux, w2flux=w2flux, south=south)
# If the color-cuts pass then populate the output flux vector
# (suitably normalized) and metadata table, convolve with the
# velocity dispersion, resample, and finish up. Note that the
# emission lines already have the velocity dispersion
# line-width.
if np.any(colormask*(zlineflux >= minlineflux)):
this = templaterand.choice(np.where(colormask * (zlineflux >= minlineflux))[0]) # Pick one randomly.
tempid = templateid[this]
thisemflux = emflux * normlineflux[templateid[this]]
if nocontinuum or novdisp:
blurflux = restflux[this, :] * magnorm[this]
else:
blurflux = ((blurmatrix[vdisp[ii]] * (restflux[this, :] - thisemflux)) +
thisemflux) * magnorm[this]
if restframe:
outflux[ii, :] = blurflux
else:
outflux[ii, :] = resample_flux(self.wave, zwave, blurflux, extrapolate=True)
meta['TEMPLATEID'][ii] = tempid
meta['FLUX_G'][ii] = gflux[this]
meta['FLUX_R'][ii] = rflux[this]
meta['FLUX_Z'][ii] = zflux[this]
meta['FLUX_W1'][ii] = w1flux[this]
meta['FLUX_W2'][ii] = w2flux[this]
objmeta['D4000'][ii] = d4000[tempid]
if self.normline is not None:
if self.normline == 'OII':
objmeta['OIIFLUX'][ii] = zlineflux[this]
objmeta['EWOII'][ii] = ewoii[tempid]
elif self.normline == 'HBETA':
objmeta['HBETAFLUX'][ii] = zlineflux[this]
objmeta['EWHBETA'][ii] = ewhbeta[tempid]
break
# Check to see if any spectra could not be computed.
success = (np.sum(outflux, axis=1) > 0)*1
if ~np.all(success):
log.warning('{} spectra could not be computed given the input priors!'.\
format(np.sum(success == 0)))
if restframe:
outwave = self.basewave
else:
outwave = self.wave
return 1e17 * outflux, outwave, meta, objmeta
class ELG(GALAXY):
"""Generate Monte Carlo spectra of emission-line galaxies (ELGs)."""
def __init__(self, minwave=3600.0, maxwave=10000.0, cdelt=0.2, wave=None,
transient=None, tr_fluxratio=(0.01, 1.), tr_epoch=(-10,10), include_mgii=False, colorcuts_function=None,
normfilter_north='BASS-r', normfilter_south='decam2014-r',
baseflux=None, basewave=None, basemeta=None):
"""Initialize the ELG class. See the GALAXY.__init__ method for documentation
on the arguments plus the inherited attributes.
Note:
By default, we assume the emission-line spectra are normalized to the
integrated [OII] emission-line flux.
Args:
Attributes:
ewoiicoeff (float, array): empirically derived coefficients to map
D(4000) to EW([OII]).
Raises:
"""
if colorcuts_function is None:
from desitarget.cuts import isELG_colors as colorcuts_function
super(ELG, self).__init__(objtype='ELG', minwave=minwave, maxwave=maxwave,
cdelt=cdelt, wave=wave, normline='OII',
colorcuts_function=colorcuts_function,
normfilter_north=normfilter_north, normfilter_south=normfilter_south,
baseflux=baseflux, basewave=basewave, basemeta=basemeta,
transient=transient, tr_fluxratio=tr_fluxratio, tr_epoch=tr_epoch, include_mgii=include_mgii)
self.ewoiicoeff = [1.34323087, -5.02866474, 5.43842874]
def make_templates(self, nmodel=100, zrange=(0.6, 1.6), magrange=(21.0, 23.4),
oiiihbrange=(-0.5, 0.2), logvdisp_meansig=(1.9, 0.15),
minoiiflux=0.0, trans_filter='decam2014-r',
redshift=None, mag=None, vdisp=None, seed=None, input_meta=None,
nocolorcuts=False, nocontinuum=False, agnlike=False,
novdisp=False, south=True, restframe=False, verbose=False):
"""Build Monte Carlo ELG spectra/templates.
See the GALAXY.make_galaxy_templates function for documentation on the
arguments and inherited attributes. Here we only document the arguments
that are specific to the ELG class.
Args:
oiiihbrange (float, optional): Minimum and maximum logarithmic [OIII]
5007/H-beta line-ratio. Defaults to a uniform distribution between
(-0.5, 0.2).
logvdisp_meansig (float, optional): Logarithmic mean and sigma values
for the (Gaussian) stellar velocity dispersion distribution.
Defaults to log10-sigma=(1.9+/-0.15) km/s
minoiiflux (float, optional): Minimum [OII] 3727 flux (default 0.0
erg/s/cm2).
Returns (outflux, wave, meta, objmeta) tuple where:
* outflux (numpy.ndarray): Array [nmodel, npix] of observed-frame
spectra (1e-17 erg/s/cm2/A).
* wave (numpy.ndarray): Observed-frame [npix] wavelength array (Angstrom).
* meta (astropy.Table): Table of meta-data [nmodel] for each output spectrum.
* objmeta (astropy.Table): Additional objtype-specific table data
[nmodel] for each spectrum.
Raises:
"""
result = self.make_galaxy_templates(nmodel=nmodel, zrange=zrange, magrange=magrange,
oiiihbrange=oiiihbrange, logvdisp_meansig=logvdisp_meansig,
minlineflux=minoiiflux, redshift=redshift, vdisp=vdisp,
mag=mag, trans_filter=trans_filter,
seed=seed, input_meta=input_meta,
nocolorcuts=nocolorcuts, nocontinuum=nocontinuum, agnlike=agnlike,
novdisp=novdisp, south=south, restframe=restframe, verbose=verbose)
return result
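# Hedged usage sketch for the galaxy template classes (the values below are
# illustrative only, and the default basis templates must be readable via
# desisim.io.read_basis_templates, e.g. through $DESI_BASIS_TEMPLATES):
#
#   elg = ELG()
#   flux, wave, meta, objmeta = elg.make_templates(nmodel=10, seed=1)
#
# flux is [nmodel, npix] in 1e-17 erg/s/cm2/A on the observed-frame wavelength
# grid wave, as documented in make_templates above.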
class BGS(GALAXY):
"""Generate Monte Carlo spectra of bright galaxy survey galaxies (BGSs)."""
def __init__(self, minwave=3600.0, maxwave=10000.0, cdelt=0.2, wave=None,
transient=None, tr_fluxratio=(0.01, 1.), tr_epoch=(-10,10), include_mgii=False, colorcuts_function=None,
normfilter_north='BASS-r', normfilter_south='decam2014-r',
baseflux=None, basewave=None, basemeta=None):
"""Initialize the BGS class. See the GALAXY.__init__ method for documentation
on the arguments plus the inherited attributes.
Note:
By default, we assume the emission-line spectra are normalized to the
integrated H-beta emission-line flux.
Args:
Attributes:
ewhbetacoeff (float, array): empirically derived coefficients to map
D(4000) to EW(H-beta).
Raises:
"""
if colorcuts_function is None:
from desitarget.cuts import isBGS_colors as colorcuts_function
super(BGS, self).__init__(objtype='BGS', minwave=minwave, maxwave=maxwave,
cdelt=cdelt, wave=wave, normline='HBETA',
colorcuts_function=colorcuts_function,
normfilter_north=normfilter_north, normfilter_south=normfilter_south,
baseflux=baseflux, basewave=basewave, basemeta=basemeta,
transient=transient, tr_fluxratio=tr_fluxratio, tr_epoch=tr_epoch, include_mgii=include_mgii)
self.ewhbetacoeff = [1.28520974, -4.94408026, 4.9617704]
def make_templates(self, nmodel=100, zrange=(0.01, 0.4), magrange=(15.0, 20.0),
oiiihbrange=(-1.3, 0.6), logvdisp_meansig=(2.0, 0.17),
minhbetaflux=0.0, trans_filter='decam2014-r',
redshift=None, mag=None, vdisp=None, seed=None, input_meta=None,
nocolorcuts=False, nocontinuum=False, agnlike=False,
novdisp=False, south=True, restframe=False, verbose=False):
"""Build Monte Carlo BGS spectra/templates.
See the GALAXY.make_galaxy_templates function for documentation on the
arguments and inherited attributes. Here we only document the
arguments that are specific to the BGS class.
Args:
oiiihbrange (float, optional): Minimum and maximum logarithmic [OIII]
5007/H-beta line-ratio. Defaults to a uniform distribution between
(-1.3, 0.6).
logvdisp_meansig (float, optional): Logarithmic mean and sigma values
for the (Gaussian) stellar velocity dispersion distribution.
Defaults to log10-sigma=(2.0+/-0.17) km/s
minhbetaflux (float, optional): Minimum H-beta flux (default 0.0
erg/s/cm2).
Returns (outflux, wave, meta, objmeta) tuple where:
* outflux (numpy.ndarray): Array [nmodel, npix] of observed-frame
spectra (1e-17 erg/s/cm2/A).
* wave (numpy.ndarray): Observed-frame [npix] wavelength array (Angstrom).
* meta (astropy.Table): Table of meta-data [nmodel] for each output spectrum.
* objmeta (astropy.Table): Additional objtype-specific table data
[nmodel] for each spectrum.
Raises:
"""
result = self.make_galaxy_templates(nmodel=nmodel, zrange=zrange, magrange=magrange,
oiiihbrange=oiiihbrange, logvdisp_meansig=logvdisp_meansig,
minlineflux=minhbetaflux, redshift=redshift, vdisp=vdisp,
mag=mag, trans_filter=trans_filter,
seed=seed, input_meta=input_meta,
nocolorcuts=nocolorcuts, nocontinuum=nocontinuum, agnlike=agnlike,
novdisp=novdisp, south=south, restframe=restframe, verbose=verbose)
return result
class LRG(GALAXY):
"""Generate Monte Carlo spectra of luminous red galaxies (LRGs)."""
def __init__(self, minwave=3600.0, maxwave=10000.0, cdelt=0.2, wave=None,
transient=None, tr_fluxratio=(0.01, 1.), tr_epoch=(-10,10), colorcuts_function=None,
normfilter_north='MzLS-z', normfilter_south='decam2014-z',
baseflux=None, basewave=None, basemeta=None):
"""Initialize the LRG class. See the GALAXY.__init__ method for documentation
on the arguments plus the inherited attributes.
Note:
Emission lines (with presumably AGN-like line-ratios) are not yet
included.
Args:
Attributes:
Raises:
"""
if colorcuts_function is None:
from desitarget.cuts import isLRG_colors as colorcuts_function
super(LRG, self).__init__(objtype='LRG', minwave=minwave, maxwave=maxwave,
cdelt=cdelt, wave=wave, normline=None,
colorcuts_function=colorcuts_function,
normfilter_north=normfilter_north, normfilter_south=normfilter_south,
baseflux=baseflux, basewave=basewave, basemeta=basemeta,
transient=transient, tr_fluxratio=tr_fluxratio, tr_epoch=tr_epoch)
def make_templates(self, nmodel=100, zrange=(0.5, 1.0), magrange=(19.0, 20.2),
logvdisp_meansig=(2.3, 0.1),
trans_filter='decam2014-r', redshift=None, mag=None, vdisp=None,
seed=None, input_meta=None, nocolorcuts=False,
novdisp=False, agnlike=False, south=True, restframe=False, verbose=False):
"""Build Monte Carlo BGS spectra/templates.
See the GALAXY.make_galaxy_templates function for documentation on the
arguments and inherited attributes. Here we only document the
arguments that are specific to the LRG class.
Args:
logvdisp_meansig (float, optional): Logarithmic mean and sigma values
for the (Gaussian) stellar velocity dispersion distribution.
Defaults to log10-sigma=(2.3+/-0.1) km/s
agnlike (bool, optional): adopt AGN-like emission-line ratios (not yet
supported; defaults False).
Returns (outflux, wave, meta, objmeta) tuple where:
* outflux (numpy.ndarray): Array [nmodel, npix] of observed-frame
spectra (1e-17 erg/s/cm2/A).
* wave (numpy.ndarray): Observed-frame [npix] wavelength array (Angstrom).
* meta (astropy.Table): Table of meta-data [nmodel] for each output spectrum.
* objmeta (astropy.Table): Additional objtype-specific table data
[nmodel] for each spectrum.
Raises:
"""
result = self.make_galaxy_templates(nmodel=nmodel, zrange=zrange, magrange=magrange,
logvdisp_meansig=logvdisp_meansig, redshift=redshift,
vdisp=vdisp, mag=mag,
trans_filter=trans_filter, seed=seed, input_meta=input_meta,
nocolorcuts=nocolorcuts,
agnlike=agnlike, novdisp=novdisp, south=south,
restframe=restframe, verbose=verbose)
# Pre-v2.4 templates:
        if 'ZMETAL' in self.basemeta.colnames:
            outflux, wave, meta, objmeta = result  # unpack so the metadata can be updated in place
            good = np.where(meta['TEMPLATEID'] != -1)[0]
if len(good) > 0:
meta['ZMETAL'][good] = self.basemeta[meta['TEMPLATEID'][good]]['ZMETAL']
meta['AGE'][good] = self.basemeta[meta['TEMPLATEID'][good]]['AGE']
return result
class SUPERSTAR(object):
"""Base class for generating Monte Carlo spectra of the various flavors of stars."""
def __init__(self, objtype='STAR', subtype='', minwave=3600.0, maxwave=10000.0, cdelt=0.2,
wave=None, normfilter_north='BASS-r', normfilter_south='decam2014-r',
colorcuts_function=None, baseflux=None, basewave=None, basemeta=None):
"""Read the appropriate basis continuum templates, filter profiles and
initialize the output wavelength array.
Note:
Only a linearly-spaced output wavelength array is currently supported.
Args:
objtype (str): type of object to simulate (default STAR).
subtype (str, optional): stellar subtype, currently only for white
dwarfs. The choices are DA and DB and the default is DA.
minwave (float, optional): minimum value of the output wavelength
array (default 3600 Angstrom).
            maxwave (float, optional): maximum value of the output wavelength
array (default 10000 Angstrom).
cdelt (float, optional): spacing of the output wavelength array
                (default 0.2 Angstrom/pixel).
wave (numpy.ndarray): Input/output observed-frame wavelength array,
overriding the minwave, maxwave, and cdelt arguments (Angstrom).
colorcuts_function (function name): Function to use to select targets
(must accept a "south" Boolean argument for selecting templates that
                pass the "north" vs "south" color-cuts) (default None).
normfilter_north (str): normalization filter for simulated "north"
templates. Each spectrum is normalized to the magnitude in this
filter bandpass (default 'BASS-r').
normfilter_south (str): corresponding normalization filter for "south"
(default 'decam2014-r').
Attributes:
wave (numpy.ndarray): Output wavelength array (Angstrom).
baseflux (numpy.ndarray): Array [nbase,npix] of the base rest-frame
continuum spectra (erg/s/cm2/A).
basewave (numpy.ndarray): Array [npix] of rest-frame wavelengths
corresponding to BASEFLUX (Angstrom).
basemeta (astropy.Table): Table of meta-data [nbase] for each base template.
normfilt_north (speclite.filters instance): FilterSequence of
self.normfilter_north.
normfilt_south (speclite.filters instance): FilterSequence of
self.normfilter_south.
sdssrfilt (speclite.filters instance): SDSS2010-r FilterSequence.
decamwise (speclite.filters instance): DECam2014-[g,r,z] and WISE2010-[W1,W2]
FilterSequence.
bassmzlswise (speclite.filters instance): BASS-[g,r], MzLS-z and
WISE2010-[W1,W2] FilterSequence.
"""
from speclite import filters
self.objtype = objtype.upper()
self.subtype = subtype.upper()
self.colorcuts_function = colorcuts_function
self.normfilter_north = normfilter_north
self.normfilter_south = normfilter_south
# Initialize the output wavelength array (linear spacing) unless it is
# already provided.
if wave is None:
npix = int(round((maxwave-minwave) / cdelt))+1
wave = np.linspace(minwave, maxwave, npix)
self.wave = wave
# Read the rest-frame continuum basis spectra, if not specified.
if baseflux is None or basewave is None or basemeta is None:
from desisim.io import read_basis_templates
baseflux, basewave, basemeta = read_basis_templates(objtype=self.objtype,
subtype=self.subtype)
self.baseflux = baseflux
self.basewave = basewave
self.basemeta = basemeta
# Initialize the filter profiles.
self.normfilt_north = filters.load_filters(self.normfilter_north)
self.normfilt_south = filters.load_filters(self.normfilter_south)
self.sdssrfilt = filters.load_filters('sdss2010-r')
self.decamwise = filters.load_filters('decam2014-g', 'decam2014-r', 'decam2014-z',
'wise2010-W1', 'wise2010-W2')
self.bassmzlswise = filters.load_filters('BASS-g', 'BASS-r', 'MzLS-z',
'wise2010-W1', 'wise2010-W2')
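    # Hedged usage sketch for this class and its subclasses (illustrative values;
    # the basis templates are read via desisim.io.read_basis_templates unless
    # baseflux/basewave/basemeta are supplied):
    #
    #   star = SUPERSTAR(objtype='STAR')
    #   flux, wave, meta = star.make_star_templates(nmodel=10, seed=1)
    #
    # Per the make_star_templates docstring below, flux is [nmodel, npix] in
    # 1e-17 erg/s/cm2/A and meta is an astropy Table of per-spectrum metadata.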
def make_star_templates(self, nmodel=100, vrad_meansig=(0.0, 200.0),
magrange=(18.0, 22.0), seed=None, redshift=None,
mag=None, input_meta=None, star_properties=None,
nocolorcuts=False, south=True, restframe=False,
verbose=False):
"""Build Monte Carlo spectra/templates for various flavors of stars.
This function chooses random subsets of the continuum spectra for the
type of star specified by OBJTYPE, adds radial velocity jitter, applies
the targeting color-cuts, and then normalizes the spectrum to the
magnitude in the given filter.
The user also (optionally) has a lot of flexibility over the
inputs/outputs and can specify any combination of the radial velocity
and apparent magnitude (in the normalization filter specified in the
GALAXY.__init__ method) inputs. Alternatively, the user can pass a
complete metadata table, in order to easily regenerate spectra
on-the-fly (see the documentation for the input_meta argument, below).
Finally, the user can pass a star_properties table in order to
interpolate the base templates to non-gridded values of [Fe/H], logg,
and Teff.
Note:
* The default inputs are generally set to values which are appropriate
for generic stars, so be sure to alter them when generating
templates for other spectral classes.
* If both input_meta and star_properties are passed, then
star_properties is ignored.
Args:
nmodel (int, optional): Number of models to generate (default 100).
vrad_meansig (float, optional): Mean and sigma (standard deviation) of the
radial velocity "jitter" (in km/s) that should be included in each
spectrum. Defaults to a normal distribution with a mean of zero and
sigma of 200 km/s.
magrange (float, optional): Minimum and maximum magnitude in the
bandpass specified by self.normfilter_south (if south=True) or
self.normfilter_north (if south=False). Defaults to a uniform
distribution between (18, 22).
seed (int, optional): input seed for the random numbers.
redshift (float, optional): Input/output (dimensionless) radial
velocity. Array size must equal nmodel. Ignores vrad_meansig
input.
mag (float, optional): Input/output template magnitudes in the
bandpass specified by self.normfilter_south (if south=True) or
self.normfilter_north (if south=False). Array size must equal
nmodel. Ignores magrange input.
input_meta (astropy.Table): *Input* metadata table with the following
required columns: TEMPLATEID, SEED, REDSHIFT, MAG, and MAGFILTER
(see desisim.io.empty_metatable for the expected data types). If
present, then all other optional inputs (nmodel, redshift, mag,
zrange, vrad_meansig, etc.) are ignored.
star_properties (astropy.Table): *Input* table with the following
required columns: REDSHIFT, MAG, MAGFILTER, TEFF, LOGG, and FEH
(except for WDs, which don't need to have an FEH column).
Optionally, SEED can also be included in the table. When this table
is passed, the basis templates are interpolated to the desired
physical values provided, enabling large numbers of mock stellar
spectra to be generated with physically consistent properties.
However, be warned that the interpolation scheme is very
rudimentary.
nocolorcuts (bool, optional): Do not apply the color-cuts specified by
the self.colorcuts_function function (default False).
south (bool, optional): Apply "south" color-cuts using the DECaLS
filter system, otherwise apply the "north" (MzLS+BASS) color-cuts.
Defaults to True.
restframe (bool, optional): If True, return full resolution restframe
templates instead of resampled observer frame.
verbose (bool, optional): Be verbose!
Returns (outflux, wave, meta) tuple where:
* outflux (numpy.ndarray): Array [nmodel, npix] of observed-frame
spectra (1e-17 erg/s/cm2/A).
* wave (numpy.ndarray): Observed-frame [npix] wavelength array (Angstrom).
* meta (astropy.Table): Table of meta-data [nmodel] for each output spectrum.
Raises:
ValueError
"""
from speclite import filters
from desispec.interpolation import resample_flux
if verbose:
log = get_logger(DEBUG)
else:
log = get_logger()
npix = len(self.basewave)
nbase = len(self.basemeta)
# Optionally unpack a metadata table.
if input_meta is not None:
nmodel = len(input_meta)
_check_input_meta(input_meta)
templateseed = input_meta['SEED'].data
redshift = input_meta['REDSHIFT'].data
mag = input_meta['MAG'].data
magfilter = np.char.strip(input_meta['MAGFILTER'].data)
nchunk = 1
alltemplateid_chunk = [input_meta['TEMPLATEID'].data.reshape(nmodel, 1)]
else:
if star_properties is not None:
nmodel = len(star_properties)
_check_star_properties(star_properties, WD=self.objtype=='WD')
redshift = star_properties['REDSHIFT'].data
mag = star_properties['MAG'].data
magfilter = np.char.strip(star_properties['MAGFILTER'].data)
if 'SEED' in star_properties.keys():
templateseed = star_properties['SEED'].data
else:
rand = np.random.RandomState(seed)
templateseed = rand.randint(2**32, size=nmodel)
if 'FEH' in self.basemeta.columns:
base_properties = np.array([self.basemeta['LOGG'], self.basemeta['TEFF'],
self.basemeta['FEH']]).T.astype('f4')
input_properties = (star_properties['LOGG'].data, star_properties['TEFF'].data,
star_properties['FEH'].data)
else:
base_properties = np.array([self.basemeta['LOGG'], self.basemeta['TEFF']]).T.astype('f4')
input_properties = (star_properties['LOGG'].data, star_properties['TEFF'].data)
nchunk = 1
alltemplateid_chunk = [np.arange(nmodel).reshape(nmodel, 1)]
else:
# Initialize the random seed.
rand = np.random.RandomState(seed)
templateseed = rand.randint(2**32, size=nmodel)
# Shuffle the basis templates and then split them into ~equal chunks, so
# we can speed up the calculations below.
chunksize = np.min((nbase, 50))
nchunk = int(np.ceil(nbase / chunksize))
alltemplateid = np.tile(np.arange(nbase), (nmodel, 1))
for tempid in alltemplateid:
rand.shuffle(tempid)
alltemplateid_chunk = np.array_split(alltemplateid, nchunk, axis=1)
# Assign radial velocity and magnitude priors.
if redshift is None:
if vrad_meansig[1] > 0:
vrad = rand.normal(vrad_meansig[0], vrad_meansig[1], nmodel)
else:
vrad = np.repeat(vrad_meansig[0], nmodel)
redshift = np.array(vrad) / C_LIGHT
if mag is None:
mag = rand.uniform(magrange[0], magrange[1], nmodel).astype('f4')
if south:
magfilter =
|
np.repeat(self.normfilter_south, nmodel)
|
numpy.repeat
|
"""
fit motor circle task with external data (not simulated)
"""
import sys, os
import numpy as np
import pandas as pd
import stan
import arviz as az
import nest_asyncio
nest_asyncio.apply()
from matplotlib import pyplot as plt
import seaborn as sns
sys.path.append('.')
from simulations.sim_bandit3arm_combined import bandit_combined_preprocess_func
from visualisation.hdi_compare import hdi, hdi_diff
def extract_ind_results(df,pars_ind,data_dict):
out_col_names = []
out_df = np.zeros([data_dict['N'],len(pars_ind)*2])
i=0
for ind_par in pars_ind:
pattern = r'\A'+ind_par+r'.\d+'
out_col_names.append(ind_par+'_mean')
out_col_names.append(ind_par+'_std')
mean_val=df.iloc[:,df.columns.str.contains(pattern)].mean(axis=0).to_frame()
std_val=df.iloc[:,df.columns.str.contains(pattern)].std(axis=0).to_frame()
out_df[:,2*i:2*(i+1)] = np.concatenate([mean_val.values,std_val.values],axis=1)
i+=1
out_df = pd.DataFrame(out_df,columns=out_col_names)
beh_col_names = ['total','avg_rt','std_rt']
total_np = 100+data_dict['rew'].sum(axis=1,keepdims=True)+data_dict['los'].sum(axis=1,keepdims=True)
avg_rt_np = data_dict['rt'].mean(axis=1,keepdims=True)
std_rt_np = data_dict['rt'].std(axis=1,keepdims=True)
beh_df = pd.DataFrame(
|
np.concatenate([total_np,avg_rt_np,std_rt_np],axis=1)
|
numpy.concatenate
|
# pylint:disable=unused-variable
from .helpers import SeededTest
from pymc3 import Model, gp, sample, Uniform
import theano
import theano.tensor as tt
import numpy as np
import numpy.testing as npt
import pytest
class TestZero(object):
def test_value(self):
X = np.linspace(0,1,10)[:,None]
with Model() as model:
zero_mean = gp.mean.Zero()
M = theano.function([], zero_mean(X))()
assert np.all(M==0)
assert M.shape == (10,1)
class TestConstant(object):
def test_value(self):
X = np.linspace(0,1,10)[:,None]
with Model() as model:
const_mean = gp.mean.Constant(6)
M = theano.function([], const_mean(X))()
assert np.all(M==6)
assert M.shape == (10,1)
class TestLinearMean(object):
def test_value(self):
X = np.linspace(0,1,10)[:,None]
with Model() as model:
linear_mean = gp.mean.Linear(2, 0.5)
M = theano.function([], linear_mean(X))()
npt.assert_allclose(M[1, 0], 0.7222, atol=1e-3)
class TestCovAdd(object):
def test_symadd_cov(self):
X = np.linspace(0,1,10)[:,None]
with Model() as model:
cov1 = gp.cov.ExpQuad(1, 0.1)
cov2 = gp.cov.ExpQuad(1, 0.1)
cov = cov1 + cov2
K = theano.function([], cov(X))()
npt.assert_allclose(K[0, 1], 2 * 0.53940, atol=1e-3)
def test_rightadd_scalar(self):
X = np.linspace(0,1,10)[:,None]
with Model() as model:
a = 1
cov = gp.cov.ExpQuad(1, 0.1) + a
K = theano.function([], cov(X))()
npt.assert_allclose(K[0, 1], 1.53940, atol=1e-3)
def test_leftadd_scalar(self):
X = np.linspace(0,1,10)[:,None]
with Model() as model:
a = 1
cov = a + gp.cov.ExpQuad(1, 0.1)
K = theano.function([], cov(X))()
npt.assert_allclose(K[0, 1], 1.53940, atol=1e-3)
def test_rightadd_matrix(self):
X = np.linspace(0,1,10)[:,None]
M = 2 * np.ones((10,10))
with Model() as model:
cov = gp.cov.ExpQuad(1, 0.1) + M
K = theano.function([], cov(X))()
npt.assert_allclose(K[0, 1], 2.53940, atol=1e-3)
def test_leftprod_matrix(self):
X = np.linspace(0,1,3)[:,None]
M = np.array([[1,2,3],[2,1,2],[3,2,1]])
with Model() as model:
cov = M + gp.cov.ExpQuad(1, 0.1)
cov_true = gp.cov.ExpQuad(1, 0.1) + M
K = theano.function([], cov(X))()
K_true = theano.function([], cov_true(X))()
assert np.allclose(K, K_true)
class TestCovProd(object):
def test_symprod_cov(self):
X = np.linspace(0,1,10)[:,None]
with Model() as model:
cov1 = gp.cov.ExpQuad(1, 0.1)
cov2 = gp.cov.ExpQuad(1, 0.1)
cov = cov1 * cov2
K = theano.function([], cov(X))()
npt.assert_allclose(K[0, 1], 0.53940 * 0.53940, atol=1e-3)
def test_rightprod_scalar(self):
X = np.linspace(0,1,10)[:,None]
with Model() as model:
a = 2
cov = gp.cov.ExpQuad(1, 0.1) * a
K = theano.function([], cov(X))()
|
npt.assert_allclose(K[0, 1], 2 * 0.53940, atol=1e-3)
|
numpy.testing.assert_allclose
|
#!/usr/bin/env python3
# coding: utf-8
"""
Created on Dec 2020
@author: <NAME> <<EMAIL>>
"""
from typing import Optional, Union
import numpy as np
from scipy import sparse
from sknetwork.utils.check import check_random_state
from sknetwork.utils.format import get_adjacency
from sknetwork.clustering.louvain import Louvain
from sknetwork.embedding.base import BaseEmbedding
class LouvainNE(BaseEmbedding):
"""Embedding of graphs based on the hierarchical Louvain algorithm with random scattering per level.
Parameters
----------
n_components : int
Dimension of the embedding.
scale : float
        Dilution factor applied to the random offset added at each level of the hierarchical clustering.
resolution :
Resolution parameter.
tol_optimization :
Minimum increase in the objective function to enter a new optimization pass.
tol_aggregation :
Minimum increase in the objective function to enter a new aggregation pass.
n_aggregations :
Maximum number of aggregations.
A negative value is interpreted as no limit.
shuffle_nodes :
Enables node shuffling before optimization.
random_state :
Random number generator or random seed. If None, numpy.random is used.
Attributes
----------
embedding_ : array, shape = (n, n_components)
Embedding of the nodes.
embedding_row_ : array, shape = (n_row, n_components)
Embedding of the rows, for bipartite graphs.
embedding_col_ : array, shape = (n_col, n_components)
Embedding of the columns, for bipartite graphs.
Example
-------
>>> from sknetwork.embedding import LouvainNE
>>> from sknetwork.data import karate_club
>>> louvain = LouvainNE(n_components=3)
>>> adjacency = karate_club()
>>> embedding = louvain.fit_transform(adjacency)
>>> embedding.shape
(34, 3)
References
----------
<NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2020, January).
`LouvainNE: Hierarchical Louvain Method for High Quality and Scalable Network Embedding.
<https://hal.archives-ouvertes.fr/hal-02999888/document>`_
In Proceedings of the 13th International Conference on Web Search and Data Mining (pp. 43-51).
"""
def __init__(self, n_components: int = 2, scale: float = .1, resolution: float = 1, tol_optimization: float = 1e-3,
tol_aggregation: float = 1e-3, n_aggregations: int = -1, shuffle_nodes: bool = False,
random_state: Optional[Union[np.random.RandomState, int]] = None, verbose: bool = False):
super(LouvainNE, self).__init__()
self.n_components = n_components
self.scale = scale
self._clustering_method = Louvain(resolution=resolution, tol_optimization=tol_optimization,
tol_aggregation=tol_aggregation, n_aggregations=n_aggregations,
shuffle_nodes=shuffle_nodes, random_state=random_state, verbose=verbose)
self.random_state = check_random_state(random_state)
self.bipartite = None
def _recursive_louvain(self, adjacency: Union[sparse.csr_matrix, np.ndarray], depth: int,
nodes: Optional[np.ndarray] = None):
"""Recursive function for fit, modifies the embedding in place.
Parameters
----------
adjacency :
Adjacency matrix of the graph.
depth :
Depth of the recursion.
nodes :
The indices of the current nodes in the original graph.
"""
n = adjacency.shape[0]
if nodes is None:
nodes =
|
np.arange(n)
|
numpy.arange
|
# -*- coding: utf-8 -*-
""" Thresholding schemes
Notes
-----
* This is a direct translation from `Data Driven Topological Filtering of Brain Networks via Orthogonal Minimal Spanning Trees <https://github.com/stdimitr/topological_filtering_networks>`_
* Original author is <NAME> <<EMAIL>>
|
-----
"""
# Author: <NAME> <<EMAIL>>
# Author: <NAME> <<EMAIL>>
from typing import Tuple, Optional
import numpy as np
import networkx as nx
import bct
def k_core_decomposition(mtx: np.ndarray, threshold: float) -> np.ndarray:
""" Threshold a binary graph based on the detected k-cores.
.. [Alvarez2006] <NAME>., <NAME>., <NAME>., & <NAME>. (2006). Large scale networks fingerprinting and visualization using the k-core decomposition. In Advances in neural information processing systems (pp. 41-50).
.. [Hagman2008] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2008). Mapping the structural core of human cerebral cortex. PLoS biology, 6(7), e159.
Parameters
----------
mtx : array-like, shape(N, N)
Binary matrix.
threshold : int
Degree threshold.
Returns
-------
k_cores : array-like, shape(N, 1)
        A binary column vector flagging the nodes that belong to the detected k-cores.
"""
imtx = mtx
N, _ = np.shape(mtx)
# in_degree = np.sum(mtx, 0)
# out_degree = np.sum(mtx, 1)
degree = bct.degrees_und(mtx)
for i in range(N):
if degree[i] < threshold:
for l in range(N):
imtx[i, l] = 0
# Recalculate the list of the degrees
degree = bct.degrees_und(imtx)
k_cores = np.zeros((N, 1), dtype=np.int32)
for i in range(N):
if degree[i] > 0:
k_cores[i] = 1
return k_cores
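# Hedged usage sketch (random illustrative graph, not data from the references):
#
#   rand = np.random.RandomState(0)
#   a = (rand.rand(10, 10) > 0.6).astype(float)
#   a = np.triu(a, 1); a = a + a.T                      # symmetric, zero diagonal
#   core_mask = k_core_decomposition(a.copy(), threshold=3)
#
# Note that the function modifies its input in place, hence the .copy() above.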
def threshold_mst_mean_degree(mtx: np.ndarray, avg_degree: float) -> np.ndarray:
""" Threshold a graph based on mean using minimum spanning trees.
Parameters
----------
mtx : array-like, shape(N, N)
Symmetric, weighted and undirected connectivity matrix.
avg_degree : float
Mean degree threshold.
Returns
-------
binary_mtx : array-like, shape(N, N)
A binary mask matrix.
"""
N, _ = np.shape(mtx)
CIJtree = np.zeros((N, N))
CIJnotintree = mtx
# Find the number of orthogonal msts according to the desired mean degree.
num_edges = avg_degree * N
num_msts = np.int32(
|
np.round(num_edges / (N - 1))
|
numpy.round
|
import numpy as np
import sys
import asdf
import matplotlib.pyplot as plt
from numpy import log10
from scipy.integrate import simps
import os
import time
from matplotlib.ticker import FormatStrFormatter
from astropy.io import fits
# Custom modules
from .function import *
from .function_class import Func
from .basic_func import Basic
from .function_igm import *
lcb = '#4682b4' # line color, blue
def plot_sfh(MB, f_comp=0, flim=0.01, lsfrl=-3, mmax=1000, Txmin=0.08, Txmax=4, lmmin=5, fil_path='./FILT/', \
inputs=None, dust_model=0, DIR_TMP='./templates/', f_SFMS=False, f_symbol=True, verbose=False, f_silence=True, \
f_log_sfh=True, dpi=250, TMIN=0.0001, tau_lim=0.01, skip_zhist=False, tset_SFR_SED=0.1, f_axis_force=True):
'''
Purpose
-------
Star formation history plot.
    Parameters
    ----------
flim : float
Lower limit for plotting an age bin.
lsfrl : float
Lower limit for SFR, in logMsun/yr
f_SFMS : bool
        If True, plot the SFR of the main sequence for the given stellar mass at each lookback time.
tset_SFR_SED : float
in Gyr. Time scale over which SFR estimate is averaged.
'''
if f_silence:
import matplotlib
matplotlib.use("Agg")
fnc = MB.fnc
bfnc = MB.bfnc
ID = MB.ID
Z = MB.Zall
age = MB.age
nage = MB.nage
tau0 = MB.tau0
age = np.asarray(age)
try:
if not MB.ZFIX == None:
skip_zhist = True
except:
pass
if Txmin > np.min(age):
Txmin = np.min(age) * 0.8
NUM_COLORS = len(age)
cm = plt.get_cmap('gist_rainbow_r')
col = np.atleast_2d([cm(1.*i/NUM_COLORS) for i in range(NUM_COLORS)])
################
# RF colors.
home = os.path.expanduser('~')
c = MB.c
chimax = 1.
m0set = MB.m0set
Mpc_cm = MB.Mpc_cm
d = MB.d
#############
# Plot.
#############
if f_log_sfh:
fig = plt.figure(figsize=(8,2.8))
fig.subplots_adjust(top=0.88, bottom=0.18, left=0.07, right=0.99, hspace=0.15, wspace=0.3)
else:
fig = plt.figure(figsize=(8.2,2.8))
fig.subplots_adjust(top=0.88, bottom=0.18, left=0.1, right=0.99, hspace=0.15, wspace=0.3)
if skip_zhist:
if f_log_sfh:
fig = plt.figure(figsize=(5.5,2.8))
fig.subplots_adjust(top=0.88, bottom=0.18, left=0.1, right=0.99, hspace=0.15, wspace=0.3)
else:
fig = plt.figure(figsize=(6.2,2.8))
fig.subplots_adjust(top=0.88, bottom=0.18, left=0.1, right=0.99, hspace=0.15, wspace=0.3)
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
else:
ax1 = fig.add_subplot(131)
ax2 = fig.add_subplot(132)
ax4 = fig.add_subplot(133)
ax4t = ax4.twiny()
ax1t = ax1.twiny()
ax2t = ax2.twiny()
##################
# Fitting Results
##################
    SNlim = 3 # above which the SN line is shown.
###########################
# Open result file
###########################
file = MB.DIR_OUT + 'summary_' + ID + '.fits'
hdul = fits.open(file) # open a FITS file
try:
zbes = hdul[0].header['zmc']
except:
zbes = hdul[0].header['z']
chinu= hdul[1].data['chi']
try:
RA = hdul[0].header['RA']
DEC = hdul[0].header['DEC']
except:
RA = 0
DEC = 0
try:
SN = hdul[0].header['SN']
except:
###########################
# Get SN of Spectra
###########################
file = 'templates/spec_obs_' + ID + '.cat'
fds = np.loadtxt(file, comments='#')
nrs = fds[:,0]
lams = fds[:,1]
fsp = fds[:,2]
esp = fds[:,3]
consp = (nrs<10000) & (lams/(1.+zbes)>3600) & (lams/(1.+zbes)<4200)
        if len((fsp/esp)[consp]) > 10:
SN = np.median((fsp/esp)[consp])
else:
SN = 1
Asum = 0
A50 = np.arange(len(age), dtype='float')
for aa in range(len(A50)):
A50[aa] = 10**hdul[1].data['A'+str(aa)][1]
Asum += A50[aa]
####################
# For cosmology
####################
    DL = MB.cosmo.luminosity_distance(zbes).value * Mpc_cm # Luminosity distance in cm
Cons = (4.*np.pi*DL**2/(1.+zbes))
Tuni = MB.cosmo.age(zbes).value #, use_flat=True, **cosmo)
Tuni0 = (Tuni - age[:])
delT = np.zeros(len(age),dtype='float')
delTl = np.zeros(len(age),dtype='float')
delTu = np.zeros(len(age),dtype='float')
if len(age) == 1:
#if tau0[0] < 0: # SSP;
for aa in range(len(age)):
try:
tau_ssp = float(inputs['TAU_SSP'])
except:
tau_ssp = tau_lim
delTl[aa] = tau_ssp/2
delTu[aa] = tau_ssp/2
if age[aa] < tau_lim:
# This is because fsps has the minimum tau = tau_lim
delT[aa] = tau_lim
else:
delT[aa] = delTu[aa] + delTl[aa]
else: # This is only true when CSP...
for aa in range(len(age)):
if aa == 0:
delTl[aa] = age[aa]
delTu[aa] = (age[aa+1]-age[aa])/2.
delT[aa] = delTu[aa] + delTl[aa]
#print(age[aa],age[aa]-delTl[aa],age[aa]+delTu[aa])
elif Tuni < age[aa]:
delTl[aa] = (age[aa]-age[aa-1])/2.
delTu[aa] = Tuni-age[aa] #delTl[aa] #10.
delT[aa] = delTu[aa] + delTl[aa]
#print(age[aa],age[aa]-delTl[aa],age[aa]+delTu[aa])
elif aa == len(age)-1:
delTl[aa] = (age[aa]-age[aa-1])/2.
delTu[aa] = Tuni - age[aa]
delT[aa] = delTu[aa] + delTl[aa]
#print(age[aa],age[aa]-delTl[aa],age[aa]+delTu[aa])
else:
delTl[aa] = (age[aa]-age[aa-1])/2.
delTu[aa] = (age[aa+1]-age[aa])/2.
if age[aa]+delTu[aa]>Tuni:
delTu[aa] = Tuni-age[aa]
delT[aa] = delTu[aa] + delTl[aa]
#print(age[aa],age[aa]-delTl[aa],age[aa]+delTu[aa])
con_delt = (delT<=0)
delT[con_delt] = 1e10
delT[:] *= 1e9 # Gyr to yr
delTl[:] *= 1e9 # Gyr to yr
delTu[:] *= 1e9 # Gyr to yr
##############################
# Load Pickle
##############################
samplepath = MB.DIR_OUT
pfile = 'chain_' + ID + '_corner.cpkl'
niter = 0
data = loadcpkl(os.path.join(samplepath+'/'+pfile))
try:
ndim = data['ndim'] # By default, use ndim and burnin values contained in the cpkl file, if present.
burnin = data['burnin']
nmc = data['niter']
nwalk = data['nwalkers']
Nburn = burnin #* nwalk/10/2 # I think this takes 3/4 of samples
#if nmc>1000:
# Nburn = 500
samples = data['chain'][:]
except:
print(' = > NO keys of ndim and burnin found in cpkl, use input keyword values')
return -1
######################
# Mass-to-Light ratio.
######################
AM = np.zeros((len(age), mmax), dtype='float') # Mass in each bin.
AC = np.zeros((len(age), mmax), dtype='float') -99 # Cumulative mass in each bin.
AL = np.zeros((len(age), mmax), dtype='float') # Cumulative light in each bin.
ZM = np.zeros((len(age), mmax), dtype='float') # Z.
ZC = np.zeros((len(age), mmax), dtype='float') -99 # Cumulative Z.
ZL = np.zeros((len(age), mmax), dtype='float') -99 # Light weighted cumulative Z.
TC = np.zeros((len(age), mmax), dtype='float') # Mass weighted T.
TL = np.zeros((len(age), mmax), dtype='float') # Light weighted T.
ZMM= np.zeros((len(age), mmax), dtype='float') # Mass weighted Z.
ZML= np.zeros((len(age), mmax), dtype='float') # Light weighted Z.
SF = np.zeros((len(age), mmax), dtype='float') # SFR
Av = np.zeros(mmax, dtype='float') # SFR
# ##############################
# Add simulated scatter in quad
# if files are available.
# ##############################
if inputs:
f_zev = int(inputs['ZEVOL'])
else:
f_zev = 1
eZ_mean = 0
try:
meanfile = './sim_SFH_mean.cat'
dfile = np.loadtxt(meanfile, comments='#')
eA = dfile[:,2]
eZ = dfile[:,4]
eAv= np.mean(dfile[:,6])
if f_zev == 0:
eZ_mean = np.mean(eZ[:])
eZ[:] = age * 0 #+ eZ_mean
else:
try:
f_zev = int(prihdr['ZEVOL'])
if f_zev == 0:
eZ_mean = np.mean(eZ[:])
eZ = age * 0
except:
pass
except:
if verbose:
print('No simulation file (%s).\nError may be underestimated.' % meanfile)
eA = age * 0
eZ = age * 0
eAv = 0
mm = 0
#####################
# Get SED based SFR
#####################
f_SFRSED_plot = False
SFR_SED = np.zeros(mmax,dtype='float')
# ASDF;
af = asdf.open(MB.DIR_TMP + 'spec_all_' + MB.ID + '.asdf')
af0 = asdf.open(MB.DIR_TMP + 'spec_all.asdf')
sedpar = af['ML'] # For M/L
sedpar0 = af0['ML'] # For mass loss frac.
AAtmp = np.zeros(len(age), dtype='float')
ZZtmp = np.zeros(len(age), dtype='float')
mslist= np.zeros(len(age), dtype='float')
for mm in range(mmax):
delt_tot = 0
mtmp = np.random.randint(len(samples))# + Nburn
try:
Av_tmp = samples['Av'][mtmp]
except:
Av_tmp = MB.AVFIX
Avrand = np.random.uniform(-eAv, eAv)
if Av_tmp + Avrand<0:
Av[mm] = 0
else:
Av[mm] = Av_tmp + Avrand
for aa in range(len(age)):
try:
# This is in log.
AAtmp[aa] = samples['A'+str(aa)][mtmp]
except:
AAtmp[aa] = -10
pass
try:
ZZtmp[aa] = samples['Z'+str(aa)][mtmp]
except:
try:
ZZtmp[aa] = samples['Z0'][mtmp]
except:
ZZtmp[aa] = MB.ZFIX
nZtmp = bfnc.Z2NZ(ZZtmp[aa])
mslist[aa] = sedpar['ML_'+str(nZtmp)][aa]
Arand = np.random.uniform(-eA[aa],eA[aa])
Zrand = np.random.uniform(-eZ[aa],eZ[aa])
f_m_sur = sedpar0['frac_mass_survive_%d'%nZtmp][aa]
# quantity in log scale;
AM[aa, mm] = AAtmp[aa] + np.log10(mslist[aa]) + Arand
AL[aa, mm] = AM[aa,mm] - np.log10(mslist[aa])
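            # SFR in this age bin: mass formed (light amplitude x M/L, corrected by the
            # surviving-mass fraction) divided by the bin width delT, all kept in log10.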
SF[aa, mm] = AAtmp[aa] + np.log10(mslist[aa] / delT[aa] / f_m_sur) + Arand # / ml
ZM[aa, mm] = ZZtmp[aa] + Zrand
ZMM[aa, mm]= ZZtmp[aa] + AAtmp[aa] + np.log10(mslist[aa]) + Zrand
ZML[aa, mm]= ZMM[aa,mm] - np.log10(mslist[aa])
# SFR from SED. This will be converted in log later;
if age[aa]<=tset_SFR_SED:
SFR_SED[mm] += 10**SF[aa, mm] * delT[aa]
delt_tot += delT[aa]
SFR_SED[mm] /= delt_tot
if SFR_SED[mm] > 0:
SFR_SED[mm] = np.log10(SFR_SED[mm])
else:
SFR_SED[mm] = -99
for aa in range(len(age)):
if np.sum(10**AM[aa:,mm])>0:
AC[aa, mm] = np.log10(np.sum(10**AM[aa:,mm]))
ZC[aa, mm] = np.log10(np.sum(10**ZMM[aa:,mm])/10**AC[aa, mm])
if np.sum(10**AL[aa:,mm])>0:
ZL[aa, mm] = np.log10(np.sum(10**ZML[aa:,mm])/np.sum(10**AL[aa:,mm]))
if f_zev == 0: # To avoid random fluctuation in A.
ZC[aa,mm] = ZM[aa,mm]
ACs = 0
ALs = 0
for bb in range(aa, len(age), 1):
tmpAA = 10**np.random.uniform(-eA[bb],eA[bb])
tmpTT = np.random.uniform(-delT[bb]/1e9,delT[bb]/1e9)
TC[aa, mm] += (age[bb]+tmpTT) * 10**AAtmp[bb] * mslist[bb] * tmpAA
TL[aa, mm] += (age[bb]+tmpTT) * 10**AAtmp[bb] * tmpAA
ACs += 10**AAtmp[bb] * mslist[bb] * tmpAA
ALs += 10**AAtmp[bb] * tmpAA
TC[aa, mm] /= ACs
TL[aa, mm] /= ALs
if TC[aa, mm]>0:
TC[aa, mm] = np.log10(TC[aa, mm])
if TL[aa, mm]>0:
TL[aa, mm] = np.log10(TL[aa, mm])
# Do stuff...
time.sleep(0.01)
# Update Progress Bar
printProgressBar(mm, mmax, prefix = 'Progress:', suffix = 'Complete', length = 40)
Avtmp = np.percentile(Av[:],[16,50,84])
#############
# Plot
#############
AMp = np.zeros((len(age),3), dtype='float')
ACp = np.zeros((len(age),3), dtype='float')
ZMp = np.zeros((len(age),3), dtype='float')
ZCp = np.zeros((len(age),3), dtype='float')
ZLp = np.zeros((len(age),3), dtype='float')
SFp = np.zeros((len(age),3), dtype='float')
for aa in range(len(age)):
AMp[aa,:] = np.percentile(AM[aa,:], [16,50,84])
ACp[aa,:] = np.percentile(AC[aa,:], [16,50,84])
ZMp[aa,:] = np.percentile(ZM[aa,:], [16,50,84])
ZCp[aa,:] = np.percentile(ZC[aa,:], [16,50,84])
ZLp[aa,:] = np.percentile(ZL[aa,:], [16,50,84])
SFp[aa,:] = np.percentile(SF[aa,:], [16,50,84])
SFR_SED_med = np.percentile(SFR_SED[:],[16,50,84])
if f_SFRSED_plot:
ax1.errorbar(delt_tot/2./1e9, SFR_SED_med[1], xerr=[[delt_tot/2./1e9],[delt_tot/2./1e9]], \
yerr=[[SFR_SED_med[1]-SFR_SED_med[0]],[SFR_SED_med[2]-SFR_SED_med[1]]], \
linestyle='', color='orange', lw=1., marker='*',ms=8,zorder=-2)
###################
msize = np.zeros(len(age), dtype='float')
for aa in range(len(age)):
if A50[aa]/Asum>flim: # if >1%
msize[aa] = 200 * A50[aa]/Asum
conA = (msize>=0)
if f_log_sfh:
ax1.fill_between(age[conA], SFp[:,0][conA], SFp[:,2][conA], linestyle='-', color='k', alpha=0.5, zorder=-1)
ax1.errorbar(age, SFp[:,1], linestyle='-', color='k', marker='', zorder=-1, lw=.5)
else:
ax1.fill_between(age[conA], 10**SFp[:,0][conA], 10**SFp[:,2][conA], linestyle='-', color='k', alpha=0.5, zorder=-1)
ax1.errorbar(age, 10**SFp[:,1], linestyle='-', color='k', marker='', zorder=-1, lw=.5)
if f_symbol:
tbnd = 0.0001
for aa in range(len(age)):
agebin = np.arange(age[aa]-delTl[aa]/1e9, age[aa]+delTu[aa]/1e9, delTu[aa]/1e10)
tbnd = age[aa]+delT[aa]/2./1e9
if f_log_sfh:
ax1.errorbar(age[aa], SFp[aa,1], xerr=[[delTl[aa]/1e9], [delTu[aa]/1e9]], \
yerr=[[SFp[aa,1]-SFp[aa,0]], [SFp[aa,2]-SFp[aa,1]]], linestyle='', color=col[aa], marker='', zorder=1, lw=1.)
if msize[aa]>0:
ax1.scatter(age[aa], SFp[aa,1], marker='.', color=col[aa], edgecolor='k', s=msize[aa], zorder=1)
else:
ax1.errorbar(age[aa], 10**SFp[aa,1], xerr=[[delTl[aa]/1e9], [delTu[aa]/1e9]], \
yerr=[[10**SFp[aa,1]-10**SFp[aa,0]], [10**SFp[aa,2]-10**SFp[aa,1]]], linestyle='', color=col[aa], marker='.', zorder=1, lw=1.)
if msize[aa]>0:
ax1.scatter(age[aa], 10**SFp[aa,1], marker='.', color=col[aa], edgecolor='k', s=msize[aa], zorder=1)
#############
# Get SFMS in log10;
#############
IMF = int(inputs['NIMF'])
SFMS_16 = get_SFMS(zbes,age,10**ACp[:,0],IMF=IMF)
SFMS_50 = get_SFMS(zbes,age,10**ACp[:,1],IMF=IMF)
SFMS_84 = get_SFMS(zbes,age,10**ACp[:,2],IMF=IMF)
    # The rejuvenation check below is currently disabled (hence the hard-coded False).
if False:
f_rejuv,t_quench,t_rejuv = check_rejuv(age,SFp[:,:],ACp[:,:],SFMS_50)
else:
print('Failed to call rejuvenation module.')
f_rejuv,t_quench,t_rejuv = 0,0,0
# Plot MS?
if f_SFMS:
if f_log_sfh:
ax1.fill_between(age[conA], SFMS_50[conA]-0.2, SFMS_50[conA]+0.2, linestyle='-', color='b', alpha=0.3, zorder=-2)
ax1.plot(age[conA], SFMS_50[conA], linestyle='--', color='k', alpha=0.5, zorder=-2)
else:
ax1.fill_between(age[conA], 10**(SFMS_50[conA]-0.2), 10**(SFMS_50[conA]+0.2), linestyle='-', color='b', alpha=0.3, zorder=-2)
ax1.plot(age[conA], 10**SFMS_50[conA], linestyle='--', color='k', alpha=0.5, zorder=-2)
#
# Mass in each bin
#
ax2label = ''
ax2.fill_between(age[conA], ACp[:,0][conA], ACp[:,2][conA], linestyle='-', color='k', alpha=0.5)
ax2.errorbar(age[conA], ACp[:,1][conA], xerr=[delTl[:][conA]/1e9,delTu[:][conA]/1e9], \
yerr=[ACp[:,1][conA]-ACp[:,0][conA],ACp[:,2][conA]-ACp[:,1][conA]], linestyle='-', color='k', lw=0.5, label=ax2label, zorder=1)
#ax2.scatter(age[conA], ACp[:,1][conA], marker='.', c='k', s=msize)
if f_symbol:
tbnd = 0.0001
mtmp = 0
for ii in range(len(age)):
aa = len(age) -1 - ii
agebin = np.arange(0, age[aa], delTu[aa]/1e10)
ax2.errorbar(age[aa], ACp[aa,1], xerr=[[delTl[aa]/1e9],[delTu[aa]/1e9]], \
yerr=[[ACp[aa,1]-ACp[aa,0]],[ACp[aa,2]-ACp[aa,1]]], linestyle='-', color=col[aa], lw=1, zorder=2)
tbnd = age[aa]+delT[aa]/2./1e9
mtmp = ACp[aa,1]
if msize[aa]>0:
ax2.scatter(age[aa], ACp[aa,1], marker='.', c=[col[aa]], edgecolor='k', s=msize[aa], zorder=2)
y2min = np.max([lmmin,np.min(ACp[:,0][conA])])
y2max = np.max(ACp[:,2][conA])+0.05
if np.abs(y2max-y2min) < 0.2:
y2min -= 0.2
#
# Total Metal
#
if not skip_zhist:
ax4.fill_between(age[conA], ZCp[:,0][conA], ZCp[:,2][conA], linestyle='-', color='k', alpha=0.5)
ax4.errorbar(age[conA], ZCp[:,1][conA], linestyle='-', color='k', lw=0.5, zorder=1)
for ii in range(len(age)):
aa = len(age) -1 - ii
if msize[aa]>0:
ax4.errorbar(age[aa], ZCp[aa,1], xerr=[[delTl[aa]/1e9],[delTu[aa]/1e9]], yerr=[[ZCp[aa,1]-ZCp[aa,0]],[ZCp[aa,2]-ZCp[aa,1]]], linestyle='-', color=col[aa], lw=1, zorder=1)
ax4.scatter(age[aa], ZCp[aa,1], marker='.', c=[col[aa]], edgecolor='k', s=msize[aa], zorder=2)
#############
# Axis
#############
# For redshift
if zbes<4:
if zbes<2:
zred = [zbes, 2, 3, 6]
zredl = ['$z_\mathrm{obs.}$', 2, 3, 6]
elif zbes<2.5:
zred = [zbes, 2.5, 3, 6]
zredl = ['$z_\mathrm{obs.}$', 2.5, 3, 6]
elif zbes<3.:
zred = [zbes, 3, 6]
zredl = ['$z_\mathrm{obs.}$', 3, 6]
else:
zred = [zbes, 6]
zredl = ['$z_\mathrm{obs.}$', 6]
elif zbes<6:
zred = [zbes, 5, 6, 9]
zredl = ['$z_\mathrm{obs.}$', 5, 6, 9]
else:
zred = [zbes, 12]
zredl = ['$z_\mathrm{obs.}$', 12]
Tzz = np.zeros(len(zred), dtype='float')
for zz in range(len(zred)):
Tzz[zz] = (Tuni - MB.cosmo.age(zred[zz]).value)
if Tzz[zz] < Txmin:
Tzz[zz] = Txmin
lsfru = 2.8
if np.max(SFp[:,2])>2.8:
lsfru = np.max(SFp[:,2])+0.1
if np.min(SFp[:,2])>lsfrl:
lsfrl = np.min(SFp[:,2])+0.1
if f_log_sfh:
if f_axis_force:
ax1.set_ylim(lsfrl, lsfru)
ax1.set_ylabel('$\log \dot{M}_*/M_\odot$yr$^{-1}$', fontsize=12)
#ax1.plot(Tzz, Tzz*0+lsfru+(lsfru-lsfrl)*.00, marker='|', color='k', ms=3, linestyle='None')
else:
ax1.set_ylim(0, 10**lsfru)
ax1.set_ylabel('$\dot{M}_*/M_\odot$yr$^{-1}$', fontsize=12)
#ax1.plot(Tzz, Tzz*0+10**lsfru+(lsfru-lsfrl)*.00, marker='|', color='k', ms=3, linestyle='None')
if Txmax < np.max(age):
Txmax = np.max(age)
ax1.set_xlim(Txmin, Txmax)
ax1.set_xscale('log')
ax2.set_ylabel('$\log M_*/M_\odot$', fontsize=12)
ax2.set_xlim(Txmin, Txmax)
if f_axis_force:
ax2.set_ylim(y2min, y2max)
ax2.set_xscale('log')
ax2.text(np.min(age*1.05), y2min + 0.07*(y2max-y2min), 'ID: %s\n$z_\mathrm{obs.}:%.2f$\n$\log M_\mathrm{*}/M_\odot:%.2f$\n$\log Z_\mathrm{*}/Z_\odot:%.2f$\n$\log T_\mathrm{*}$/Gyr$:%.2f$\n$A_V$/mag$:%.2f$'\
%(ID, zbes, ACp[0,1], ZCp[0,1], np.nanmedian(TC[0,:]), Avtmp[1]), fontsize=9, bbox=dict(facecolor='w', alpha=0.7), zorder=10)
#
# Brief Summary
#
# Writing SED param in a fits file;
# Header
prihdr = fits.Header()
prihdr['ID'] = ID
prihdr['z'] = zbes
prihdr['RA'] = RA
prihdr['DEC'] = DEC
# Add rejuv properties;
prihdr['f_rejuv'] = f_rejuv
prihdr['t_quen'] = t_quench
prihdr['t_rejuv'] = t_rejuv
# SFR
prihdr['tset_SFR'] = tset_SFR_SED
# Version;
import gsf
prihdr['version'] = gsf.__version__
percs = [16,50,84]
zmc = hdul[1].data['zmc']
ACP = [ACp[0,0], ACp[0,1], ACp[0,2]]
ZCP = [ZCp[0,0], ZCp[0,1], ZCp[0,2]]
ZLP = [ZLp[0,0], ZLp[0,1], ZLp[0,2]]
con = (~np.isnan(TC[0,:]))
TMW = [np.percentile(TC[0,:][con],16), np.percentile(TC[0,:][con],50), np.percentile(TC[0,:][con],84)]
con = (~np.isnan(TL[0,:]))
TLW = [np.percentile(TL[0,:][con],16), np.percentile(TL[0,:][con],50), np.percentile(TL[0,:][con],84)]
for ii in range(len(percs)):
prihdr['zmc_%d'%percs[ii]] = ('%.3f'%zmc[ii],'redshift')
for ii in range(len(percs)):
prihdr['HIERARCH Mstel_%d'%percs[ii]] = ('%.3f'%ACP[ii], 'Stellar mass, logMsun')
for ii in range(len(percs)):
prihdr['HIERARCH SFR_%d'%percs[ii]] = ('%.3f'%SFR_SED_med[ii], 'SFR, logMsun/yr')
for ii in range(len(percs)):
prihdr['HIERARCH Z_MW_%d'%percs[ii]] = ('%.3f'%ZCP[ii], 'Mass-weighted metallicity, logZsun')
for ii in range(len(percs)):
prihdr['HIERARCH Z_LW_%d'%percs[ii]] = ('%.3f'%ZLP[ii], 'Light-weighted metallicity, logZsun')
for ii in range(len(percs)):
prihdr['HIERARCH T_MW_%d'%percs[ii]] = ('%.3f'%TMW[ii], 'Mass-weighted age, logGyr')
for ii in range(len(percs)):
        prihdr['HIERARCH T_LW_%d'%percs[ii]] = ('%.3f'%TLW[ii], 'Light-weighted age, logGyr')
for ii in range(len(percs)):
prihdr['AV_%d'%percs[ii]] = ('%.3f'%Avtmp[ii], 'Dust attenuation, mag')
prihdu = fits.PrimaryHDU(header=prihdr)
# For SFH plot;
t0 = Tuni - age[:]
col02 = []
col50 = fits.Column(name='time', format='E', unit='Gyr', array=age[:])
col02.append(col50)
col50 = fits.Column(name='time_l', format='E', unit='Gyr', array=age[:]-delTl[:]/1e9)
col02.append(col50)
    col50 = fits.Column(name='time_u', format='E', unit='Gyr', array=age[:]+delTu[:]/1e9)
col02.append(col50)
col50 = fits.Column(name='SFR16', format='E', unit='logMsun/yr', array=SFp[:,0])
col02.append(col50)
col50 = fits.Column(name='SFR50', format='E', unit='logMsun/yr', array=SFp[:,1])
col02.append(col50)
col50 = fits.Column(name='SFR84', format='E', unit='logMsun/yr', array=SFp[:,2])
col02.append(col50)
col50 = fits.Column(name='Mstel16', format='E', unit='logMsun', array=ACp[:,0])
col02.append(col50)
col50 = fits.Column(name='Mstel50', format='E', unit='logMsun', array=ACp[:,1])
col02.append(col50)
col50 = fits.Column(name='Mstel84', format='E', unit='logMsun', array=ACp[:,2])
col02.append(col50)
col50 = fits.Column(name='Z16', format='E', unit='logZsun', array=ZCp[:,0])
col02.append(col50)
col50 = fits.Column(name='Z50', format='E', unit='logZsun', array=ZCp[:,1])
col02.append(col50)
col50 = fits.Column(name='Z84', format='E', unit='logZsun', array=ZCp[:,2])
col02.append(col50)
colms = fits.ColDefs(col02)
dathdu = fits.BinTableHDU.from_columns(colms)
hdu = fits.HDUList([prihdu, dathdu])
file_sfh = MB.DIR_OUT + 'SFH_' + ID + '.fits'
hdu.writeto(file_sfh, overwrite=True)
# Attach to MB;
MB.sfh_tlook = age
MB.sfh_tlookl= delTl[:][conA]/1e9
MB.sfh_tlooku= delTu[:][conA]/1e9
MB.sfh_sfr16 = SFp[:,0]
MB.sfh_sfr50 = SFp[:,1]
MB.sfh_sfr84 = SFp[:,2]
MB.sfh_mfr16 = ACp[:,0]
MB.sfh_mfr50 = ACp[:,1]
MB.sfh_mfr84 = ACp[:,2]
MB.sfh_zfr16 = ZCp[:,0]
MB.sfh_zfr50 = ZCp[:,1]
MB.sfh_zfr84 = ZCp[:,2]
# SFH
zzall = np.arange(1.,12,0.01)
Tall = MB.cosmo.age(zzall).value # , use_flat=True, **cosmo)
dely2 = 0.1
while (y2max-y2min)/dely2>7:
dely2 *= 2.
y2ticks = np.arange(y2min, y2max, dely2)
ax2.set_yticks(y2ticks)
ax2.set_yticklabels(np.arange(y2min, y2max, dely2), minor=False)
ax2.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
y3min, y3max = np.min(Z), np.max(Z)
if not skip_zhist:
ax4.set_xlim(Txmin, Txmax)
ax4.set_ylim(y3min-0.05, y3max)
ax4.set_xscale('log')
if f_axis_force:
ax4.set_yticks([-0.8, -0.4, 0., 0.4])
ax4.set_yticklabels(['-0.8', '-0.4', '0', '0.4'])
#ax4.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
#ax3.yaxis.labelpad = -2
ax4.yaxis.labelpad = -2
ax4.set_xlabel('$t_\mathrm{lookback}$/Gyr', fontsize=12)
ax4.set_ylabel('$\log Z_*/Z_\odot$', fontsize=12)
ax4t.set_xscale('log')
ax4t.set_xticklabels(zredl[:])
ax4t.set_xticks(Tzz[:])
ax4t.tick_params(axis='x', labelcolor='k')
ax4t.xaxis.set_ticks_position('none')
ax4t.plot(Tzz, Tzz*0+y3max+(y3max-y3min)*.00, marker='|', color='k', ms=3, linestyle='None')
ax4t.set_xlim(Txmin, Txmax)
ax1.set_xlabel('$t_\mathrm{lookback}$/Gyr', fontsize=12)
ax2.set_xlabel('$t_\mathrm{lookback}$/Gyr', fontsize=12)
# This has to come before set_xticks;
ax1t.set_xscale('log')
ax2t.set_xscale('log')
ax1t.set_xticklabels(zredl[:])
ax1t.set_xticks(Tzz[:])
ax1t.tick_params(axis='x', labelcolor='k')
ax1t.xaxis.set_ticks_position('none')
ax1t.plot(Tzz, Tzz*0+lsfru+(lsfru-lsfrl)*.00, marker='|', color='k', ms=3, linestyle='None')
ax2t.set_xticklabels(zredl[:])
ax2t.set_xticks(Tzz[:])
ax2t.tick_params(axis='x', labelcolor='k')
ax2t.xaxis.set_ticks_position('none')
ax2t.plot(Tzz, Tzz*0+y2max+(y2max-y2min)*.00, marker='|', color='k', ms=3, linestyle='None')
# This has to come after set_xticks;
ax1t.set_xlim(Txmin, Txmax)
ax2t.set_xlim(Txmin, Txmax)
# Save
fig.savefig(MB.DIR_OUT + 'SFH_' + ID + '_pcl.png', dpi=dpi)
def sfr_tau(t0, tau0, Z=0.0, sfh=0, tt=np.arange(0,13,0.1), Mtot=1.):
'''
Parameters
----------
sfh : int
1:exponential, 4:delayed exp, 5:, 6:lognormal
    Mtot : float
        Total mass formed, in Msun.
    tt : float array
        Lookback time grid, in Gyr
    tau0: float
        in Gyr
    t0 : float
        age, in Gyr
    Returns
    -------
    tt : float array
        Lookback time grid, in Gyr
    SFR : float array
        in Msun/yr
    Mstel : float array
        Cumulative stellar mass, in Msun
'''
yy = np.zeros(len(tt), dtype='float')
yyms = np.zeros(len(tt), dtype='float')
con = (tt<=t0)
if sfh == 1:
yy[con] = np.exp((tt[con]-t0)/tau0)
elif sfh == 4:
yy[con] = (t0-tt[con]) * np.exp((tt[con]-t0)/tau0)
elif sfh == 6: # lognorm
con = (tt>0)
yy[con] = 1. / np.sqrt(2*np.pi*tau0**2) * np.exp(-(np.log(tt[con])-np.log(t0))**2/(2*tau0**2)) / tt[con]
# Total mass calculation;
#deltt = (tt[1] - tt[0]) #* 1e9
yyms[:] = np.cumsum(yy[::-1])[::-1] #* deltt * 1e9 # in Msun
# Normalization;
deltt = tt[1] - tt[0]
C = Mtot/np.max(yyms)
yyms *= C
yy *= C / deltt / 1e9 # in Msun/yr
yy[~con] = 1e-20
yyms[~con] = 1e-20
return tt, yy, yyms
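# Hedged usage sketch for sfr_tau (illustrative values only): a delayed-exponential
# SFH (sfh=4) that formed 1e10 Msun with t0 = 1 Gyr and tau0 = 0.5 Gyr.
#
#   tt, sfr, mstel = sfr_tau(t0=1.0, tau0=0.5, sfh=4, tt=np.arange(0, 13, 0.01), Mtot=1e10)
#   # sfr is in Msun/yr and mstel is the cumulative stellar mass (Msun) on the tt grid.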
def plot_sfh_tau(MB, f_comp=0, flim=0.01, lsfrl=-1, mmax=1000, Txmin=0.08, Txmax=4, lmmin=8.5, fil_path='./FILT/', \
inputs=None, dust_model=0, DIR_TMP='./templates/', f_SFMS=False, f_symbol=True, verbose=False, f_silence=True, \
f_log_sfh=True, dpi=250, TMIN=0.0001, tau_lim=0.01, skip_zhist=True, tset_SFR_SED=0.1):
'''
Purpose
-------
Star formation history plot.
Parameters
----------
flim : float
Lower limit for plotting an age bin.
lsfrl : float
Lower limit for SFR, in logMsun/yr
f_SFMS : bool
        If True, plot the SFR of the main sequence for the given stellar mass at each lookback time.
tset_SFR_SED : float
in Gyr. Time scale over which SFR estimate is averaged.
'''
if f_silence:
import matplotlib
matplotlib.use("Agg")
else:
import matplotlib
fnc = MB.fnc
bfnc = MB.bfnc
ID = MB.ID
Z = MB.Zall
age = MB.age
nage = MB.nage
tau0 = MB.tau0
ageparam = MB.ageparam
try:
if not MB.ZFIX == None:
skip_zhist = True
except:
pass
NUM_COLORS = len(age)
cm = plt.get_cmap('gist_rainbow_r')
col = np.atleast_2d([cm(1.*i/NUM_COLORS) for i in range(NUM_COLORS)])
################
# RF colors.
home = os.path.expanduser('~')
c = MB.c
chimax = 1.
m0set = MB.m0set
Mpc_cm = MB.Mpc_cm
d = MB.d #10**(73.6/2.5) * 1e-18 # From [ergs/s/cm2/A] to [ergs/s/cm2/Hz]
#############
# Plot.
#############
if f_log_sfh:
fig = plt.figure(figsize=(8,2.8))
fig.subplots_adjust(top=0.88, bottom=0.18, left=0.07, right=0.99, hspace=0.15, wspace=0.3)
else:
fig = plt.figure(figsize=(8.2,2.8))
fig.subplots_adjust(top=0.88, bottom=0.18, left=0.1, right=0.99, hspace=0.15, wspace=0.3)
if skip_zhist:
if f_log_sfh:
fig = plt.figure(figsize=(5.5,2.8))
fig.subplots_adjust(top=0.88, bottom=0.18, left=0.1, right=0.99, hspace=0.15, wspace=0.3)
else:
fig = plt.figure(figsize=(6.2,2.8))
fig.subplots_adjust(top=0.88, bottom=0.18, left=0.1, right=0.99, hspace=0.15, wspace=0.3)
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
else:
ax1 = fig.add_subplot(131)
ax2 = fig.add_subplot(132)
ax4 = fig.add_subplot(133)
ax4t = ax4.twiny()
ax1t = ax1.twiny()
ax2t = ax2.twiny()
##################
# Fitting Results
##################
    SNlim = 3 # above which the SN line is shown.
###########################
# Open result file
###########################
file = MB.DIR_OUT + 'summary_' + ID + '.fits'
hdul = fits.open(file) # open a FITS file
try:
zbes = hdul[0].header['zmc']
except:
zbes = hdul[0].header['z']
chinu= hdul[1].data['chi']
try:
RA = hdul[0].header['RA']
DEC = hdul[0].header['DEC']
except:
RA = 0
DEC = 0
try:
SN = hdul[0].header['SN']
except:
###########################
# Get SN of Spectra
###########################
file = 'templates/spec_obs_' + ID + '.cat'
fds = np.loadtxt(file, comments='#')
nrs = fds[:,0]
lams = fds[:,1]
fsp = fds[:,2]
esp = fds[:,3]
consp = (nrs<10000) & (lams/(1.+zbes)>3600) & (lams/(1.+zbes)<4200)
        if len((fsp/esp)[consp]) > 10:
SN = np.median((fsp/esp)[consp])
else:
SN = 1
Asum = 0
A50 = np.arange(len(age), dtype='float')
for aa in range(len(A50)):
A50[aa] = 10**hdul[1].data['A'+str(aa)][1]
Asum += A50[aa]
####################
# For cosmology
####################
    DL = MB.cosmo.luminosity_distance(zbes).value * Mpc_cm # Luminosity distance in cm
Cons = (4.*np.pi*DL**2/(1.+zbes))
Tuni = MB.cosmo.age(zbes).value #, use_flat=True, **cosmo)
Tuni0 = (Tuni - age[:])
delT = np.zeros(len(age),dtype='float')
delTl = np.zeros(len(age),dtype='float')
delTu = np.zeros(len(age),dtype='float')
if len(age) == 1:
#if tau0[0] < 0: # SSP;
for aa in range(len(age)):
try:
tau_ssp = float(inputs['TAU_SSP'])
except:
tau_ssp = tau_lim
delTl[aa] = tau_ssp/2
delTu[aa] = tau_ssp/2
if age[aa] < tau_lim:
# This is because fsps has the minimum tau = tau_lim
delT[aa] = tau_lim
else:
delT[aa] = delTu[aa] + delTl[aa]
else: # This is only true when CSP...
for aa in range(len(age)):
if aa == 0:
delTl[aa] = age[aa]
delTu[aa] = (age[aa+1]-age[aa])/2.
delT[aa] = delTu[aa] + delTl[aa]
#print(age[aa],age[aa]-delTl[aa],age[aa]+delTu[aa])
elif Tuni < age[aa]:
delTl[aa] = (age[aa]-age[aa-1])/2.
delTu[aa] = Tuni-age[aa] #delTl[aa] #10.
delT[aa] = delTu[aa] + delTl[aa]
#print(age[aa],age[aa]-delTl[aa],age[aa]+delTu[aa])
elif aa == len(age)-1:
delTl[aa] = (age[aa]-age[aa-1])/2.
delTu[aa] = Tuni - age[aa]
delT[aa] = delTu[aa] + delTl[aa]
#print(age[aa],age[aa]-delTl[aa],age[aa]+delTu[aa])
else:
delTl[aa] = (age[aa]-age[aa-1])/2.
delTu[aa] = (age[aa+1]-age[aa])/2.
if age[aa]+delTu[aa]>Tuni:
delTu[aa] = Tuni-age[aa]
delT[aa] = delTu[aa] + delTl[aa]
#print(age[aa],age[aa]-delTl[aa],age[aa]+delTu[aa])
con_delt = (delT<=0)
delT[con_delt] = 1e10
delT[:] *= 1e9 # Gyr to yr
delTl[:] *= 1e9 # Gyr to yr
delTu[:] *= 1e9 # Gyr to yr
##############################
# Load Pickle
##############################
samplepath = MB.DIR_OUT
pfile = 'chain_' + ID + '_corner.cpkl'
niter = 0
data = loadcpkl(os.path.join(samplepath+'/'+pfile))
try:
ndim = data['ndim'] # By default, use ndim and burnin values contained in the cpkl file, if present.
burnin = data['burnin']
nmc = data['niter']
nwalk = data['nwalkers']
Nburn = burnin #* nwalk/10/2 # I think this takes 3/4 of samples
#if nmc>1000:
# Nburn = 500
samples = data['chain'][:]
except:
print(' => Keys ndim and burnin not found in the cpkl file; using input keyword values')
return -1
######################
# Mass-to-Light ratio.
######################
AM = np.zeros((len(age), mmax), dtype='float') # Mass in each bin.
AC = np.zeros((len(age), mmax), dtype='float') -99 # Cumulative mass in each bin.
AL = np.zeros((len(age), mmax), dtype='float') # Cumulative light in each bin.
ZM = np.zeros((len(age), mmax), dtype='float') # Z.
ZC = np.zeros((len(age), mmax), dtype='float') -99 # Cumulative Z.
ZL = np.zeros((len(age), mmax), dtype='float') -99 # Light weighted cumulative Z.
TC = np.zeros((len(age), mmax), dtype='float') # Mass weighted T.
TL = np.zeros((len(age), mmax), dtype='float') # Light weighted T.
ZMM= np.zeros((len(age), mmax), dtype='float') # Mass weighted Z.
ZML= np.zeros((len(age), mmax), dtype='float') # Light weighted Z.
SF = np.zeros((len(age), mmax), dtype='float') # SFR
Av = np.zeros(mmax, dtype='float') # Av (dust attenuation)
# ##############################
# Add simulated scatter in quad
# if files are available.
# ##############################
if inputs:
f_zev = int(inputs['ZEVOL'])
else:
f_zev = 1
eZ_mean = 0
#####################
# Get SED based SFR
#####################
f_SFRSED_plot = False
SFR_SED = np.zeros(mmax,dtype='float')
# ASDF;
af = asdf.open(MB.DIR_TMP + 'spec_all_' + MB.ID + '.asdf')
af0 = asdf.open(MB.DIR_TMP + 'spec_all.asdf')
sedpar = af['ML'] # For M/L
sedpar0 = af0['ML'] # For mass loss frac.
ttmin = 0.001
tt = np.arange(ttmin,Tuni+0.5,ttmin/10)
xSF = np.zeros((len(tt), mmax), dtype='float') # SFR
ySF = np.zeros((len(tt), mmax), dtype='float') # SFR
yMS = np.zeros((len(tt), mmax), dtype='float') # MFR
ySF_each = np.zeros((MB.npeak, len(tt), mmax), dtype='float') # SFR
yMS_each = np.zeros((MB.npeak, len(tt), mmax), dtype='float') # MFR
ZZmc = np.zeros((MB.npeak, mmax), dtype='float')
TTmc = np.zeros((MB.npeak, mmax), dtype='float')
TAmc = np.zeros((MB.npeak, mmax), dtype='float')
if Txmin > np.min(tt):
Txmin = np.min(tt) * 0.8
mm = 0
plot_each = True
while mm<mmax:
mtmp = np.random.randint(len(samples))# + Nburn
if MB.nAV != 0:
Av_tmp = samples['Av'][mtmp]
else:
Av_tmp = MB.AVFIX
for aa in range(MB.npeak):
AAtmp = samples['A%d'%aa][mtmp]
ltautmp = samples['TAU%d'%aa][mtmp]
lagetmp = samples['AGE%d'%aa][mtmp]
if aa == 0 or MB.ZEVOL:
try:
ZZtmp = samples['Z%d'%aa][mtmp]
except:
ZZtmp = MB.ZFIX
ZZmc[aa,mm] = ZZtmp
TAmc[aa,mm] = lagetmp
TTmc[aa,mm] = ltautmp
nZtmp,nttmp,natmp = bfnc.Z2NZ(ZZtmp, ltautmp, lagetmp)
mslist = sedpar['ML_'+str(nZtmp)+'_'+str(nttmp)][natmp]
xSF[:,mm], ySF_each[aa,:,mm], yMS_each[aa,:,mm] = sfr_tau(10**lagetmp, 10**ltautmp, ZZtmp, sfh=MB.SFH_FORM, tt=tt, Mtot=10**AAtmp*mslist)
#xSFtmp, ySFtmp, yMStmp = sfr_tau(10**lagetmp, 10**ltautmp, ZZtmp, sfh=MB.SFH_FORM, tt=tt, Mtot=10**AAtmp*mslist)
ySF[:,mm] += ySF_each[aa,:,mm]
yMS[:,mm] += yMS_each[aa,:,mm]
# SFR from SED. This will be converted in log later;
con_sfr = (xSF[:,mm] <= tset_SFR_SED)
SFR_SED[mm] += np.mean(10**ySF_each[aa,:,mm])
Av[mm] = Av_tmp
if plot_each:
ax1.plot(xSF[:,mm], np.log10(ySF[:,mm]), linestyle='-', color='k', alpha=0.01, zorder=-1, lw=0.5)
ax2.plot(xSF[:,mm], np.log10(yMS[:,mm]), linestyle='-', color='k', alpha=0.01, zorder=-1, lw=0.5)
if SFR_SED[mm] > 0:
SFR_SED[mm] = np.log10(SFR_SED[mm])
else:
SFR_SED[mm] = -99
mm += 1
Avtmp = np.percentile(Av[:],[16,50,84])
#############
# Plot
#############
xSFp = np.zeros((len(tt),3), dtype='float')
ySFp = np.zeros((len(tt),3), dtype='float')
yMSp = np.zeros((len(tt),3), dtype='float')
ySFp_each = np.zeros((MB.npeak, len(tt), 3), dtype='float')
yMSp_each = np.zeros((MB.npeak, len(tt), 3), dtype='float')
for ii in range(len(tt)):
xSFp[ii,:] = np.percentile(xSF[ii,:], [16,50,84])
ySFp[ii,:] = np.percentile(ySF[ii,:], [16,50,84])
yMSp[ii,:] = np.percentile(yMS[ii,:], [16,50,84])
for aa in range(MB.npeak):
ySFp_each[aa,ii,:] = np.percentile(ySF_each[aa,ii,:], [16,50,84])
yMSp_each[aa,ii,:] = np.percentile(yMS_each[aa,ii,:], [16,50,84])
for aa in range(MB.npeak):
ax1.plot(xSFp[:,1], np.log10(ySFp_each[aa,:,1]), linestyle='-', color=col[aa], alpha=1., zorder=-1, lw=0.5)
ax2.plot(xSFp[:,1], np.log10(ySFp_each[aa,:,1]), linestyle='-', color=col[aa], alpha=1., zorder=-1, lw=0.5)
ax1.plot(xSFp[:,1], np.log10(ySFp[:,1]), linestyle='-', color='k', alpha=1., zorder=-1, lw=0.5)
ax2.plot(xSFp[:,1], np.log10(yMSp[:,1]), linestyle='-', color='k', alpha=1., zorder=-1, lw=0.5)
ACp = np.zeros((len(tt),3),'float')
SFp = np.zeros((len(tt),3),'float')
ACp[:] = np.log10(yMSp[:,:])
SFp[:] = np.log10(ySFp[:,:])
SFR_SED_med = np.percentile(SFR_SED[:],[16,50,84])
###################
msize = np.zeros(len(age), dtype='float')
# Metal
ZCp = np.zeros((MB.npeak,3),'float')
TCp = np.zeros((MB.npeak,3),'float')
TTp = np.zeros((MB.npeak,3),'float')
for aa in range(len(age)):
if A50[aa]/Asum>flim: # if >1%
msize[aa] = 200 * A50[aa]/Asum
ZCp[aa,:] = np.percentile(ZZmc[aa,:], [16,50,84])
TCp[aa,:] = np.percentile(TTmc[aa,:], [16,50,84])
TTp[aa,:] = np.percentile(TAmc[aa,:], [16,50,84])
if False:
conA = (msize>=0)
if f_log_sfh:
ax1.fill_between(age[conA], SFp[:,0][conA], SFp[:,2][conA], linestyle='-', color='k', alpha=0.5, zorder=-1)
ax1.errorbar(age, SFp[:,1], linestyle='-', color='k', marker='', zorder=-1, lw=.5)
else:
ax1.fill_between(age[conA], 10**SFp[:,0][conA], 10**SFp[:,2][conA], linestyle='-', color='k', alpha=0.5, zorder=-1)
ax1.errorbar(age, 10**SFp[:,1], linestyle='-', color='k', marker='', zorder=-1, lw=.5)
#############
# Get SFMS in log10;
#############
IMF = int(inputs['NIMF'])
SFMS_16 = get_SFMS(zbes,tt,10**ACp[:,0],IMF=IMF)
SFMS_50 = get_SFMS(zbes,tt,10**ACp[:,1],IMF=IMF)
SFMS_84 = get_SFMS(zbes,tt,10**ACp[:,2],IMF=IMF)
#try:
if False:
f_rejuv,t_quench,t_rejuv = check_rejuv(age,SFp[:,:],ACp[:,:],SFMS_50)
else:
print('Failed to call rejuvenation module.')
f_rejuv,t_quench,t_rejuv = 0,0,0
# Plot MS?
conA = ()
if f_SFMS:
if f_log_sfh:
ax1.fill_between(tt[conA], SFMS_50[conA]-0.2, SFMS_50[conA]+0.2, linestyle='-', color='b', alpha=0.3, zorder=-2)
ax1.plot(tt[conA], SFMS_50[conA], linestyle='--', color='k', alpha=0.5, zorder=-2)
# Plot limit;
y2min = np.max([lmmin,np.min(np.log10(yMSp[:,1]))])
y2max = np.max(np.log10(yMSp[:,1]))+0.05
if np.abs(y2max-y2min) < 0.2:
y2min -= 0.2
# Total Metal
if not skip_zhist:
ax4.fill_between(age[conA], ZCp[:,0][conA], ZCp[:,2][conA], linestyle='-', color='k', alpha=0.5)
ax4.errorbar(age[conA], ZCp[:,1][conA], linestyle='-', color='k', lw=0.5, zorder=1)
for ii in range(len(age)):
aa = len(age) -1 - ii
if msize[aa]>0:
ax4.errorbar(age[aa], ZCp[aa,1], xerr=[[delTl[aa]/1e9],[delTu[aa]/1e9]], yerr=[[ZCp[aa,1]-ZCp[aa,0]],[ZCp[aa,2]-ZCp[aa,1]]], linestyle='-', color=col[aa], lw=1, zorder=1)
ax4.scatter(age[aa], ZCp[aa,1], marker='.', c=[col[aa]], edgecolor='k', s=msize[aa], zorder=2)
#############
# Axis
#############
# For redshift
if zbes<4:
if zbes<2:
zred = [zbes, 2, 3, 6]
zredl = ['$z_\mathrm{obs.}$', 2, 3, 6]
elif zbes<2.5:
zred = [zbes, 2.5, 3, 6]
zredl = ['$z_\mathrm{obs.}$', 2.5, 3, 6]
elif zbes<3.:
zred = [zbes, 3, 6]
zredl = ['$z_\mathrm{obs.}$', 3, 6]
else:
zred = [zbes, 6]
zredl = ['$z_\mathrm{obs.}$', 6]
elif zbes<6:
zred = [zbes, 5, 6, 9]
zredl = ['$z_\mathrm{obs.}$', 5, 6, 9]
else:
zred = [zbes, 12]
zredl = ['$z_\mathrm{obs.}$', 12]
Tzz = np.zeros(len(zred), dtype='float')
for zz in range(len(zred)):
Tzz[zz] = (Tuni - MB.cosmo.age(zred[zz]).value)
if Tzz[zz] < Txmin:
Tzz[zz] = Txmin
lsfru = 2.8
if np.max(SFp[:,2])>2.8:
lsfru = np.max(SFp[:,2])+0.1
if f_log_sfh:
ax1.set_ylim(lsfrl, lsfru)
ax1.set_ylabel('$\log \dot{M}_*/M_\odot$yr$^{-1}$', fontsize=12)
#ax1.plot(Tzz, Tzz*0+lsfru+(lsfru-lsfrl)*.00, marker='|', color='k', ms=3, linestyle='None')
else:
ax1.set_ylim(0, 10**lsfru)
ax1.set_ylabel('$\dot{M}_*/M_\odot$yr$^{-1}$', fontsize=12)
#ax1.plot(Tzz, Tzz*0+10**lsfru+(lsfru-lsfrl)*.00, marker='|', color='k', ms=3, linestyle='None')
ax1.set_xlim(Txmin, Txmax)
ax1.set_xscale('log')
ax2.set_ylabel('$\log M_*/M_\odot$', fontsize=12)
ax2.set_xlim(Txmin, Txmax)
ax2.set_ylim(y2min, y2max)
ax2.set_xscale('log')
ax2.text(np.min(age*1.05), y2min + 0.07*(y2max-y2min), 'ID: %s\n$z_\mathrm{obs.}:%.2f$\n$\log M_\mathrm{*}/M_\odot:%.2f$\n$\log Z_\mathrm{*}/Z_\odot:%.2f$\n$\log T_\mathrm{*}$/Gyr$:%.2f$\n$A_V$/mag$:%.2f$'\
%(ID, zbes, ACp[0,1], ZCp[0,1], np.nanmedian(TC[0,:]), Avtmp[1]), fontsize=9, bbox=dict(facecolor='w', alpha=0.7), zorder=10)
#
# Brief Summary
#
# Writing SED param in a fits file;
# Header
prihdr = fits.Header()
prihdr['ID'] = ID
prihdr['z'] = zbes
prihdr['RA'] = RA
prihdr['DEC'] = DEC
# Add rejuv properties;
prihdr['f_rejuv'] = f_rejuv
prihdr['t_quen'] = t_quench
prihdr['t_rejuv'] = t_rejuv
# SFR
prihdr['tset_SFR'] = tset_SFR_SED
# SFH
prihdr['SFH_FORM'] = MB.SFH_FORM
# Version;
import gsf
prihdr['version'] = gsf.__version__
percs = [16,50,84]
zmc = hdul[1].data['zmc']
ACP = [ACp[0,0], ACp[0,1], ACp[0,2]]
ZCP = [ZCp[0,0], ZCp[0,1], ZCp[0,2]]
ZLP = ZCP #[ZLp[0,0], ZLp[0,1], ZLp[0,2]]
TLW = TTp[0,:]
TMW = TTp[0,:]
TAW = TCp[0,:]
for ii in range(len(percs)):
prihdr['zmc_%d'%percs[ii]] = ('%.3f'%zmc[ii],'redshift')
for ii in range(len(percs)):
prihdr['HIERARCH Mstel_%d'%percs[ii]] = ('%.3f'%ACP[ii], 'Stellar mass, logMsun')
for ii in range(len(percs)):
prihdr['HIERARCH SFR_%d'%percs[ii]] = ('%.3f'%SFR_SED_med[ii], 'SFR, logMsun/yr')
for ii in range(len(percs)):
prihdr['HIERARCH Z_MW_%d'%percs[ii]] = ('%.3f'%ZCP[ii], 'Mass-weighted metallicity, logZsun')
for ii in range(len(percs)):
prihdr['HIERARCH Z_LW_%d'%percs[ii]] = ('%.3f'%ZLP[ii], 'Light-weighted metallicity, logZsun')
for ii in range(len(percs)):
prihdr['HIERARCH T_MW_%d'%percs[ii]] = ('%.3f'%TMW[ii], 'Mass-weighted age, logGyr')
for ii in range(len(percs)):
prihdr['HIERARCH T_LW_%d'%percs[ii]] = ('%.3f'%TLW[ii], 'Light-weighted age, logGyr')
for ii in range(len(percs)):
prihdr['HIERARCH TAU_%d'%percs[ii]] = ('%.3f'%TAW[ii], 'Tau, logGyr')
for ii in range(len(percs)):
prihdr['AV_%d'%percs[ii]] = ('%.3f'%Avtmp[ii], 'Dust attenuation, mag')
prihdu = fits.PrimaryHDU(header=prihdr)
# For SFH plot;
t0 = Tuni - age[:]
col02 = []
col50 = fits.Column(name='time', format='E', unit='Gyr', array=xSFp[:,1])
col02.append(col50)
col50 = fits.Column(name='time_l', format='E', unit='Gyr', array=xSFp[:,0])
col02.append(col50)
col50 = fits.Column(name='time_u', format='E', unit='Gyr', array=xSFp[:,2])
col02.append(col50)
col50 = fits.Column(name='SFR16', format='E', unit='logMsun/yr', array=SFp[:,0])
col02.append(col50)
col50 = fits.Column(name='SFR50', format='E', unit='logMsun/yr', array=SFp[:,1])
col02.append(col50)
col50 = fits.Column(name='SFR84', format='E', unit='logMsun/yr', array=SFp[:,2])
col02.append(col50)
col50 = fits.Column(name='Mstel16', format='E', unit='logMsun', array=ACp[:,0])
col02.append(col50)
col50 = fits.Column(name='Mstel50', format='E', unit='logMsun', array=ACp[:,1])
col02.append(col50)
col50 = fits.Column(name='Mstel84', format='E', unit='logMsun', array=ACp[:,2])
col02.append(col50)
col50 = fits.Column(name='Z16', format='E', unit='logZsun', array=ZCp[:,0])
col02.append(col50)
col50 = fits.Column(name='Z50', format='E', unit='logZsun', array=ZCp[:,1])
col02.append(col50)
col50 = fits.Column(name='Z84', format='E', unit='logZsun', array=ZCp[:,2])
col02.append(col50)
colms = fits.ColDefs(col02)
dathdu = fits.BinTableHDU.from_columns(colms)
hdu = fits.HDUList([prihdu, dathdu])
file_sfh = MB.DIR_OUT + 'SFH_' + ID + '.fits'
hdu.writeto(file_sfh, overwrite=True)
# Attach to MB;
MB.sfh_tlook = age
MB.sfh_tlookl= delTl[:][conA]/1e9
MB.sfh_tlooku= delTu[:][conA]/1e9
MB.sfh_sfr16 = SFp[:,0]
MB.sfh_sfr50 = SFp[:,1]
MB.sfh_sfr84 = SFp[:,2]
MB.sfh_mfr16 = ACp[:,0]
MB.sfh_mfr50 = ACp[:,1]
MB.sfh_mfr84 = ACp[:,2]
MB.sfh_zfr16 = ZCp[:,0]
MB.sfh_zfr50 = ZCp[:,1]
MB.sfh_zfr84 = ZCp[:,2]
# SFH
zzall = np.arange(1.,12,0.01)
Tall = MB.cosmo.age(zzall).value # , use_flat=True, **cosmo)
dely2 = 0.1
while (y2max-y2min)/dely2>7:
dely2 *= 2.
y2ticks = np.arange(y2min, y2max, dely2)
ax2.set_yticks(y2ticks)
ax2.set_yticklabels(np.arange(y2min, y2max, dely2), minor=False)
ax2.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
if not skip_zhist:
y3min, y3max = np.min([np.min(Z),-0.8]), np.max([np.max(Z),0.4])
ax4.set_xlim(Txmin, Txmax)
ax4.set_ylim(y3min-0.05, y3max)
ax4.set_xscale('log')
ax4.set_yticks([-0.8, -0.4, 0., 0.4])
ax4.set_yticklabels(['-0.8', '-0.4', '0', '0.4'])
#ax4.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
#ax3.yaxis.labelpad = -2
ax4.yaxis.labelpad = -2
ax4.set_xlabel('$t_\mathrm{lookback}$/Gyr', fontsize=12)
ax4.set_ylabel('$\log Z_*/Z_\odot$', fontsize=12)
ax4t.set_xscale('log')
ax4t.set_xticklabels(zredl[:])
ax4t.set_xticks(Tzz[:])
ax4t.tick_params(axis='x', labelcolor='k')
ax4t.xaxis.set_ticks_position('none')
ax4t.plot(Tzz, Tzz*0+y3max+(y3max-y3min)*.00, marker='|', color='k', ms=3, linestyle='None')
ax4t.set_xlim(Txmin, Txmax)
ax1.set_xlabel('$t_\mathrm{lookback}$/Gyr', fontsize=12)
ax2.set_xlabel('$t_\mathrm{lookback}$/Gyr', fontsize=12)
# This has to come before set_xticks;
ax1t.set_xscale('log')
ax2t.set_xscale('log')
ax1t.set_xticklabels(zredl[:])
ax1t.set_xticks(Tzz[:])
ax1t.tick_params(axis='x', labelcolor='k')
ax1t.xaxis.set_ticks_position('none')
ax1t.plot(Tzz, Tzz*0+lsfru+(lsfru-lsfrl)*.00, marker='|', color='k', ms=3, linestyle='None')
ax2t.set_xticklabels(zredl[:])
ax2t.set_xticks(Tzz[:])
ax2t.tick_params(axis='x', labelcolor='k')
ax2t.xaxis.set_ticks_position('none')
ax2t.plot(Tzz, Tzz*0+y2max+(y2max-y2min)*.00, marker='|', color='k', ms=3, linestyle='None')
# This has to come after set_xticks;
ax1t.set_xlim(Txmin, Txmax)
ax2t.set_xlim(Txmin, Txmax)
# Save
fig.savefig(MB.DIR_OUT + 'SFH_' + ID + '_pcl.png', dpi=dpi)
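# Hedged illustration (not part of gsf): the Monte-Carlo percentile banding used in
# plot_sfh_tau above. Each posterior SFH draw is evaluated on the time grid and every
# time bin is summarized by its 16/50/84th percentiles; the array sizes below are invented.
def _demo_percentile_band(ntime=50, ndraw=1000):
    import numpy as np
    sfr_draws = 10**np.random.normal(0.5, 0.3, size=(ntime, ndraw)) # fake linear-SFR draws
    sfr_band = np.zeros((ntime, 3))
    for ii in range(ntime):
        sfr_band[ii, :] = np.percentile(sfr_draws[ii, :], [16, 50, 84])
    # column 1 is the median track; columns 0 and 2 bound the 68% envelope.
    return sfr_band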
def get_evolv(MB, ID, Z=np.arange(-1.2,0.4249,0.05), age=[0.01, 0.1, 0.3, 0.7, 1.0, 3.0], f_comp=0, fil_path='./FILT/', \
inputs=None, dust_model=0, DIR_TMP='./templates/', delt_sfh=0.01):
'''
Purpose
-------
Reprocess output files to get spectra, UV color, and SFH at higher resolution.
Parameters
----------
delt_sfh : float
delta t of input SFH in Gyr.
Returns
-------
SED as function of age, based on SF and Z histories;
'''
print('This function may take a while as it runs fsps.')
flim = 0.01
lsfrl = -1 # log SFR low limit
mmax = 1000
Txmax = 4 # Max x value
lmmin = 10.3
nage = np.arange(0,len(age),1)
fnc = Func(ID, Z, nage, dust_model=dust_model) # Set up the number of Age/ZZ
bfnc = Basic(Z)
age = np.asarray(age)
################
# RF colors.
import os.path
home = os.path.expanduser('~')
c = MB.c #3.e18 # A/s
m0set = MB.m0set #25.0
chimax = 1.
d = 10**(73.6/2.5) * 1e-18 # From [ergs/s/cm2/A] to [ergs/s/cm2/Hz]
###########################
# Open result file
###########################
file = 'summary_' + ID + '.fits'
hdul = fits.open(file) # open a FITS file
zbes = hdul[0].header['z']
chinu= hdul[1].data['chi']
uv= hdul[1].data['uv']
vj= hdul[1].data['vj']
RA = 0
DEC = 0
rek = 0
erekl= 0
ereku= 0
mu = 1.0
nn = 0
qq = 0
enn = 0
eqq = 0
try:
RA = hdul[0].header['RA']
DEC = hdul[0].header['DEC']
except:
RA = 0
DEC = 0
try:
SN = hdul[0].header['SN']
except:
###########################
# Get SN of Spectra
###########################
file = 'templates/spec_obs_' + ID + '.cat'
fds = np.loadtxt(file, comments='#')
nrs = fds[:,0]
lams = fds[:,1]
fsp = fds[:,2]
esp = fds[:,3]
consp = (nrs<10000) & (lams/(1.+zbes)>3600) & (lams/(1.+zbes)<4200)
if len((fsp/esp)[consp]) > 10:
SN = np.median((fsp/esp)[consp])
else:
SN = 1
Asum = 0
A50 = np.arange(len(age), dtype='float')
for aa in range(len(A50)):
A50[aa] = hdul[1].data['A'+str(aa)][1]
Asum += A50[aa]
####################
# For cosmology
####################
DL = MB.cosmo.luminosity_distance(zbes).value * MB.Mpc_cm # Luminosity distance in cm
Cons = (4.*np.pi*DL**2/(1.+zbes))
Tuni = MB.cosmo.age(zbes).value
Tuni0 = (Tuni - age[:])
delT = np.zeros(len(age),dtype='float')
delTl = np.zeros(len(age),dtype='float')
delTu = np.zeros(len(age),dtype='float')
for aa in range(len(age)):
if aa == 0:
delTl[aa] = age[aa]
delTu[aa] = (age[aa+1]-age[aa])/2.
delT[aa] = delTu[aa] + delTl[aa]
elif Tuni < age[aa]:
delTl[aa] = (age[aa]-age[aa-1])/2.
delTu[aa] = delTl[aa] #10.
delT[aa] = delTu[aa] + delTl[aa]
elif aa == len(age)-1:
delTl[aa] = (age[aa]-age[aa-1])/2.
delTu[aa] = Tuni - age[aa]
delT[aa] = delTu[aa] + delTl[aa]
else:
delTl[aa] = (age[aa]-age[aa-1])/2.
delTu[aa] = (age[aa+1]-age[aa])/2.
delT[aa] = delTu[aa] + delTl[aa]
delT[:] *= 1e9 # Gyr to yr
delTl[:] *= 1e9 # Gyr to yr
delTu[:] *= 1e9 # Gyr to yr
##############################
# Load Pickle
##############################
samplepath = './'
pfile = 'chain_' + ID + '_corner.cpkl'
niter = 0
data = loadcpkl(os.path.join(samplepath+'/'+pfile))
try:
ndim = data['ndim'] # By default, use ndim and burnin values contained in the cpkl file, if present.
burnin = data['burnin']
nmc = data['niter']
nwalk = data['nwalkers']
Nburn = burnin #* nwalk/10/2 # I think this takes 3/4 of samples
#if nmc>1000:
# Nburn = 500
samples = data['chain'][:]
except:
print(' => Keys ndim and burnin not found in the cpkl file; using input keyword values')
return -1
######################
# Mass-to-Light ratio.
######################
AM = np.zeros((len(age), mmax), dtype='float') # Mass in each bin.
AC = np.zeros((len(age), mmax), dtype='float') # Cumulative mass in each bin.
AL = np.zeros((len(age), mmax), dtype='float') # Cumulative light in each bin.
ZM = np.zeros((len(age), mmax), dtype='float') # Z.
ZC = np.zeros((len(age), mmax), dtype='float') # Cumulative Z.
ZL = np.zeros((len(age), mmax), dtype='float') # Light weighted cumulative Z.
TC = np.zeros((len(age), mmax), dtype='float') # Mass weighted T.
TL = np.zeros((len(age), mmax), dtype='float') # Light weighted T.
ZMM= np.zeros((len(age), mmax), dtype='float') # Mass weighted Z.
ZML= np.zeros((len(age), mmax), dtype='float') # Light weighted Z.
SF = np.zeros((len(age), mmax), dtype='float') # SFR
Av = np.zeros(mmax, dtype='float') # Av (dust attenuation)
# ##############################
# Add simulated scatter in quad
# if files are available.
# ##############################
if inputs:
f_zev = int(inputs['ZEVOL'])
else:
f_zev = 1
eZ_mean = 0
try:
meanfile = './sim_SFH_mean.cat'
dfile = np.loadtxt(meanfile, comments='#')
eA = dfile[:,2]
eZ = dfile[:,4]
eAv= np.mean(dfile[:,6])
if f_zev == 0:
eZ_mean = np.mean(eZ[:])
eZ[:] = age * 0 #+ eZ_mean
else:
try:
f_zev = int(prihdr['ZEVOL'])
if f_zev == 0:
eZ_mean = np.mean(eZ[:])
eZ = age * 0
except:
pass
except:
print('No simulation file (%s).\nError may be underestimated.' % meanfile)
eA = age * 0
eZ = age * 0
eAv= 0
mm = 0
for mm in range(mmax):
mtmp = np.random.randint(len(samples))# + Nburn
AAtmp = np.zeros(len(age), dtype='float')
ZZtmp = np.zeros(len(age), dtype='float')
mslist= np.zeros(len(age), dtype='float')
Av_tmp = samples['Av'][mtmp]
f0 = fits.open(DIR_TMP + 'ms_' + ID + '.fits')
sedpar = f0[1]
f1 = fits.open(DIR_TMP + 'ms.fits')
mloss = f1[1].data
Avrand = np.random.uniform(-eAv, eAv)
if Av_tmp + Avrand<0:
Av[mm] = 0
else:
Av[mm] = Av_tmp + Avrand
for aa in range(len(age)):
AAtmp[aa] = samples['A'+str(aa)][mtmp]/mu
try:
ZZtmp[aa] = samples['Z'+str(aa)][mtmp]
except:
ZZtmp[aa] = samples['Z0'][mtmp]
nZtmp = bfnc.Z2NZ(ZZtmp[aa])
mslist[aa] = sedpar.data['ML_'+str(nZtmp)][aa]
ml = mloss['ms_'+str(nZtmp)][aa]
Arand = np.random.uniform(-eA[aa],eA[aa])
Zrand = np.random.uniform(-eZ[aa],eZ[aa])
AM[aa, mm] = AAtmp[aa] * mslist[aa] * 10**Arand
AL[aa, mm] = AM[aa, mm] / mslist[aa]
SF[aa, mm] = AAtmp[aa] * mslist[aa] / delT[aa] / ml * 10**Arand
ZM[aa, mm] = ZZtmp[aa] + Zrand
ZMM[aa, mm]= (10 ** ZZtmp[aa]) * AAtmp[aa] * mslist[aa] * 10**Zrand
ZML[aa, mm]= ZMM[aa, mm] / mslist[aa]
for aa in range(len(age)):
AC[aa, mm] = np.sum(AM[aa:, mm])
ZC[aa, mm] = np.log10(np.sum(ZMM[aa:, mm])/AC[aa, mm])
ZL[aa, mm] = np.log10(np.sum(ZML[aa:, mm])/np.sum(AL[aa:, mm]))
if f_zev == 0: # To avoid random fluctuation in A.
ZC[aa, mm] = ZM[aa, mm]
ACs = 0
ALs = 0
for bb in range(aa, len(age), 1):
tmpAA = 10**np.random.uniform(-eA[bb],eA[bb])
tmpTT = np.random.uniform(-delT[bb]/1e9,delT[bb]/1e9)
TC[aa, mm] += (age[bb]+tmpTT) * AAtmp[bb] * mslist[bb] * tmpAA
TL[aa, mm] += (age[bb]+tmpTT) * AAtmp[bb] * tmpAA
ACs += AAtmp[bb] * mslist[bb] * tmpAA
ALs += AAtmp[bb] * tmpAA
TC[aa, mm] /= ACs
TL[aa, mm] /= ALs
Avtmp = np.percentile(Av[:],[16,50,84])
#############
# Plot
#############
AMp = np.zeros((len(age),3), dtype='float')
ACp = np.zeros((len(age),3), dtype='float')
ZMp = np.zeros((len(age),3), dtype='float')
ZCp = np.zeros((len(age),3), dtype='float')
SFp = np.zeros((len(age),3), dtype='float')
for aa in range(len(age)):
AMp[aa,:] = np.percentile(AM[aa,:], [16,50,84])
ACp[aa,:] = np.percentile(AC[aa,:], [16,50,84])
ZMp[aa,:] = np.percentile(ZM[aa,:], [16,50,84])
ZCp[aa,:] = np.percentile(ZC[aa,:], [16,50,84])
SFp[aa,:] = np.percentile(SF[aa,:], [16,50,84])
###################
msize = np.zeros(len(age), dtype='float')
for aa in range(len(age)):
if A50[aa]/Asum>flim: # if >1%
msize[aa] = 150 * A50[aa]/Asum
conA = (msize>=0)
# Make template;
tbegin = np.min(Tuni-age)
tuniv_hr = np.arange(tbegin,Tuni,delt_sfh) # in Gyr
sfh_hr_in= np.interp(tuniv_hr,(Tuni-age)[::-1],SFp[:,1][::-1])
zh_hr_in = np.interp(tuniv_hr,(Tuni-age)[::-1],ZCp[:,1][::-1])
# FSPS
con_sfh = (tuniv_hr>0)
import fsps
nimf = int(inputs['NIMF'])
try:
fneb = int(inputs['ADD_NEBULAE'])
except:
fneb = 0
if fneb == 1:
print('Metallicity is set to logZ/Zsun=%.2f'%(np.max(zh_hr_in)))
sp = fsps.StellarPopulation(compute_vega_mags=False, zcontinuous=1, imf_type=nimf, logzsol=
|
np.max(zh_hr_in)
|
numpy.max
|
# -*- coding: utf-8 -*-
'''
#-------------------------------------------------------------------------------
# NATIONAL UNIVERSITY OF SINGAPORE - NUS
# SINGAPORE INSTITUTE FOR NEUROTECHNOLOGY - SINAPSE
# Singapore
# URL: http://www.sinapseinstitute.org
#-------------------------------------------------------------------------------
# Neuromorphic Engineering Group
# Author: <NAME>, MSc
# Contact: <EMAIL>
#-------------------------------------------------------------------------------
# Description: Library implementing different control algorithms. The classes
# are not related to the application in any way. Feedback signals should be
# input directly. Likewise, the generated outputs should be sent to the proper
# hardware such as the iLimb or UR10 after processing
#-------------------------------------------------------------------------------
'''
#-------------------------------------------------------------------------------
import numpy as np
from copy import copy
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
'''
TODO: check later how to use an abstract base class here; it would be a natural fit
for control systems
class BaseController(ABC):
def __init__(self, _min=0, _max=500):
self.minOutput = _min
self.maxOutput = _max
super().__init__()
@abstractmethod
def run():
pass
'''
#-------------------------------------------------------------------------------
#Proportional controller for feedback-based control
#This can be used for pressing the switches, for example
class Pcontroller():
def __init__(self,_Kp=0.1,_min=0,_max=500,_initCond=0):
self.Kp = _Kp #proportional gain
self.minOutput = _min #minimum output
self.maxOutput = _max #maximum output
self.initCond = _initCond #initial condition
self.output = self.initCond #current controller output
#run the controller for one step and return the saturated output
def run(self,_inputSignal,_setPoint):
#error signal
error = _setPoint - _inputSignal
#define the output
self.output += self.Kp*error
#saturation
if self.output > self.maxOutput:
self.output = self.maxOutput
elif self.output < self.minOutput:
self.output = self.minOutput
#return the current output
return copy(self.output)
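# Hedged usage sketch (not part of the library): step the proportional controller above
# toward a set point with a constant feedback reading. Gain, limits and signal values
# are invented for illustration.
def _demo_pcontroller():
    ctrl = Pcontroller(_Kp=0.5, _min=0, _max=100, _initCond=0)
    outputs = [ctrl.run(_inputSignal=20, _setPoint=50) for _ in range(5)]
    # each call adds Kp*(setpoint - input) = 15; the output is clipped to [_min, _max]
    return outputs # [15.0, 30.0, 45.0, 60.0, 75.0]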
#-------------------------------------------------------------------------------
#Ref: Prach et al., 2017: "Pareto-front analysis of a monotonic PI control law
#for slip suppression in a robotic manipulator"
class SlipController():
def __init__(self,_MPI_Kp=0.1,_MPI_Ki=0.2,_min=0,_max=500,_dt=0.1,_initCond=0):
self.MPI_Kp = _MPI_Kp #Proportional gain
self.MPI_Ki = _MPI_Ki #Integral gain
self.minOutput = _min #minimum output value
self.maxOutput = _max #maximum output value
#initial condition is necessary when the output is already at a level
#which is usually the case like baseline force or joint position that
#establishes contact between the robotic hand and the object
self.initCond = _initCond
#necessary for the derivative of the input signal which will be used
#as the velocity signal
self.prevSample = 0
#velocity generated signal
self.velocity = 0
#position signal
self.position = 0
self.dt = _dt #necessary since this is discrete-time formulation
self.MPI = self.initCond #current MPI output
self.prevMPI = 0 #previous MPI output, necessary for monotonic behavior
self.MPI_crossed = False
#resets the MPI controller
def reset(self):
self.MPI = 0
self.prevMPI = 0
self.position = 0
self.velocity = 0
#run the MPI control and returns its output
#input: velocity
def run(self,_inputVelocity):
self.velocity = _inputVelocity
#integrate the obtained velocity signal = position
self.position += (self.velocity*self.dt)
#print(self.velocity, self.position) #debugging
#debugging
#print('mpi', _inputSignal, self.velocity, self.position)
#MPI output
#Renaming variables to maintain the same structure as used
#in the simulink files developed by Dr. <NAME>
v = self.velocity
z = self.position
#print('mpi', 'v', v, 'z', z) #debugging
self.MPI += self.MPI_Kp*
|
np.sign(v)
|
numpy.sign
|
#!/usr/bin/env python
'''
mcu: Modeling and Crystallographic Utilities
Copyright (C) 2019 <NAME>. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Email: <NAME> <<EMAIL>>
'''
# This is the only place needed to be modified
# The path for the libwannier90 library
W90LIB = '/panfs/roc/groups/6/gagliard/phamx494/pyWannier90/src'
import sys
sys.path.append(W90LIB)
import importlib
found = importlib.util.find_spec('libwannier90') is not None
if found == True:
import libwannier90
else:
print('WARNING: Check the installation of libwannier90 and its path in pyscf/pbc/tools/pywannier90.py')
print('libwannier90 path: ' + W90LIB)
print('libwannier90 can be found at: https://github.com/hungpham2017/pyWannier90')
raise ImportError
import numpy as np
import scipy
import mcu
from mcu.vasp import const
from mcu.cell import utils as cell_utils
def angle(v1, v2):
'''
Return the angle (in radians) between v1 and v2
'''
v1 = np.asarray(v1)
v2 = np.asarray(v2)
cosa = v1.dot(v2)/ np.linalg.norm(v1) / np.linalg.norm(v2)
return np.arccos(cosa)
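# Quick sanity sketch for angle() above (not part of mcu; illustrative values only):
def _demo_angle():
    assert abs(angle([1, 0, 0], [0, 1, 0]) - np.pi/2) < 1e-9 # orthogonal vectors
    assert abs(angle([1, 0, 0], [2, 0, 0])) < 1e-9 # parallel vectors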
def transform(x_vec, z_vec):
'''
Construct a transformation matrix to transform r_vec to the new coordinate system defined by x_vec and z_vec
'''
x_vec = x_vec/np.linalg.norm(np.asarray(x_vec))
z_vec = z_vec/np.linalg.norm(np.asarray(z_vec))
assert x_vec.dot(z_vec) == 0 # x and z have to be orthogonal to one another
y_vec = np.cross(x_vec,z_vec)
new = np.asarray([x_vec, y_vec, z_vec])
original = np.asarray([[1,0,0],[0,1,0],[0,0,1]])
tran_matrix = np.empty([3,3])
for row in range(3):
for col in range(3):
tran_matrix[row,col] = np.cos(angle(original[row],new[col]))
return tran_matrix.T
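# Hedged check (not part of mcu): the matrix returned by transform() is built from
# direction cosines between orthonormal frames, so it should be orthogonal.
def _demo_transform():
    T = transform(np.array([1., 0., 0.]), np.array([0., 0., 1.]))
    assert np.allclose(T @ T.T, np.eye(3))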
def cartesian_prod(arrays, out=None, order = 'C'):
'''
This function is similar to lib.cartesian_prod of PySCF, except the output can be in Fortran or in C order
'''
arrays = [np.asarray(x) for x in arrays]
dtype = np.result_type(*arrays)
nd = len(arrays)
dims = [nd] + [len(x) for x in arrays]
if out is None:
out = np.empty(dims, dtype)
else:
out = np.ndarray(dims, dtype, buffer=out)
tout = out.reshape(dims)
shape = [-1] + [1] * nd
for i, arr in enumerate(arrays):
tout[i] = arr.reshape(shape[:nd-i])
return tout.reshape((nd,-1),order=order).T
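# Usage sketch (not part of mcu): Cartesian product of two small index ranges in C order.
def _demo_cartesian_prod():
    out = cartesian_prod([np.arange(2), np.arange(3)], order='C')
    # rows: [0,0], [0,1], [0,2], [1,0], [1,1], [1,2]
    assert out.shape == (6, 2)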
def periodic_grid(lattice, grid = [50,50,50], supercell = [1,1,1], order = 'C'):
'''
Generate a periodic grid for the unit/computational cell in F/C order
Note: coords has the same unit as lattice
'''
ngrid = np.asarray(grid)
qv = cartesian_prod([np.arange(-ngrid[i]*(supercell[i]//2),ngrid[i]*((supercell[i]+1)//2)) for i in range(3)], order=order)
a_frac = np.einsum('i,ij->ij', 1./ngrid, lattice)
coords = np.dot(qv, a_frac)
# Compute weight
ngrids = np.prod(grid)
ncells = np.prod(supercell)
weights = np.empty(ngrids*ncells)
vol = abs(np.linalg.det(lattice))
weights[:] = vol / ngrids / ncells
return coords, weights
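# Usage sketch (not part of mcu): a coarse grid on a 5 Bohr cubic cell; the integration
# weights sum to the cell volume by construction.
def _demo_periodic_grid():
    lattice = np.eye(3) * 5.0
    coords, weights = periodic_grid(lattice, grid=[2, 2, 2], supercell=[1, 1, 1])
    assert coords.shape == (8, 3)
    assert abs(weights.sum() - np.linalg.det(lattice)) < 1e-8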
def R_r(r_norm, r = 1, zona = 1):
'''
Radial functions R(r) used together with \Theta_{l,m_r}(\theta,\phi) to build the projection orbitals
Note: r_norm has the unit of Bohr
'''
if r == 1:
R_r = 2 * zona**(3/2) * np.exp(-zona*r_norm)
elif r == 2:
R_r = 1 / 2 / np.sqrt(2) * zona**(3/2) * (2 - zona*r_norm) * np.exp(-zona*r_norm/2)
else:
R_r = np.sqrt(4/27) * zona**(3/2) * (1 - 2*zona*r_norm/3 + 2*(zona**2)*(r_norm**2)/27) * np.exp(-zona*r_norm/3)
return R_r
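# Numerical sanity sketch (not part of mcu): for r=1 and zona=1 the radial function is
# the hydrogen-like 1s shape 2*exp(-r), whose norm integral int R^2 r^2 dr equals 1.
def _demo_R_r_norm():
    r_norm = np.linspace(0.0, 40.0, 200001)
    R = R_r(r_norm, r=1, zona=1)
    norm_integral = np.sum(R**2 * r_norm**2) * (r_norm[1] - r_norm[0]) # crude quadrature
    assert abs(norm_integral - 1.0) < 1e-3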
def theta(func, cost, phi):
'''
Basic angular functions (s,p,d,f) used to compute \Theta_{l,m_r}(\theta,\phi)
'''
if func == 's': # s
theta = 1 / np.sqrt(4 * np.pi) * np.ones([cost.shape[0]])
elif func == 'pz':
theta = np.sqrt(3 / 4 / np.pi) * cost
elif func == 'px':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(3 / 4 / np.pi) * sint * np.cos(phi)
elif func == 'py':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(3 / 4 / np.pi) * sint * np.sin(phi)
elif func == 'dz2':
theta = np.sqrt(5 / 16 / np.pi) * (3*cost**2 - 1)
elif func == 'dxz':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(15 / 4 / np.pi) * sint * cost * np.cos(phi)
elif func == 'dyz':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(15 / 4 / np.pi) * sint * cost * np.sin(phi)
elif func == 'dx2-y2':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(15 / 16 / np.pi) * (sint**2) * np.cos(2*phi)
elif func == 'dxy':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(15 / 16 / np.pi) * (sint**2) * np.sin(2*phi)
elif func == 'fz3':
theta = np.sqrt(7) / 4 / np.sqrt(np.pi) * (5*cost**3 - 3*cost)
elif func == 'fxz2':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(21) / 4 / np.sqrt(2*np.pi) * (5*cost**2 - 1) * sint * np.cos(phi)
elif func == 'fyz2':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(21) / 4 / np.sqrt(2*np.pi) * (5*cost**2 - 1) * sint * np.sin(phi)
elif func == 'fz(x2-y2)':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(105) / 4 / np.sqrt(np.pi) * sint**2 * cost * np.cos(2*phi)
elif func == 'fxyz':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(105) / 4 / np.sqrt(np.pi) * sint**2 * cost * np.sin(2*phi)
elif func == 'fx(x2-3y2)':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(35) / 4 / np.sqrt(2*np.pi) * sint**3 * (np.cos(phi)**2 - 3*np.sin(phi)**2) * np.cos(phi)
elif func == 'fy(3x2-y2)':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(35) / 4 / np.sqrt(2*np.pi) * sint**3 * (3*
|
np.cos(phi)
|
numpy.cos
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class Mod(Base):
@staticmethod
def export_mod_mixed_sign_float64(): # type: () -> None
node = onnx.helper.make_node(
'Mod',
inputs=['x', 'y'],
outputs=['z'],
fmod=1
)
x = np.array([-4.3, 7.2, 5.0, 4.3, -7.2, 8.0]).astype(np.float64)
y = np.array([2.1, -3.4, 8.0, -2.1, 3.4, 5.0]).astype(np.float64)
z = np.fmod(x, y) # expected output [-0.1, 0.4, 5. , 0.1, -0.4, 3.]
expect(node, inputs=[x, y], outputs=[z],
name='test_mod_mixed_sign_float64')
@staticmethod
def export_mod_mixed_sign_float32(): # type: () -> None
node = onnx.helper.make_node(
'Mod',
inputs=['x', 'y'],
outputs=['z'],
fmod=1
)
x = np.array([-4.3, 7.2, 5.0, 4.3, -7.2, 8.0]).astype(np.float32)
y = np.array([2.1, -3.4, 8.0, -2.1, 3.4, 5.0]).astype(np.float32)
z = np.fmod(x, y) # expected output [-0.10000038, 0.39999962, 5. , 0.10000038, -0.39999962, 3.]
expect(node, inputs=[x, y], outputs=[z],
name='test_mod_mixed_sign_float32')
@staticmethod
def export_mod_mixed_sign_float16(): # type: () -> None
node = onnx.helper.make_node(
'Mod',
inputs=['x', 'y'],
outputs=['z'],
fmod=1
)
x = np.array([-4.3, 7.2, 5.0, 4.3, -7.2, 8.0]).astype(np.float16)
y = np.array([2.1, -3.4, 8.0, -2.1, 3.4, 5.0]).astype(np.float16)
z = np.fmod(x, y) # expected output [-0.10156, 0.3984 , 5. , 0.10156, -0.3984 , 3.]
expect(node, inputs=[x, y], outputs=[z],
name='test_mod_mixed_sign_float16')
@staticmethod
def export_mod_mixed_sign_int64(): # type: () -> None
node = onnx.helper.make_node(
'Mod',
inputs=['x', 'y'],
outputs=['z'],
)
x = np.array([-4, 7, 5, 4, -7, 8]).astype(np.int64)
y = np.array([2, -3, 8, -2, 3, 5]).astype(np.int64)
z = np.mod(x, y) # expected output [ 0, -2, 5, 0, 2, 3]
expect(node, inputs=[x, y], outputs=[z],
name='test_mod_mixed_sign_int64')
@staticmethod
def export_mod_mixed_sign_int32(): # type: () -> None
node = onnx.helper.make_node(
'Mod',
inputs=['x', 'y'],
outputs=['z'],
)
x = np.array([-4, 7, 5, 4, -7, 8]).astype(np.int32)
y = np.array([2, -3, 8, -2, 3, 5]).astype(np.int32)
z = np.mod(x, y) # expected output [ 0, -2, 5, 0, 2, 3]
expect(node, inputs=[x, y], outputs=[z],
name='test_mod_mixed_sign_int32')
@staticmethod
def export_mod_mixed_sign_int16(): # type: () -> None
node = onnx.helper.make_node(
'Mod',
inputs=['x', 'y'],
outputs=['z'],
)
x = np.array([-4, 7, 5, 4, -7, 8]).astype(np.int16)
y = np.array([2, -3, 8, -2, 3, 5]).astype(np.int16)
z =
|
np.mod(x, y)
|
numpy.mod
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for adaptive.py."""
from absl.testing import parameterized
import numpy as np
import scipy.stats
import tensorflow.compat.v2 as tf
from robust_loss import adaptive
from robust_loss import util
from robust_loss import wavelet
tf.enable_v2_behavior()
def generate_pixel_toy_image_data(image_width, num_samples, _):
"""Generates pixel data for _test_fitting_toy_image_data_is_correct().
Constructs a "mean" image in RGB pixel space (parametrized by `image_width`)
and draws `num_samples` samples from a normal distribution using that mean,
and returns those samples and their empirical mean as reference.
Args:
image_width: The width and height in pixels of the images being produced.
num_samples: The number of samples to generate.
_: Dummy argument so that this function's interface matches
generate_wavelet_toy_image_data()
Returns:
A tuple of (samples, reference, color_space, representation), where
samples = A set of sampled images of size
(`num_samples`, `image_width`, `image_width`, 3)
reference = The empirical mean of `samples` of size
(`image_width`, `image_width`, 3).
color_space = 'RGB'
representation = 'PIXEL'
"""
color_space = 'RGB'
representation = 'PIXEL'
mu = np.random.uniform(size=(image_width, image_width, 3))
samples = np.random.normal(
loc=np.tile(mu[np.newaxis], [num_samples, 1, 1, 1]))
reference = np.mean(samples, 0)
return samples, reference, color_space, representation
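# Usage sketch (illustrative sizes only): the shapes returned by the pixel-space generator.
def _demo_pixel_toy_data():
    samples, reference, color_space, representation = generate_pixel_toy_image_data(8, 16, None)
    assert samples.shape == (16, 8, 8, 3)
    assert reference.shape == (8, 8, 3)
    assert (color_space, representation) == ('RGB', 'PIXEL')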
def generate_wavelet_toy_image_data(image_width, num_samples,
wavelet_num_levels):
"""Generates wavelet data for testFittingImageDataIsCorrect().
Constructs a "mean" image in the YUV wavelet domain (parametrized by
`image_width`, and `wavelet_num_levels`) and draws `num_samples` samples
from a normal distribution using that mean, and returns RGB images
corresponding to those samples and to the mean (computed in the
specified latent space) of those samples.
Args:
image_width: The width and height in pixels of the images being produced.
num_samples: The number of samples to generate.
wavelet_num_levels: The number of levels in the wavelet decompositions of
the generated images.
Returns:
A tuple of (samples, reference, color_space, representation), where
samples = A set of sampled images of size
(`num_samples`, `image_width`, `image_width`, 3)
reference = The empirical mean of `samples` (computed in YUV Wavelet space
but returned as an RGB image) of size (`image_width`, `image_width`, 3).
color_space = 'YUV'
representation = 'CDF9/7'
"""
color_space = 'YUV'
representation = 'CDF9/7'
samples = []
reference = []
for level in range(wavelet_num_levels):
samples.append([])
reference.append([])
w = image_width // 2**(level + 1)
scaling = 2**level
for _ in range(3):
# Construct the ground-truth pixel band mean.
mu = scaling * np.random.uniform(size=(3, w, w))
# Draw samples from the ground-truth mean.
band_samples = np.random.normal(
loc=np.tile(mu[np.newaxis], [num_samples, 1, 1, 1]))
# Take the empirical mean of the samples as a reference.
band_reference = np.mean(band_samples, 0)
samples[-1].append(np.reshape(band_samples, [-1, w, w]))
reference[-1].append(band_reference)
# Handle the residual band.
mu = scaling * np.random.uniform(size=(3, w, w))
band_samples = np.random.normal(
loc=np.tile(mu[np.newaxis], [num_samples, 1, 1, 1]))
band_reference = np.mean(band_samples, 0)
samples.append(
|
np.reshape(band_samples, [-1, w, w])
|
numpy.reshape
|
"""
Defines:
- data_in_material_coord(bdf, op2, in_place=False)
"""
from __future__ import annotations
import copy
from itertools import count
from typing import TYPE_CHECKING
import numpy as np
from numpy import cos, sin, cross
from numpy.linalg import norm # type: ignore
from pyNastran.utils.numpy_utils import integer_types
if TYPE_CHECKING: # pragma: no cover
from pyNastran.bdf.bdf import BDF, GRID
from pyNastran.op2.op2 import OP2
force_vectors = ['cquad4_force', 'cquad8_force', 'cquadr_force',
'ctria3_force', 'ctria6_force', 'ctriar_force']
stress_vectors = ['cquad4_stress', 'cquad8_stress', 'cquadr_stress',
'ctria3_stress', 'ctria6_stress', 'ctriar_stress']
strain_vectors = ['cquad4_strain', 'cquad8_strain', 'cquadr_strain',
'ctria3_strain', 'ctria6_strain', 'ctriar_strain']
def __transform_solids(model: OP2): # pragma: no cover
"""http://web.mit.edu/course/3/3.11/www/modules/trans.pdf"""
R = np.array([
[1., 0., 0.],
[0., 1., 0.],
[0., 0., 2.],
])
Ri = np.array([
[1., 0., 0.],
[0., 1., 0.],
[0., 0., 0.5],
])
thetad = 20.
theta = np.radians(thetad)
s = np.sin(theta)
c = np.cos(theta)
sc = s * c
c2 = c ** 2
s2 = s ** 2
oxx = 1.
oyy = 2.
ozz = 3.
txy = 1.
txz = 0.
tyz = 0.
Ar = np.array([
[c2, s2, 2. * sc],
[s2, c2, -2. * sc],
[-sc, sc, c2 - s2],
])
"""
{ox' {ox}
{oy' = [Ar] {oy}
{txy' {txy}
"""
from pyNastran.bdf.bdf import BDF
bdf_model = BDF()
bdf_model.add_grid(1, [1., 0., 0.], cp=0, cd=0, ps='', seid=0, comment='')
bdf_model.add_grid(2, [1., 1., 0.], cp=0, cd=0, ps='', seid=0, comment='')
bdf_model.add_grid(3, [0., 1., 0.], cp=0, cd=0, ps='', seid=0, comment='')
bdf_model.add_grid(4, [0., 0., 1.], cp=0, cd=0, ps='', seid=0, comment='')
ctetra = bdf_model.add_ctetra(1, 1, [1, 2, 3, 4],)
bdf_model.add_psolid(1, 1, cordm=0)
E = 3.0E7
G = None
nu = 0.3
bdf_model.add_mat1(1, E, G, nu)
bdf_model.cross_reference()
# this is ACTUALLY the element coordinate system
centroid, xe, ye, ze = ctetra.material_coordinate_system()
T = np.vstack([xe, ye, ze]) # Te
# we're going to transform the Te
stress = np.array([
[oxx, txy, txz],
[txy, oyy, tyz],
[txz, tyz, ozz],
])
#stress2 = Ar @ stress
#strain2 = (R @ A @ Ri) @ strain
# which is it?
stress3 = T.T @ stress @ T
stress3t = T @ stress @ T.T
# this is a test that these are the same...
stress4 = R @ stress @ Ri
print(stress)
print(stress4)
print('------------')
strain3 = T.T @ strain @ T
#strain3t = T.T @ strain @ T
#strain4 = R @ strain3 @ Ri
# is this strain3 or strain3t; is it (R @ strain3x @ Ri) or (Ri @ strain3x @ R)
strain4 = R @ strain3 @ Ri
#print(T)
#print(T @ T.T)
#print(stress2)
print(stress3)
print(stress3t)
x = 1
R = np.array([
[1., 0., 0.],
[0., 1., 0.],
[0., 0., 2.],
])
Ri = np.array([
[1., 0., 0.],
[0., 1., 0.],
[0., 0., 0.5],
])
nodes_desired = [213972, 213973, 213974, 213975, 213980, 213982, 213989, 213990,
213998, 213999, 214420, 214431, 214457, 214458, 214459, 214460]
def transform_solids(bdf_model: BDF, op2_model: OP2, cid: int):
"""
http://web.mit.edu/course/3/3.11/www/modules/trans.pdf
[stress_out] = [T_out] [stress_0] [T_out]^T
[T_out]^T [stress_0] [T_out] = [stress_0] [T_out]
[stress_out] = [T_out] [T_in]^T [stress_in] [T_in] [T_out]^T
[stress_out] = [T] [stress_in] [T]^T
[T] = [T_out] [T_in]^T
"""
Tout = np.eye(3, dtype='float64')
if cid not in [-1, 0]:
coord_out = bdf_model.coords[cid]
assert coord_out.type in ['CORD2R', 'CORD1R'], coord_out
Tout = coord_out.beta()
#coord = model.coords[1]
#T1 = coord.beta()
# TODO: should be this...
#strain_obj = model.op2_results.strain.ctetra_strain[1]
# TODO: all we have for now
#stress_obj = op2_model.ctetra_stress[1]
result_types = ['ctetra_stress', 'cpenta_stress', 'cpyram_stress', 'chexa_stress']
for res_type in result_types:
res_dict = getattr(op2_model, res_type)
for subcase, stress_obj in res_dict.items():
_transform_solid_stress_obj(bdf_model, stress_obj, Tout)
def _transform_solid_stress_obj(bdf_model: BDF, stress_obj, Tout):
#['oxx', 'oyy', 'ozz', 'txy', 'tyz', 'txz', 'omax', 'omid', 'omin', 'von_mises']
data = stress_obj.data
nmodes = data.shape[0]
if stress_obj.is_stress:
oxx = data[:, :, 0]
oyy = data[:, :, 1]
ozz = data[:, :, 2]
txy = data[:, :, 3]
tyz = data[:, :, 4]
txz = data[:, :, 5]
else:
exx = data[:, :, 0]
eyy = data[:, :, 1]
ezz = data[:, :, 2]
exy = data[:, :, 3] / 2
eyz = data[:, :, 4] / 2
exz = data[:, :, 5] / 2
nnodes = 5 # CTETRA4 / CTETRA10
eids = stress_obj.element_node[:, 0]
neids = len(eids) // nnodes
nodes = stress_obj.element_node[:, 1].reshape(neids, nnodes)
ueids = np.unique(eids)
eids_cids = stress_obj.element_cid
cids = eids_cids[:, 1]
ucids = np.unique(cids)
for ucid in ucids:
if ucid == 0:
continue
if ucid == cid:
continue
ieids = np.where(cids == ucid)[0]
ueids = eids_cids[ieids, 0]
nodes_xyz = {}
for nid in np.unique(nodes[ieids, 1:].ravel()):
node = bdf_model.nodes[nid] # type: GRID
nodes_xyz[nid] = node.get_position_wrt(bdf_model, ucid)
for eid, ieid in zip(ueids, ieids):
i0 = ieid * nnodes
i1 = i0 + nnodes
nodes_eid = nodes[ieid, 1:]
assert len(nodes_eid) == nnodes - 1
e_nodes = np.vstack([nodes_xyz[nid] for nid in nodes_eid])
avg_node = e_nodes.mean(axis=0)
assert len(avg_node) == 3
if ucid == -1:
element = bdf_model.elements[eid]
centroid, xe, ye, ze = element.material_coordinate_system()
Te = np.vstack([xe, ye, ze]) # Te
else:
coord_in = bdf_model.coords[ucid]
Tin = coord_in.beta()
if coord_in.type in ['CORD2R', 'CORD1R']:
pass
#elif coord_in.type in ['CORD2C', 'CORD1C']:
#thetad = avg_node[0]
#print(avg_node)
#theta = np.radians(thetad)
#s = np.sin(theta)
#c = np.cos(theta)
#sc = s * c
#c2 = c ** 2
#s2 = s ** 2
#Ar = np.array([
#[c2, s2, 2. * sc],
#[s2, c2, -2. * sc],
#[-sc, sc, c2 - s2],
#])
#Ar = np.array([
#[c, s, 0.],
#[-s, c, 0.],
#[0., 0., 1.],
#])
#Tin2 = Tin @ Ar.T
else:
raise NotImplementedError(coord_in)
T = Tout @ Tin.T
for itime in range(nmodes):
for ielem in range(i0, i1):
#exx = data[itime, ielem, 0]
#eyy = data[itime, ielem, 1]
#ezz = data[itime, ielem, 2]
#exy_2 = data[itime, ielem, 3] / 2
#eyz_2 = data[itime, ielem, 4] / 2
#exz_2 = data[itime, ielem, 5] / 2
#strain = np.array([
#[exx, exy_2, exz_2],
#[exy_2, eyy, eyz_2],
#[exz_2, eyz_2, ezz],
#])
oxx = data[itime, ielem, 0]
oyy = data[itime, ielem, 1]
ozz = data[itime, ielem, 2]
txy = data[itime, ielem, 3]
tyz = data[itime, ielem, 4]
txz = data[itime, ielem, 5]
stress = np.array([
[oxx, txy, txz],
[txy, oyy, tyz],
[txz, tyz, ozz],
])
#[stress_out] = [T] [stress_in] [T]^T
T11 = T[0, 0]
T22 = T[1, 1]
#T33 = T[2, 2]
T12 = T[0, 1]
T13 = T[0, 2]
T32 = T23 = T[1, 2]
T21 = T[1, 0]
T31 = T[2, 0]
oxx2 = (oxx*T11**2 + oyy*T21**2 + ozz*T31**2 + 2*txy*T11*T12 + 2*txz*T11*T13 + 2*tyz*T21*T31)
oyy2 = (oxx*T12**2 + oyy*T22**2 + ozz*T32**2 + 2*txy*T11*T12 + 2*txz*T11*T13 + 2*tyz*T21*T31)
ozz2 = (oxx*T11**2 + oyy*T21**2 + ozz*T31**2 + 2*txy*T11*T12 + 2*txz*T11*T13 + 2*tyz*T21*T31)
#oxx2 = (oxx*T11**2 + oyy*T21**2 + ozz*T31**2 + 2*txy*T11*T12 + 2*txz*T11*T13 + 2*tyz*T21*T31)
stress2 = T @ stress @ T.T
print(eid)
print(stress)
print(stress2)
#ss
inid0 = 0
for ieid, eid in enumerate(ueids):
# ------------------------------------------
# this is ACTUALLY the element coordinate system
ctetra = bdf_model.elements[eid]
#print(ctetra)
T = get_transform(T1, Te)
for unused_irange in range(5):
exxi = exx[:, inid0]
eyyi = eyy[:, inid0]
ezzi = ezz[:, inid0]
exyi = exy[:, inid0]
eyzi = eyz[:, inid0]
exzi = exz[:, inid0]
# mode loop
for imode, exxii, eyyii, ezzii, exyii, eyzii, exzii in zip(count(), exxi, eyyi, ezzi, exyi, eyzi, exzi):
exxiit, eyyiit, ezziit, exyiit, eyziit, exziit = _transform_strain(
T, exxii, eyyii, ezzii, exyii, eyzii, exzii)
# save_op2 method
data[imode, inid0, :6] = exxiit, eyyiit, ezziit, exyiit, eyziit, exziit
inid0 += 1
#op2_filename_out = os.path.join(dirname, f'xform.op2')
#op2_model.write_op2(op2_filename_out, post=-1, endian=b'<', skips=None, nastran_format='nx')
def get_transform(T1, Te):
"""
[T_el] = [T1]^T [T]
[T_element_in_local] = [T_local_in_basic]^T [T_elemental_in_basic]
[T_element_in_local] = [T_basic_in_local] [T_elemental_in_basic]
"""
T = T1.T @ Te # TODO: is this the right order?; I think so...
return T
def _transform_strain(T, exxii, eyyii, ezzii, exyii, eyzii, exzii):
strain = np.array([
[exxii, exyii, exzii],
[exyii, eyyii, eyzii],
[exzii, eyzii, ezzii],
])
# TODO: is it T.t @ inner @ T
# or T @ inner @ T.T
#
# TODO: is it Ri @ inner @ R
# or R @ inner @ Ri
#
# TODO: is the T / R order right?
strain4 = T.T @ (R @ strain @ Ri) @ T
#print(T)
# TODO: or is it another strain?
strain_cid = strain4
exxiit = strain_cid[0, 0]
eyyiit = strain_cid[1, 1]
ezziit = strain_cid[2, 2]
exyiit = strain_cid[0, 1]
eyziit = strain_cid[1, 2]
exziit = strain_cid[0, 2]
return exxiit, eyyiit, ezziit, exyiit, eyziit, exziit
def transf_Mohr(Sxx, Syy, Sxy, thetarad):
"""Mohr's Circle-based Plane Stress Transformation
Parameters
----------
Sxx, Syy, Sxy : array-like
Sigma_xx, Sigma_yy, Sigma_xy stresses.
thetarad : array-like
Array with angles for which the stresses should be transformed.
Returns
-------
Sxx_theta, Syy_theta, Sxy_theta : np.ndarray
Transformed stresses.
"""
Sxx = np.asarray(Sxx)
Syy = np.asarray(Syy)
Sxy = np.asarray(Sxy)
thetarad = np.asarray(thetarad)
Scenter = (Sxx + Syy)/2.
R = np.sqrt((Sxx - Scenter)**2 + Sxy**2)
thetarad_Mohr = np.arctan2(-Sxy, Sxx - Scenter) + 2*thetarad
cos_Mohr = cos(thetarad_Mohr)
Sxx_theta = Scenter + R*cos_Mohr
Syy_theta = Scenter - R*cos_Mohr
Sxy_theta = -R*sin(thetarad_Mohr)
return Sxx_theta, Syy_theta, Sxy_theta
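# Hedged check (not part of pyNastran): a uniaxial stress rotated by 45 degrees splits
# evenly between the two normal directions with a shear of -Sxx/2.
def _demo_transf_Mohr():
    Sxx_t, Syy_t, Sxy_t = transf_Mohr([100.], [0.], [0.], [np.pi/4])
    assert np.allclose([Sxx_t[0], Syy_t[0], Sxy_t[0]], [50., 50., -50.])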
def thetadeg_to_principal(Sxx, Syy, Sxy):
"""Calculate the angle to the principal plane stress state
Parameters
----------
Sxx, Syy, Sxy : array-like
Sigma_xx, Sigma_yy, Sigma_xy stresses.
Returns
-------
thetadeg : np.ndarray
Array with angles for which the given stresses are transformed to the
principal stress state.
"""
Scenter = (Sxx + Syy)/2.
thetarad = np.arctan2(Sxy, Scenter - Syy)
return np.rad2deg(thetarad)/2.
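# Hedged check (not part of pyNastran): pure shear reaches its principal state at 45 degrees.
def _demo_thetadeg_to_principal():
    assert abs(thetadeg_to_principal(0., 0., 10.) - 45.0) < 1e-9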
def get_eids_from_op2_vector(vector):
"""Obtain the element ids for a given op2 vector
Parameters
----------
vector : op2 vector
An op2 vector obtained, for example, doing::
vector = op2.cquad4_force[1]
vector = op2.cquad8_stress[1]
vector = op2.ctriar_force[1]
vector = op2.ctria3_stress[1]
"""
eids = getattr(vector, 'element', None)
if eids is None:
eids = vector.element_node[:, 0]
return eids
def is_mcid(elem):
"""
Determines if the element uses theta or the mcid (projected material coordinate system)
Parameters
----------
elem : varies
an element object
CQUAD4, CQUAD8, CQUADR
CTRIA3, CTRIA6, CTRIAR
Returns
-------
is_mcid : bool
the projected material coordinate system is used
"""
theta_mcid = getattr(elem, 'theta_mcid', None)
return isinstance(theta_mcid, integer_types)
def check_theta(elem) -> float:
theta = getattr(elem, 'theta_mcid', None)
if theta is None:
theta = 0.
elif isinstance(theta, float):
pass
elif isinstance(theta, integer_types):
raise ValueError('MCID is accepted by this function')
return theta
def angle2vec(v1, v2):
"""
Using the definition of the dot product to get the angle
v1 o v2 = |v1| * |v2| * cos(theta)
theta = np.arccos( (v1 o v2) / (|v1|*|v2|))
"""
denom = norm(v1, axis=1) * norm(v2, axis=1)
return np.arccos((v1 * v2).sum(axis=1) / denom)
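# Hedged check (not part of pyNastran): angles between stacked row vectors.
def _demo_angle2vec():
    v1 = np.array([[1., 0., 0.], [1., 0., 0.]])
    v2 = np.array([[0., 1., 0.], [2., 0., 0.]])
    assert np.allclose(angle2vec(v1, v2), [np.pi/2, 0.])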
def calc_imat(normals, csysi):
"""
Calculates the i vector in the material coordinate system.
j = k x ihat
jhat = j / |j|
i = jhat x k
Notes
-----
i is not a unit vector because k (the element normal)
is not a unit vector.
"""
jmat = cross(normals, csysi) # k x i
jmat /= norm(jmat)
imat = cross(jmat, normals)
return imat
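# Hedged check (not part of pyNastran): for one element with normal +z and a coordinate
# system whose i axis lies along +x, the projected material i direction stays along +x.
def _demo_calc_imat():
    imat = calc_imat(np.array([[0., 0., 1.]]), np.array([[1., 0., 0.]]))
    assert np.allclose(imat, [[1., 0., 0.]])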
def data_in_material_coord(bdf: BDF, op2: OP2, in_place: bool=False) -> OP2:
"""Convert OP2 2D element outputs to material coordinates
Nastran allows the use of 'PARAM,OMID,YES' to print 2D element forces,
stresses and strains based on the material direction. However, the
conversion only takes place in the F06 output file, whereas the OP2 output
file remains in the element coordinate system.
This function converts the OP2 2D element vectors to the material coordinate system,
similarly to most of the post-processing tools (Patran, Femap, HyperView,
etc). It handles both 2D elements with MCID or THETA.
Parameters
----------
bdf : :class:`.BDF` object
A :class:`.BDF` object that corresponds to the 'op2'.
op2 : :class:`.OP2` object
A :class:`.OP2` object that corresponds to the 'bdf'.
in_place : bool; default=False
If true the original op2 object is modified, otherwise a new one
is created.
Returns
-------
op2_new : :class:`.OP2` object
A :class:`.OP2` object with the abovementioned changes.
.. warning :: doesn't handle composite stresses/strains/forces
.. warning :: doesn't handle solid stresses/strains/forces (e.g. MAT11)
.. warning :: zeros out data for CQUAD8s
"""
if in_place:
op2_new = op2
else:
op2_new = copy.deepcopy(op2)
eids = np.array(list(bdf.elements.keys()))
elems = np.array(list(bdf.elements.values()))
mcid = np.array([is_mcid(e) for e in elems])
elemsmcid = elems[mcid]
elemstheta = elems[~mcid]
thetadeg = np.zeros(elems.shape)
thetadeg[~mcid] = np.array([check_theta(e) for e in elemstheta])
thetarad = np.deg2rad(thetadeg)
#NOTE separating quad types to get vectorizable "corner"
quad_types = ['CQUAD4', 'CQUAD8', 'CQUADR']
for quad_type in quad_types:
# elems with THETA
thisquad = np.array([quad_type in e.type for e in elemstheta])
if np.any(thisquad):
quadelems = elemstheta[thisquad]
corner = np.array([e.get_node_positions() for e in quadelems])
g1 = corner[:, 0, :]
g2 = corner[:, 1, :]
g3 = corner[:, 2, :]
g4 = corner[:, 3, :]
betarad = angle2vec(g3 - g1, g2 - g1)
gammarad = angle2vec(g4 - g2, g1 - g2)
alpharad = (betarad + gammarad) / 2.
tmp = thetarad[~mcid]
tmp[thisquad] -= betarad
tmp[thisquad] += alpharad
thetarad[~mcid] = tmp
# elems with MCID
thisquad = np.array([quad_type in e.type for e in elemsmcid])
if np.any(thisquad):
quadelems = elemsmcid[thisquad]
corner = np.array([e.get_node_positions() for e in quadelems])
g1 = corner[:, 0, :]
g2 = corner[:, 1, :]
g3 = corner[:, 2, :]
g4 = corner[:, 3, :]
normals = np.array([e.Normal() for e in quadelems])
csysi =
|
np.array([bdf.coords[e.theta_mcid].i for e in quadelems])
|
numpy.array
|
##imports===============================================================================
from scipy.spatial.distance import cdist
import numpy as np
import os, sys,glob, copy, csv, time
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.colors import Normalize
from neuropixels.continuous_traces import get_channel_count, filtr
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.manifold import TSNE
from scipy.cluster.vq import kmeans2
##===============================================================================
##===============================================================================
#probe geometry, for the summary figure only
option234_xpositions = np.zeros((192,2))
option234_ypositions = np.zeros((192,2))
option234_positions = np.zeros((384,2))
option234_positions[:,0][::4] = 21
option234_positions[:,0][1::4] = 53
option234_positions[:,0][2::4] = 5
option234_positions[:,0][3::4] = 37
option234_positions[:,1] = np.floor(np.linspace(383,0,384)/2) * 20
##===============================================================================
##helper functions===============================================================================
def read_kilosort_params(filename):
f=open(filename)
params = {}
for line in list(f):
try:
params[line.split(' =')[0]]=line.split('= ')[1].replace('\r\n','')
except:
pass
return params
def read_cluster_groups_CSV(directory):
cluster_id = [];
if os.path.isfile(os.path.join(directory,'cluster_group.tsv')):
cluster_id = [row for row in csv.reader(open(os.path.join(directory,'cluster_group.tsv')))][0:];
else:
if os.path.isfile(os.path.join(directory,'cluster_groups.csv')):
cluster_id = [row for row in csv.reader(open(os.path.join(directory,'cluster_groups.csv')))][0:];
else:
print('could not find cluster groups csv or tsv')
return None
good=[];mua=[];unsorted=[]
for i in np.arange(1,np.shape(cluster_id)[0]):
if cluster_id[i][0].split('\t')[1] == 'good':#if it is a 'good' cluster by manual sort
good.append(cluster_id[i][0].split('\t')[0])
if cluster_id[i][0].split('\t')[1] == 'mua':#if it is an 'mua' cluster by manual sort
mua.append(cluster_id[i][0].split('\t')[0])
if cluster_id[i][0].split('\t')[1] == 'unsorted':#if it is an 'unsorted' cluster by manual sort
unsorted.append(cluster_id[i][0].split('\t')[0])
return (np.array(good).astype(int),np.array(mua).astype(int),np.array(unsorted).astype(int))
def count_unique(x):
values=[]
instances=[]
for v in np.unique(x):
values.extend([v])
instances.extend([len(np.where(np.array(x)==v)[0].flatten())])
return values, instances
def ismember(a, b):
bind = {}
for i, elt in enumerate(b):
if elt not in bind:
bind[elt] = i
return [bind.get(itm, None) for itm in a] # None can be replaced by any other "not in b"
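# Hedged usage sketch for the two helpers above (illustrative values only):
def _demo_count_unique_ismember():
    values, instances = count_unique([1, 1, 2, 3, 3, 3])
    # values -> [1, 2, 3]; instances -> [2, 1, 3]
    idx = ismember([1, 2, 5], [5, 2, 1])
    # idx -> [2, 1, 0]; elements of a absent from b map to None
    return values, instances, idx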
def load_waveforms(data,channel,times,pre=0.5,post=1.5,channels=384,sampling_rate=30000):
#input can be a memory-mapped array or a string specifying the file to memory map. if a string is given, the memory-mapped object is deleted when done, for hygiene.
pre = pre * .001
post = post * .001
channel = int(channel)
channels = int(channels)
if type(data)==str:
mm = np.memmap(data, dtype=np.int16, mode='r')
else:
mm=data
waveforms=[]
for i in times:
start = int((i - pre) * sampling_rate) * int(channels)
temp = mm[start:start+int((pre+post)*sampling_rate*channels)][channel::channels]# - mm[start:start+int((pre+post)*sampling_rate*channels)][channel::channels][0]
temp = temp - temp[0]
waveforms.extend([temp * 0.195])
if type(data)==str:
del mm
return waveforms
def mean_waveform(rawdata,times,pre=0.5,post=1.5,channels=384,sampling_rate=30000):
mean_waveform = []#np.zeros(channels,int((pre+post)*.001)*sampling_rate*channels)
for i,ch in enumerate(
|
np.linspace(0,channels-1,channels)
|
numpy.linspace
|
import unittest
import numpy as np
from desc.grid import LinearGrid
from desc.basis import polyder_vec, polyval_vec, powers, jacobi, fourier
from desc.basis import PowerSeries, DoubleFourierSeries, FourierZernikeBasis
class TestBasis(unittest.TestCase):
"""Tests Basis classes"""
def test_polyder(self):
"""Tests polyder_vec function
"""
p0 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 1, 1]])
p1 = polyder_vec(p0, 1)
p2 = polyder_vec(p0, 2)
correct_p1 = np.array([[0, 2, 0], [0, 0, 1], [0, 0, 0], [0, 2, 1]])
correct_p2 = np.array([[0, 0, 2], [0, 0, 0], [0, 0, 0], [0, 0, 2]])
np.testing.assert_allclose(p1, correct_p1, atol=1e-8)
np.testing.assert_allclose(p2, correct_p2, atol=1e-8)
def test_polyval(self):
"""Tests polyval_vec function
"""
p = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 1, 1]])
x = np.linspace(0, 1, 11)
correct_vals = np.array([x**2, x, np.ones_like(x), x**2+x+1])
values = polyval_vec(p, x)
np.testing.assert_allclose(values, correct_vals, atol=1e-8)
def test_powers(self):
"""Tests powers function
"""
l = np.array([0, 1, 2])
r = np.linspace(0, 1, 11) # rho coordinates
correct_vals = np.array([np.ones_like(r), r, r**2]).T
correct_ders = np.array([np.zeros_like(r), np.ones_like(r), 2*r]).T
values = powers(r, l, dr=0)
derivs = powers(r, l, dr=1)
np.testing.assert_allclose(values, correct_vals, atol=1e-8)
np.testing.assert_allclose(derivs, correct_ders, atol=1e-8)
def test_jacobi(self):
"""Tests jacobi function
"""
l =
|
np.array([3, 4, 6])
|
numpy.array
|
import sys
import numpy as np
Pposition={}
Lposition={}
ResinameP={}
ResinameL={}
Atomline=[]
residuePair=[]
if len(sys.argv) < 2:
    print("python python2_L.py xxx")
    sys.exit(1)
filebase=sys.argv[1]
print(filebase)
for line in open(filebase+'_w.pdb'):
tem_B=' '
if len(line)>16:
tem_B=line[16]
line=line[:16]+' '+line[17:]
#print(line)
list = line.split()
id = list[0]
if id == 'ATOM' and tem_B !='B':
type = list[2]
#if type == 'CA' and list[3]!= 'UNK':
if list[3]!= 'UNK':
residue = list[3]
type_of_chain = line[21:22]
tem1=line[22:26].replace("A", "")
tem2=tem1.replace("B", "")
tem2=tem2.replace("C", "")
#tem2=filter(str.isdigit, list[5])
atom_count = tem2+line[21:22]
list[6]=line[30:38]
list[7]=line[38:46]
list[8]=line[46:54]
position = list[6:9]
Pposition[atom_count]=position
ResinameP[atom_count]=residue+list[5]+list[4]
resindex=residue+list[5]
Atomline.append(line)
index_nn=0
for line in open(filebase+'_ligand_n.mol2'):
tem_B=' '
line=line.strip()
#print(line)
if line == "@<TRIPOS>ATOM":
index_nn=1
#print(line)
if line == "@<TRIPOS>BOND":
index_nn=0
if index_nn==1 and line != "@<TRIPOS>ATOM":
list = line.split()
#tem2=filter(str.isdigit, list[5])
atom_count = list[0]+list[5]
position = list[2:5]
Lposition[atom_count]=position
ResinameL[atom_count]=list[5]
#-------------------------------------------------
for key1, value1 in Pposition.items():
#print ( key1)
for key2, value2 in Lposition.items():
#print (ResinameE[key], 'corresponds to', value)
##distance=pow(value1[0]-value2[0])
#print (value2)
a = np.array(value1)
        a1 = a.astype(float)  # np.float alias was removed from NumPy; builtin float is equivalent
b =
|
np.array(value2)
|
numpy.array
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 20 23:12:01 2014
@author: <NAME>
@email: <EMAIL>
@license: MIT
"""
import unittest
import numpy as np
import scipy.sparse as sp
from sklearn import datasets
import sys
sys.path.append("../pyKMLib/")
import SparseFormats as spf
class TestSparseFormats(unittest.TestCase):
def setUp(self):
pass
def test_csr2sertilp(self):
mat = np.array([ [1,0,2,0,3,0],
[4,0,5,0,0,0],
[0,0,0,6,7,0],
[0,0,0,0,0,8],
[21,0,22,0,23,0],
[24,0,25,0,0,0],
[0,0,0,26,27,0],
[0,0,0,0,0,28]
])
sp_mat = sp.csr_matrix(mat)
row_len_right = np.array([1,1,1,1,1,1,1,1])
sl_start_right = np.array([0,16,32])
val_right = np.array([1.0,2.0,4.0,5.0,6.0,7.0,8.0,0.0,3.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,21.0,22.0,24.0,25.0,26.0,27.0,28.0,0.0,23.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
        # columns taken directly from the dataset,
col_vs_right = np.array([1,3,1,3,4,5,6,0,5,0,0,0,0,0,0,0,1,3,1,3,4,5,6,0,5,0,0,0,0,0,0,0])
        # but in sparse format columns start from 0, so we have to subtract 1
col_right = col_vs_right-1
col_right[col_right==-1]=0
val,col,row_len,sl_start=spf.csr2sertilp(sp_mat,
threadsPerRow=2,
prefetch=2,
sliceSize=4,
minAlign=2*4)
self.assertTrue(np.allclose(row_len,row_len_right), 'sliced ellpack row length arrays are not equal')
self.assertTrue(np.allclose(sl_start,sl_start_right), 'sliced ellpack slice start arrays are not equal')
self.assertTrue(np.allclose(val,val_right), 'sliced ellpack values arrays are not equal')
        self.assertTrue(np.allclose(col,col_right), 'sliced ellpack columns arrays are not equal')
def test_csr2sertilp_class_smaller_than_slice_size(self):
        '''
        Tests the creation of the sertilp_class representation, in which each
        class is aligned to slice_size; when a class is smaller than slice_size
        it is padded with 'slice_size - n' rows filled with zeros.
        There are 8 rows for three different classes [0,1,2]; in each class
        the number of rows is smaller than sliceSize.
        '''
threadsPerRow=2
prefetch=2
sliceSize=4
minAlign=2*4
mat = np.array([ [1,0,2,0,3,0],
[4,0,5,0,0,0],
[0,0,0,6,7,0],
[0,0,0,0,0,8],
[9,0,10,0,11,0],
[12,0,13,0,0,0],
[0,0,0,14,15,0],
[0,0,0,0,0,16]
])
y = np.array([0,0,0,1,1,2,2,2])
sp_mat = sp.csr_matrix(mat)
row_len_right = np.array([1,1,1,1,1,1,1,1])
sl_start_right = np.array([0,16,32, 48])
cls_slice_right = np.array([0,1,2,3])
val_right = np.array([1.0,2.0, 4.0,5.0, 6.0,7.0, 0.0,0.0,
3.0,0.0, 0.0,0.0, 0.0,0.0, 0.0,0.0,
8.0,0.0, 9.0,10.0, 0.0,0.0, 0.0,0.0,
0.0,0.0, 11.0,0.0, 0.0,0.0, 0.0,0.0,
12.0,13.0, 14.0,15.0, 16.0,0.0, 0.0,0.0,
0.0,0.0, 0.0,0.0, 0.0,0.0, 0.0,0.0,
])
col_right = np.array([0,2, 0,2, 3,4, 0,0,
4,0, 0,0, 0,0, 0,0,
5,0, 0,2, 0,0, 0,0,
0,0, 4,0, 0,0, 0,0,
0,2, 3,4, 5,0, 0,0,
0,0, 0,0, 0,0, 0,0])
val,col,row_len,sl_start,cls_slice=spf.csr2sertilp_class(sp_mat,y,
threadsPerRow=threadsPerRow,
prefetch=prefetch,
sliceSize=sliceSize,
minAlign=minAlign)
self.assertTrue(np.allclose(row_len,row_len_right), 'sliced ellpack row length arrays are not equal')
self.assertTrue(np.allclose(sl_start,sl_start_right), 'sliced ellpack slice start arrays are not equal')
self.assertTrue(np.allclose(cls_slice,cls_slice_right), 'sliced ellpack class slice start arrays are not equal')
self.assertTrue(np.allclose(val,val_right), 'sliced ellpack values arrays are not equal')
        self.assertTrue(np.allclose(col,col_right), 'sliced ellpack columns arrays are not equal')
    def test_csr2sertilp_class_greater_than_slice_size(self):
threadsPerRow=2
prefetch=2
sliceSize=4
minAlign=2*4
mat = np.array([ [1,0,2,0,3,0],
[1,2,0,0,0,0],
[1,2,3,4,0,0],
[4,0,5,0,0,0],
[0,0,0,6,7,0],
[0,0,0,0,0,8],
[9,0,10,0,11,0],
[12,0,13,0,0,0],
[0,0,0,14,15,0],
[0,0,0,0,0,16]
])
y = np.array([0,0,0,0,0,1,1,2,2,2])
sp_mat = sp.csr_matrix(mat)
row_len_right = np.array([1,1,1,1,1,1,1,1,1,1])
sl_start_right = np.array([0,16,32,48,64])
cls_slice_right = np.array([0,2,3,4])
val_right = np.array([1.0,2.0, 1.0,2.0, 1.0,2.0, 4.0,5.0,
3.0,0.0, 0.0,0.0, 3.0,4.0, 0.0,0.0,
6.0,7.0, 0.0,0.0, 0.0,0.0, 0.0,0.0,
0.0,0.0, 0.0,0.0, 0.0,0.0, 0.0,0.0,
8.0,0.0, 9.0,10.0, 0.0,0.0, 0.0,0.0,
0.0,0.0, 11.0,0.0, 0.0,0.0, 0.0,0.0,
12.0,13.0, 14.0,15.0, 16.0,0.0, 0.0,0.0,
0.0,0.0, 0.0,0.0, 0.0,0.0, 0.0,0.0,
])
col_right = np.array([0,2, 0,1, 0,1, 0,2,
4,0, 0,0, 2,3, 0,0,
3,4, 0,0, 0,0, 0,0,
0,0, 0,0, 0,0, 0,0,
5,0, 0,2, 0,0, 0,0,
0,0, 4,0, 0,0, 0,0,
0,2, 3,4, 5,0, 0,0,
0,0, 0,0, 0,0, 0,0])
val,col,row_len,sl_start, cls_slice=spf.csr2sertilp_class(sp_mat,y,
threadsPerRow=threadsPerRow,
prefetch=prefetch,
sliceSize=sliceSize,
minAlign=minAlign)
self.assertTrue(np.allclose(row_len,row_len_right), 'sliced ellpack row length arrays are not equal')
self.assertTrue(np.allclose(sl_start,sl_start_right), 'sliced ellpack slice start arrays are not equal')
self.assertTrue(np.allclose(cls_slice,cls_slice_right), 'sliced ellpack class slice start arrays are not equal')
self.assertTrue(np.allclose(val,val_right), 'sliced ellpack values arrays are not equal')
        self.assertTrue(np.allclose(col,col_right), 'sliced ellpack columns arrays are not equal')
    def test_csr2sertilp_class_greater_than_slice_size_unequal(self):
threadsPerRow=2
prefetch=2
sliceSize=2
minAlign=threadsPerRow*sliceSize
# mat = np.array([ [1,2,3,4,5,6] ])
# y = np.array([0])
# mat = np.array([ [1,0,2,0,3,0],
# [1,2,0,0,0,0],
# [1,2,3,4,5,6] ])#,
# y = np.array([0,0,0])
mat = np.array([ [1,0,2,0,3,0],
[1,2,0,0,0,0],
[1,2,3,4,5,6],
[6,5,4,3,2,1],
[0,0,0,6,7,0],
[0,0,0,0,0,8],
[9,0,10,0,11,0],
[12,0,13,0,0,0],
[0,0,0,14,15,0],
[1,2,3,4,5,17],
[0,0,0,0,0,18],
[0,0,0,0,0,19]
])
y = np.array([0,0,0,1,1,1,2,2,2,3,3,3])
sp_mat = sp.csr_matrix(mat)
row_len_right = np.array([1,1,2,2,1,1,1,1,1,2,1,1])
sl_start_right = np.array([ 0, 8, 24, 40, 48, 56, 64, 80, 88])
cls_slice_right = np.array([0,2,4,6,8])
val_right = np.array([ 1., 2., 1., 2., 3., 0., 0., 0., 1., 2., 0.,
0., 3., 4., 0., 0., 5., 6., 0., 0., 0., 0.,
0., 0., 6., 5., 6., 7., 4., 3., 0., 0., 2.,
1., 0., 0., 0., 0., 0., 0., 8., 0., 0., 0.,
0., 0., 0., 0., 9., 10., 12., 13., 11., 0., 0.,
0., 14., 15., 0., 0., 0., 0., 0., 0., 1., 2.,
18., 0., 3., 4., 0., 0., 5., 17., 0., 0., 0.,
0., 0., 0., 19., 0., 0., 0., 0., 0., 0., 0.])
col_right = np.array([0, 2, 0, 1, 4, 0, 0, 0, 0, 1, 0, 0, 2, 3, 0, 0, 4, 5, 0, 0, 0, 0, 0,
0, 0, 1, 3, 4, 2, 3, 0, 0, 4, 5, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0,
0, 0, 0, 2, 0, 2, 4, 0, 0, 0, 3, 4, 0, 0, 0, 0, 0, 0, 0, 1, 5, 0, 2,
3, 0, 0, 4, 5, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0])
val,col,row_len,sl_start, cls_slice=spf.csr2sertilp_class(sp_mat,y,
threadsPerRow=threadsPerRow,
prefetch=prefetch,
sliceSize=sliceSize,
minAlign=minAlign)
self.assertTrue(np.allclose(row_len,row_len_right), 'sliced ellpack row length arrays are not equal')
self.assertTrue(np.allclose(sl_start,sl_start_right), 'sliced ellpack slice start arrays are not equal')
self.assertTrue(
|
np.allclose(cls_slice,cls_slice_right)
|
numpy.allclose
|
import numpy as np
import sys
from numba import jit
@jit
def count_primes(n):
v =
|
np.full(n,True,np.bool)
|
numpy.full
|
# coding: utf-8
import os
import shutil
import subprocess
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib.patches import Wedge
from scipy.integrate import trapz
from scipy.interpolate import NearestNDInterpolator, griddata, interp1d
from scipy.optimize import minimize
TESLAMAX_PACKAGE_DIR = Path(os.path.dirname(__file__))
TESLAMAX_JAVA_DIR = TESLAMAX_PACKAGE_DIR.parent / 'java'
TESLAMAX_CLASS_FILE = TESLAMAX_JAVA_DIR / 'TeslaMax.class'
TESLAMAX_CMD = ['comsolbatch', '-inputfile', str(TESLAMAX_CLASS_FILE)]
B_HIGH_FILENAME = "B_high.txt"
B_LOW_FILENAME = "B_low.txt"
B_III_FILENAME = "B_III.txt"
H_IV_FILENAME = "H_IV_1Q.txt"
MAIN_RESULTS_FILENAME = "COMSOL Main Results.txt"
MAGNETIC_PROFILE_FILENAME = "COMSOL Magnetic Profile.txt"
PARAMETER_FILENAME = "params.txt"
N_PROFILE_POINTS = 181  # keep at this value so the profile comes in increments of 1 degree
N_R_POINTS = 20
N_POINTS_PER_AXIS = 400
FIGSIZE_CM = 20
FIGSIZE_INCHES = FIGSIZE_CM / 2.54
FONTSIZE = 20
B_HIGH_LEVEL = 1.0
B_LOW_LEVEL = 0.0
DEBUG = False
def get_comsol_parameters_series(filename=PARAMETER_FILENAME):
"""Parse a COMSOL parameters file 'filename' and
return a pandas Series from it.
"""
param_comsol_file = Path(filename)
param_comsol_series = pd.read_csv(str(param_comsol_file),
squeeze=True,
sep=" ",
index_col=0,
header=None)
param_comsol_series.name = "COMSOL Parameters"
param_comsol_series.index.name = None
# append the units to the parameters names
names_with_units = {}
for name in param_comsol_series.keys():
if name.startswith("h_") or name.startswith("R_"):
names_with_units[name] = name + "[m]"
if name.startswith("alpha") or name.startswith(
"phi") or name.startswith("delta_phi"):
names_with_units[name] = name + "[deg]"
if name.startswith("B_"):
names_with_units[name] = name + "[T]"
if name.startswith("H_c"):
names_with_units[name] = name + "[A/m]"
param_comsol_series = param_comsol_series.rename(names_with_units)
return param_comsol_series
def read_comsol_data_file(filename):
"""Read and parse 'filename' as exported by COMSOL.
    Return the numerical data as a numpy array containing only the numerical
data; the first two columns are x and y values. All values are in SI.
Keyword Arguments:
filename -- str
"""
return np.loadtxt(filename, skiprows=9)
def read_comsol_profile_data(filename):
"""
Read 'filename' as exported by TeslaMax and return an array of the
magnetic profile data, where the first column is the angle
in degrees [0,360] and the second is the average magnetic
flux density in tesla
"""
profile_data = np.loadtxt(filename,
skiprows=1)
return profile_data.T
def process_main_results_file():
"""Take the file "COMSOL Main Results.txt" as exported by COMSOL and
clean the header data.
"""
p = Path('.') / MAIN_RESULTS_FILENAME
results = pd.read_csv(MAIN_RESULTS_FILENAME,
                          sep=r"\s+",
skiprows=5,
index_col=None,
header=None,
names=["B_high[T]",
"B_low[T]",
"A_gap[m2]",
"A_magnet[m2]",
"-H_Brem_II_max[A/m]",
"-H_Brem_IV_max[A/m]",
"A_demag[m2]"])
    results_series = results.iloc[0]  # .ix is gone from modern pandas; select the first row by position
results_series.to_csv(str(p),
float_format="%.6f",
sep=" ",
header=False,
index=True)
def read_main_results_file():
"""Return a Series where each row is one of the COMSOL Main results"""
results_filepath = Path(MAIN_RESULTS_FILENAME)
results_series = pd.read_table(results_filepath,
sep=" ",
squeeze=True,
index_col=0,
header=None)
results_series.index.name = None
results_series.name = "COMSOL Main Results"
return results_series
# noinspection PyPep8Naming
def calculate_magnitude(components_grid):
"""
Return an array [x, y, norm(V)] from [x, y, Vx, Vy]
"""
# noinspection PyPep8Naming
x, y, Vx, Vy = components_grid.T
V = np.sqrt(Vx * Vx + Vy * Vy)
    return np.array((x, y, V)).T  # no error here, but a zero comes out
def calculate_magnetic_profile(B_data, params):
"""
Return the magnetic profile array [phi, B] based on data for the
magnetic flux density [x, y, B] and a dictionary of parameters.
The magnetic profile is defined as the magnetic flux density along the
circumference in the middle of the air gap.
The grid for 'B_data' is supposed to span the interval 0 <= phi <= 90
(the first quadrant); this function mirrors this interval and return phi
in the interval [0, 360].
"""
params = expand_parameter_dictionary(params)
R_g = params['R_g']
R_o = params['R_o']
# create ranges for phi and r
phi_min = 0.0
phi_max = np.pi / 2
phi_vector_1q = np.linspace(phi_min, phi_max, N_PROFILE_POINTS)
r_central = (R_o + R_g) / 2
    # calculate the points (x,y) distributed along
# radial lines
x_grid = r_central * np.cos(phi_vector_1q)
y_grid = r_central * np.sin(phi_vector_1q)
B_profile_1q = griddata(B_data[:, 0:2],
B_data[:, 2],
np.array([x_grid, y_grid]).T)
    # ERROR HERE: from a certain point on, the first entries of B_profile_1q come back as NaN
b=np.isnan(B_profile_1q)
for i in range(0,len(b)):
if b[i]==True:
B_profile_1q[i]=B_profile_1q[i+1]
else: continue
# extrapolate data to the full circle
phi_vector = np.concatenate((phi_vector_1q,
phi_vector_1q + np.pi / 2,
phi_vector_1q + np.pi,
phi_vector_1q + (3 / 2) * np.pi))
B_profile = np.concatenate((B_profile_1q,
B_profile_1q[::-1],
B_profile_1q,
B_profile_1q[::-1]))
profile_data = np.array((np.rad2deg(phi_vector), B_profile)).T
return profile_data
def write_magnetic_profile_file():
"""Create a file "COMSOL Magnetic Profile.txt" in the current directory,
assuming the teslamax command was already ran, and write the magnetic
    profile data (magnetic flux density at the air gap central circumference).
"""
p = Path('.') / MAGNETIC_PROFILE_FILENAME
column_names = ["phi[deg]", "B[T]"]
column_header = " ".join(column_names)
# load data from the B_III filename
B_III_data = read_comsol_data_file(B_III_FILENAME)
# get the columns corresponding to [x, y, B_x, B_y] and calculate [x, y, B]
B_1q = calculate_magnitude(B_III_data[:, :4])
case_series = get_comsol_parameters_series()
profile_data = calculate_magnetic_profile(B_1q, case_series)
np.savetxt(str(p),
profile_data,
fmt=("%.2f", "%.5f"),
delimiter=" ",
header=column_header,
comments='')
def calculate_average_high_field(profile_data):
"""
Return the average magnetic profile through the high field region,
based on profile data [theta, B_profile]
"""
# in the present model, the high field region (equivalent to the cold
# blow in an AMR device) goes from -45° to +45°, and from 135° to 225°
theta_min = 135.0
theta_max = 225.0
theta_vector = profile_data[:, 0]
B_profile_vector = profile_data[:, 1]
# select values only where theta_vector falls in the high field region
# this will return an array with True only at those positions
region_filter = np.logical_and(theta_vector > theta_min,
theta_vector < theta_max)
# select the region of the magnetic profile that satisfy this condition
theta_high_field = theta_vector[region_filter]
B_profile_high_field = B_profile_vector[region_filter]
# return the integral of these samples, divided by the range
B_integrated = trapz(B_profile_high_field, theta_high_field)
theta_range = theta_max - theta_min
B_high_avg = B_integrated / theta_range
return B_high_avg
def write_magnetic_profile_central_file():
"""Create a file "COMSOL Magnetic Profile.txt" in the current directory,
assuming the teslamax command was already ran, and write the magnetic
profile data (magnetic induction at central radial position).
"""
p = Path('.') / MAGNETIC_PROFILE_FILENAME
column_names = ["phi[deg]", "B[T]"]
column_header = " ".join(column_names)
# load data from the high and low field regions
B_h = read_comsol_data_file(B_HIGH_FILENAME)
B_l = read_comsol_data_file(B_LOW_FILENAME)
B_1q = np.concatenate((B_h, B_l), axis=0)
    # calculate vector of angles for the first quadrant
case_series = get_comsol_parameters_series()
n_phi_points = 100
R_g = case_series['R_g[m]']
R_o = case_series['R_o[m]']
# create ranges for phi and r
phi_min = 0.0
phi_max = np.pi / 2
phi_vector_1q = np.linspace(phi_min, phi_max, N_PROFILE_POINTS)
r_min = R_o
r_max = R_g
r_central = (R_o + R_g)/2
    # calculate the points (x,y) distributed along
# radial lines
x_grid = r_central * np.cos(phi_vector_1q)
y_grid = r_central * np.sin(phi_vector_1q)
B_profile_1q = griddata(B_1q[:, 0:2], B_1q[:, 2],
np.array([x_grid, y_grid]).T)
# extrapolate data to the full circle
phi_vector = np.concatenate((phi_vector_1q,
phi_vector_1q + np.pi / 2,
phi_vector_1q + np.pi,
phi_vector_1q + (3 / 2) * np.pi))
B_profile = np.concatenate((B_profile_1q,
B_profile_1q[::-1],
B_profile_1q,
B_profile_1q[::-1]))
profile_data = np.array((np.rad2deg(phi_vector), B_profile)).T
np.savetxt(str(p),
profile_data,
fmt=("%.2f", "%.5f"),
delimiter=" ",
header=column_header,
comments='')
def run_teslamax(verbose=False):
"""
Run the teslamax process in the current directory, clean the results file
and create a magnetic profile file.
Assumes the parameters file is present in the current directory."""
comsol_process = subprocess.run(TESLAMAX_CMD,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True)
if verbose:
print(comsol_process.stdout)
process_main_results_file()
write_magnetic_profile_file()
def remove_units_from_dict_keys(dictionary):
"""Remove a string '[<anything>]' from every key of 'dictionary'"""
new_dictionary = {}
for key in dictionary.keys():
new_key = key.split('[')[0]
new_dictionary[new_key] = dictionary[key]
return new_dictionary
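# Usage sketch (illustrative keys): strips the trailing "[unit]" part of each key.
# >>> remove_units_from_dict_keys({'R_o[m]': 0.05, 'phi_S_II[deg]': 45.0})
# {'R_o': 0.05, 'phi_S_II': 45.0}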
def expand_parameter_dictionary(param_simple):
"""
Return a new dictionary, calculating derivative parameters from
'param_simple', which is usually passed to COMSOL.
    If the input dictionary contains units, they are removed. The returned
    dict does not contain units.
"""
param_dict = remove_units_from_dict_keys(param_simple)
# cast the number of segments to int, if necessary
param_dict["n_IV"] = int(param_dict["n_IV"])
# remove some keys that are not necessary (and which may cause errors)
try:
del param_dict["-H_Brem_IV_max"]
    except KeyError:
pass
# calculate magnet geometry
param_dict["R_g"] = param_dict["R_o"] + param_dict["h_gap"]
param_dict["R_c"] = param_dict["R_s"] + param_dict["h_fc"]
if param_dict["n_II"] > 0:
param_dict["delta_phi_S_II"] = ((param_dict["phi_S_II"] -
param_dict["phi_C_II"]) /
param_dict["n_II"])
if param_dict["n_IV"] > 0:
param_dict["delta_phi_S_IV"] = (param_dict["phi_S_IV"] /
param_dict["n_IV"])
return param_dict
def write_parameter_file_from_dict(param_dict):
"""From a basic 'param_dict', calculate the necessary other parameters
(e.g. magnet segment size from total size and number of segments) and write
the correct parameters file.
If 'param_dict' contains units in the names, they are removed.
"""
param_dict = expand_parameter_dictionary(param_dict)
# write the dictionary file in the appropriate format that COMSOL can parse
parameters_file_path = Path(".") / PARAMETER_FILENAME
param_text = ""
for (key, value) in param_dict.items():
param_text = param_text + "%s %s\n" % (key, value)
parameters_file_path.write_text(param_text)
def run_teslamax_from_params(params, verbose=False):
    """Write the 'params' dictionary in the appropriate format to the current
    directory (removing units if necessary) and run the teslamax process"""
write_parameter_file_from_dict(params)
run_teslamax(verbose)
def normalize_vector(v):
"""
Return the normalized (dimensionless) form of vector
(or list of vectors) v"""
# v could be a single vector or a list of vectors,
# so we handle different cases
if v.ndim == 1:
return v / np.linalg.norm(v)
else:
v_norm = np.linalg.norm(v, axis=1)
v_norm_inv = np.reciprocal(v_norm).reshape(len(v), 1)
return np.multiply(v, v_norm_inv)
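# Usage sketch (illustrative values): handles a single vector or a stack of row
# vectors.
# >>> normalize_vector(np.array([3.0, 4.0]))
# array([0.6, 0.8])
# >>> normalize_vector(np.array([[3.0, 4.0], [0.0, 2.0]]))
# array([[0.6, 0.8],
#        [0. , 1. ]])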
def create_quater_circle_figure_template(r_lim, params):
"""
Return (fig,axes) correspondent to a figure of the first quadrant,
limited by r_lim.
Both magnets are also drawn.
The size of the figure is controlled by FIGSIZE_INCHES"""
fig = plt.figure(figsize=(FIGSIZE_INCHES, FIGSIZE_INCHES))
axes = fig.add_subplot(111, aspect='equal')
axes.set_ylim(0, 1e3 * r_lim)
axes.set_xlim(0, 1e3 * r_lim)
axes.set_ylabel(r'$y\ [\si{\mm}$]')
axes.set_xlabel(r'$x\ [\si{\mm}$]')
R_o = params['R_o']
R_i = params['R_i']
R_s = params['R_s']
R_g = params.get('R_g', params['R_o'] + params['h_gap'])
magnet_II_outer = plt.Circle((0, 0), 1000 * R_o, color='k', fill=False)
magnet_II_inner = plt.Circle((0, 0), 1e3 * R_i, color='k', fill=False)
axes.add_artist(magnet_II_outer)
axes.add_artist(magnet_II_inner)
magnet_IV_outer = plt.Circle((0, 0), 1e3 * R_s, color='k', fill=False)
magnet_IV_inner = plt.Circle((0, 0), 1000 * R_g, color='k', fill=False)
axes.add_artist(magnet_IV_outer)
axes.add_artist(magnet_IV_inner)
return fig, axes
def generate_sector_mesh_points(R1, R2, phi1, phi2):
"""
    Return a list of points [X, Y] uniformly distributed over the annular sector
    between radii R1 and R2 and angular positions phi1 and phi2.
The number of points is controlled by N_POINTS_PER_AXIS.
"""
phi_min = phi1
phi_max = phi2
phi_vector = np.linspace(phi_min, phi_max, N_POINTS_PER_AXIS)
r_vector = np.linspace(R1, R2, N_POINTS_PER_AXIS)
phi_grid, r_grid = np.meshgrid(phi_vector, r_vector)
X_vector = (r_grid * np.cos(phi_grid)).flatten()
Y_vector = (r_grid * np.sin(phi_grid)).flatten()
return np.array([X_vector, Y_vector]).T
def create_magnet_IV_figure_template(params):
"""
Return (fig,axes) correspondent to a figure of the
first quadrant of magnet IV.
The size of the figure is controlled by FIGSIZE_INCHES"""
fig = plt.figure(figsize=(FIGSIZE_INCHES, FIGSIZE_INCHES))
axes = fig.add_subplot(111, aspect='equal')
R_o = params['R_o']
R_i = params['R_i']
R_s = params['R_s']
R_g = params.get('R_g', params['R_o'] + params['h_gap'])
R_c = params.get('R_c', params['R_s'] + params['h_fc'])
r_lim = R_c
axes.set_ylim(0, 1e3 * r_lim)
axes.set_xlim(0, 1e3 * r_lim)
axes.set_ylabel(r'$y\ [\si{\mm}$]')
axes.set_xlabel(r'$x\ [\si{\mm}$]')
width_IV = R_s - R_g
n_IV = int(params['n_IV'])
delta_phi_S_IV = params['delta_phi_S_IV']
for i in range(0, n_IV):
theta_0 = i * delta_phi_S_IV
theta_1 = (i + 1) * delta_phi_S_IV
magnet_segment = Wedge((0, 0),
1e3 * R_s,
theta_0,
theta_1,
1e3 * width_IV,
color='k',
fill=False)
axes.add_artist(magnet_segment)
return fig, axes
def create_magnets_figure_template(params):
"""
Return (fig,axes) correspondent to a figure of the
first quadrant of both magnets.
The size of the figure is controlled by FIGSIZE_INCHES"""
fig = plt.figure(figsize=(FIGSIZE_INCHES, FIGSIZE_INCHES))
axes = fig.add_subplot(111, aspect='equal')
R_o = params['R_o']
R_i = params['R_i']
R_s = params['R_s']
R_g = params.get('R_g', params['R_o'] + params['h_gap'])
R_c = params.get('R_c', params['R_s'] + params['h_fc'])
r_lim = R_c
axes.set_ylim(0, 1e3 * r_lim)
axes.set_xlim(0, 1e3 * r_lim)
axes.set_ylabel(r'$y\ [\si{\mm}$]')
axes.set_xlabel(r'$x\ [\si{\mm}$]')
width_II = R_o - R_i
n_II = int(params['n_II'])
delta_phi_S_II = params['delta_phi_S_II']
for i in range(0, n_II):
theta_0 = i * delta_phi_S_II
theta_1 = (i + 1) * delta_phi_S_II
magnet_segment = Wedge((0, 0),
1e3 * R_o,
theta_0,
theta_1,
1e3 * width_II,
color='k',
fill=False)
axes.add_artist(magnet_segment)
width_IV = R_s - R_g
n_IV = int(params['n_IV'])
delta_phi_S_IV = params['delta_phi_S_IV']
for j in range(0, n_IV):
theta_0 = j * delta_phi_S_IV
theta_1 = (j + 1) * delta_phi_S_IV
magnet_segment = Wedge((0, 0),
1e3 * R_s,
theta_0,
theta_1,
1e3 * width_IV,
color='k',
fill=False)
axes.add_artist(magnet_segment)
return fig, axes
class TeslaMaxGeometry:
"""
Class representing the physical geometry of the TeslaMax system,
with all radii and angles.
To instantiante, pass a dictionary (or similar object) with all geometric
parameters in SI units (except for the angles, which must be provided in
degrees). The names for the keys follow the standard convention, without
the units in the names. E.g.
>>> params = {'R_i': 0.015, 'phi_C_II': 15, ...} # provide other parameters
>>> tmg = TeslaMaxGeometry(params)
The parameters 'R_o', 'h_gap' and 'R_g' are not independent. If two are
provided, the class automatically calculates the other one. If you provide
all three, it's your responsibility to provide three consistent values.
Currently, the only possible calculations are volume-related.
"""
def __init__(self, params):
"""
Keyword Arguments:
params -- dict-like
"""
self.geometric_parameters = params.copy()
self._complete_geometric_parameters()
def _complete_geometric_parameters(self):
"""
For two of the parameters 'R_o', 'R_g', 'h_gap', calculate the
third one and populate the 'geometric_parameters' field.
If all three parameters are provided, nothing happens
"""
gp = self.geometric_parameters
if ('R_o' in gp) and ('R_g' in gp) and ('h_gap' in gp):
pass
else:
if ('R_o' in gp) and ('R_g' in gp):
gp['h_gap'] = gp['R_g'] - gp['R_o']
elif ('R_o' in gp) and ('h_gap' in gp):
gp['R_g'] = gp['R_o'] + gp['h_gap']
elif ('R_g' in gp) and ('h_gap' in gp):
gp['R_o'] = gp['R_g'] - gp['h_gap']
def calculate_magnet_volume(self, L):
"""
Return the volume (m3) of the permanent regions,
for a length of 'L' (m)
"""
params = self.geometric_parameters
phi_S_II = np.deg2rad(params["phi_S_II"])
phi_S_IV = np.deg2rad(params["phi_S_IV"])
phi_C_II = np.deg2rad(params["phi_C_II"])
R_i = params["R_i"]
R_o = params["R_o"]
R_s = params["R_s"]
R_g = params["R_g"]
# the factor of 2 already accounts for 4 quadrants
A_II = 2 * (phi_S_II - phi_C_II) * (R_o ** 2 - R_i ** 2)
A_IV = 2 * phi_S_IV * (R_s ** 2 - R_g ** 2)
V = (A_II + A_IV) * L
return V
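    # Sketch of the area bookkeeping above (follows from the annular-sector area
    # formula A = dphi/2 * (R_out**2 - R_in**2)): each magnet spans
    # (phi_S - phi_C) per quadrant (phi_C is effectively 0 for magnet IV), so
    # summing four quadrants gives 4 * dphi/2 = 2 * dphi, which is the factor of
    # 2 appearing in A_II and A_IV.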
def expand_parameters_from_remanence_array(magnet_parameters, params, prefix):
"""
Return a new parameters dict with the magnet parameters in the form
'<prefix>_<magnet>_<segment>', with the values from 'magnet_parameters'
and other parameters from 'params'.
The length of the array 'magnet_parameters' must be equal to the sum of
the number of segments in both cylinders.
The first n_II elements refer to the inner magnet,
and the remaining elements to the outer magnet.
"""
params_expanded = params.copy()
n_II = params["n_II"]
for i in range(0, n_II):
params_expanded["%s_II_%d" % (prefix, i + 1,)] = magnet_parameters[i]
n_IV = params["n_IV"]
for j in range(0, n_IV):
k = j + n_II # the first n_II elements refer to magnet II
params_expanded["%s_IV_%d" % (prefix, j + 1,)] = magnet_parameters[k]
return params_expanded
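# Usage sketch (hypothetical prefix and segment counts, for illustration only,
# and assuming `params` carries 'n_II' and 'n_IV'): with n_II = 2 and n_IV = 1,
# calling this with magnet_parameters = [a, b, c] and prefix = 'alpha' yields
#   'alpha_II_1': a, 'alpha_II_2': b, 'alpha_IV_1': c
# on top of a copy of `params`.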
def calculate_instantaneous_profile(phi, B_high, B_low, *args):
"""
Calculate the value of the two-pole instantaneous magnetic profile at
angular position 'phi' (in degrees), where the profile oscillates from
'B_low' to 'B_high'
"""
high_region = (phi <= 45)
high_region = np.logical_or(high_region,
np.logical_and((phi >= 135),
(phi <= 225)))
high_region = np.logical_or(high_region, (phi >= 315))
return np.where(high_region, B_high, B_low)
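# Usage sketch (illustrative angles): the high-field sectors are
# [0, 45] U [135, 225] U [315, 360] degrees, everything else is low field.
# >>> calculate_instantaneous_profile(np.array([0., 90., 180., 270.]), 1.0, 0.1)
# array([1. , 0.1, 1. , 0.1])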
def calculate_ramp_profile(phi, B_high, B_low, high_field_fraction, *args):
"""
Calculate the value of the two-pole instantaneous magnetic profile at
angular position 'phi' (in degrees), where the profile oscillates from
'B_low' to 'B_high' in a trapezoidal wave, with each plateau occupying
'high_field_fraction' of the cycle.
"""
# for the edge case of a field fraction of 50%,
# the ramp profile is equivalent to the instantaneous profile
if
|
np.isclose(high_field_fraction,0.5)
|
numpy.isclose
|
# AUTOGENERATED! DO NOT EDIT! File to edit: gen_cb.ipynb (unless otherwise specified).
__all__ = ['meshgrid2ps', 'affine_ps', 'homography_ps', 'rotate_ps', 'get_circle_poly', 'rotate_poly', 'affine_poly',
'poly2coords', 'affine_coords', 'homography_coords', 'plot_coords', 'euler2R', 'ARt2H', 'get_bb',
'get_fiducial_poly', 'get_checker_poly', 'get_ps_b', 'get_ps_fp', 'get_ps_t', 'get_poly_cb', 'plot_cb_poly',
'draw_ps', 'draw_coords']
# Cell
import copy
import descartes
import matplotlib.pyplot as plt
import numpy as np
import skimage.draw
import skimage.filters
import skimage.transform
from IPython.core.debugger import set_trace
from shapely import affinity
from shapely.geometry import Point, Polygon
# Cell
def meshgrid2ps(r_x, r_y, order='C'):
xs, ys = np.meshgrid(r_x, r_y)
return np.c_[xs.ravel(order), ys.ravel(order)]
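# Usage sketch (illustrative ranges): flattens a 2d grid into an (N, 2) array of
# (x, y) points.
# >>> meshgrid2ps([0, 1], [0, 1])
# array([[0, 0],
#        [1, 0],
#        [0, 1],
#        [1, 1]])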
# Cell
def _xform_ps(ps, mat):
ps, mat = map(np.array, [ps,mat])
ps_aug = np.concatenate([ps, np.ones((ps.shape[0], 1))], axis=1)
return (mat@ps_aug.T).T
# Cell
def affine_ps(ps, mat):
# Assumes last row of mat is [0, 0, 1]
return _xform_ps(ps, mat)[:, 0:2]
# Cell
def homography_ps(ps, mat):
ps = _xform_ps(ps, mat)
return ps[:, 0:2]/ps[:, 2:]
# Cell
def rotate_ps(ps, deg):
theta = np.radians(deg)
R = [[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[ 0, 0, 1]]
return affine_ps(ps, R)
# Cell
def get_circle_poly(p, r):
return Point(p).buffer(r)
# Cell
def rotate_poly(poly, deg):
return affinity.rotate(poly, deg, origin=(0,0))
# Cell
def affine_poly(poly, mat):
mat = np.array(mat)
return affinity.affine_transform(poly, np.r_[mat[0:2,0:2].ravel(), mat[0,2], mat[1,2]])
# Cell
def poly2coords(poly):
if isinstance(poly, Polygon):
poly = [poly]
coords = []
for idx, p in enumerate(poly):
coord = {}
coord['ext'] =
|
np.array(p.exterior.coords)
|
numpy.array
|
import numpy as np
from sim.sim2d_prediction import sim_run
# Simulator options.
options = {}
options['FIG_SIZE'] = [8,8]
options['ALLOW_SPEEDING'] = False
class KalmanFilter:
def __init__(self):
# Initial State
self.x = np.matrix([[55.],
[3.],
[5.],
[0.]])
        # Uncertainty Matrix
self.P = np.matrix([[1., 0.,0.,0.],
[0., 1.,0.,0.],
[0., 0.,0.,0.],
[0., 0.,0.,100.]])
# Next State Function
self.F = np.matrix([[1., 0., 0.1,0.],
[0., 1., 0.,0.1],
[0., 0., 1.,0.],
[0., 0., 0.,1.]])
# Measurement Function
self.H = np.matrix([[1., 0., 0.,0.],
[0., 1., 0.,0.]])
        # Measurement Uncertainty (2x2: one variance per measured coordinate)
        self.R = np.matrix([[1.0, 0.0],
                            [0.0, 1.0]])
# Identity Matrix
self.I = np.matrix([[1., 0., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 1., 0.],
[0., 0., 0., 1.]])
self.prev_time = 0.
def predict(self,t):
dt = t - self.prev_time
self.x = self.F*self.x
return
def measure_and_update(self,measurements,t):
dt = t - self.prev_time
Z = np.matrix(measurements)
y = np.transpose(Z) - self.H*self.x
S = self.H*self.P*np.transpose(self.H) + self.R
K = self.P*np.transpose(self.H)*np.linalg.inv(S)
self.x = self.x + K*y
self.P = (self.I - K*self.H)*self.P
self.P[0,0]+= 100
self.P[1,1] += 100
self.P[2,2]+= 100
self.P[3,3] += 100
self.v = (self.x[1,0])
self.prev_time = t
return [self.x[0], self.x[1]]
def predict_red_light(self,light_location):
light_duration = 3
F_new = np.copy(self.F)
F_new[0,2] = light_duration
F_new[1,3] = light_duration
x_new = F_new*self.x
if x_new[0] < light_location:
return [False, x_new[0]]
else:
return [True, x_new[0]]
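    # Note on the two prediction paths: predict() advances the state with the
    # fixed dt = 0.1 baked into self.F, while predict_red_light() builds a
    # one-shot transition matrix with dt = light_duration (3 s), so x_new is the
    # position extrapolated through the whole red light at the current velocity
    # estimate; the car clears the light only if x_new[0] reaches light_location.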
def predict_red_light_speed(self, light_location):
light_duration = 3
F_new =
|
np.copy(self.F)
|
numpy.copy
|
# Built-in
import os
import warnings
import itertools as itt
import copy
import datetime as dtm # DB
# Common
import numpy as np
import scipy.optimize as scpopt
import scipy.interpolate as scpinterp
import scipy.constants as scpct
import scipy.sparse as sparse
from scipy.interpolate import BSpline
import scipy.stats as scpstats
import matplotlib.pyplot as plt
# ToFu-specific
import tofu.utils as utils
from . import _fit12d_funccostjac as _funccostjac
from . import _plot
__all__ = [
'fit1d_dinput', 'fit2d_dinput',
'fit12d_dvalid', 'fit12d_dscales',
'fit1d', 'fit2d',
'fit1d_extract', 'fit2d_extract',
]
_NPEAKMAX = 12
_DCONSTRAINTS = {
'bck_amp': False,
'bck_rate': False,
'amp': False,
'width': False,
'shift': False,
'double': False,
'symmetry': False,
}
_DORDER = ['amp', 'width', 'shift']
_SAME_SPECTRUM = False
_DEG = 2
_NBSPLINES = 13
_TOL1D = {'x': 1e-10, 'f': 1.e-10, 'g': 1.e-10}
_TOL2D = {'x': 1e-6, 'f': 1.e-6, 'g': 1.e-6}
_SYMMETRY_CENTRAL_FRACTION = 0.3
_BINNING = False
_POS = False
_SUBSET = False
_CHAIN = True
_METHOD = 'trf'
_LOSS = 'linear'
_D3 = {
'bck_amp': 'x',
'bck_rate': 'x',
'amp': 'x',
'coefs': 'lines',
'ratio': 'lines',
'Ti': 'x',
'width': 'x',
'vi': 'x',
'shift': 'lines', # necessarily by line for de-normalization (*lamb0)
}
_VALID_NSIGMA = 6.
_VALID_FRACTION = 0.8
_SIGMA_MARGIN = 3.
_ALLOW_PICKLE = True
_LTYPES = [int, float, np.int_, np.float_]
_DBOUNDS = {
'bck_amp': (0., 3.),
'bck_rate': (-3., 3.),
'amp': (0, 2),
'width': (0.01, 2.),
'shift': (-2, 2),
'dratio': (0., 2.),
'dshift': (-10., 10.),
'bs': (-10., 10.),
}
_DX0 = {
'bck_amp': 1.,
'bck_rate': 0.,
'amp': 1.,
'width': 1.,
'shift': 0.,
'dratio': 0.5,
'dshift': 0.,
'bs': 1.,
}
###########################################################
###########################################################
#
# Preliminary
# utility tools for 1d spectral fitting
#
###########################################################
###########################################################
def get_symmetry_axis_1dprofile(phi, data, cent_fraction=None):
""" On a series of 1d vertical profiles, find the best symmetry axis """
if cent_fraction is None:
cent_fraction = _SYMMETRY_CENTRAL_FRACTION
# Find the phi in the central fraction
phimin = np.nanmin(phi)
phimax = np.nanmax(phi)
phic = 0.5*(phimax + phimin)
dphi = (phimax - phimin)*cent_fraction
indphi = np.abs(phi-phic) <= dphi/2.
phiok = phi[indphi]
# Compute new phi and associated costs
phi2 = phi[:, None] - phiok[None, :]
phi2min = np.min([np.nanmax(np.abs(phi2 * (phi2 < 0)), axis=0),
np.nanmax(np.abs(phi2 * (phi2 > 0)), axis=0)], axis=0)
indout = np.abs(phi2) > phi2min[None, :]
phi2p = np.abs(phi2)
phi2n = np.abs(phi2)
phi2p[(phi2 < 0) | indout] = np.nan
phi2n[(phi2 > 0) | indout] = np.nan
nok = np.min([np.sum((~np.isnan(phi2p)), axis=0),
np.sum((~np.isnan(phi2n)), axis=0)], axis=0)
cost = np.full((data.shape[0], phiok.size), np.nan)
for ii in range(phiok.size):
indp = np.argsort(np.abs(phi2p[:, ii]))
indn = np.argsort(np.abs(phi2n[:, ii]))
cost[:, ii] = np.nansum(
(data[:, indp] - data[:, indn])[:, :nok[ii]]**2,
axis=1)
return phiok[np.nanargmin(cost, axis=1)]
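# Usage sketch (synthetic profile, values illustrative): for a profile that is
# mirror-symmetric about some phi0 inside the central fraction, the returned
# axis should land close to phi0.
# >>> phi = np.linspace(-0.2, 0.2, 81)
# >>> data = np.exp(-(phi[None, :] - 0.05)**2 / 0.01**2)
# >>> get_symmetry_axis_1dprofile(phi, data)   # expected: array close to [0.05]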
###########################################################
###########################################################
#
# 1d spectral fitting from dlines
#
###########################################################
###########################################################
def _checkformat_dconstraints(dconstraints=None, defconst=None):
# Check constraints
if dconstraints is None:
dconstraints = defconst
# Check dconstraints keys
lk = sorted(_DCONSTRAINTS.keys())
c0 = (
isinstance(dconstraints, dict)
and all([k0 in lk for k0 in dconstraints.keys()])
)
if not c0:
msg = (
"\ndconstraints should contain constraints for spectrum fitting\n"
            + "It must be a dict with the following keys:\n"
+ "\t- available keys: {}\n".format(lk)
+ "\t- provided keys: {}".format(dconstraints.keys())
)
raise Exception(msg)
# copy to avoid modifying reference
return copy.deepcopy(dconstraints)
def _checkformat_dconstants(dconstants=None, dconstraints=None):
if dconstants is None:
return
lk = [kk for kk in sorted(dconstraints.keys()) if kk != 'symmetry']
if not isinstance(dconstants, dict):
msg = (
"\ndconstants should be None or a dict with keys in:\n"
+ "\t- available keys: {}\n".format(lk)
+ "\t- provided : {}".format(type(dconstants))
)
raise Exception(msg)
# Check dconstraints keys
lc = [
k0 for k0, v0 in dconstants.items()
if not (
k0 in lk
and (
(
k0 in _DORDER
and isinstance(v0, dict)
and all([
k1 in dconstraints[k0].keys()
and type(v1) in _LTYPES
for k1, v1 in v0.items()
])
)
or (
k0 not in _DORDER
and type(v0) in _LTYPES
)
)
)
]
if len(lc) > 0:
dc0 = [
'\t\t{}: {}'.format(
kk,
sorted(dconstraints[kk].keys()) if kk in _DORDER else float
)
for kk in lk
]
dc1 = [
'\t\t{}: {}'.format(
kk,
sorted(dconstants[kk].keys())
if kk in _DORDER else dconstants[kk]
)
for kk in sorted(dconstants.keys())
]
msg = (
"\ndconstants should be None or a dict with keys in:\n"
+ "\t- available keys:\n"
+ "\n".join(dc0)
+ "\n\t- provided keys:\n"
+ "\n".join(dc1)
)
raise Exception(msg)
# copy to avoid modifying reference
return copy.deepcopy(dconstants)
def _dconstraints_double(dinput, dconstraints, defconst=_DCONSTRAINTS):
dinput['double'] = dconstraints.get('double', defconst['double'])
c0 = (
isinstance(dinput['double'], bool)
or (
isinstance(dinput['double'], dict)
and all([
kk in ['dratio', 'dshift'] and type(vv) in _LTYPES
for kk, vv in dinput['double'].items()
])
)
)
if c0 is False:
msg = (
"dconstraints['double'] must be either:\n"
+ "\t- False: no line doubling\n"
+ "\t- True: line doubling with unknown ratio and shift\n"
+ "\t- {'dratio': float}: line doubling with:\n"
+ "\t \t explicit ratio, unknown shift\n"
+ "\t- {'dshift': float}: line doubling with:\n"
+ "\t \t unknown ratio, explicit shift\n"
+ "\t- {'dratio': float, 'dshift': float}: line doubling with:\n"
+ "\t \t explicit ratio, explicit shift"
)
raise Exception(msg)
def _width_shift_amp(
indict, dconstants=None,
keys=None, dlines=None, nlines=None, k0=None,
):
# ------------------------
# Prepare error message
msg = ''
pavail = sorted(set(itt.chain.from_iterable([
v0.keys() for v0 in dlines.values()
])))
# ------------------------
# Check case
c0 = indict is False
c1 = (
isinstance(indict, str)
and indict in pavail
)
c2 = (
isinstance(indict, dict)
and all([
isinstance(k1, str)
and (
(isinstance(v1, str)) # and v0 in keys)
or (
isinstance(v1, list)
and all([
isinstance(v2, str)
# and v1 in keys
for v2 in v1
])
)
)
for k1, v1 in indict.items()
])
)
c3 = (
isinstance(indict, dict)
and all([
# ss in keys
isinstance(vv, dict)
and all([s1 in ['key', 'coef', 'offset'] for s1 in vv.keys()])
and isinstance(vv['key'], str)
for ss, vv in indict.items()
])
)
c4 = (
isinstance(indict, dict)
and isinstance(indict.get('keys'), list)
and isinstance(indict.get('ind'), np.ndarray)
)
if not any([c0, c1, c2, c3, c4]):
        msg = (
            "dconstraints['{}'] should be either:\n".format(k0)
+ "\t- False ({}): no constraint\n".format(c0)
+ "\t- str ({}): key from dlines['<lines>'] ".format(c1)
+ "to be used as criterion\n"
+ "\t\t available crit: {}\n".format(pavail)
+ "\t- dict ({}): ".format(c2)
+ "{str: line_keyi or [line_keyi, ..., line_keyj}\n"
+ "\t- dict ({}): ".format(c3)
+ "{line_keyi: {'key': str, 'coef': , 'offset': }}\n"
+ "\t- dict ({}): ".format(c4)
+ "{'keys': [], 'ind': np.ndarray}\n"
+ " Available line_keys:\n{}\n".format(sorted(keys))
+ " You provided:\n{}".format(indict)
)
raise Exception(msg)
# ------------------------
    # c0: no constraint (identity mapping); c1: str key from dlines used as criterion
if c0:
lk = keys
ind = np.eye(nlines)
outdict = {
'keys': np.r_[lk],
'ind': ind,
'coefs': np.ones((nlines,)),
'offset': np.zeros((nlines,)),
}
if c1:
lk = sorted(set([dlines[k1].get(indict, k1) for k1 in keys]))
ind = np.array([
[dlines[k2].get(indict, k2) == k1 for k2 in keys]
for k1 in lk
])
outdict = {
'keys': np.r_[lk],
'ind': ind,
'coefs': np.ones((nlines,)),
'offset': np.zeros((nlines,)),
}
elif c2:
lkl = []
for k1, v1 in indict.items():
if isinstance(v1, str):
v1 = [v1]
v1 = [k2 for k2 in v1 if k2 in keys]
c0 = (
len(set(v1)) == len(v1)
and all([k2 not in lkl for k2 in v1])
)
if not c0:
msg = (
"Inconsistency in indict[{}], either:\n".format(k1)
+ "\t- v1 not unique: {}\n".format(v1)
+ "\t- some v1 not in keys: {}\n".format(keys)
+ "\t- some v1 in lkl: {}".format(lkl)
)
raise Exception(msg)
indict[k1] = v1
lkl += v1
for k1 in set(keys).difference(lkl):
indict[k1] = [k1]
lk = sorted(set(indict.keys()))
ind = np.array([[k2 in indict[k1] for k2 in keys] for k1 in lk])
outdict = {
'keys': np.r_[lk],
'ind': ind,
'coefs': np.ones((nlines,)),
'offset': np.zeros((nlines,)),
}
elif c3:
lk = sorted(set([v0['key'] for v0 in indict.values()]))
lk += sorted(set(keys).difference(indict.keys()))
ind = np.array([
[indict.get(k2, {'key': k2})['key'] == k1 for k2 in keys]
for k1 in lk
])
coefs = np.array([
indict.get(k1, {'coef': 1.}).get('coef', 1.) for k1 in keys
])
offset = np.array([
indict.get(k1, {'offset': 0.}).get('offset', 0.) for k1 in keys
])
outdict = {
'keys': np.r_[lk],
'ind': ind,
'coefs': coefs,
'offset': offset,
}
elif c4:
outdict = indict
if 'coefs' not in indict.keys():
outdict['coefs'] = np.ones((nlines,))
if 'offset' not in indict.keys():
outdict['offset'] = np.zeros((nlines,))
# ------------------------
# Ultimate conformity checks
if not c0:
assert sorted(outdict.keys()) == ['coefs', 'ind', 'keys', 'offset']
assert isinstance(outdict['ind'], np.ndarray)
assert outdict['ind'].dtype == np.bool_
assert outdict['ind'].shape == (outdict['keys'].size, nlines)
assert np.all(np.sum(outdict['ind'], axis=0) == 1)
assert outdict['coefs'].shape == (nlines,)
assert outdict['offset'].shape == (nlines,)
return outdict
###########################################################
###########################################################
#
# 2d spectral fitting from dlines
#
###########################################################
###########################################################
def _dconstraints_symmetry(
dinput,
dprepare=None,
symmetry=None,
cent_fraction=None,
defconst=_DCONSTRAINTS,
):
if symmetry is None:
symmetry = defconst['symmetry']
dinput['symmetry'] = symmetry
if not isinstance(dinput['symmetry'], bool):
msg = "dconstraints['symmetry'] must be a bool"
raise Exception(msg)
if dinput['symmetry'] is True:
dinput['symmetry_axis'] = get_symmetry_axis_1dprofile(
dprepare['phi1d'],
dprepare['dataphi1d'],
cent_fraction=cent_fraction,
)
###########################################################
###########################################################
#
# data, lamb, phi conformity checks
#
###########################################################
###########################################################
def _checkformat_data_fit12d_dlines_msg(data, lamb, phi=None, mask=None):
datash = data.shape if isinstance(data, np.ndarray) else type(data)
lambsh = lamb.shape if isinstance(lamb, np.ndarray) else type(lamb)
phish = phi.shape if isinstance(phi, np.ndarray) else type(phi)
masksh = mask.shape if isinstance(mask, np.ndarray) else type(mask)
shaped = '(nt, n1)' if phi is None else '(nt, n1, n2)'
shape = '(n1,)' if phi is None else '(n1, n2)'
msg = ("Args data, lamb, phi and mask must be:\n"
+ "\t- data: {} or {} np.ndarray\n".format(shaped, shape)
+ "\t- lamb, phi: both {} np.ndarray\n".format(shape)
+ "\t- mask: None or {}\n".format(shape)
+ " You provided:\n"
+ "\t - data: {}\n".format(datash)
+ "\t - lamb: {}\n".format(lambsh))
if phi is not None:
msg += "\t - phi: {}\n".format(phish)
msg += "\t - mask: {}\n".format(masksh)
return msg
def _checkformat_data_fit12d_dlines(
data, lamb, phi=None,
nxi=None, nxj=None, mask=None,
is2d=False,
):
# Check types
c0 = isinstance(data, np.ndarray) and isinstance(lamb, np.ndarray)
if is2d:
c0 &= isinstance(phi, np.ndarray)
if not c0:
msg = _checkformat_data_fit12d_dlines_msg(
data, lamb, phi=phi, mask=mask,
)
raise Exception(msg)
# Check shapes 1
mindim = 1 if phi is None else 2
phi1d, lamb1d, dataphi1d, datalamb1d = None, None, None, None
if is2d:
# special case
c1 = lamb.ndim == phi.ndim == 1
if c1:
if nxi is None:
nxi = lamb.size
if nxj is None:
nxj = phi.size
lamb1d = np.copy(lamb)
phi1d = np.copy(phi)
lamb = np.repeat(lamb[None, :], nxj, axis=0)
phi = np.repeat(phi[:, None], nxi, axis=1)
c0 = (
data.ndim in mindim + np.r_[0, 1]
and (
lamb.ndim == mindim
and lamb.shape == data.shape[-mindim:]
and lamb.shape == phi.shape
and lamb.shape in [(nxi, nxj), (nxj, nxi)]
)
)
else:
c0 = (
data.ndim in mindim + np.r_[0, 1]
and lamb.ndim == mindim
and lamb.shape == data.shape[-mindim:]
)
if not c0:
msg = _checkformat_data_fit12d_dlines_msg(
data, lamb, phi=phi, mask=mask,
)
raise Exception(msg)
# Check shapes 2
if data.ndim == mindim:
data = data[None, ...]
if is2d and c1:
dataphi1d = np.nanmean(data, axis=2)
datalamb1d = np.nanmean(data, axis=1)
if is2d and lamb.shape == (nxi, nxj):
lamb = lamb.T
phi = phi.T
data = np.swapaxes(data, 1, 2)
# mask
if mask is not None:
if mask.shape != lamb.shape:
if phi is not None and mask.T.shape == lamb.shape:
mask = mask.T
else:
msg = _checkformat_data_fit12d_dlines_msg(
data, lamb, phi=phi, mask=mask,
)
raise Exception(msg)
if is2d:
return lamb, phi, data, mask, phi1d, lamb1d, dataphi1d, datalamb1d
else:
return lamb, data, mask
###########################################################
###########################################################
#
# Domain limitation
#
###########################################################
###########################################################
def _checkformat_domain(domain=None, keys=['lamb', 'phi']):
if keys is None:
keys = ['lamb', 'phi']
if isinstance(keys, str):
keys = [keys]
if domain is None:
domain = {k0: {'spec': [np.inf*np.r_[-1., 1.]]} for k0 in keys}
return domain
c0 = (
isinstance(domain, dict)
and all([k0 in keys for k0 in domain.keys()])
)
if not c0:
msg = ("\nArg domain must be a dict with keys {}\n".format(keys)
+ "\t- provided: {}".format(domain))
raise Exception(msg)
domain2 = {k0: v0 for k0, v0 in domain.items()}
for k0 in keys:
domain2[k0] = domain2.get(k0, [np.inf*np.r_[-1., 1.]])
ltypesin = [list, np.ndarray]
ltypesout = [tuple]
for k0, v0 in domain2.items():
c0 = (
type(v0) in ltypesin + ltypesout
and (
(
all([type(v1) in _LTYPES for v1 in v0])
and len(v0) == 2
and v0[1] > v0[0]
)
or (
all([
type(v1) in ltypesin + ltypesout
and all([type(v2) in _LTYPES for v2 in v1])
and len(v1) == 2
and v1[1] > v1[0]
for v1 in v0
])
)
)
)
if not c0:
msg = (
"domain[{}] must be either a:\n".format(k0)
+ "\t- np.ndarray or list of 2 increasing values: "
+ "inclusive interval\n"
+ "\t- tuple of 2 increasing values: exclusive interval\n"
+ "\t- a list of combinations of the above\n"
+ " provided: {}".format(v0)
)
raise Exception(msg)
if type(v0) in ltypesout:
v0 = [v0]
else:
c0 = all([
type(v1) in ltypesin + ltypesout
and len(v1) == 2
and v1[1] > v1[0]
for v1 in v0
])
if not c0:
v0 = [v0]
domain2[k0] = {
'spec': v0,
'minmax': [np.nanmin(v0), np.nanmax(v0)],
}
return domain2
def apply_domain(lamb=None, phi=None, domain=None):
lc = [lamb is not None, phi is not None]
if not lc[0]:
msg = "At least lamb must be provided!"
raise Exception(msg)
din = {'lamb': lamb}
if lc[1]:
din['phi'] = phi
domain = _checkformat_domain(domain=domain, keys=din.keys())
ind = np.ones(lamb.shape, dtype=bool)
for k0, v0 in din.items():
indin = np.zeros(v0.shape, dtype=bool)
indout = np.zeros(v0.shape, dtype=bool)
for v1 in domain[k0]['spec']:
indi = (v0 >= v1[0]) & (v0 <= v1[1])
if isinstance(v1, tuple):
indout |= indi
else:
indin |= indi
ind = ind & indin & (~indout)
return ind, domain
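# Usage sketch (illustrative bounds, assuming `lamb` and `phi` are already
# defined arrays of matching shape): restrict the fit to a wavelength window and
# a phi window; lists give inclusive intervals, tuples exclusive ones.
# >>> ind, domain = apply_domain(
# ...     lamb, phi,
# ...     domain={'lamb': [3.94e-10, 4.00e-10], 'phi': [-0.1, 0.1]},
# ... )
# `ind` is a boolean mask with the same shape as lamb/phi.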
###########################################################
###########################################################
#
# binning (2d only)
#
###########################################################
###########################################################
def _binning_check(
binning,
domain=None, nbsplines=None,
):
lk = ['phi', 'lamb']
lkall = lk + ['nperbin']
msg = (
"binning must be dict of the form:\n"
+ "\t- provide number of bins:\n"
+ "\t \t{'phi': int,\n"
+ "\t \t 'lamb': int}\n"
+ "\t- provide bin edges vectors:\n"
+ "\t \t{'phi': 1d np.ndarray (increasing),\n"
+ "\t \t 'lamb': 1d np.ndarray (increasing)}\n"
+ " provided:\n{}".format(binning)
)
# Check input
if binning is None:
binning = _BINNING
if nbsplines is None:
nbsplines = False
if nbsplines is not False:
c0 = isinstance(nbsplines, int) and nbsplines > 0
if not c0:
            msg2 = (
                "nbsplines must be a positive int!\n"
+ "\t- nbsplines: {}\n".format(nbsplines)
)
raise Exception(msg2)
# Check which format was passed and return None or dict
ltypes0 = _LTYPES
ltypes1 = [tuple, list, np.ndarray]
lc = [
binning is False,
(
isinstance(binning, dict)
and all([kk in lkall for kk in binning.keys()])
),
type(binning) in ltypes0,
type(binning) in ltypes1,
]
if not any(lc):
raise Exception(msg)
if binning is False:
return binning
elif type(binning) in ltypes0:
binning = {
'phi': {'nbins': int(binning)},
'lamb': {'nbins': int(binning)},
}
elif type(binning) in ltypes1:
binning = np.atleast_1d(binning).ravel()
binning = {
'phi': {'edges': binning},
'lamb': {'edges': binning},
}
for kk in lk:
if type(binning[kk]) in ltypes0:
binning[kk] = {'nbins': int(binning[kk])}
elif type(binning[kk]) in ltypes1:
binning[kk] = {'edges': np.atleast_1d(binning[kk]).ravel()}
c0 = all([
all([k1 in ['edges', 'nbins'] for k1 in binning[k0].keys()])
for k0 in lk
])
c0 = (
c0
and all([
(
(
binning[k0].get('nbins') is None
or type(binning[k0].get('nbins')) in ltypes0
)
and (
binning[k0].get('edges') is None
or type(binning[k0].get('edges')) in ltypes1
)
)
for k0 in lk
])
)
if not c0:
raise Exception(msg)
# Check dict
for k0 in lk:
c0 = all([k1 in ['nbins', 'edges'] for k1 in binning[k0].keys()])
if not c0:
raise Exception(msg)
if binning[k0].get('nbins') is not None:
binning[k0]['nbins'] = int(binning[k0]['nbins'])
if binning[k0].get('edges') is None:
binning[k0]['edges'] = np.linspace(
domain[k0]['minmax'][0], domain[k0]['minmax'][1],
binning[k0]['nbins'] + 1,
endpoint=True,
)
else:
binning[k0]['edges'] = np.atleast_1d(
binning[k0]['edges']).ravel()
if binning[k0]['nbins'] != binning[k0]['edges'].size - 1:
raise Exception(msg)
        elif binning[k0].get('edges') is not None:  # key is 'edges', matching the rest of this function
binning[k0]['edges'] = np.atleast_1d(binning[k0]['edges']).ravel()
binning[k0]['nbins'] = binning[k0]['edges'].size - 1
else:
raise Exception(msg)
if not np.allclose(binning[k0]['edges'],
np.unique(binning[k0]['edges'])):
raise Exception(msg)
# Optional check vs nbsplines and deg
if nbsplines is not False:
if binning['phi']['nbins'] <= nbsplines:
            msg = (
                "The number of phi bins is too low (it must exceed nbsplines):\n"
+ "\t- nbins = {}\n".format(binning['phi']['nbins'])
+ "\t- nbsplines = {}".format(nbsplines)
)
raise Exception(msg)
return binning
def binning_2d_data(
lamb, phi, data, indok=None,
domain=None, binning=None,
nbsplines=None,
phi1d=None, lamb1d=None,
dataphi1d=None, datalamb1d=None,
):
# ------------------
# Checkformat input
binning = _binning_check(
binning,
domain=domain, nbsplines=nbsplines,
)
nspect = data.shape[0]
if binning is False:
if phi1d is None:
            phi1d_bins = np.linspace(
                domain['phi']['minmax'][0], domain['phi']['minmax'][1], 100,
            )
            lamb1d_bins = np.linspace(
                domain['lamb']['minmax'][0], domain['lamb']['minmax'][1], 100,
            )
            dataf = data.reshape((nspect, data.shape[1]*data.shape[2]))
            dataphi1d = scpstats.binned_statistic(
                phi.ravel(),
                dataf,
                statistic='sum',
                bins=phi1d_bins,
            )[0]
            datalamb1d = scpstats.binned_statistic(
                lamb.ravel(),
                dataf,
                statistic='sum',
                bins=lamb1d_bins,
            )[0]
            phi1d = 0.5*(phi1d_bins[1:] + phi1d_bins[:-1])
            lamb1d = 0.5*(lamb1d_bins[1:] + lamb1d_bins[:-1])
            # import pdb; pdb.set_trace()    # DB (disabled debug breakpoint)
return (
lamb, phi, data, indok, binning,
phi1d, lamb1d, dataphi1d, datalamb1d,
)
else:
nphi = binning['phi']['nbins']
nlamb = binning['lamb']['nbins']
bins = (binning['lamb']['edges'], binning['phi']['edges'])
# ------------------
# Compute
databin = np.full((nspect, nphi, nlamb), np.nan)
nperbin = np.full((nspect, nphi, nlamb), np.nan)
for ii in range(nspect):
databin[ii, ...] = scpstats.binned_statistic_2d(
phi[indok[ii, ...]],
lamb[indok[ii, ...]],
data[indok[ii, ...]],
statistic='sum', bins=bins,
range=None, expand_binnumbers=True,
)[0]
nperbin[ii, ...] = scpstats.binned_statistic_2d(
phi[indok[ii, ...]],
lamb[indok[ii, ...]],
np.ones((indok[ii, ...].sum(),), dtype=int),
statistic='sum', bins=bins,
range=None, expand_binnumbers=True,
)[0]
binning['nperbin'] = nperbin
lambbin = 0.5*(
binning['lamb']['edges'][1:] + binning['lamb']['edges'][:-1]
)
phibin = 0.5*(
binning['phi']['edges'][1:] + binning['phi']['edges'][:-1]
)
lambbin = np.repeat(lambbin[None, :], nphi, axis=0)
phibin = np.repeat(phibin[:, None], nlamb, axis=1)
indok = ~np.isnan(databin)
# dataphi1d
phi1d = phibin
lamb1d = lambbin
dataphi1d = np.nanmean(databin, axis=2)
datalamb1d = np.nanmean(databin, axis=1)
return (
lambbin, phibin, databin, indok, binning,
phi1d, lamb1d, dataphi1d, datalamb1d,
)
###########################################################
###########################################################
#
# dprepare dict
#
###########################################################
###########################################################
def _get_subset_indices(subset, indlogical):
if subset is None:
subset = _SUBSET
if subset is False:
return indlogical
c0 = ((isinstance(subset, np.ndarray)
and subset.shape == indlogical.shape
and 'bool' in subset.dtype.name)
or (type(subset) in [int, float, np.int_, np.float_]
and subset >= 0))
if not c0:
msg = ("subset must be either:\n"
+ "\t- an array of bool of shape: {}\n".format(indlogical.shape)
+ "\t- a positive int (nb. of ind. to keep from indlogical)\n"
+ "You provided:\n{}".format(subset))
raise Exception(msg)
if isinstance(subset, np.ndarray):
indlogical = subset[None, ...] & indlogical
else:
subset = np.random.default_rng().choice(
indlogical.sum(), size=int(indlogical.sum() - subset),
replace=False, shuffle=False)
for ii in range(indlogical.shape[0]):
ind = indlogical[ii, ...].nonzero()
indlogical[ii, ind[0][subset], ind[1][subset]] = False
return indlogical
def _extract_lphi_spectra(
data, phi, lamb,
lphi=None, lphi_tol=None,
databin=None, binning=None, nlamb=None,
):
    """ Extract several 1d spectra from the 2d image at lphi """
# --------------
# Check input
if lphi is None:
lphi = False
if lphi is False:
lphi_tol = False
if lphi is not False:
lphi = np.atleast_1d(lphi).astype(float).ravel()
lphi_tol = float(lphi_tol)
if lphi is False:
return False, False
nphi = len(lphi)
# --------------
# Compute non-trivial cases
if binning is False:
if nlamb is None:
nlamb = lamb.shape[1]
lphi_lamb = np.linspace(lamb.min(), lamb.max(), nlamb+1)
        lphi_spectra = np.full((data.shape[0], nphi, lphi_lamb.size-1), np.nan)  # (nspect, nphi, nlamb), matching the assignment below
for ii in range(nphi):
indphi = np.abs(phi - lphi[ii]) < lphi_tol
lphi_spectra[:, ii, :] = scpstats.binned_statistic(
lamb[indphi], data[:, indphi], bins=lphi_lamb,
statistic='mean', range=None,
)[0]
else:
lphi_lamb = 0.5*(
binning['lamb']['edges'][1:] + binning['lamb']['edges'][:-1]
)
lphi_phi = 0.5*(
binning['phi']['edges'][1:] + binning['phi']['edges'][:-1]
)
lphi_spectra = np.full((data.shape[0], nphi, lphi_lamb.size), np.nan)
lphi_spectra1 = np.full((data.shape[0], nphi, lphi_lamb.size), np.nan)
for ii in range(nphi):
datai = databin[:, np.abs(lphi_phi - lphi[ii]) < lphi_tol, :]
iok = np.any(~np.isnan(datai), axis=1)
for jj in range(datai.shape[0]):
if np.any(iok[jj, :]):
lphi_spectra[jj, ii, iok[jj, :]] = np.nanmean(
datai[jj, :, iok[jj, :]],
axis=1,
)
return lphi_spectra, lphi_lamb
def _checkformat_possubset(pos=None, subset=None):
if pos is None:
pos = _POS
c0 = isinstance(pos, bool) or type(pos) in _LTYPES
if not c0:
msg = ("Arg pos must be either:\n"
+ "\t- False: no positivity constraints\n"
+ "\t- True: all negative values are set to nan\n"
+ "\t- float: all negative values are set to pos")
raise Exception(msg)
if subset is None:
subset = _SUBSET
return pos, subset
def multigausfit1d_from_dlines_prepare(
data=None, lamb=None,
mask=None, domain=None,
pos=None, subset=None,
):
# --------------
# Check input
pos, subset = _checkformat_possubset(pos=pos, subset=subset)
# Check shape of data (multiple time slices possible)
lamb, data, mask = _checkformat_data_fit12d_dlines(
data, lamb, mask=mask,
)
# --------------
# Use valid data only and optionally restrict lamb
indok, domain = apply_domain(lamb, domain=domain)
if mask is not None:
indok &= mask
# Optional positivity constraint
if pos is not False:
if pos is True:
data[data < 0.] = np.nan
else:
data[data < 0.] = pos
# Introduce time-dependence (useful for valid)
indok = indok[None, ...] & (~np.isnan(data))
# Recompute domain
domain['lamb']['minmax'] = [
np.nanmin(lamb[np.any(indok, axis=0)]),
np.nanmax(lamb[np.any(indok, axis=0)])
]
# --------------
# Optionally fit only on subset
# randomly pick subset indices (replace=False => no duplicates)
indok = _get_subset_indices(subset, indok)
if np.any(np.isnan(data[indok])):
msg = (
"Some NaNs in data not caught by indok!"
)
raise Exception(msg)
# --------------
# Return
dprepare = {
'data': data,
'lamb': lamb,
'domain': domain,
'indok': indok,
'pos': pos,
'subset': subset,
}
return dprepare
def multigausfit2d_from_dlines_prepare(
data=None, lamb=None, phi=None,
mask=None, domain=None,
pos=None, binning=None,
nbsplines=None, deg=None, subset=None,
nxi=None, nxj=None,
lphi=None, lphi_tol=None,
):
# --------------
# Check input
pos, subset = _checkformat_possubset(pos=pos, subset=subset)
# Check shape of data (multiple time slices possible)
(
lamb, phi, data, mask,
phi1d, lamb1d, dataphi1d, datalamb1d,
) = _checkformat_data_fit12d_dlines(
data, lamb, phi,
nxi=nxi, nxj=nxj, mask=mask, is2d=True,
)
# --------------
# Use valid data only and optionally restrict lamb / phi
indok, domain = apply_domain(lamb, phi, domain=domain)
if mask is not None:
indok &= mask
# Optional positivity constraint
if pos is not False:
if pos is True:
data[data < 0.] = np.nan
else:
data[data < 0.] = pos
# Introduce time-dependence (useful for valid)
indok = indok[None, ...] & (~np.isnan(data))
# Recompute domain
domain['lamb']['minmax'] = [
np.nanmin(lamb[np.any(indok, axis=0)]),
np.nanmax(lamb[np.any(indok, axis=0)])
]
domain['phi']['minmax'] = [
np.nanmin(phi[np.any(indok, axis=0)]),
np.nanmax(phi[np.any(indok, axis=0)])
]
# --------------
    # Optional 2d binning
(
lambbin, phibin, databin, indok, binning,
phi1d, lamb1d, dataphi1d, datalamb1d,
) = binning_2d_data(
lamb, phi, data, indok=indok,
binning=binning, domain=domain,
nbsplines=nbsplines,
phi1d=phi1d, lamb1d=lamb1d,
dataphi1d=dataphi1d, datalamb1d=datalamb1d,
)
# --------------
# Optionally fit only on subset
# randomly pick subset indices (replace=False => no duplicates)
indok = _get_subset_indices(subset, indok)
# --------------
# Optionally extract 1d spectra at lphi
lphi_spectra, lphi_lamb = _extract_lphi_spectra(
data, phi, lamb,
lphi, lphi_tol,
databin=databin,
binning=binning,
)
# --------------
# Return
dprepare = {
'data': databin, 'lamb': lambbin, 'phi': phibin,
'domain': domain, 'binning': binning, 'indok': indok,
'pos': pos, 'subset': subset, 'nxi': nxi, 'nxj': nxj,
'lphi': lphi, 'lphi_tol': lphi_tol,
'lphi_spectra': lphi_spectra, 'lphi_lamb': lphi_lamb,
'phi1d': phi1d, 'dataphi1d': dataphi1d,
'lamb1d': lamb1d, 'datalamb1d': datalamb1d,
}
return dprepare
def multigausfit2d_from_dlines_dbsplines(
knots=None, deg=None, nbsplines=None,
phimin=None, phimax=None,
symmetryaxis=None,
):
# Check / format input
if nbsplines is None:
nbsplines = _NBSPLINES
c0 = [nbsplines is False, isinstance(nbsplines, int)]
if not any(c0):
msg = "nbsplines must be a int (degree of bsplines to be used!)"
raise Exception(msg)
if nbsplines is False:
lk = ['knots', 'knots_mult', 'nknotsperbs', 'ptsx0', 'nbs', 'deg']
return dict.fromkeys(lk, False)
if deg is None:
deg = _DEG
if not (isinstance(deg, int) and deg <= 3):
msg = "deg must be a int <= 3 (the degree of the bsplines to be used!)"
raise Exception(msg)
if symmetryaxis is None:
symmetryaxis = False
if knots is None:
if phimin is None or phimax is None:
msg = "Please provide phimin and phimax if knots is not provided!"
raise Exception(msg)
if symmetryaxis is False:
knots = np.linspace(phimin, phimax, nbsplines + 1 - deg)
else:
phi2max = np.max(
np.abs(np.r_[phimin, phimax][None, :] - symmetryaxis[:, None])
)
knots = np.linspace(0, phi2max, nbsplines + 1 - deg)
if not np.allclose(knots, np.unique(knots)):
msg = "knots must be a vector of unique values!"
raise Exception(msg)
# Get knots for scipy (i.e.: with multiplicity)
if deg > 0:
knots_mult = np.r_[[knots[0]]*deg, knots, [knots[-1]]*deg]
else:
knots_mult = knots
nknotsperbs = 2 + deg
nbs = knots.size - 1 + deg
assert nbs == knots_mult.size - 1 - deg
if deg == 0:
ptsx0 = 0.5*(knots[:-1] + knots[1:])
elif deg == 1:
ptsx0 = knots
elif deg == 2:
num = (knots_mult[3:]*knots_mult[2:-1]
- knots_mult[1:-2]*knots_mult[:-3])
denom = (knots_mult[3:] + knots_mult[2:-1]
- knots_mult[1:-2] - knots_mult[:-3])
ptsx0 = num / denom
else:
# To be derived analytically for more accuracy
ptsx0 = np.r_[
knots[0],
np.mean(knots[:2]),
knots[1:-1],
np.mean(knots[-2:]),
knots[-1],
]
msg = ("degree 3 not fully implemented yet!"
+ "Approximate values for maxima positions")
warnings.warn(msg)
assert ptsx0.size == nbs
dbsplines = {
'knots': knots, 'knots_mult': knots_mult,
'nknotsperbs': nknotsperbs, 'ptsx0': ptsx0,
'nbs': nbs, 'deg': deg,
}
return dbsplines
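def _example_dbsplines():
    # Hedged sketch (added for illustration only): degree-2 bsplines on a small
    # phi interval, to show the relation nbs = nknots - 1 + deg used above.
    dbs = multigausfit2d_from_dlines_dbsplines(
        deg=2, nbsplines=5, phimin=-0.1, phimax=0.1,
    )
    # With deg=2 and nbsplines=5: knots has 5 + 1 - 2 = 4 unique values,
    # knots_mult has 4 + 2*2 = 8 values (end knots repeated) and dbs['nbs'] == 5
    return dbs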
###########################################################
###########################################################
#
# dvalid dict (S/N ratio)
#
###########################################################
###########################################################
def _dvalid_checkfocus_errmsg(focus=None, focus_half_width=None,
lines_keys=None):
msg = ("Please provide focus as:\n"
+ "\t- str: the key of an available spectral line:\n"
+ "\t\t{}\n".format(lines_keys)
+ "\t- float: a wavelength value\n"
+ "\t- a list / tuple / flat np.ndarray of such\n"
+ " You provided:\n"
+ "{}\n\n".format(focus)
+ "Please provide focus_half_width as:\n"
+ "\t- float: a unique wavelength value for all focus\n"
+ "\t- a list / tuple / flat np.ndarray of such\n"
+ " You provided:\n"
+ "{}".format(focus_half_width))
return msg
def _dvalid_checkfocus(
focus=None,
focus_half_width=None,
lines_keys=None,
lines_lamb=None,
lamb=None,
):
""" Check the provided focus is properly formatted and convert it
focus specifies the wavelength range of interest in which S/N is evaluated
It can be provided as:
- a spectral line key (or list of such)
- a wavelength (or list of such)
    For each wavelength, a spectral range centered on it is defined using
    the provided focus_half_width.
    The focus_half_width can be a unique value applied to all focus values,
    or a list of values of the same length as focus.
    focus is then returned as a (n, 2) array where each row gives a central
    wavelength and a half-width of interest
    (see _example_checkfocus below for a small usage sketch).
"""
if focus in [None, False]:
return False
# Check focus and transform to array of floats
lc0 = [
type(focus) in [str] + _LTYPES,
type(focus) in [list, tuple, np.ndarray]
]
if not any(lc0):
msg = _dvalid_checkfocus_errmsg(
focus, focus_half_width, lines_keys,
)
raise Exception(msg)
if lc0[0] is True:
focus = [focus]
for ii in range(len(focus)):
if focus[ii] not in lines_keys and type(focus[ii]) not in _LTYPES:
msg = _dvalid_checkfocus_errmsg(
focus, focus_half_width, lines_keys,
)
raise Exception(msg)
focus = np.array([
lines_lamb[(lines_keys == ff).nonzero()[0][0]]
if ff in lines_keys else ff for ff in focus
])
# Check focus_half_width and transform to array of floats
if focus_half_width is None:
focus_half_width = (np.nanmax(lamb) - np.nanmin(lamb))/10.
lc0 = [
type(focus_half_width) in _LTYPES,
(
type(focus_half_width) in [list, tuple, np.ndarray]
and len(focus_half_width) == focus.size
and all([type(fhw) in _LTYPES for fhw in focus_half_width])
)
]
if not any(lc0):
msg = _dvalid_checkfocus_errmsg(
focus, focus_half_width, lines_keys,
)
raise Exception(msg)
if lc0[0] is True:
focus_half_width = np.full((focus.size,), focus_half_width)
return np.array([focus, np.r_[focus_half_width]]).T
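def _example_checkfocus():
    # Hedged usage sketch (added for illustration only): how focus and
    # focus_half_width are normalized into a (n, 2) array of
    # (central wavelength, half-width); the wavelengths below are made up.
    lamb = np.linspace(3.90e-10, 4.00e-10, 100)
    foc = _dvalid_checkfocus(
        focus=[3.94e-10, 3.96e-10],
        focus_half_width=1.e-12,
        lines_keys=np.array([], dtype=str),
        lines_lamb=np.array([]),
        lamb=lamb,
    )
    # foc.shape == (2, 2): foc[:, 0] are the wavelengths, foc[:, 1] the half-widths
    return foc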
def fit12d_dvalid(
data=None, lamb=None, phi=None,
indok=None, binning=None,
valid_nsigma=None, valid_fraction=None,
focus=None, focus_half_width=None,
lines_keys=None, lines_lamb=None, dphimin=None,
nbs=None, deg=None, knots_mult=None, nknotsperbs=None,
return_fract=None,
):
""" Return a dict of valid time steps and phi indices
    Data points are considered valid if their signal is sufficient:
        np.sqrt(data) >= valid_nsigma
    data is expected to be provided in counts (or photons) - TBC!
    (see _example_valid_threshold below for a small sketch of this criterion)
"""
# Check inputs
if valid_nsigma is None:
valid_nsigma = _VALID_NSIGMA
if valid_fraction is None:
valid_fraction = _VALID_FRACTION
if binning is None:
binning = False
if dphimin is None:
dphimin = 0.
if return_fract is None:
return_fract = False
data2d = data.ndim == 3
nspect = data.shape[0]
focus = _dvalid_checkfocus(
focus,
focus_half_width=focus_half_width,
lines_keys=lines_keys,
lines_lamb=lines_lamb,
lamb=lamb,
)
# Get indices of pts with enough signal
ind = np.zeros(data.shape, dtype=bool)
if indok is None:
isafe = (~np.isnan(data))
isafe[isafe] = data[isafe] >= 0.
# Ok with and w/o binning if data provided as counts / photons
# and binning was done by sum (and not mean)
ind[isafe] = np.sqrt(data[isafe]) > valid_nsigma
else:
ind[indok] = np.sqrt(data[indok]) > valid_nsigma
# Derive indt and optionally dphi and indknots
indbs, dphi = False, False
if focus is not False:
# TBC
lambok = np.rollaxis(
np.array([np.abs(lamb - ff[0]) < ff[1] for ff in focus]),
0,
lamb.ndim+1,
)
indall = ind[..., None] & lambok[None, ...]
if data2d is True:
# Make sure there are at least deg + 2 different phi
deltaphi = np.max(np.diff(knots_mult))
# Code ok with and without binning :-)
if focus is False:
fract = np.full((nspect, nbs), np.nan)
for ii in range(nbs):
iphi = (
(phi >= knots_mult[ii])
& (phi < knots_mult[ii+nknotsperbs-1])
)
fract[:, ii] = (
np.sum(np.sum(ind & iphi[None, ...], axis=-1), axis=-1)
/ np.sum(iphi)
)
indbs = fract > valid_fraction
else:
fract = np.full((nspect, nbs, len(focus)), np.nan)
for ii in range(nbs):
iphi = ((phi >= knots_mult[ii])
& (phi < knots_mult[ii+nknotsperbs-1]))
fract[:, ii, :] = (
np.sum(np.sum(indall & iphi[None, ..., None],
axis=1), axis=1)
/ np.sum(np.sum(iphi[..., None] & lambok,
axis=0), axis=0))
indbs = np.all(fract > valid_fraction, axis=2)
indt = np.any(indbs, axis=1)
dphi = deltaphi*(deg + indbs[:, deg:-deg].sum(axis=1))
else:
# 1d spectra
if focus is False:
fract = ind.sum(axis=-1) / ind.shape[1]
indt = fract > valid_fraction
else:
fract = np.sum(indall, axis=1) / lambok.sum(axis=0)[None, :]
indt = np.all(fract > valid_fraction, axis=1)
# Optional debug
if focus is not False and False:
indt_debug, ifocus = 40, 1
if data2d is True:
indall2 = indall.astype(int)
indall2[:, lambok] = 1
indall2[ind[..., None] & lambok[None, ...]] = 2
plt.figure()
plt.imshow(indall2[indt_debug, :, :, ifocus].T, origin='lower')
else:
plt.figure()
plt.plot(lamb[~indall[indt_debug, :, ifocus]],
data[indt_debug, ~indall[indt_debug, :, ifocus]], '.k',
lamb[indall[indt_debug, :, ifocus]],
data[indt_debug, indall[indt_debug, :, ifocus]], '.r')
plt.axvline(focus[ifocus, 0], ls='--', c='k')
if not np.any(indt):
msg = (
"\nThere is no valid time step with the provided constraints:\n"
+ "\t- valid_nsigma = {}\n".format(valid_nsigma)
+ "\t- valid_fraction = {}\n".format(valid_fraction)
+ "\t- focus = {}\n".format(focus)
+ "\t- fract = {}\n".format(fract)
)
raise Exception(msg)
# return
dvalid = {
'indt': indt, 'dphi': dphi, 'indbs': indbs, 'ind': ind,
'focus': focus, 'valid_fraction': valid_fraction,
'valid_nsigma': valid_nsigma,
}
if return_fract is True:
dvalid['fract'] = fract
return dvalid
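def _example_valid_threshold():
    # Hedged sketch (added for illustration only) of the Poisson-like S/N
    # criterion used in fit12d_dvalid: a pixel with C counts has S/N ~ sqrt(C),
    # so it is kept when sqrt(C) > valid_nsigma.
    counts = np.array([[4., 36., 100., np.nan]])
    valid_nsigma = 6.
    ind = np.zeros(counts.shape, dtype=bool)
    isafe = ~np.isnan(counts)
    isafe[isafe] = counts[isafe] >= 0.
    ind[isafe] = np.sqrt(counts[isafe]) > valid_nsigma
    # only the 100-count pixel passes (sqrt(100) = 10 > 6)
    return ind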
###########################################################
###########################################################
#
# dlines dict (lines vs domain)
#
###########################################################
###########################################################
def _checkformat_dlines(dlines=None, domain=None):
if dlines is None:
dlines = False
if not isinstance(dlines, dict):
msg = "Arg dlines must be a dict!"
raise Exception(msg)
lc = [
(k0, type(v0)) for k0, v0 in dlines.items()
if not (
isinstance(k0, str)
and isinstance(v0, dict)
and 'lambda0' in v0.keys()
and (
type(v0['lambda0']) in _LTYPES
or (
isinstance(v0['lambda0'], np.ndarray)
and v0['lambda0'].size == 1
)
)
)
]
if len(lc) > 0:
lc = ["\t- {}: {}".format(*cc) for cc in lc]
msg = (
"Arg dlines must be a dict of the form:\n"
+ "\t{'line0': {'lambda0': float},\n"
+ "\t 'line1': {'lambda0': float},\n"
+ "\t ...\n"
+ "\t 'lineN': {'lambda0': float}}\n"
+ " You provided:\n{}".format('\n'.join(lc))
)
raise Exception(msg)
# Select relevant lines (keys, lamb)
lines_keys = np.array([k0 for k0 in dlines.keys()])
lines_lamb = np.array([float(dlines[k0]['lambda0']) for k0 in lines_keys])
if domain not in [None, False]:
ind = (
(lines_lamb >= domain['lamb']['minmax'][0])
& (lines_lamb <= domain['lamb']['minmax'][1])
)
lines_keys = lines_keys[ind]
lines_lamb = lines_lamb[ind]
inds = np.argsort(lines_lamb)
lines_keys, lines_lamb = lines_keys[inds], lines_lamb[inds]
nlines = lines_lamb.size
dlines = {k0: dict(dlines[k0]) for k0 in lines_keys}
return dlines, lines_keys, lines_lamb
###########################################################
###########################################################
#
# dinput dict (lines + spectral constraints)
#
###########################################################
###########################################################
def fit1d_dinput(
dlines=None, dconstraints=None, dconstants=None, dprepare=None,
data=None, lamb=None, mask=None,
domain=None, pos=None, subset=None,
same_spectrum=None, nspect=None, same_spectrum_dlamb=None,
focus=None, valid_fraction=None, valid_nsigma=None, focus_half_width=None,
valid_return_fract=None,
dscales=None, dx0=None, dbounds=None,
defconst=_DCONSTRAINTS,
):
""" Check and format a dict of inputs to be fed to fit1d()
This dict will contain all information relevant for solving the fit:
- dlines: dict of lines (with 'lambda0': wavelength at rest)
- lamb: vector of wavelength of the experimental spectrum
- data: experimental spectrum, possibly 2d (time-varying)
- dconstraints: dict of constraints on lines (amp, width, shift)
    - pos: positivity constraint (True => <0 set to nan, float => <0 set to that value, False => no constraint)
- domain:
- mask:
- subset:
- same_spectrum:
- focus:
"""
# ------------------------
# Check / format dprepare
# ------------------------
if dprepare is None:
dprepare = multigausfit1d_from_dlines_prepare(
data=data, lamb=lamb,
mask=mask, domain=domain,
pos=pos, subset=subset,
)
# ------------------------
# Check / format dlines
# ------------------------
dlines, lines_keys, lines_lamb = _checkformat_dlines(
dlines=dlines,
domain=dprepare['domain'],
)
nlines = lines_lamb.size
# Check same_spectrum
if same_spectrum is None:
same_spectrum = _SAME_SPECTRUM
if same_spectrum is True:
        if type(nspect) not in [int, np.int_]:
msg = "Please provide nspect if same_spectrum = True"
raise Exception(msg)
if same_spectrum_dlamb is None:
same_spectrum_dlamb = min(
2*np.diff(dprepare['domain']['lamb']['minmax']),
dprepare['domain']['lamb']['minmax'][0],
)
# ------------------------
# Check / format dconstraints
# ------------------------
dconstraints = _checkformat_dconstraints(
dconstraints=dconstraints, defconst=defconst,
)
dinput = {}
# ------------------------
# Check / format double
# ------------------------
_dconstraints_double(dinput, dconstraints, defconst=defconst)
# ------------------------
# Check / format width, shift, amp (groups with possible ratio)
# ------------------------
for k0 in ['amp', 'width', 'shift']:
dinput[k0] = _width_shift_amp(
dconstraints.get(k0, defconst[k0]),
dconstants=dconstants,
keys=lines_keys, nlines=nlines,
dlines=dlines, k0=k0,
)
# ------------------------
# add mz, symb, ION, keys, lamb
# ------------------------
mz = np.array([dlines[k0].get('m', np.nan) for k0 in lines_keys])
symb = np.array([dlines[k0].get('symbol', k0) for k0 in lines_keys])
ion = np.array([dlines[k0].get('ion', '?') for k0 in lines_keys])
# ------------------------
# same_spectrum
# ------------------------
if same_spectrum is True:
        keysadd = np.array([[kk+'_bis{:04.0f}'.format(ii) for kk in lines_keys]
                            for ii in range(1, nspect)]).ravel()
lines_lamb = (
same_spectrum_dlamb*np.arange(0, nspect)[:, None]
+ lines_lamb[None, :]
)
        lines_keys = np.r_[lines_keys, keysadd]
for k0 in _DORDER:
# Add other lines to original group
keyk = dinput[k0]['keys']
offset = np.tile(dinput[k0]['offset'], nspect)
if k0 == 'shift':
ind = np.tile(dinput[k0]['ind'], (1, nspect))
coefs = (
dinput[k0]['coefs']
* lines_lamb[0, :] / lines_lamb
).ravel()
else:
coefs = np.tile(dinput[k0]['coefs'], nspect)
keysadd = np.array([
[kk+'_bis{:04.0f}'.format(ii) for kk in keyk]
for ii in range(1, nspect)
]).ravel()
ind = np.zeros((keyk.size*nspect, nlines*nspect))
for ii in range(nspect):
i0, i1 = ii*keyk.size, (ii+1)*keyk.size
j0, j1 = ii*nlines, (ii+1)*nlines
ind[i0:i1, j0:j1] = dinput[k0]['ind']
keyk = np.r_[keyk, keysadd]
dinput[k0]['keys'] = keyk
dinput[k0]['ind'] = ind
dinput[k0]['coefs'] = coefs
dinput[k0]['offset'] = offset
nlines *= nspect
lines_lamb = lines_lamb.ravel()
# update mz, symb, ion
mz = np.tile(mz, nspect)
symb = np.tile(symb, nspect)
ion = np.tile(ion, nspect)
# ------------------------
# add lines and properties
# ------------------------
dinput['keys'] = lines_keys
dinput['lines'] = lines_lamb
dinput['nlines'] = nlines
dinput['mz'] = mz
dinput['symb'] = symb
dinput['ion'] = ion
dinput['same_spectrum'] = same_spectrum
if same_spectrum is True:
dinput['same_spectrum_nspect'] = nspect
dinput['same_spectrum_dlamb'] = same_spectrum_dlamb
else:
dinput['same_spectrum_nspect'] = False
dinput['same_spectrum_dlamb'] = False
# ------------------------
# S/N threshold indices
# ------------------------
dinput['valid'] = fit12d_dvalid(
data=dprepare['data'],
lamb=dprepare['lamb'],
indok=dprepare['indok'],
valid_nsigma=valid_nsigma,
valid_fraction=valid_fraction,
focus=focus, focus_half_width=focus_half_width,
lines_keys=lines_keys, lines_lamb=lines_lamb,
return_fract=valid_return_fract,
)
# Update with dprepare
dinput['dprepare'] = dict(dprepare)
# Add dind
dinput['dind'] = multigausfit1d_from_dlines_ind(dinput)
# Add dscales, dx0 and dbounds
dinput['dscales'] = fit12d_dscales(dscales=dscales, dinput=dinput)
dinput['dbounds'] = fit12d_dbounds(dbounds=dbounds, dinput=dinput)
dinput['dx0'] = fit12d_dx0(dx0=dx0, dinput=dinput)
dinput['dconstants'] = fit12d_dconstants(
dconstants=dconstants, dinput=dinput,
)
return dinput
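def _example_fit1d_dinput_call(data, lamb):
    # Hedged usage sketch (added for illustration only): a minimal call to
    # fit1d_dinput with a two-line dict; the keys and lambda0 values are
    # made-up placeholders and all other arguments keep their defaults.
    dlines = {
        'line0': {'lambda0': 3.94e-10},
        'line1': {'lambda0': 3.96e-10},
    }
    return fit1d_dinput(dlines=dlines, data=data, lamb=lamb)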
def fit2d_dinput(
dlines=None, dconstraints=None, dconstants=None, dprepare=None,
deg=None, nbsplines=None, knots=None,
data=None, lamb=None, phi=None, mask=None,
domain=None, pos=None, subset=None, binning=None, cent_fraction=None,
focus=None, valid_fraction=None, valid_nsigma=None, focus_half_width=None,
valid_return_fract=None,
dscales=None, dx0=None, dbounds=None,
nxi=None, nxj=None,
lphi=None, lphi_tol=None,
defconst=_DCONSTRAINTS,
):
""" Check and format a dict of inputs to be fed to fit2d()
This dict will contain all information relevant for solving the fit:
- dlines: dict of lines (with 'lambda0': wavelength at rest)
- lamb: vector of wavelength of the experimental spectrum
- data: experimental spectrum, possibly 2d (time-varying)
- dconstraints: dict of constraints on lines (amp, width, shift)
    - pos: positivity constraint (True => <0 set to nan, float => <0 set to that value, False => no constraint)
- domain:
- mask:
- subset:
- same_spectrum:
- focus:
"""
# ------------------------
# Check / format dprepare
# ------------------------
if dprepare is None:
dprepare = multigausfit2d_from_dlines_prepare(
data=data, lamb=lamb, phi=phi,
mask=mask, domain=domain,
pos=pos, subset=subset, binning=binning,
nbsplines=nbsplines, deg=deg,
nxi=nxi, nxj=nxj,
lphi=None, lphi_tol=None,
)
# ------------------------
# Check / format dlines
# ------------------------
dlines, lines_keys, lines_lamb = _checkformat_dlines(
dlines=dlines,
domain=dprepare['domain'],
)
nlines = lines_lamb.size
# ------------------------
# Check / format dconstraints
# ------------------------
dconstraints = _checkformat_dconstraints(
dconstraints=dconstraints, defconst=defconst,
)
dinput = {}
# ------------------------
# Check / format symmetry
# ------------------------
_dconstraints_symmetry(
dinput, dprepare=dprepare, symmetry=dconstraints.get('symmetry'),
cent_fraction=cent_fraction, defconst=defconst,
)
# ------------------------
# Check / format double (spectral line doubling)
# ------------------------
_dconstraints_double(dinput, dconstraints, defconst=defconst)
# ------------------------
    # Check / format width, shift, amp (groups with possible ratio)
# ------------------------
for k0 in ['amp', 'width', 'shift']:
dinput[k0] = _width_shift_amp(
dconstraints.get(k0, defconst[k0]),
dconstants=dconstants,
keys=lines_keys, nlines=nlines,
dlines=dlines, k0=k0,
)
# ------------------------
# add mz, symb, ION, keys, lamb
# ------------------------
mz = np.array([dlines[k0].get('m', np.nan) for k0 in lines_keys])
symb = np.array([dlines[k0].get('symbol', k0) for k0 in lines_keys])
ion = np.array([dlines[k0].get('ION', '?') for k0 in lines_keys])
# ------------------------
# add lines and properties
# ------------------------
dinput['keys'] = lines_keys
dinput['lines'] = lines_lamb
dinput['nlines'] = nlines
dinput['mz'] = mz
dinput['symb'] = symb
dinput['ion'] = ion
# ------------------------
# Get dict of bsplines
# ------------------------
dinput.update(multigausfit2d_from_dlines_dbsplines(
knots=knots, deg=deg, nbsplines=nbsplines,
phimin=dprepare['domain']['phi']['minmax'][0],
phimax=dprepare['domain']['phi']['minmax'][1],
symmetryaxis=dinput.get('symmetry_axis')
))
# ------------------------
# S/N threshold indices
# ------------------------
dinput['valid'] = fit12d_dvalid(
data=dprepare['data'],
lamb=dprepare['lamb'],
phi=dprepare['phi'],
binning=dprepare['binning'],
indok=dprepare['indok'],
valid_nsigma=valid_nsigma,
valid_fraction=valid_fraction,
focus=focus, focus_half_width=focus_half_width,
lines_keys=lines_keys, lines_lamb=lines_lamb,
nbs=dinput['nbs'],
deg=dinput['deg'],
knots_mult=dinput['knots_mult'],
nknotsperbs=dinput['nknotsperbs'],
return_fract=valid_return_fract,
)
# Update with dprepare
dinput['dprepare'] = dict(dprepare)
# Add dind
dinput['dind'] = multigausfit2d_from_dlines_ind(dinput)
# Add dscales, dx0 and dbounds
dinput['dscales'] = fit12d_dscales(dscales=dscales, dinput=dinput)
dinput['dbounds'] = fit12d_dbounds(dbounds=dbounds, dinput=dinput)
dinput['dx0'] = fit12d_dx0(dx0=dx0, dinput=dinput)
dinput['dconstants'] = fit12d_dconstants(
dconstants=dconstants, dinput=dinput,
)
return dinput
###########################################################
###########################################################
#
# dind dict (indices storing for fast access)
#
###########################################################
###########################################################
def multigausfit1d_from_dlines_ind(dinput=None):
""" Return the indices of quantities in x to compute y """
# indices
# General shape: [bck, amp, widths, shifts]
# If double [..., double_shift, double_ratio]
# Except for bck, all indices should render nlines (2*nlines if double)
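    # Illustration (added, hypothetical sizes): with 3 lines, 2 amp groups,
    # 1 width group and 1 shift group, and double=False, x reads
    #   x = [bck_amp, bck_rate, amp_g0, amp_g1, width_g0, shift_g0]
    # i.e. 2 background values followed by one value per (amp/width/shift) group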
dind = {
'bck_amp': {'x': np.r_[0]},
'bck_rate': {'x': np.r_[1]},
'dshift': None,
'dratio': None,
}
nn = dind['bck_amp']['x'].size + dind['bck_rate']['x'].size
inddratio, inddshift = None, None
for k0 in _DORDER:
ind = dinput[k0]['ind']
        lnl = np.sum(ind, axis=1)
#!/usr/bin/env python
import numpy as np
import pandas as pd
from matplotlib import rcParams
#rcParams['font.family'] = ['Nimbus Sans L']
import matplotlib.pyplot as plt
import glob
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
import tushare as ts
import datetime
import argparse
import math
import ta
import os, sys, random
import smtplib
import imghdr
from email.message import EmailMessage
from email.mime.multipart import MIMEMultipart
from email.mime.application import MIMEApplication
from email.mime.text import MIMEText
import subprocess
import mplfinance as mpf
stock_index = ['000001.SH']
code2name = dict()
predict_days = 5
api = ts.pro_api(token='<PASSWORD> t<PASSWORD> token')
trading_note = """
This email is sent automatically by a program; please do not reply. Thanks!
### Conclusions
After testing over this period, the following indicators were selected (close is the closing price):
- rsi2: the 2-day RSI. A 2-period RSI is quite sensitive and fairly accurate at spotting short-term, phase-level buy points, but unreliable for exits; relying on it alone makes it easy to sell too early, and it draws down badly in poor markets.
- boll_wband20: 20-period Bollinger band width. Very accurate for judging short-term trends; when the price line crosses above boll_wband20 and trends with it, that is a strong uptrend.
- vwap30: 30-period volume weighted average price. When the price line crosses above vwap30 and trends with it, that is a strong uptrend.
- kc_wband15: 15-period Keltner channel width. When the price line crosses above kc_wband15 and trends with it, that is a strong uptrend.
- macd: fast line 5 periods, slow line 15 periods, signal line 7 periods. When the macd line crosses above the signal line it is an uptrend, though with some lag.
- adx15: 15-period average directional movement index. When +DMI > -DMI it is an uptrend.
- trix2: 2-period trix. When trix2 crosses above the price line and trends with it, that is a strong uptrend.
- mi: mass index. When the price line crosses above mi and trends with it, that is a strong uptrend.
- cci5: 5-period commodity channel index. Very sensitive; when cci5 > close and there is no clear downtrend, it is an uptrend.
- kst: KST oscillator. When kst crosses above its signal line and trends with it, that is a strong uptrend; false signals do occur.
- psar: parabolic stop and reverse. Every time the price line crosses above psar is a buy point.
- tsi: true strength index. When tsi crosses above the price line it is a strong uptrend.
- wr15: 15-period Williams percent range. When wr15 crosses above the price line and stays above it, it is an uptrend.
- roc15: 15-period rate of change. When roc15 crosses above the price line and stays above it, it is an uptrend.
- kama: Kaufman's adaptive moving average. When the price line crosses above kama, it is an uptrend.
"""
def check_stock_data(name):
files = glob.glob(name)
return (len(files) != 0)
def get_stock_data(name, weekly):
data = pd.DataFrame()
end_date = api.daily().iloc[0]['trade_date']
while True:
if weekly:
tmp = api.weekly(ts_code=name, end_date=end_date)
else:
tmp = ts.pro_bar(ts_code=name, api=api, end_date=end_date, adj='qfq')
print("get data length: %d, end_date: %s last trade day: %s" % (len(tmp), end_date, tmp.iloc[0].trade_date))
end_date = datetime.datetime.strptime(str(tmp.iloc[-1].trade_date), '%Y%m%d')
delta = datetime.timedelta(days=1)
end_date = (end_date - delta).strftime("%Y%m%d")
data = data.append(tmp)
if len(tmp) < 5000:
break
return data
def get_index_data(name, weekly):
today = datetime.date.today().strftime("%Y%m%d")
data = api.index_daily(ts_code=name)
if str(data.iloc[0].trade_date) != today:
print("today's index data is not ready, last trading day is %s" % data.iloc[0].trade_date)
return data
def get_stock_candidates():
today = datetime.date.today().strftime("%Y%m%d")
last_trading_day = api.daily().iloc[0]['trade_date']
if today != last_trading_day:
print("today's stock data is not ready, get stock candidates of %s" % last_trading_day)
df = api.daily_basic(trade_date=last_trading_day)
    # keep stocks with traded volume above 200k and close price between 5 and 50
candidates = df[(df.float_share * df.turnover_rate_f > 200000.) & (df.close > 5.) & (df.close < 50.)]["ts_code"].tolist()
return candidates
def get_code_name_map():
global code2name
global api
df = api.stock_basic()
for code, name in zip(df['ts_code'].to_list(), df['name'].to_list()):
code2name[code] = name
df = api.index_basic()
for code, name in zip(df['ts_code'].to_list(), df['name'].to_list()):
code2name[code] = name
def calculate_index(days, K):
global code2name
    # days: number of trading days to include, most recent first
last_day = api.daily().iloc[0]['trade_date']
print('last trade day: %s' % last_day)
open_cal = api.trade_cal(is_open='1', end_date=last_day)['cal_date'].to_list()[-1::-1][:days+20]
data = pd.DataFrame()
trade_date_ = []
open_ = []
high_ = []
low_ = []
close_ = []
vol_ = []
amount_ = []
top_K = []
r_top_K = []
w_top_K = []
for day in open_cal:
df = api.daily(trade_date=day)
df2 = api.daily_basic(trade_date=day)
df = df[df.ts_code.isin(df2.ts_code.tolist())]
df = df.sort_values('ts_code').reset_index()
df2 = df2.sort_values('ts_code').reset_index()
df['circ_mv'] = df2['circ_mv']
amount = df.circ_mv.sum()
df['weight'] = df['circ_mv'] / amount * 100
df['open'] = df['open'] * df['weight']
df['high'] = df['high'] * df['weight']
df['low'] = df['low'] * df['weight']
df['close'] = df['close'] * df['weight']
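        # Note (added): weights are in percent and sum to 100, so the summed
        # Open/High/Low/Close below is a free-float-cap weighted average price
        # on a x100 scale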
trade_date_.append(day)
open_.append(df.open.sum())
high_.append(df.high.sum())
low_.append(df.low.sum())
close_.append(df.close.sum())
vol_.append(df.vol.sum() / 10000.)
amount_.append(df.amount.sum() / 100000.)
cand = df.sort_values('weight', ascending=False).iloc[:K][['ts_code', 'weight']].to_numpy()
top_ = ["%s%+.3f%%" % (code2name[item[0]], item[1]) for item in cand]
w_top_K.append(top_)
cand = df.sort_values('close', ascending=False).iloc[:K][['ts_code', 'pct_chg']].to_numpy()
top_ = ["%s%+.2f%%" % (code2name[item[0]], item[1]) for item in cand]
top_K.append(top_)
cand = df.sort_values('close', ascending=True)[['ts_code', 'pct_chg']].to_numpy()
temp = []
count = 0
for item in cand:
if item[0] in code2name:
temp.append("%s%+.2f%%" %(code2name[item[0]], item[1]))
count += 1
if count >= K:
break
r_top_K.append(temp)
#time.sleep(0.5)
data['Date'] = trade_date_[-1::-1]
data['Open'] = open_[-1::-1]
data['High'] = high_[-1::-1]
data['Low'] = low_[-1::-1]
data['Close'] = close_[-1::-1]
data['Volume'] = vol_[-1::-1]
data['Amount'] = amount_[-1::-1]
bb = ta.volatility.BollingerBands(close=data['Close'], n=20, ndev=2)
data['BollHBand'] = bb.bollinger_hband()
data['BollLBand'] = bb.bollinger_lband()
data['BollMAvg'] = bb.bollinger_mavg()
return data.iloc[20:], (top_K, r_top_K, w_top_K)
def plot_index(df, top_K, savefile):
df['Date'] = df['Date'].astype('datetime64[ns]')
df = df.set_index('Date')
mc = mpf.make_marketcolors(up='r', down='g', ohlc='white')
style = mpf.make_mpf_style(base_mpf_style='nightclouds', marketcolors=mc)
wconfig = dict()
apdict = mpf.make_addplot(df[['BollHBand', 'BollLBand', 'BollMAvg']])
mpf.plot(df, type='ohlc', volume=True, style=style, title='Stock A Index', return_width_config=wconfig, ylabel='Index', figscale=1.5, tight_layout=True, addplot=apdict, scale_width_adjustment=dict(lines=0.7))
print(wconfig)
plt.savefig(savefile)
plt.close('all')
today = datetime.date.today().strftime("%Y%m%d")
trade_date = api.trade_cal(end_date=today, is_open='1')
print('trade date: %s' % trade_date.iloc[-1]['cal_date'])
print('open: %.2f' % df.iloc[-1]['Open'])
print('high: %.2f' % df.iloc[-1]['High'])
print('low: %.2f' % df.iloc[-1]['Low'])
print('close: %.2f' % df.iloc[-1]['Close'])
    print('volume: %.2f (unit: 10k lots)' % df.iloc[-1]['Volume'])
    print('amount: %.2f (unit: 100M CNY)' % df.iloc[-1]['Amount'])
print('percent change: %+.2f%%' % ((df.iloc[-1]['Close'] - df.iloc[-2]['Close']) / df.iloc[-2]['Close'] * 100.))
print("权重占比前十: %s" % ' '.join(top_K[2][0]))
print('指数占比前十: %s' % ' '.join(top_K[0][0]))
print('指数占比倒数前十: %s' % ' '.join(top_K[1][0]))
def add_preday_info(data):
new_data = data.reset_index(drop=True)
extend = pd.DataFrame()
pre_open = []
pre_high = []
pre_low = []
pre_change = []
pre_pct_chg = []
pre_vol = []
pre_amount = []
for idx in range(len(new_data) - 1):
pre_open.append(new_data.iloc[idx + 1].open)
pre_high.append(new_data.iloc[idx + 1].high)
pre_low.append(new_data.iloc[idx + 1].low)
pre_change.append(new_data.iloc[idx + 1].change)
pre_pct_chg.append(new_data.iloc[idx + 1].pct_chg)
pre_vol.append(new_data.iloc[idx + 1].vol)
pre_amount.append(new_data.iloc[idx + 1].amount)
pre_open.append(0.)
pre_high.append(0.)
pre_low.append(0.)
pre_change.append(0.)
pre_pct_chg.append(0.)
pre_vol.append(0.)
pre_amount.append(0.)
new_data['pre_open'] = pre_open
new_data['pre_high'] = pre_high
new_data['pre_low'] = pre_low
new_data['pre_change'] = pre_change
new_data['pre_pct_chg'] = pre_pct_chg
new_data['pre_vol'] = pre_vol
new_data['pre_amount'] = pre_amount
# fill predicting target
days = [[] for i in range(predict_days)]
for idx in range(predict_days - 1, len(new_data)):
for i in range(len(days)):
days[i].append(new_data.iloc[idx - i].pct_chg)
# fill invalid days with 0.
for i in range(len(days)):
for idx in range(predict_days - 1):
days[i].insert(0, 0.)
# extend pandas frame
for i in range(len(days)):
col = "pct_chg%d" % (i + 1)
new_data[col] = days[i]
return new_data
def add_ma_info(data):
new_data = data.reset_index(drop=True)
days = [5, 10, 15, 20, 30, 50, 100, 200]
# add simple ma info
cols = ["sma%d" % d for d in days]
for day, col in zip(days, cols):
new_data[col] = ta.utils.sma(new_data.iloc[-1::-1].close, periods=day)[-1::-1]
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
# add exponential ma info
# scaling = s / (1 + d), s is smoothing, typically 2, d is ma days
# ema(t) = v * scaling + ema(t - 1) * (1 - scaling), v is time(t)'s price
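    # Worked example (added, illustrative): with d = 5 and s = 2, scaling = 1/3,
    # so for prices 10, 11, 12:
    #   ema(0) = 10
    #   ema(1) = 11*(1/3) + 10*(2/3) ~ 10.33
    #   ema(2) = 12*(1/3) + 10.33*(2/3) ~ 10.89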
cols = ["ema%d" % d for d in days]
for day, col in zip(days, cols):
new_data[col] = ta.utils.ema(new_data.iloc[-1::-1].close, periods=day)[-1::-1]
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_rsi_info(data):
new_data = data.reset_index(drop=True)
'''
RSI = 100 - 100 / (1 + RS)
RS = average up / average down
average up = sum(up moves) / N
average downn = sum(down moves) / N
'''
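    # Worked example (added, illustrative): for moves +1, +2, -1 over N = 3,
    #   average up = (1 + 2) / 3 = 1, average down = 1 / 3,
    #   RS = 1 / (1/3) = 3, RSI = 100 - 100 / (1 + 3) = 75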
# calculate ups and downs
N = [2,3,4,5,6]
cols = ["rsi%d" % n for n in N]
for n, col in zip(N, cols):
new_data[col] = ta.momentum.rsi(new_data.iloc[-1::-1].close, n=n)[-1::-1]
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_crossover_info(data):
# this project is for short-swing trading, so I just
# track 5-day period ema crossover with 10-day, 15-day, 20-day,
# 30-day, 50-day, 100-day, 200-day,
# -1 for breakdowns, 0 for normal, 1 for breakouts
new_data = data.reset_index(drop=True)
tracking_day = 'ema5'
cross_day = ['ema10', 'ema15', 'ema20', 'ema30', 'ema50', 'ema100', 'ema200']
cross_cols = ['cross5-10', 'cross5-15', 'cross5-20', 'cross5-30', 'cross5-50', 'cross5-100', 'cross5-200']
for ema, cross_col in zip(cross_day, cross_cols):
prestatus = 0
if new_data.iloc[-2][tracking_day] >= new_data.iloc[-2][ema]:
prestatus = 1
else:
prestatus = -1
crossover = []
crossover.append(prestatus)
for idx in range(len(new_data) - 2, -1, -1):
if prestatus == -1:
if new_data.iloc[idx][tracking_day] >= new_data.iloc[idx][ema]:
crossover.append(1)
prestatus = 1
else:
crossover.append(0)
elif prestatus == 1:
if new_data.iloc[idx][tracking_day] >= new_data.iloc[idx][ema]:
crossover.append(0)
else:
crossover.append(-1)
prestatus = -1
new_data[cross_col] = crossover[-1::-1]
precross_cols = ['pre_cross5-10', 'pre_cross5-15', 'pre_cross5-20', 'pre_cross5-30', 'pre_cross5-50', 'pre_cross5-100', 'pre_cross5-200']
for cross_col, precross_col in zip(cross_cols, precross_cols):
vals = new_data.iloc[1:][cross_col].tolist()
vals.append(0)
new_data[precross_col] = vals
return new_data
def add_long_crossover_info(data):
# add 50-day 100-day crossover info, I think
# it is not important for short-swing trading,
# but sometimes it happens, just add this feature
new_data = data.reset_index(drop=True)
tracking_day = 'ema50'
cross_day = ['ema100']
cross_cols = ['longcross']
for ema, cross_col in zip(cross_day, cross_cols):
prestatus = 0
if new_data.iloc[-2][tracking_day] >= new_data.iloc[-2][ema]:
prestatus = 1
else:
prestatus = -1
crossover = []
crossover.append(prestatus)
for idx in range(len(new_data) - 2, -1, -1):
if prestatus == -1:
if new_data.iloc[idx][tracking_day] >= new_data.iloc[idx][ema]:
crossover.append(1)
prestatus = 1
else:
crossover.append(0)
elif prestatus == 1:
if new_data.iloc[idx][tracking_day] >= new_data.iloc[idx][ema]:
crossover.append(0)
else:
crossover.append(-1)
prestatus = -1
new_data[cross_col] = crossover[-1::-1]
precross_cols = ['pre_longcross']
for cross_col, precross_col in zip(cross_cols, precross_cols):
vals = new_data.iloc[1:][cross_col].tolist()
vals.append(0)
new_data[precross_col] = vals
return new_data
def add_bollinger_band_info(data):
new_data = data.reset_index(drop=True)
#N = [20, 14, 12, 10, 5, 4, 3, 2]
N = [20, 10]
for n in N:
bb = ta.volatility.BollingerBands(close=new_data.iloc[-1::-1].close, n=n, ndev=2)
col = 'boll_hband%d' % n
new_data[col] = bb.bollinger_hband()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data['pre_%s' % col] = temp
col = 'boll_lband%d' % n
new_data[col] = bb.bollinger_lband()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data['pre_%s' % col] = temp
col = 'boll_hband_ind%d' % n
new_data[col] = bb.bollinger_hband_indicator()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data['pre_%s' % col] = temp
col = 'boll_lband_ind%d' % n
new_data[col] = bb.bollinger_lband_indicator()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data['pre_%s' % col] = temp
col = 'boll_mavg%d' % n
new_data[col] = bb.bollinger_mavg()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data['pre_%s' % col] = temp
col = 'boll_pband%d' % n
new_data[col] = bb.bollinger_pband()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data['pre_%s' % col] = temp
col = 'boll_wband%d' % n
new_data[col] = bb.bollinger_wband()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data['pre_%s' % col] = temp
return new_data
def add_obv_info(data):
new_data = data.reset_index(drop=True)
obv = ta.volume.OnBalanceVolumeIndicator(close=new_data.iloc[-1::-1].close, volume=new_data.iloc[-1::-1].vol)
new_data['obv'] = obv.on_balance_volume()
temp = new_data.iloc[1:]['obv'].tolist()
temp.append(np.nan)
new_data['pre_obv'] = temp
return new_data
def add_adi_info(data):
new_data = data.reset_index(drop=True)
adi = ta.volume.AccDistIndexIndicator(high=new_data.iloc[-1::-1].high, low=new_data.iloc[-1::-1].low, close=new_data.iloc[-1::-1].close, volume=new_data.iloc[-1::-1].vol)
new_data['adi'] = adi.acc_dist_index()
temp = new_data.iloc[1:]['adi'].tolist()
temp.append(np.nan)
new_data['pre_adi'] = temp
return new_data
def add_cmf_info(data):
new_data = data.reset_index(drop=True)
days = [2,5,10,15,20,30]
for day in days:
cmf = ta.volume.ChaikinMoneyFlowIndicator(high=new_data.iloc[-1::-1].high, low=new_data.iloc[-1::-1].low, close=new_data.iloc[-1::-1].close, volume=new_data.iloc[-1::-1].vol, n=day)
col = "cmf%d" % day
new_data[col] = cmf.chaikin_money_flow()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_fi_info(data):
new_data = data.reset_index(drop=True)
days = [2,5,10,15,20,30]
for day in days:
fi = ta.volume.ForceIndexIndicator(close=new_data.iloc[-1::-1].close, volume=new_data.iloc[-1::-1].vol, n=day)
col = "fi%d" % day
new_data[col] = fi.force_index()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_eom_info(data):
new_data = data.reset_index(drop=True)
days = [2,5,10,15,20,30]
for day in days:
eom = ta.volume.EaseOfMovementIndicator(high=new_data.iloc[-1::-1].high, low=new_data.iloc[-1::-1].low, volume=new_data.iloc[-1::-1].vol, n=day)
col = "eom%d" % day
new_data[col] = eom.ease_of_movement()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
col = "sma_eom%d" % day
new_data[col] = eom.sma_ease_of_movement()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_vpt_info(data):
new_data = data.reset_index(drop=True)
vpt = ta.volume.VolumePriceTrendIndicator(close=new_data.iloc[-1::-1].close, volume=new_data.iloc[-1::-1].vol)
col = "vpt"
new_data[col] = vpt.volume_price_trend()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_nvi_info(data):
new_data = data.reset_index(drop=True)
nvi = ta.volume.NegativeVolumeIndexIndicator(close=new_data.iloc[-1::-1].close, volume=new_data.iloc[-1::-1].vol)
col = "nvi"
new_data[col] = nvi.negative_volume_index()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_vwap_info(data):
new_data = data.reset_index(drop=True)
days = [2,5,10,15,20,30]
for day in days:
vwap = ta.volume.VolumeWeightedAveragePrice(high=new_data.iloc[-1::-1].high, low=new_data.iloc[-1::-1].low, volume=new_data.iloc[-1::-1].vol, close=new_data.iloc[-1::-1].close, n=day)
col = "vwap%d" % day
new_data[col] = vwap.volume_weighted_average_price()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_atr_info(data):
new_data = data.reset_index(drop=True)
days = [2,5,10,15,20,30,14]
for day in days:
atr = ta.volatility.AverageTrueRange(high=new_data.iloc[-1::-1].high, low=new_data.iloc[-1::-1].low, close=new_data.iloc[-1::-1].close, n=day)
col = "atr%d" % day
new_data[col] = atr.average_true_range()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_kc_info(data):
new_data = data.reset_index(drop=True)
days = [2,5,10,15,20,30]
for day in days:
kc = ta.volatility.KeltnerChannel(high=new_data.iloc[-1::-1].high, low=new_data.iloc[-1::-1].low, close=new_data.iloc[-1::-1].close, n=day)
col = "kc_mband%d" % day
new_data[col] = kc.keltner_channel_mband()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
col = "kc_pband%d" % day
new_data[col] = kc.keltner_channel_pband()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
col = "kc_wband%d" % day
new_data[col] = kc.keltner_channel_wband()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_dc_info(data):
new_data = data.reset_index(drop=True)
days = [2,5,10,15,20,30]
for day in days:
dc = ta.volatility.DonchianChannel(high=new_data.iloc[-1::-1].high, low=new_data.iloc[-1::-1].low, close=new_data.iloc[-1::-1].close, n=day)
col = "dc_mband%d" % day
new_data[col] = dc.donchian_channel_mband()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
col = "dc_pband%d" % day
new_data[col] = dc.donchian_channel_pband()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
col = "dc_wband%d" % day
new_data[col] = dc.donchian_channel_wband()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_macd_info(data):
new_data = data.reset_index(drop=True)
macd = ta.trend.MACD(close=new_data.iloc[-1::-1].close, n_slow=15, n_fast=5, n_sign=7)
col = "macd"
new_data[col] = macd.macd()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
col = "macd_diff"
new_data[col] = macd.macd_diff()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
col = "macd_signal"
new_data[col] = macd.macd_signal()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_adx_info(data):
new_data = data.reset_index(drop=True)
days = [2,5,10,15,20,30,14,10]
for day in days:
adx = ta.trend.ADXIndicator(high=new_data.iloc[-1::-1].high, low=new_data.iloc[-1::-1].low, close=new_data.iloc[-1::-1].close, n=day)
col = "adx%d" % day
new_data[col] = adx.adx()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
col = "adx_neg%d" % day
new_data[col] = adx.adx_neg()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
col = "adx_pos%d" % day
new_data[col] = adx.adx_pos()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_vi_info(data):
new_data = data.reset_index(drop=True)
days = [2,5,10,15,20,30]
for day in days:
vi = ta.trend.VortexIndicator(high=new_data.iloc[-1::-1].high, low=new_data.iloc[-1::-1].low, close=new_data.iloc[-1::-1].close, n=day)
col = "vi_diff%d" % day
new_data[col] = vi.vortex_indicator_diff()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
col = "vi_neg%d" % day
new_data[col] = vi.vortex_indicator_neg()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
col = "vi_pos%d" % day
new_data[col] = vi.vortex_indicator_pos()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_trix_info(data):
new_data = data.reset_index(drop=True)
days = [2,5,10,15,20,30]
for day in days:
trix = ta.trend.TRIXIndicator(close=new_data.iloc[-1::-1].close, n=day)
col = "trix%d" % day
new_data[col] = trix.trix()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_mi_info(data):
new_data = data.reset_index(drop=True)
mi = ta.trend.MassIndex(high=new_data.iloc[-1::-1].high, low=new_data.iloc[-1::-1].low)
col = "mi"
new_data[col] = mi.mass_index()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_cci_info(data):
new_data = data.reset_index(drop=True)
days = [2,5,10,15,20,30]
for day in days:
cci = ta.trend.CCIIndicator(high=new_data.iloc[-1::-1].high, low=new_data.iloc[-1::-1].low, close=new_data.iloc[-1::-1].close, n=day)
col = "cci%d" % day
new_data[col] = cci.cci()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_dpo_info(data):
new_data = data.reset_index(drop=True)
days = [2,5,10,15,20,30]
for day in days:
dpo = ta.trend.DPOIndicator(close=new_data.iloc[-1::-1].close, n=day)
col = "dpo%d" % day
new_data[col] = dpo.dpo()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_kst_info(data):
new_data = data.reset_index(drop=True)
kst = ta.trend.KSTIndicator(close=new_data.iloc[-1::-1].close)
col = "kst"
new_data[col] = kst.kst()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
col = "kst_diff"
new_data[col] = kst.kst_diff()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
col = "kst_sig"
new_data[col] = kst.kst_sig()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_ichimoku_info(data):
new_data = data.reset_index(drop=True)
ichimoku = ta.trend.IchimokuIndicator(high=new_data.iloc[-1::-1].high, low=new_data.iloc[-1::-1].low)
col = "ichimoku_a"
new_data[col] = ichimoku.ichimoku_a()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
col = "ichimoku_b"
new_data[col] = ichimoku.ichimoku_b()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
col = "ichimoku_base"
new_data[col] = ichimoku.ichimoku_base_line()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
col = "ichimoku_conv"
new_data[col] = ichimoku.ichimoku_conversion_line()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_psar_info(data):
new_data = data.reset_index(drop=True)
psar = ta.trend.PSARIndicator(high=new_data.iloc[-1::-1].high, low=new_data.iloc[-1::-1].low, close=new_data.iloc[-1::-1].close)
col = "psar"
new_data[col] = psar.psar()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
col = "psar_down"
new_data[col] = psar.psar_down()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
col = "psar_down_idc"
new_data[col] = psar.psar_down_indicator()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
col = "psar_up"
new_data[col] = psar.psar_up()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
col = "psar_up_idc"
new_data[col] = psar.psar_up_indicator()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_tsi_info(data):
new_data = data.reset_index(drop=True)
tsi = ta.momentum.TSIIndicator(close=new_data.iloc[-1::-1].close)
col = "tsi"
new_data[col] = tsi.tsi()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_uo_info(data):
new_data = data.reset_index(drop=True)
uo = ta.momentum.UltimateOscillator(high=new_data.iloc[-1::-1].high, low=new_data.iloc[-1::-1].low, close=new_data.iloc[-1::-1].close)
col = "uo"
new_data[col] = uo.uo()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_so_info(data):
new_data = data.reset_index(drop=True)
days = [2,5,10,15,20,30]
for day in days:
so = ta.momentum.StochasticOscillator(high=new_data.iloc[-1::-1].high, low=new_data.iloc[-1::-1].low, close=new_data.iloc[-1::-1].close, n=day)
col = "stoch%d" % day
new_data[col] = so.stoch()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
col = "stoch_signal%d" % day
new_data[col] = so.stoch_signal()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_wr_info(data):
new_data = data.reset_index(drop=True)
days = [2,5,10,15,20,30]
for day in days:
wr = ta.momentum.WilliamsRIndicator(high=new_data.iloc[-1::-1].high, low=new_data.iloc[-1::-1].low, close=new_data.iloc[-1::-1].close, lbp=day)
col = "wr%d" % day
new_data[col] = wr.wr()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_ao_info(data):
new_data = data.reset_index(drop=True)
ao = ta.momentum.AwesomeOscillatorIndicator(high=new_data.iloc[-1::-1].high, low=new_data.iloc[-1::-1].low)
col = "ao"
new_data[col] = ao.ao()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_kama_info(data):
new_data = data.reset_index(drop=True)
kama = ta.momentum.KAMAIndicator(close=new_data.iloc[-1::-1].close)
col = "kama"
new_data[col] = kama.kama()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_roc_info(data):
new_data = data.reset_index(drop=True)
days = [2,5,10,15,20,30]
for day in days:
roc = ta.momentum.ROCIndicator(close=new_data.iloc[-1::-1].close, n=day)
col = "roc%d" % day
new_data[col] = roc.roc()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_dr_info(data):
new_data = data.reset_index(drop=True)
dr = ta.others.DailyReturnIndicator(close=new_data.iloc[-1::-1].close)
col = "dr"
new_data[col] = dr.daily_return()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_dlr_info(data):
new_data = data.reset_index(drop=True)
dlr = ta.others.DailyLogReturnIndicator(close=new_data.iloc[-1::-1].close)
col = "dlr"
new_data[col] = dlr.daily_log_return()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_cr_info(data):
new_data = data.reset_index(drop=True)
cr = ta.others.CumulativeReturnIndicator(close=new_data.iloc[-1::-1].close)
col = "cr"
new_data[col] = cr.cumulative_return()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
def add_mfi_info(data):
new_data = data.reset_index(drop=True)
mfi = ta.volume.MFIIndicator(high=new_data.iloc[-1::-1].high, low=new_data.iloc[-1::-1].low, close=new_data.iloc[-1::-1].close, volume=new_data.iloc[-1::-1].vol, n=5)
col = "mfi"
new_data[col] = mfi.money_flow_index()
temp = new_data.iloc[1:][col].tolist()
temp.append(np.nan)
new_data["pre_%s" % col] = temp
return new_data
# support and resistance
def add_sr_info(data):
new_data = data.reset_index(drop=True)
v = new_data['vol'].to_numpy()
p = new_data['close'].to_numpy()
h = new_data['high'].to_numpy()
l = new_data['low'].to_numpy()
avg = (p + h + l) / 3.
sr = [v * p for v, p in zip(v, avg)]
new_data['sup_res'] = sr
boll = ta.volatility.BollingerBands(close=new_data.iloc[-1::-1]['sup_res'])
new_data['sup_res_h'] = boll.bollinger_hband()
new_data['sup_res_l'] = boll.bollinger_lband()
new_data['sup_res_mavg'] = boll.bollinger_mavg()
new_data['sup_res_p'] = boll.bollinger_pband()
return new_data
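def _example_sup_res_signal(df):
    # Hedged sketch (added for illustration only): the volume-weighted typical
    # price proxy built in add_sr_info, and the "near the lower band"
    # condition (%B < 0.1) that plot_data later marks as a buy hint.
    # df is assumed to have high/low/close/vol columns, newest row first,
    # as elsewhere in this script.
    proxy = df['vol'] * (df['high'] + df['low'] + df['close']) / 3.
    boll = ta.volatility.BollingerBands(close=proxy.iloc[-1::-1])
    near_lower_band = boll.bollinger_pband() < 0.1
    return near_lower_band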
def add_features(data):
new_data = data.reset_index(drop=True)
# previous day info
#new_data = add_preday_info(new_data)
# moving average info
new_data = add_ma_info(new_data)
# rsi info
new_data = add_rsi_info(new_data)
# crossover of moving average
#new_data = add_crossover_info(new_data)
# long crossover of moving average
#new_data = add_long_crossover_info(new_data)
# bollinger bands
new_data = add_bollinger_band_info(new_data)
# on-balance volume
#new_data = add_obv_info(new_data)
# accumulation/distribution index
#new_data = add_adi_info(new_data)
# chaikin money flow
#new_data = add_cmf_info(new_data)
# force index
#new_data = add_fi_info(new_data)
# ease of movement
#new_data = add_eom_info(new_data)
# volume price trend
#new_data = add_vpt_info(new_data)
# negative volume index
#new_data = add_nvi_info(new_data)
# volume weighted average price
#new_data = add_vwap_info(new_data)
# average true range
#new_data = add_atr_info(new_data)
# keltner channel
#new_data = add_kc_info(new_data)
# donchian channel
#new_data = add_dc_info(new_data)
# moving average convergence divergence
#new_data = add_macd_info(new_data)
# average directional movement index
new_data = add_adx_info(new_data)
# vortex indicator
#new_data = add_vi_info(new_data)
# trix indicator
#new_data = add_trix_info(new_data)
# mass index
#new_data = add_mi_info(new_data)
# commodity channel index
#new_data = add_cci_info(new_data)
# detrended price oscillator
#new_data = add_dpo_info(new_data)
# kst oscillator
#new_data = add_kst_info(new_data)
# ichimoku kinko hyo
#new_data = add_ichimoku_info(new_data)
# parabolic stop and reverse
new_data = add_psar_info(new_data)
# true strength index
#new_data = add_tsi_info(new_data)
# ultimate oscillator
#new_data = add_uo_info(new_data)
# stochastic oscillator
#new_data = add_so_info(new_data)
# williams %R
#new_data = add_wr_info(new_data)
# awesome oscillator
#new_data = add_ao_info(new_data)
# kaufman's adaptive moving average
#new_data = add_kama_info(new_data)
# rate of change
#new_data = add_roc_info(new_data)
# daily return
#new_data = add_dr_info(new_data)
# daily log return
#new_data = add_dlr_info(new_data)
# cumulative return
#new_data = add_cr_info(new_data)
# money flow index
#new_data = add_mfi_info(new_data)
# support and resistance
new_data = add_sr_info(new_data)
return new_data
def plot_data(data, days, close, cols, filename, stock):
x = [i for i in range(days)]
count = 0
plt.figure()
fig, ax = plt.subplots(len(cols), figsize=[6.4 * 3, 4 * len(cols)])
if not isinstance(ax, np.ndarray):
ax = [ax]
for col in cols:
if 'ema' in col or 'boll_band' in col or 'pct_chg' in col:
vals2 = data.iloc[0:days].iloc[-1::-1][close].to_numpy()
vals3 = data.iloc[0:days].iloc[-1::-1]['ema5'].to_numpy()
sns.lineplot(x=x, y=vals3, ax=ax[count])
sns.lineplot(x=x, y=vals2, ax=ax[count])
elif 'vol' in col:
vals = data.iloc[0:days].iloc[-1::-1]['vol'].to_numpy()
sns.lineplot(x=x, y=vals, ax=ax[count])
elif 'gap' in col:
vals2 = data.iloc[0:days].iloc[-1::-1][close].to_numpy()
vals2 = StandardScaler().fit_transform(vals2.reshape(-1, 1)).flatten()
sns.lineplot(x=x, y=vals2, ax=ax[count])
vals = data.iloc[0:days].iloc[-1::-1]['vol'].to_numpy()
vals = StandardScaler().fit_transform(vals.reshape(-1, 1)).flatten()
sns.lineplot(x=x, y=vals, ax=ax[count])
max_ = max([np.amax(vals2), np.amax(vals)])
min_ = min([np.amin(vals2), np.amin(vals)])
elif 'boll_pband' in col or 'adx' in col or 'sup_res' in col:
vals1 = data.iloc[0:days].iloc[-1::-1][col].to_numpy()
max_ = np.amax(vals1)
min_ = np.amin(vals1)
sns.lineplot(x=x, y=vals1, ax=ax[count])
else:
vals1 = data.iloc[0:days].iloc[-1::-1][col].to_numpy()
max_ = np.amax(vals1)
min_ = np.amin(vals1)
vals2 = data.iloc[0:days].iloc[-1::-1][close].to_numpy()
vals3 = data.iloc[0:days].iloc[-1::-1]['ema5'].to_numpy()
sns.lineplot(x=x, y=vals1, ax=ax[count])
sns.lineplot(x=x, y=vals2, ax=ax[count])
sns.lineplot(x=x, y=vals3, ax=ax[count])
if 'cmf' in col:
vals = data.iloc[0:days].iloc[-1::-1]['adi'].to_numpy()
sns.lineplot(x=x, y=StandardScaler().fit_transform(vals.reshape(-1,1)).flatten(), ax=ax[count])
ax[count].legend([col, close, 'ema5', 'adi'], loc='upper left')
elif 'macd' in col:
vals = data.iloc[0:days].iloc[-1::-1]['macd_signal'].to_numpy()
sns.lineplot(x=x, y=StandardScaler().fit_transform(vals.reshape(-1,1)).flatten(), ax=ax[count])
ax[count].legend([col, close, 'ema5', 'macd_signal'], loc='upper left')
elif 'adx' in col:
day = col.replace('adx', '')
pos_ = data.iloc[0:days].iloc[-1::-1]['adx_pos%s' % day].to_numpy()
max_ = np.amax(pos_)
sns.lineplot(x=x, y=pos_, ax=ax[count])
neg_ = data.iloc[0:days].iloc[-1::-1]['adx_neg%s' % day].to_numpy()
min_ = np.amin(neg_)
sns.lineplot(x=x, y=neg_, ax=ax[count])
# scatter plot
y = [min_]
for i in range(1, days):
if pos_[i] > neg_[i] and pos_[i-1] < neg_[i-1]:
y.append(max_)
else:
y.append(min_)
sns.scatterplot(x=x, y=y, ax=ax[count])
adx_ = data.iloc[0:days].iloc[-1::-1][col].to_numpy()
adx_max = np.amax(adx_)
adx_min = np.amin(adx_)
adx_ = [(x - adx_min) / (adx_max - adx_min) for x in adx_]
y = []
for i in range(days):
if adx_[i] > 0.8:
y.append(min_ + (max_ - min_) / 4. * 3)
elif adx_[i] < 0.1:
y.append(min_ + (max_ - min_) / 4. * 1)
else:
y.append(min_)
sns.scatterplot(x=x, y=y, ax=ax[count])
ax[count].legend([col, '+DMI', '-DMI', 'buy', 'signal'], loc='upper left')
elif 'vi_diff' in col:
day = col.replace('vi_diff', '')
pos_ = data.iloc[0:days].iloc[-1::-1]['vi_pos%s' % day].to_numpy()
pos_ = StandardScaler().fit_transform(pos_.reshape(-1,1)).flatten()
sns.lineplot(x=x, y=pos_, ax=ax[count])
neg_ = data.iloc[0:days].iloc[-1::-1]['vi_neg%s' % day].to_numpy()
neg_ = StandardScaler().fit_transform(neg_.reshape(-1,1)).flatten()
sns.lineplot(x=x, y=neg_, ax=ax[count])
ax[count].legend([col, close, 'ema5', '+VI', '-VI'], loc='upper left')
elif 'kst' in col:
vals = data.iloc[0:days].iloc[-1::-1]['kst_diff'].to_numpy()
sns.lineplot(x=x, y=StandardScaler().fit_transform(vals.reshape(-1,1)).flatten(), ax=ax[count])
vals = data.iloc[0:days].iloc[-1::-1]['kst_sig'].to_numpy()
sns.lineplot(x=x, y=StandardScaler().fit_transform(vals.reshape(-1,1)).flatten(), ax=ax[count])
ax[count].legend([col, close, 'ema5', 'kst_diff', 'kst_sig'], loc='upper left')
elif 'stoch' in col:
day = col.replace('stoch', '')
vals = data.iloc[0:days].iloc[-1::-1]['stoch_signal%s' % day].to_numpy()
sns.lineplot(x=x, y=StandardScaler().fit_transform(vals.reshape(-1,1)).flatten(), ax=ax[count])
ax[count].legend([col, close, 'ema5', 'stoch_signal'], loc='upper left')
elif 'boll_wband' in col:
day = col.replace('boll_wband', '')
vals = data.iloc[0:days].iloc[-1::-1]['boll_mavg%s' % day].to_numpy()
sns.lineplot(x=x, y=StandardScaler().fit_transform(vals.reshape(-1,1)).flatten(), ax=ax[count])
ax[count].legend([col, close, 'ema5', 'boll_mavg'], loc='upper left')
elif 'boll_band' in col:
day = col.replace('boll_band', '')
vals = data.iloc[0:days].iloc[-1::-1]['boll_mavg%s' % day].to_numpy()
sns.lineplot(x=x, y=vals, ax=ax[count])
vals = data.iloc[0:days].iloc[-1::-1]['boll_hband%s' % day].to_numpy()
max_ = np.amax(vals)
sns.lineplot(x=x, y=vals, ax=ax[count])
vals = data.iloc[0:days].iloc[-1::-1]['boll_lband%s' % day].to_numpy()
min_ = np.amin(vals)
sns.lineplot(x=x, y=vals, ax=ax[count])
# gap
close_ = data.iloc[0:days].iloc[-1::-1]['close'].to_numpy()
high_ = data.iloc[0:days].iloc[-1::-1]['high'].to_numpy()
low_ = data.iloc[0:days].iloc[-1::-1]['low'].to_numpy()
# up gap
y = []
for i in range(days):
if (high_[i] - close_[i]) / (high_[i] - low_[i]) < 0.05:
y.append(max_)
else:
y.append(min_)
sns.scatterplot(x=x, y=y, ax=ax[count])
ax[count].legend(['ema5', close, 'boll_mavg', 'boll_hband', 'boll_lband', 'gap'], loc='upper left')
elif 'sup_res' in col:
vals = data.iloc[0:days].iloc[-1::-1]['sup_res_mavg'].to_numpy()
sns.lineplot(x=x, y=vals, ax=ax[count])
vals = data.iloc[0:days].iloc[-1::-1]['sup_res_h'].to_numpy()
max_ = np.amax(vals)
sns.lineplot(x=x, y=vals, ax=ax[count])
vals = data.iloc[0:days].iloc[-1::-1]['sup_res_l'].to_numpy()
min_ = np.amin(vals)
sns.lineplot(x=x, y=vals, ax=ax[count])
pband = data.iloc[0:days].iloc[-1::-1]['sup_res_p'].to_numpy()
y = []
for i in range(days):
if pband[i] < 0.1:
y.append(max_)
else:
y.append(min_)
sns.scatterplot(x=x, y=y, ax=ax[count])
ax[count].legend([col, 'mavg', 'hband', 'lband', 'buy'], loc='upper left')
elif 'boll_pband' in col:
day = col.replace('boll_pband', '')
vals = data.iloc[0:days].iloc[-1::-1]['boll_pband%s' % day].to_numpy()
y = []
for i in range(days):
if vals[i] > 0.8:
y.append(max_)
elif vals[i] < 0.1:
y.append((max_ + min_) / 2.)
else:
y.append(min_)
sns.scatterplot(x=x, y=y, ax=ax[count])
ax[count].legend([col, close, 'ema5', 'buy'], loc='upper left')
elif 'rsi' in col:
vals = data.iloc[0:days].iloc[-1::-1]['rsi2'].to_numpy()
y = []
for v in vals:
if v > 80.:
y.append(max_)
elif v < 10.:
y.append((max_ + min_) / 2.)
else:
y.append(min_)
sns.scatterplot(x=x, y=y, ax=ax[count])
ax[count].legend([col, close, 'ema5', 'buy'], loc='upper left')
elif 'mfi' in col:
vals = data.iloc[0:days].iloc[-1::-1]['mfi'].to_numpy()
print("mfi:", vals)
y = []
for v in vals:
if v > 80.:
y.append(1.)
elif v < 20.:
y.append(0.)
else:
y.append(-1.)
sns.scatterplot(x=x, y=y, ax=ax[count])
ax[count].legend([col, close, 'ema5', 'buy'], loc='upper left')
elif 'ema' in col:
vals = data.iloc[0:days].iloc[-1::-1]['ema10'].to_numpy()
sns.lineplot(x=x, y=vals, ax=ax[count])
vals = data.iloc[0:days].iloc[-1::-1]['ema15'].to_numpy()
sns.lineplot(x=x, y=vals, ax=ax[count])
vals = data.iloc[0:days].iloc[-1::-1]['ema20'].to_numpy()
sns.lineplot(x=x, y=vals, ax=ax[count])
vals = data.iloc[0:days].iloc[-1::-1]['ema30'].to_numpy()
sns.lineplot(x=x, y=vals, ax=ax[count])
# scatter break through
period = 10
ema_ = data.iloc[0:days].iloc[-1::-1]['ema%d' % period].to_numpy()
close_ = data.iloc[0:days].iloc[-1::-1]['close'].to_numpy()
min_ = np.amin(close_)
max_ = np.amax(close_)
y = [min_] * period
for i in range(period, days):
appended = False
if close_[i] > ema_[i] and close_[i-1] < ema_[i-1]:
y.append(max_)
appended = True
if not appended:
y.append(min_)
sns.scatterplot(x=x, y=y, ax=ax[count])
ax[count].legend(['ema5', close, 'ema10', 'ema15', 'ema20', 'ema30', 'break'], loc='upper left')
elif 'vol' in col:
vol_ = data.iloc[0:days].iloc[-1::-1]['vol'].to_numpy()
close_ = data.iloc[0:days].iloc[-1::-1]['close'].to_numpy()
max_ = np.amax(vol_)
min_ = np.amin(vol_)
y = [min_]
for i in range(days - 1):
if close_[i] > close_[i+1] and vol_[i] < vol_[i+1]:
y.append(max_)
elif close_[i] < close_[i+1] and vol_[i] > vol_[i+1]:
y.append((max_ + min_) / 2.)
else:
y.append(min_)
sns.scatterplot(x=x, y=y, ax=ax[count])
ax[count].legend(['vol', 'buy'], loc='upper left')
elif 'test' in col:
vol_ = data.iloc[0:days].iloc[-1::-1]['vol'].to_numpy()
max_ = np.amax(vol_)
min_ = np.amin(vol_)
y = []
for i in range(days):
if (vol_[i] - min_) / (max_ - min_) > 0.8:
y.append(max_)
else:
y.append(min_)
sns.scatterplot(x=x, y=y, ax=ax[count])
ax[count].legend(['vol', 'buy'], loc='upper left')
elif 'gap' in col:
close_ = data.iloc[0:days].iloc[-1::-1]['close'].to_numpy()
high_ = data.iloc[0:days].iloc[-1::-1]['high'].to_numpy()
low_ = data.iloc[0:days].iloc[-1::-1]['low'].to_numpy()
# up gap
y = []
for i in range(days):
if (high_[i] - close_[i]) / (high_[i] - low_[i]) < 0.05:
y.append(max_)
elif (close_[i] - low_[i]) / (high_[i] - low_[i]) < 0.05:
y.append((max_ + min_) / 2.)
else:
y.append(min_)
sns.scatterplot(x=x, y=y, ax=ax[count])
ax[count].legend(['close', 'vol', 'gap'], loc='upper left')
elif 'pct_chg' in col:
close_ = data.iloc[0:days].iloc[-1::-1]['close'].to_numpy()
change_ = data.iloc[0:days].iloc[-1::-1]['pct_chg'].to_numpy()
high_ = data.iloc[0:days].iloc[-1::-1]['high'].to_numpy()
low_ = data.iloc[0:days].iloc[-1::-1]['low'].to_numpy()
open_ = data.iloc[0:days].iloc[-1::-1]['open'].to_numpy()
max_ = np.amax(close_)
min_ = np.amin(close_)
y = []
for i in range(days):
if change_[i] > 8.:
y.append(max_)
elif abs(close_[i] - open_[i]) / (high_[i] - low_[i]) < 0.05:
y.append((max_ + min_) / 2.)
else:
y.append(min_)
sns.scatterplot(x=x, y=y, ax=ax[count])
ax[count].legend(['ema5', 'close', 'change'], loc='upper left')
else:
ax[count].legend([col, close, 'ema5'], loc='upper left')
count += 1
fig.suptitle(stock, fontsize=40, fontweight='normal')
plt.savefig(filename)
plt.close('all')
def clear_directory(dirname):
files = glob.glob("%s/*" % dirname)
for fname in files:
os.remove(fname)
def send_mail(mail_to):
clear_directory('mail')
cmd = 'zip -r mail/stock_png.zip pattern'
subprocess.call(cmd.split())
msg_from = "<EMAIL>"
passwd = "<PASSWORD>"
msg_to = mail_to
print("sending mail to %s ..." % msg_to)
today = datetime.datetime.today().strftime("%Y-%m-%d")
msg = MIMEMultipart()
msg['Subject'] = "%s stock trading plottings" % today
msg['From'] = msg_from
msg['To'] = msg_to
msg.attach(MIMEText(trading_note))
files = glob.glob("mail/stock_png.*")
for fname in files:
with open(fname, 'rb') as fp:
data = fp.read()
msg.attach(MIMEApplication(data, Name='pattern.zip'))
try:
s = smtplib.SMTP('smtp.qq.com', 587)
s.login(msg_from, passwd)
s.sendmail(msg_from, msg_to, msg.as_string())
print("sending mail success!")
except smtplib.SMTPException as e:
print("sending failed:", e)
finally:
s.quit()
'''
Filtering mechanism:
1. Within the last 5 days psar and close are converging and close is tending to cross above psar
'''
def filter_by_strategy1(data, days):
# normalize close
temp_close = data.iloc[0:days].iloc[-1::-1]['close'].to_numpy()
close = StandardScaler().fit_transform(temp_close.reshape(-1, 1)).flatten()
flag = False
temp = data.iloc[0:days].iloc[-1::-1]['psar'].to_numpy()
psar = StandardScaler().fit_transform(temp.reshape(-1, 1)).flatten()
close_ = close[-1:-6:-1]
psar_ = psar[-1:-6:-1]
# 1. psar is currently above close: take the average day-to-day change of (close - psar) over the last 5 days; if the current gap plus that average change is above 0, an upward crossover is likely
if psar_[0] > close_[0]:
dist = close_ - psar_
sum_ = 0.
for i in range(4):
sum_ += dist[i] - dist[i + 1]
avg = sum_ / 4.
if dist[0] + avg > 0.:
flag = True
# 2. psar is currently below close: if the upward crossover happened within the last 5 days, the trend is probably still intact
if psar_[0] < close_[0] and psar_[-1] > close_[-1]:
flag = True
if not flag:
return False
# Smooth adx_pos and adx_neg with 5-day means and take their distance over 5 days; if the distance
# shrinks versus the previous day on more than 1 day, the adx trend has not developed yet even though psar qualifies
temp_pos = data.iloc[0:days].iloc[-1::-1]['adx_pos14'].to_numpy()
pos = StandardScaler().fit_transform(temp_pos.reshape(-1, 1)).flatten()
pos = pos[-1::-1]
temp_neg = data.iloc[0:days].iloc[-1::-1]['adx_neg14'].to_numpy()
neg = StandardScaler().fit_transform(temp_neg.reshape(-1, 1)).flatten()
neg = neg[-1::-1]
# smooth with a 5-day mean
new_pos = []
new_neg = []
for i in range(5):
pos_ = np.sum(pos[i : i + 5]) / 5.
neg_ = np.sum(neg[i : i + 5]) / 5.
new_pos.append(pos_)
new_neg.append(neg_)
dist = np.array(new_pos) - np.array(new_neg)
count = 0
for i in range(4):
if dist[i] < dist[i + 1]:
count += 1
if count > 1:
return False
return True
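# The "approaching upward crossover" test used above for psar is repeated below for vwap, psar and
# kama in filter_by_strategy3. A minimal sketch of how it could be factored into one helper (the
# function name is my own, not part of the original script); it assumes the numpy import used
# throughout this file and that both inputs are standardised 1-D arrays ordered oldest-to-newest,
# exactly as prepared in these filters.
def likely_upward_cross(indicator, close, lookback=5):
    """True if close crossed above indicator within `lookback` days, or looks about to."""
    ind_ = indicator[-1:-(lookback + 1):-1]    # newest first, last `lookback` values
    close_ = close[-1:-(lookback + 1):-1]
    if ind_[0] > close_[0]:
        # indicator still above close: is the gap closing fast enough to cross soon?
        dist = close_ - ind_
        avg_step = np.mean(dist[:-1] - dist[1:])   # mean day-to-day change of the gap
        return dist[0] + avg_step > 0.
    # indicator already below close: did the crossover happen inside the lookback window?
    return ind_[0] < close_[0] and ind_[-1] > close_[-1]
# e.g. the psar check in filter_by_strategy1 is equivalent to: flag = likely_upward_cross(psar, close)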
'''
1. Filter with the 2-period RSI to look for buy points
'''
def filter_by_strategy2(data, days):
flag = False
rsi2 = data.iloc[0:days]['rsi2'].to_numpy()
max_ = np.amax(rsi2)
min_ = np.amin(rsi2)
if (data.iloc[0]['rsi2'] - min_) / (max_ - min_) < 0.1:
flag = True
if not flag:
return flag
return True
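# Many of the filters below repeat the same check of where today's value sits inside its recent
# min-max range (adx14, vol, close, boll_wband, ...). A small illustrative helper (not part of the
# original file); it assumes `values` is the newest-first numpy array taken from the dataframe,
# as in filter_by_strategy2 above.
def range_position(values):
    """Relative position of the newest value inside the window's min-max range, in [0, 1]."""
    lo, hi = np.amin(values), np.amax(values)
    if hi == lo:
        return 0.
    return (values[0] - lo) / (hi - lo)
# e.g. the rsi2 test above could read: flag = range_position(data.iloc[0:days]['rsi2'].to_numpy()) < 0.1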
'''
1. Use the 15-day vwap for overall trend control, then check for kama and psar upward crossovers
'''
def filter_by_strategy3(data, days):
# filter price < 5.0.
if data.iloc[0]['close'] < 5.:
return False
# filter volume < 200000.
if data.iloc[0]['vol'] < 200000.:
return False
# 1. boll_wband < 0.3
boll_wd = data.iloc[0:days]['boll_wband20'].to_numpy()
boll_max = np.amax(boll_wd)
boll_min = np.amin(boll_wd)
if (boll_wd[0] - boll_min) / (boll_max - boll_min) > 0.08:
return False
# normalize close
temp_close = data.iloc[0:days].iloc[-1::-1]['ema5'].to_numpy()
close = StandardScaler().fit_transform(temp_close.reshape(-1, 1)).flatten()
vwap_flag = False
temp = data.iloc[0:days].iloc[-1::-1]['vwap30'].to_numpy()
vwap = StandardScaler().fit_transform(temp.reshape(-1, 1)).flatten()
close_ = close[-1:-6:-1]
vwap_ = vwap[-1:-6:-1]
# 1. vwap is currently above close: take the average day-to-day change of (close - vwap) over the last 5 days; if the current gap plus that average change is above 0, an upward crossover is likely
if vwap_[0] > close_[0]:
dist = close_ - vwap_
sum_ = 0.
for i in range(4):
sum_ += dist[i] - dist[i + 1]
avg = sum_ / 4.
if dist[0] + avg > 0.:
vwap_flag = True
# 2. vwap is currently below close: if the upward crossover happened within the last 5 days, the trend is probably still intact
if vwap_[0] < close_[0] and vwap_[-1] > close_[-1]:
vwap_flag = True
psar_flag = False
temp = data.iloc[0:days].iloc[-1::-1]['psar'].to_numpy()
psar = StandardScaler().fit_transform(temp.reshape(-1, 1)).flatten()
close_ = close[-1:-6:-1]
psar_ = psar[-1:-6:-1]
# 1. psar is currently above close: take the average day-to-day change of (close - psar) over the last 5 days; if the current gap plus that average change is above 0, an upward crossover is likely
if psar_[0] > close_[0]:
dist = close_ - psar_
sum_ = 0.
for i in range(4):
sum_ += dist[i] - dist[i + 1]
avg = sum_ / 4.
if dist[0] + avg > 0.:
psar_flag = True
# 2. psar is currently below close: if the upward crossover happened within the last 5 days, the trend is probably still intact
if psar_[0] < close_[0] and psar_[-1] > close_[-1]:
psar_flag = True
kama_flag = False
temp = data.iloc[0:days].iloc[-1::-1]['kama'].to_numpy()
kama = StandardScaler().fit_transform(temp.reshape(-1, 1)).flatten()
close_ = close[-1:-6:-1]
kama_ = kama[-1:-6:-1]
# 1. kama is currently above close: take the average day-to-day change of (close - kama) over the last 5 days; if the current gap plus that average change is above 0, an upward crossover is likely
if kama_[0] > close_[0]:
dist = close_ - kama_
sum_ = 0.
for i in range(4):
sum_ += dist[i] - dist[i + 1]
avg = sum_ / 4.
if dist[0] + avg > 0.:
kama_flag = True
# 2. kama is currently below close: if the upward crossover happened within the last 5 days, the trend is probably still intact
if kama_[0] < close_[0] and kama_[-1] > close_[-1]:
kama_flag = True
if (not psar_flag) and (not kama_flag) and (not vwap_flag):
return False
# 1. close crossed above boll_mavg20 within the last 5 days
boll_flag = False
temp = data.iloc[0:days].iloc[-1::-1]['boll_mavg20'].to_numpy()
boll_mavg = StandardScaler().fit_transform(temp.reshape(-1, 1)).flatten()
close_ = close[-1:-6:-1]
boll_mavg_ = boll_mavg[-1:-6:-1]
if boll_mavg_[0] < close_[0] and boll_mavg_[-1] > close_[-1]:
boll_flag = True
if not boll_flag:
return False
return True
'''
1. Use the adx +DI/-DI crossover as the buy signal
'''
def filter_by_strategy4(data, days):
# filter by ADX
adx_flag = False
adx_pos = data.iloc[0:days]['adx_pos14'].to_numpy()
adx_pos = StandardScaler().fit_transform(adx_pos.reshape(-1, 1)).flatten()
adx_neg = data.iloc[0:days]['adx_neg14'].to_numpy()
adx_neg = StandardScaler().fit_transform(adx_neg.reshape(-1, 1)).flatten()
if adx_pos[0] > adx_neg[0] and adx_pos[1] < adx_neg[1]:
adx_flag = True
if not adx_flag:
return False
return True
'''
1. Use pband < 0.1 as the buy signal
'''
def filter_by_strategy5(data, days):
# filter by boll_pband20
pband = data.iloc[0:days]['boll_pband20'].to_numpy()
if pband[0] > 0.1:
return False
return True
'''
1. Use adx < 0.1 as the buy signal
'''
def filter_by_strategy6(data, days):
# filter by adx14
adx = data.iloc[0:days]['adx14'].to_numpy()
max_ = np.amax(adx)
min_ = np.amin(adx)
if (adx[0] - min_) / (max_ - min_) > 0.1:
return False
# filter by price: skip stocks whose current price sits high within its recent range
price = data.iloc[0:days]['close'].to_numpy()
max_ = np.amax(price)
min_ = np.amin(price)
if (price[0] - min_) / (max_ - min_) > 0.1:
return False
return True
'''
1. Use adx < 0.1 together with a psar upward crossover as the buy signal
'''
def filter_by_strategy7(data, days):
# filter by adx14
adx = data.iloc[0:days]['adx14'].to_numpy()
max_ = np.amax(adx)
min_ = np.amin(adx)
if (adx[0] - min_) / (max_ - min_) > 0.1:
return False
# filter by psar
psar_flag = False
psar = data.iloc[0:days]['psar'].to_numpy()
psar = StandardScaler().fit_transform(psar.reshape(-1, 1)).flatten()
close = data.iloc[0:days]['close'].to_numpy()
close = StandardScaler().fit_transform(close.reshape(-1, 1)).flatten()
if close[0] > psar[0] and close[1] < psar[1]:
psar_flag = True
if not psar_flag:
return False
return True
'''
1. Use adx < 0.1 and pband < 0.1 as the buy signal
'''
def filter_by_strategy8(data, days):
# filter by adx14
adx = data.iloc[0:days]['adx14'].to_numpy()
max_ = np.amax(adx)
min_ = np.amin(adx)
if (adx[0] - min_) / (max_ - min_) > 0.1:
return False
# filter by boll_pband20
pband = data.iloc[0:days]['boll_pband20'].to_numpy()
if pband[0] > 0.1:
return False
return True
'''
1. Use pband < 0.1 and vol < 0.1 as the buy signal (short-term)
'''
def filter_by_strategy9(data, days):
# filter by vol
vol = data.iloc[0:days]['vol'].to_numpy()
max_ = np.amax(vol)
min_ = np.amin(vol)
if (vol[0] - min_) / (max_ - min_) > 0.1:
return False
# filter by boll_pband20
pband = data.iloc[0:days]['boll_pband20'].to_numpy()
if pband[0] > 0.1:
return False
return True
'''
1. Use adx < 0.1 and vol < 0.1 as the buy signal (medium-term)
'''
def filter_by_strategy10(data, days):
# filter by vol
vol = data.iloc[0:days]['vol'].to_numpy()
max_ = np.amax(vol)
min_ = np.amin(vol)
if (vol[0] - min_) / (max_ - min_) > 0.1:
return False
# filter by adx14
adx = data.iloc[0:days]['adx14'].to_numpy()
max_ = np.amax(adx)
min_ = np.amin(adx)
if (adx[0] - min_) / (max_ - min_) > 0.1:
return False
return True
'''
1. Use falling price with rising volume as the signal
'''
def filter_by_strategy11(data, days):
# filter by vol
vol = data.iloc[0:days]['vol'].to_numpy()
close = data.iloc[0:days]['close'].to_numpy()
if not (vol[0] > vol[1] and close[0] < close[1]):
return False
# filter by adx14
adx = data.iloc[0:days]['adx14'].to_numpy()
max_ = np.amax(adx)
min_ = np.amin(adx)
if (adx[0] - min_) / (max_ - min_) > 0.1:
return False
return True
'''
1. Use rising price with falling volume as the signal
'''
def filter_by_strategy12(data, days):
# filter by vol
vol = data.iloc[0:days]['vol'].to_numpy()
close = data.iloc[0:days]['close'].to_numpy()
if not (vol[0] < vol[1] and close[0] > close[1]):
return False
# no point if close is already too high within its recent range
max_ = np.amax(close)
min_ = np.amin(close)
if (close[0] - min_) / (max_ - min_) > 0.5:
return False
# normalize close
temp_close = data.iloc[0:days]['close'].to_numpy()
close_ = StandardScaler().fit_transform(temp_close.reshape(-1, 1)).flatten()
temp = data.iloc[0:days]['psar'].to_numpy()
psar_ = StandardScaler().fit_transform(temp.reshape(-1, 1)).flatten()
if close_[0] < psar_[0]:
return False
return True
'''
1. Use rising price with falling volume as the signal
'''
def filter_by_strategy13(data, days):
# filter by vol
vol = data.iloc[0:days]['vol'].to_numpy()
close = data.iloc[0:days]['close'].to_numpy()
if not (vol[0] < vol[1] and close[0] > close[1]):
return False
# no point if close is already too high within its recent range
max_ = np.amax(close)
min_ =
|
np.amin(close)
|
numpy.amin
|
import h5py
import numpy as np
from shutil import copyfile
copyfile("baseline_reg.h5", "pretrained_pruned.h5") # create pretrained.h5 using datastructure from dummy.h5
bl = h5py.File("baseline_reg.h5", 'r')
#dummy = h5py.File("dummy.h5", 'r')
pretrained = h5py.File("pretrained_pruned.h5", 'r+')
normalisation="l2"
channel_threshold=0.5
p_c1=-1
p_c2=1
p_c3=1.00
p_c4=1.00
p_c5=1.00
p_c6=1.00
p_d1=1.00
p_d2=1.00
p_d3=-1
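# Note on the thresholds below: each layer keeps only the tiles whose averaged (l1 or l2) tile norm
# exceeds the corresponding p_* value. With the l2 normalisation used here the norms are
# non-negative, so p_c1 = -1 and p_d3 = -1 keep every tile of conv layer 1 and dense layer 3,
# while a threshold of 1.00 drops any tile whose averaged norm is at most 1.0.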
# conv layer 1
bl_w1 = bl["model_weights"]["binary_conv_1"]["binary_conv_1"]["Variable_1:0"]
#bl_rand_map = bl["model_weights"]["binary_conv_1"]["binary_conv_1"]["rand_map:0"]
bl_pruning_mask = bl["model_weights"]["binary_conv_1"]["binary_conv_1"]["pruning_mask:0"]
bl_gamma = bl["model_weights"]["binary_conv_1"]["binary_conv_1"]["Variable:0"]
zero_fill = np.zeros(np.shape(np.array(bl_w1)))
pret_w1 = pretrained["model_weights"]["binary_conv_1"]["binary_conv_1"]["Variable_1:0"]
#pret_rand_map = pretrained["model_weights"]["binary_conv_1"]["binary_conv_1"]["rand_map:0"]
pret_pruning_mask = pretrained["model_weights"]["binary_conv_1"]["binary_conv_1"]["pruning_mask:0"]
p_gamma = pretrained["model_weights"]["binary_conv_1"]["binary_conv_1"]["Variable:0"]
pret_w1[...] = np.array(bl_w1)
#pret_rand_map[...] = np.array(bl_rand_map)
p_gamma[...] = np.array(bl_gamma)
weight = np.array(bl_w1)
TRC = 1
TM = 1
TN = 2
Tsize_RC = np.shape(weight)[0]//TRC  # integer division so the tile sizes can be used as slice indices
Tsize_M = np.shape(weight)[2]//TM
Tsize_N = np.shape(weight)[3]//TN
one_tile = np.zeros([Tsize_RC,Tsize_RC,Tsize_M,Tsize_N])
# set up pruning_mask
#mean=np.mean(abs(weight),axis=3)
norm=one_tile
if normalisation=="l1":
for n in range(TN):
for m in range(TM):
for rc in range(TRC):
norm = norm + weight[(rc*Tsize_RC):((rc+1)*Tsize_RC),(rc*Tsize_RC):((rc+1)*Tsize_RC),(m*Tsize_M):((m+1)*Tsize_M),(n*Tsize_N):((n+1)*Tsize_N)]
norm = norm / (TRC*TRC*TM*TN)
elif normalisation=="l2":
for n in range(TN):
for m in range(TM):
for rc in range(TRC):
norm = norm + weight[(rc*Tsize_RC):((rc+1)*Tsize_RC),(rc*Tsize_RC):((rc+1)*Tsize_RC),(m*Tsize_M):((m+1)*Tsize_M),(n*Tsize_N):((n+1)*Tsize_N)]**2
norm = norm / (TRC*TRC*TM*TN)
norm = np.sqrt(norm)
norm=np.reshape(norm, [-1,np.shape(norm)[3]])
pruning_mask = np.greater(norm, p_c1)
pret_pruning_mask[...] = np.array(pruning_mask,dtype=float)
print(np.sum(np.array(pret_pruning_mask)))
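# The tiled-norm computation above is repeated verbatim for each conv layer below, with only the
# tiling factors and threshold changing. A sketch of how it could be factored out (helper name and
# signature are my own, not part of this script); it mirrors the original exactly, including the
# l1 branch summing raw (not absolute) weights, and relies on the module-level numpy import.
def conv_tile_pruning_mask(weight, TRC, TM, TN, threshold, normalisation="l2"):
    """Per-tile l1/l2 norm of a 4-D conv weight tensor, flattened and compared to a threshold."""
    Tsize_RC = weight.shape[0] // TRC
    Tsize_M = weight.shape[2] // TM
    Tsize_N = weight.shape[3] // TN
    norm = np.zeros([Tsize_RC, Tsize_RC, Tsize_M, Tsize_N])
    for n in range(TN):
        for m in range(TM):
            for rc in range(TRC):
                tile = weight[(rc*Tsize_RC):((rc+1)*Tsize_RC),
                              (rc*Tsize_RC):((rc+1)*Tsize_RC),
                              (m*Tsize_M):((m+1)*Tsize_M),
                              (n*Tsize_N):((n+1)*Tsize_N)]
                norm = norm + (tile if normalisation == "l1" else tile**2)
    norm = norm / (TRC*TRC*TM*TN)
    if normalisation == "l2":
        norm = np.sqrt(norm)
    norm = np.reshape(norm, [-1, norm.shape[3]])
    return np.greater(norm, threshold)
# e.g. for conv layer 2 below: pruning_mask = conv_tile_pruning_mask(weight, 1, 8, 8, p_c2, normalisation)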
# conv layer 2
bl_w1 = bl["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_1:0"]
#bl_w2 = bl["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_2:0"]
#bl_w3 = bl["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_3:0"]
#bl_w4 = bl["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_4:0"]
#bl_rand_map = bl["model_weights"]["binary_conv_2"]["binary_conv_2"]["rand_map:0"]
bl_pruning_mask = bl["model_weights"]["binary_conv_2"]["binary_conv_2"]["pruning_mask:0"]
bl_gamma = bl["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable:0"]
bl_means = bl["model_weights"]["residual_sign_1"]["residual_sign_1"]["means:0"]
zero_fill = np.zeros(np.shape(np.array(bl_w1)))
pret_w1 = pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_1:0"]
#pret_w2 = pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_2:0"]
#pret_w3 = pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_3:0"]
#pret_w4 = pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_4:0"]
#pret_rand_map = pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["rand_map:0"]
pret_pruning_mask = pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["pruning_mask:0"]
p_gamma = pretrained["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable:0"]
pret_means = pretrained["model_weights"]["residual_sign_1"]["residual_sign_1"]["means:0"]
pret_w1[...] = np.array(bl_w1)
#pret_w2[...] = zero_fill
#pret_w3[...] = zero_fill
#pret_w4[...] = -np.array(bl_w1)
#pret_rand_map[...] = np.array(bl_rand_map)
p_gamma[...] = np.array(bl_gamma)
pret_means[...] = np.array(bl_means)
weight = np.array(bl_w1)
TRC = 1
TM = 8
TN = 8
Tsize_RC = np.shape(weight)[0]//TRC
Tsize_M = np.shape(weight)[2]//TM
Tsize_N = np.shape(weight)[3]//TN
one_tile = np.zeros([Tsize_RC,Tsize_RC,Tsize_M,Tsize_N])
# set up pruning_mask
#mean=np.mean(abs(weight),axis=3)
norm=one_tile
if normalisation=="l1":
for n in range(TN):
for m in range(TM):
for rc in range(TRC):
norm = norm + weight[(rc*Tsize_RC):((rc+1)*Tsize_RC),(rc*Tsize_RC):((rc+1)*Tsize_RC),(m*Tsize_M):((m+1)*Tsize_M),(n*Tsize_N):((n+1)*Tsize_N)]
norm = norm / (TRC*TRC*TM*TN)
elif normalisation=="l2":
for n in range(TN):
for m in range(TM):
for rc in range(TRC):
norm = norm + weight[(rc*Tsize_RC):((rc+1)*Tsize_RC),(rc*Tsize_RC):((rc+1)*Tsize_RC),(m*Tsize_M):((m+1)*Tsize_M),(n*Tsize_N):((n+1)*Tsize_N)]**2
norm = norm / (TRC*TRC*TM*TN)
norm = np.sqrt(norm)
norm=np.reshape(norm, [-1,np.shape(norm)[3]])
pruning_mask = np.greater(norm, p_c2)
pret_pruning_mask[...] = np.array(pruning_mask,dtype=float)
print(np.sum(np.array(pret_pruning_mask)))
# conv layer 3
bl_w1 = bl["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_1:0"]
#bl_w2 = bl["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_2:0"]
#bl_w3 = bl["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_3:0"]
#bl_w4 = bl["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_4:0"]
#bl_rand_map = bl["model_weights"]["binary_conv_3"]["binary_conv_3"]["rand_map:0"]
bl_pruning_mask = bl["model_weights"]["binary_conv_3"]["binary_conv_3"]["pruning_mask:0"]
bl_gamma = bl["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable:0"]
bl_means = bl["model_weights"]["residual_sign_2"]["residual_sign_2"]["means:0"]
zero_fill = np.zeros(np.shape(np.array(bl_w1)))
pret_w1 = pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_1:0"]
#pret_w2 = pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_2:0"]
#pret_w3 = pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_3:0"]
#pret_w4 = pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_4:0"]
#pret_rand_map = pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["rand_map:0"]
pret_pruning_mask = pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["pruning_mask:0"]
p_gamma = pretrained["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable:0"]
pret_means = pretrained["model_weights"]["residual_sign_2"]["residual_sign_2"]["means:0"]
pret_w1[...] = np.array(bl_w1)
#pret_w2[...] = zero_fill
#pret_w3[...] = zero_fill
#pret_w4[...] = -np.array(bl_w1)
#pret_rand_map[...] = np.array(bl_rand_map)
p_gamma[...] = np.array(bl_gamma)
pret_means[...] = np.array(bl_means)
weight = np.array(bl_w1)
TRC = 1
TM = 8
TN = 8
Tsize_RC = np.shape(weight)[0]//TRC
Tsize_M = np.shape(weight)[2]//TM
Tsize_N = np.shape(weight)[3]//TN
one_tile = np.zeros([Tsize_RC,Tsize_RC,Tsize_M,Tsize_N])
# set up pruning_mask
#mean=np.mean(abs(weight),axis=3)
norm=one_tile
if normalisation=="l1":
for n in range(TN):
for m in range(TM):
for rc in range(TRC):
norm = norm + weight[(rc*Tsize_RC):((rc+1)*Tsize_RC),(rc*Tsize_RC):((rc+1)*Tsize_RC),(m*Tsize_M):((m+1)*Tsize_M),(n*Tsize_N):((n+1)*Tsize_N)]
norm = norm / (TRC*TRC*TM*TN)
elif normalisation=="l2":
for n in range(TN):
for m in range(TM):
for rc in range(TRC):
norm = norm + weight[(rc*Tsize_RC):((rc+1)*Tsize_RC),(rc*Tsize_RC):((rc+1)*Tsize_RC),(m*Tsize_M):((m+1)*Tsize_M),(n*Tsize_N):((n+1)*Tsize_N)]**2
norm = norm / (TRC*TRC*TM*TN)
norm = np.sqrt(norm)
norm=np.reshape(norm, [-1,np.shape(norm)[3]])
pruning_mask = np.greater(norm, p_c3)
pret_pruning_mask[...] = np.array(pruning_mask,dtype=float)
print(np.sum(np.array(pret_pruning_mask)))
# conv layer 4
bl_w1 = bl["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_1:0"]
#bl_w2 = bl["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_2:0"]
#bl_w3 = bl["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_3:0"]
#bl_w4 = bl["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_4:0"]
#bl_rand_map = bl["model_weights"]["binary_conv_4"]["binary_conv_4"]["rand_map:0"]
bl_pruning_mask = bl["model_weights"]["binary_conv_4"]["binary_conv_4"]["pruning_mask:0"]
bl_gamma = bl["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable:0"]
bl_means = bl["model_weights"]["residual_sign_3"]["residual_sign_3"]["means:0"]
zero_fill = np.zeros(np.shape(np.array(bl_w1)))
pret_w1 = pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_1:0"]
#pret_w2 = pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_2:0"]
#pret_w3 = pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_3:0"]
#pret_w4 = pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_4:0"]
#pret_rand_map = pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["rand_map:0"]
pret_pruning_mask = pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["pruning_mask:0"]
p_gamma = pretrained["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable:0"]
pret_means = pretrained["model_weights"]["residual_sign_3"]["residual_sign_3"]["means:0"]
pret_w1[...] = np.array(bl_w1)
#pret_w2[...] = zero_fill
#pret_w3[...] = zero_fill
#pret_w4[...] = -np.array(bl_w1)
#pret_rand_map[...] = np.array(bl_rand_map)
p_gamma[...] = np.array(bl_gamma)
pret_means[...] = np.array(bl_means)
weight = np.array(bl_w1)
TRC = 1
TM = 8
TN = 8
Tsize_RC = np.shape(weight)[0]//TRC
Tsize_M = np.shape(weight)[2]//TM
Tsize_N = np.shape(weight)[3]//TN
one_tile = np.zeros([Tsize_RC,Tsize_RC,Tsize_M,Tsize_N])
# set up pruning_mask
#mean=np.mean(abs(weight),axis=3)
norm=one_tile
if normalisation=="l1":
for n in range(TN):
for m in range(TM):
for rc in range(TRC):
norm = norm + weight[(rc*Tsize_RC):((rc+1)*Tsize_RC),(rc*Tsize_RC):((rc+1)*Tsize_RC),(m*Tsize_M):((m+1)*Tsize_M),(n*Tsize_N):((n+1)*Tsize_N)]
norm = norm / (TRC*TRC*TM*TN)
elif normalisation=="l2":
for n in range(TN):
for m in range(TM):
for rc in range(TRC):
norm = norm + weight[(rc*Tsize_RC):((rc+1)*Tsize_RC),(rc*Tsize_RC):((rc+1)*Tsize_RC),(m*Tsize_M):((m+1)*Tsize_M),(n*Tsize_N):((n+1)*Tsize_N)]**2
norm = norm / (TRC*TRC*TM*TN)
norm = np.sqrt(norm)
norm=np.reshape(norm, [-1,np.shape(norm)[3]])
pruning_mask = np.greater(norm, p_c4)
pret_pruning_mask[...] = np.array(pruning_mask,dtype=float)
print(np.sum(np.array(pret_pruning_mask)))
# conv layer 5
bl_w1 = bl["model_weights"]["binary_conv_5"]["binary_conv_5"]["Variable_1:0"]
#bl_w2 = bl["model_weights"]["binary_conv_5"]["binary_conv_5"]["Variable_2:0"]
#bl_w3 = bl["model_weights"]["binary_conv_5"]["binary_conv_5"]["Variable_3:0"]
#bl_w4 = bl["model_weights"]["binary_conv_5"]["binary_conv_5"]["Variable_4:0"]
#bl_rand_map = bl["model_weights"]["binary_conv_5"]["binary_conv_5"]["rand_map:0"]
bl_pruning_mask = bl["model_weights"]["binary_conv_5"]["binary_conv_5"]["pruning_mask:0"]
bl_gamma = bl["model_weights"]["binary_conv_5"]["binary_conv_5"]["Variable:0"]
bl_means = bl["model_weights"]["residual_sign_4"]["residual_sign_4"]["means:0"]
zero_fill = np.zeros(np.shape(np.array(bl_w1)))
pret_w1 = pretrained["model_weights"]["binary_conv_5"]["binary_conv_5"]["Variable_1:0"]
#pret_w2 = pretrained["model_weights"]["binary_conv_5"]["binary_conv_5"]["Variable_2:0"]
#pret_w3 = pretrained["model_weights"]["binary_conv_5"]["binary_conv_5"]["Variable_3:0"]
#pret_w4 = pretrained["model_weights"]["binary_conv_5"]["binary_conv_5"]["Variable_4:0"]
#pret_rand_map = pretrained["model_weights"]["binary_conv_5"]["binary_conv_5"]["rand_map:0"]
pret_pruning_mask = pretrained["model_weights"]["binary_conv_5"]["binary_conv_5"]["pruning_mask:0"]
p_gamma = pretrained["model_weights"]["binary_conv_5"]["binary_conv_5"]["Variable:0"]
pret_means = pretrained["model_weights"]["residual_sign_4"]["residual_sign_4"]["means:0"]
pret_w1[...] = np.array(bl_w1)
#pret_w2[...] = zero_fill
#pret_w3[...] = zero_fill
#pret_w4[...] = -np.array(bl_w1)
#pret_rand_map[...] = np.array(bl_rand_map)
p_gamma[...] = np.array(bl_gamma)
pret_means[...] = np.array(bl_means)
weight = np.array(bl_w1)
TRC = 1
TM = 8
TN = 8
Tsize_RC = np.shape(weight)[0]//TRC
Tsize_M = np.shape(weight)[2]//TM
Tsize_N = np.shape(weight)[3]//TN
one_tile = np.zeros([Tsize_RC,Tsize_RC,Tsize_M,Tsize_N])
# set up pruning_mask
#mean=np.mean(abs(weight),axis=3)
norm=one_tile
if normalisation=="l1":
for n in range(TN):
for m in range(TM):
for rc in range(TRC):
norm = norm + weight[(rc*Tsize_RC):((rc+1)*Tsize_RC),(rc*Tsize_RC):((rc+1)*Tsize_RC),(m*Tsize_M):((m+1)*Tsize_M),(n*Tsize_N):((n+1)*Tsize_N)]
norm = norm / (TRC*TRC*TM*TN)
elif normalisation=="l2":
for n in range(TN):
for m in range(TM):
for rc in range(TRC):
norm = norm + weight[(rc*Tsize_RC):((rc+1)*Tsize_RC),(rc*Tsize_RC):((rc+1)*Tsize_RC),(m*Tsize_M):((m+1)*Tsize_M),(n*Tsize_N):((n+1)*Tsize_N)]**2
norm = norm / (TRC*TRC*TM*TN)
norm = np.sqrt(norm)
norm=np.reshape(norm, [-1,np.shape(norm)[3]])
pruning_mask = np.greater(norm, p_c5)
pret_pruning_mask[...] = np.array(pruning_mask,dtype=float)
print(np.sum(np.array(pret_pruning_mask)))
# conv layer 6
bl_w1 = bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_1:0"]
#bl_w2 = bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_2:0"]
#bl_w3 = bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_3:0"]
#bl_w4 = bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_4:0"]
#bl_rand_map = bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["rand_map:0"]
bl_pruning_mask = bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["pruning_mask:0"]
bl_gamma = bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable:0"]
bl_means = bl["model_weights"]["residual_sign_5"]["residual_sign_5"]["means:0"]
zero_fill = np.zeros(np.shape(np.array(bl_w1)))
pret_w1 = pretrained["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_1:0"]
#pret_w2 = pretrained["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_2:0"]
#pret_w3 = pretrained["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_3:0"]
#pret_w4 = pretrained["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_4:0"]
#pret_rand_map = pretrained["model_weights"]["binary_conv_6"]["binary_conv_6"]["rand_map:0"]
pret_pruning_mask = pretrained["model_weights"]["binary_conv_6"]["binary_conv_6"]["pruning_mask:0"]
p_gamma = pretrained["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable:0"]
pret_means = pretrained["model_weights"]["residual_sign_5"]["residual_sign_5"]["means:0"]
pret_w1[...] = np.array(bl_w1)
#pret_w2[...] = zero_fill
#pret_w3[...] = zero_fill
#pret_w4[...] = -np.array(bl_w1)
#pret_rand_map[...] = np.array(bl_rand_map)
p_gamma[...] = np.array(bl_gamma)
pret_means[...] = np.array(bl_means)
weight = np.array(bl_w1)
TRC = 1
TM = 8
TN = 8
Tsize_RC = np.shape(weight)[0]//TRC
Tsize_M = np.shape(weight)[2]//TM
Tsize_N = np.shape(weight)[3]//TN
one_tile = np.zeros([Tsize_RC,Tsize_RC,Tsize_M,Tsize_N])
# set up pruning_mask
#mean=np.mean(abs(weight),axis=3)
norm=one_tile
if normalisation=="l1":
for n in range(TN):
for m in range(TM):
for rc in range(TRC):
norm = norm + weight[(rc*Tsize_RC):((rc+1)*Tsize_RC),(rc*Tsize_RC):((rc+1)*Tsize_RC),(m*Tsize_M):((m+1)*Tsize_M),(n*Tsize_N):((n+1)*Tsize_N)]
norm = norm / (TRC*TRC*TM*TN)
elif normalisation=="l2":
for n in range(TN):
for m in range(TM):
for rc in range(TRC):
norm = norm + weight[(rc*Tsize_RC):((rc+1)*Tsize_RC),(rc*Tsize_RC):((rc+1)*Tsize_RC),(m*Tsize_M):((m+1)*Tsize_M),(n*Tsize_N):((n+1)*Tsize_N)]**2
norm = norm / (TRC*TRC*TM*TN)
norm = np.sqrt(norm)
norm=np.reshape(norm, [-1,np.shape(norm)[3]])
pruning_mask = np.greater(norm, p_c6)
pret_pruning_mask[...] = np.array(pruning_mask,dtype=float)
print(np.sum(np.array(pret_pruning_mask)))
# dense layer 1
bl_w1 = bl["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable_1:0"]
#bl_w2 = bl["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable_2:0"]
#bl_w3 = bl["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable_3:0"]
#bl_w4 = bl["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable_4:0"]
#bl_rand_map = bl["model_weights"]["binary_dense_1"]["binary_dense_1"]["rand_map:0"]
bl_pruning_mask = bl["model_weights"]["binary_dense_1"]["binary_dense_1"]["pruning_mask:0"]
bl_gamma = bl["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable:0"]
bl_means = bl["model_weights"]["residual_sign_6"]["residual_sign_6"]["means:0"]
zero_fill = np.zeros(np.shape(np.array(bl_w1)))
pret_w1 = pretrained["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable_1:0"]
#pret_w2 = pretrained["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable_2:0"]
#pret_w3 = pretrained["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable_3:0"]
#pret_w4 = pretrained["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable_4:0"]
#pret_rand_map = pretrained["model_weights"]["binary_dense_1"]["binary_dense_1"]["rand_map:0"]
pret_pruning_mask = pretrained["model_weights"]["binary_dense_1"]["binary_dense_1"]["pruning_mask:0"]
p_gamma = pretrained["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable:0"]
pret_means = pretrained["model_weights"]["residual_sign_6"]["residual_sign_6"]["means:0"]
pret_w1[...] = np.array(bl_w1)
#pret_w2[...] = zero_fill
#pret_w3[...] = zero_fill
#pret_w4[...] = -np.array(bl_w1)
#pret_rand_map[...] = np.array(bl_rand_map)
p_gamma[...] = np.array(bl_gamma)
pret_means[...] = np.array(bl_means)
weight = np.array(bl_w1)
TM = 8
TN = 8
Tsize_M = np.shape(weight)[0]//TM
Tsize_N = np.shape(weight)[1]//TN
one_tile = np.zeros([Tsize_M,Tsize_N])
# set up pruning_mask
#mean=np.mean(abs(weight),axis=3)
norm=one_tile
if normalisation=="l1":
for n in range(TN):
for m in range(TM):
norm = norm + weight[(m*Tsize_M):((m+1)*Tsize_M),(n*Tsize_N):((n+1)*Tsize_N)]
norm = norm / (TM*TN)
elif normalisation=="l2":
for n in range(TN):
for m in range(TM):
norm = norm + weight[(m*Tsize_M):((m+1)*Tsize_M),(n*Tsize_N):((n+1)*Tsize_N)]**2
norm = norm / (TM*TN)
norm = np.sqrt(norm)
#l1_norm=np.reshape(l1_norm, [-1,np.shape(l1_norm)[3]])
pruning_mask = np.greater(norm, p_d1)
pret_pruning_mask[...] = np.array(pruning_mask,dtype=float)
print(np.sum(np.array(pret_pruning_mask)))
# dense layer 2
bl_w1 = bl["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_1:0"]
#bl_w2 = bl["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_2:0"]
#bl_w3 = bl["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_3:0"]
#bl_w4 = bl["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_4:0"]
#bl_rand_map = bl["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map:0"]
bl_pruning_mask = bl["model_weights"]["binary_dense_2"]["binary_dense_2"]["pruning_mask:0"]
bl_gamma = bl["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable:0"]
bl_means = bl["model_weights"]["residual_sign_7"]["residual_sign_7"]["means:0"]
zero_fill = np.zeros(np.shape(np.array(bl_w1)))
pret_w1 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_1:0"]
#pret_w2 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_2:0"]
#pret_w3 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_3:0"]
#pret_w4 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_4:0"]
#pret_rand_map = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map:0"]
pret_pruning_mask = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["pruning_mask:0"]
p_gamma = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable:0"]
pret_means = pretrained["model_weights"]["residual_sign_7"]["residual_sign_7"]["means:0"]
pret_w1[...] = np.array(bl_w1)
#pret_w2[...] = zero_fill
#pret_w3[...] = zero_fill
#pret_w4[...] = -np.array(bl_w1)
#pret_rand_map[...] = np.array(bl_rand_map)
p_gamma[...] = np.array(bl_gamma)
pret_means[...] = np.array(bl_means)
weight = np.array(bl_w1)
TM = 8
TN = 8
Tsize_M = np.shape(weight)[0]//TM
Tsize_N = np.shape(weight)[1]//TN
one_tile = np.zeros([Tsize_M,Tsize_N])
# set up pruning_mask
#mean=np.mean(abs(weight),axis=3)
norm=one_tile
if normalisation=="l1":
for n in range(TN):
for m in range(TM):
norm = norm + weight[(m*Tsize_M):((m+1)*Tsize_M),(n*Tsize_N):((n+1)*Tsize_N)]
norm = norm / (TM*TN)
elif normalisation=="l2":
for n in range(TN):
for m in range(TM):
norm = norm + weight[(m*Tsize_M):((m+1)*Tsize_M),(n*Tsize_N):((n+1)*Tsize_N)]**2
norm = norm / (TM*TN)
norm = np.sqrt(norm)
#l1_norm=np.reshape(l1_norm, [-1,np.shape(l1_norm)[3]])
pruning_mask = np.greater(norm, p_d2)
pret_pruning_mask[...] = np.array(pruning_mask,dtype=float)
print(np.sum(np.array(pret_pruning_mask)))
# dense layer 3
bl_w1 = bl["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_1:0"]
#bl_w2 = bl["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_2:0"]
#bl_w3 = bl["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_3:0"]
#bl_w4 = bl["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_4:0"]
#bl_rand_map = bl["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map:0"]
bl_pruning_mask = bl["model_weights"]["binary_dense_3"]["binary_dense_3"]["pruning_mask:0"]
bl_gamma = bl["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable:0"]
bl_means = bl["model_weights"]["residual_sign_8"]["residual_sign_8"]["means:0"]
zero_fill = np.zeros(np.shape(np.array(bl_w1)))
pret_w1 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_1:0"]
#pret_w2 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_2:0"]
#pret_w3 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_3:0"]
#pret_w4 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_4:0"]
#pret_rand_map = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map:0"]
pret_pruning_mask = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["pruning_mask:0"]
p_gamma = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable:0"]
pret_means = pretrained["model_weights"]["residual_sign_8"]["residual_sign_8"]["means:0"]
pret_w1[...] = np.array(bl_w1)
#pret_w2[...] = zero_fill
#pret_w3[...] = zero_fill
#pret_w4[...] = -np.array(bl_w1)
#pret_rand_map[...] = np.array(bl_rand_map)
p_gamma[...] = np.array(bl_gamma)
pret_means[...] = np.array(bl_means)
weight = np.array(bl_w1)
TM = 8
TN = 10
Tsize_M = np.shape(weight)[0]//TM
Tsize_N = np.shape(weight)[1]//TN
one_tile = np.zeros([Tsize_M,Tsize_N])
# set up pruning_mask
#mean=np.mean(abs(weight),axis=3)
norm=one_tile
if normalisation=="l1":
for n in range(TN):
for m in range(TM):
norm = norm + weight[(m*Tsize_M):((m+1)*Tsize_M),(n*Tsize_N):((n+1)*Tsize_N)]
norm = norm / (TM*TN)
elif normalisation=="l2":
for n in range(TN):
for m in range(TM):
norm = norm + weight[(m*Tsize_M):((m+1)*Tsize_M),(n*Tsize_N):((n+1)*Tsize_N)]**2
norm = norm / (TM*TN)
norm = np.sqrt(norm)
#l1_norm=np.reshape(l1_norm, [-1,np.shape(l1_norm)[3]])
pruning_mask = np.greater(norm, p_d3)
pret_pruning_mask[...] = np.array(pruning_mask,dtype=float)
print(np.sum(np.array(pret_pruning_mask)))
# bn 1
bl_beta = bl["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["beta:0"]
bl_gamma = bl["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["gamma:0"]
bl_moving_mean = bl["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["moving_mean:0"]
bl_moving_variance = bl["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["moving_variance:0"]
p_beta = pretrained["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["beta:0"]
p_gamma = pretrained["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["gamma:0"]
p_moving_mean = pretrained["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["moving_mean:0"]
p_moving_variance = pretrained["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["moving_variance:0"]
p_beta[...] = np.array(bl_beta)
p_gamma[...] = np.array(bl_gamma)
p_moving_mean[...] = np.array(bl_moving_mean)
p_moving_variance[...] = np.array(bl_moving_variance)
# bn 2
bl_beta = bl["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["beta:0"]
bl_gamma = bl["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["gamma:0"]
bl_moving_mean = bl["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["moving_mean:0"]
bl_moving_variance = bl["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["moving_variance:0"]
p_beta = pretrained["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["beta:0"]
p_gamma = pretrained["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["gamma:0"]
p_moving_mean = pretrained["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["moving_mean:0"]
p_moving_variance = pretrained["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["moving_variance:0"]
p_beta[...] = np.array(bl_beta)
p_gamma[...] = np.array(bl_gamma)
p_moving_mean[...] = np.array(bl_moving_mean)
p_moving_variance[...] = np.array(bl_moving_variance)
# bn 3
bl_beta = bl["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["beta:0"]
bl_gamma = bl["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["gamma:0"]
bl_moving_mean = bl["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["moving_mean:0"]
bl_moving_variance = bl["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["moving_variance:0"]
p_beta = pretrained["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["beta:0"]
p_gamma = pretrained["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["gamma:0"]
p_moving_mean = pretrained["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["moving_mean:0"]
p_moving_variance = pretrained["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["moving_variance:0"]
p_beta[...] = np.array(bl_beta)
p_gamma[...] = np.array(bl_gamma)
p_moving_mean[...] = np.array(bl_moving_mean)
p_moving_variance[...] = np.array(bl_moving_variance)
# bn 4
bl_beta = bl["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["beta:0"]
bl_gamma = bl["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["gamma:0"]
bl_moving_mean = bl["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["moving_mean:0"]
bl_moving_variance = bl["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["moving_variance:0"]
p_beta = pretrained["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["beta:0"]
p_gamma = pretrained["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["gamma:0"]
p_moving_mean = pretrained["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["moving_mean:0"]
p_moving_variance = pretrained["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["moving_variance:0"]
p_beta[...] = np.array(bl_beta)
p_gamma[...] = np.array(bl_gamma)
p_moving_mean[...] = np.array(bl_moving_mean)
p_moving_variance[...] = np.array(bl_moving_variance)
# bn 5
bl_beta = bl["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["beta:0"]
bl_gamma = bl["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["gamma:0"]
bl_moving_mean = bl["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["moving_mean:0"]
bl_moving_variance = bl["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["moving_variance:0"]
p_beta = pretrained["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["beta:0"]
p_gamma = pretrained["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["gamma:0"]
p_moving_mean = pretrained["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["moving_mean:0"]
p_moving_variance = pretrained["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["moving_variance:0"]
p_beta[...] = np.array(bl_beta)
p_gamma[...] = np.array(bl_gamma)
p_moving_mean[...] = np.array(bl_moving_mean)
p_moving_variance[...] = np.array(bl_moving_variance)
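# The five batch-norm copies above (and bn 6 below) differ only in the layer index. A compact
# sketch of the same copy written as a helper (my own illustration, not present in the original
# script); it assumes the group layout used throughout this file.
def copy_batchnorm_params(src, dst, idx):
    """Copy beta/gamma/moving stats of batch_normalization_<idx> from h5 file src to dst."""
    name = "batch_normalization_%d" % idx
    for key in ("beta:0", "gamma:0", "moving_mean:0", "moving_variance:0"):
        dst["model_weights"][name][name][key][...] = np.array(src["model_weights"][name][name][key])
# e.g. the six blocks here collapse to: for idx in range(1, 7): copy_batchnorm_params(bl, pretrained, idx)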
# bn 6
bl_beta = bl["model_weights"]["batch_normalization_6"]["batch_normalization_6"]["beta:0"]
bl_gamma = bl["model_weights"]["batch_normalization_6"]["batch_normalization_6"]["gamma:0"]
bl_moving_mean = bl["model_weights"]["batch_normalization_6"]["batch_normalization_6"]["moving_mean:0"]
bl_moving_variance = bl["model_weights"]["batch_normalization_6"]["batch_normalization_6"]["moving_variance:0"]
p_beta = pretrained["model_weights"]["batch_normalization_6"]["batch_normalization_6"]["beta:0"]
p_gamma = pretrained["model_weights"]["batch_normalization_6"]["batch_normalization_6"]["gamma:0"]
p_moving_mean = pretrained["model_weights"]["batch_normalization_6"]["batch_normalization_6"]["moving_mean:0"]
p_moving_variance = pretrained["model_weights"]["batch_normalization_6"]["batch_normalization_6"]["moving_variance:0"]
p_beta[...] =
|
np.array(bl_beta)
|
numpy.array
|
# -*- coding: utf-8 -*-
"""
Author
------
<NAME>
Email
-----
<EMAIL>
Created on
----------
- Sat Sep 03 16:00:00 2017
Modifications
-------------
- Sat Sep 03 12:00:00 2017
Aims
----
- plotting tools
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import binned_statistic_2d
from matplotlib import cm
from lmfit.models import GaussianModel
from mpl_toolkits.mplot3d import Axes3D
from .analysis import label_diff_lmfit
def plot_mse(s):
fig, ax = plt.subplots(1, 1, figsize=(8, 6), tight_layout=True)
plt.hist(-s.nmse[s.nmse != 0], np.linspace(0, 1, 80), histtype='step',
lw=2, label="MSE")
plt.hist(-s.scores[s.nmse != 0], np.linspace(0, 1, 80), histtype='step',
lw=2, label="CV MSE")
ylim = plt.gca().get_ylim()
plt.vlines(np.percentile(-s.nmse[s.nmse != 0], [14, 50, 86]), *ylim,
linestyle='--', label="14, 50, 86 percentiles")
plt.xlim(0, 1)
plt.ylim(*ylim)
plt.ylabel("Counts")
plt.xlabel("MSE")
fig.tight_layout()
return fig
# ################ #
# image
# ################ #
def image(ax, x, y, xbins, ybins, log=True):
plt.sca(ax)
c, xe, ye, bn = binned_statistic_2d(x, y, x, statistic="count",
bins=[xbins, ybins])
if log:
c = np.log10(c)
plt.imshow(c.T, origin="lower", extent=(*xbins[[0, -1]], *ybins[[0, -1]]),
cmap=cm.viridis, aspect="auto")
return
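# A minimal usage sketch for `image` (bin choices are arbitrary examples, not from this module):
#     fig, ax = plt.subplots()
#     image(ax, X_true[:, 0], X_pred[:, 0],
#           np.linspace(X_true[:, 0].min(), X_true[:, 0].max(), 50),
#           np.linspace(X_pred[:, 0].min(), X_pred[:, 0].max(), 50), log=True)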
def compare_labels(X_true, X_pred,
xlabels=None, ylabels=None, reslabels=None,
xlims=None, reslims=None,
histlim=None, nxb=30, cornerlabel="",
figsize=None):
nlabel = X_true.shape[1]
if xlabels is None:
xlabels = ["$X_{{true}}:{}$".format(i) for i in range(nlabel)]
if ylabels is None:
ylabels = ["$X_{{pred}}:{}$".format(i) for i in range(nlabel)]
if reslabels is None:
reslabels = ["$X_{{res}}:{}$".format(i) for i in range(nlabel)]
# default xlim
if xlims is None:
xlim1 = np.min(np.vstack((np.percentile(X_true, 1, axis=0),
np.percentile(X_pred, 1, axis=0))), axis=0)
xlim2 = np.max(np.vstack((np.percentile(X_true, 99, axis=0),
np.percentile(X_pred, 99, axis=0))), axis=0)
xlims = (xlim2 - xlim1).reshape(-1, 1) * 0.4 * np.array(
[-1, 1]) + np.vstack((xlim1, xlim2)).T
if reslims is None:
reslims = np.repeat(
np.max(np.abs(np.percentile(X_pred - X_true, [1, 99], axis=0).T),
axis=1).reshape(-1, 1), 2, axis=1) * np.array([-1, 1])
reslims = np.abs(np.diff(reslims, axis=1)) * np.array(
[-1, 1]) * 0.2 + reslims
# run MCMC
X_bias, X_scatter, frs, histdata = label_diff_lmfit(
X_true, X_pred, bins="auto", plot=False, emcee=True)
print("bias", X_bias)
print("scatter", X_scatter)
if histlim is None:
histlim = (0, np.max([np.max(histdata_[0]) for histdata_ in histdata]))
histlim = np.array(histlim)
if figsize is None:
figsize = (3 * nlabel, 3 * nlabel)
# draw figure
fig, axs2 = plt.subplots(nlabel+1, nlabel+1, figsize=figsize)
# 1. Gaussian
gm = GaussianModel()
for i in range(nlabel):
plt.sca(axs2[i + 1, -1])
fr = frs[i]
hist_, bin_edge_, data_ = histdata[i]
plt.hist(data_, bins=bin_edge_, histtype="step",
orientation="horizontal")
axs2[i + 1, -1].plot(gm.eval(fr.mcmc.params, x=bin_edge_), bin_edge_)
axs2[i + 1, -1].tick_params(direction='in', pad=5)
axs2[i + 1, -1].set_xlim(histlim)
axs2[i + 1, -1].set_ylim(reslims[i])
axs2[i + 1, -1].yaxis.tick_right()
axs2[i + 1, -1].hlines(X_bias[i], *histlim, linestyle='--', color="k")
pos_text_x = np.dot(np.array([[0.9, 0.1]]), histlim.reshape(-1, 1))
pos_text_y = np.dot(np.array([[0.15, 0.85]]),
reslims[i].reshape(-1, 1))
axs2[i + 1, -1].text(pos_text_x, pos_text_y,
"$bias={:.4f}$".format(X_bias[i]))
pos_text_x = np.dot(np.array([[0.9, 0.1]]), histlim.reshape(-1, 1))
pos_text_y = np.dot(np.array([[0.30, 0.70]]),
reslims[i].reshape(-1, 1))
axs2[i + 1, -1].text(pos_text_x, pos_text_y,
"$\\sigma={:.4f}$".format(X_scatter[i]))
if i < nlabel-1:
axs2[i + 1, -1].set_xticklabels([])
axs2[-1, -1].set_xlabel("Counts")
# 2. diagonal
for i in range(nlabel):
image(axs2[0, i], X_true[:, i], X_pred[:, i],
|
np.linspace(xlims[i][0], xlims[i][1], nxb)
|
numpy.linspace
|