| prompt | completion | api |
|---|---|---|
| stringlengths 19 to 879k | stringlengths 3 to 53.8k | stringlengths 8 to 59 |
import os
import pandas
import math
import ntpath
from BirdRoostLocation.ReadData import Labels
import numpy as np
from BirdRoostLocation import utils
from BirdRoostLocation.PrepareData import NexradUtils
from BirdRoostLocation import LoadSettings as settings
from BirdRoostLocation.BuildModels.CNN import model as shallow_model
import tensorflow as tf
from keras.models import model_from_json
import keras
class Batch_Generator:
"""This class organized the machine learning labels and creates ML batches.
Class Variables:
self.root_dir: The directory where the radar images are stored
self.ml_sets: A dictionary containing a list of files that are part of
the given ml set
self.batch_size: the size of the minibatch learning batches
self.label_dict: A dictionary of the labels, the key is the filename,
and the value is a ML_Label object.
"""
def __init__(
self,
ml_label_csv,
ml_split_csv,
validate_k_index=3,
test_k_index=4,
default_batch_size=settings.DEFAULT_BATCH_SIZE,
root_dir=utils.RADAR_IMAGE_DIR,
):
self.label_dict = {}
self.root_dir = root_dir
self.no_roost_sets = {}
self.roost_sets = {}
self.no_roost_sets_V06 = {}
self.roost_sets_V06 = {}
self.batch_size = default_batch_size
print("ML LABEL CSV")
print(ml_label_csv)
self.__set_ml_sets(ml_split_csv, validate_k_index, test_k_index)
def __set_ml_sets(self, ml_split_csv, validate_k_index, test_k_index):
"""Create Train, test, and Validation set from k data folds.
The k data folds are saved out to ml_split_csv. The fold at the given
test and train indices as set to their corresponding set. The rest
of the data is put into train. This method will initialize the following
class variables: self.train, self.validation, and self.test. Each of
these contains a list of filenames that correspond with the set.
Args:
ml_split_csv: A path to a csv file, where the csv has three columns,
'AWS_file', 'Roost', and 'split_index'.
validate_k_index: The index of the validation set.
test_k_index: The index of the test set.
"""
print("ML SPLIT CSV")
print(ml_split_csv)
ml_split_pd = pandas.read_csv(ml_split_csv)
# Remove files that weren't found
all_files = utils.getListOfFilesInDirectory(self.root_dir + "data", ".png")
print("ROOT DIR")
print(self.root_dir + "data")
all_files_dict = {}
for i in range(len(all_files)):
all_files_dict[os.path.basename(all_files[i])[2:25]] = True
for index, row in ml_split_pd.iterrows():
if all_files_dict.get(row["AWS_file"]) is None:
ml_split_pd.drop(index, inplace=True)
print("LENGTHS OF NO ROOST/ROOST:")
print(len(ml_split_pd[ml_split_pd.Roost != True]))
print(len(ml_split_pd[ml_split_pd.Roost]))
print("BEFORE self.__set_ml_sets_helper - NO ROOST")
self.__set_ml_sets_helper(
self.no_roost_sets,
self.no_roost_sets_V06,
ml_split_pd[ml_split_pd.Roost != True],
validate_k_index,
test_k_index,
)
print("AFTER self.__set_ml_sets_helper - NO ROOST")
self.__set_ml_sets_helper(
self.roost_sets,
self.roost_sets_V06,
ml_split_pd[ml_split_pd.Roost],
validate_k_index,
test_k_index,
)
print("AFTER self.__set_ml_sets_helper - ROOST")
def __set_ml_sets_helper(self, ml_sets, ml_sets_V06, ml_split_pd, val_k, test_k):
no_val_pd = ml_split_pd[ml_split_pd.split_index != val_k]
ml_sets[utils.ML_Set.training] = list(
no_val_pd[no_val_pd.split_index != test_k]["AWS_file"]
)
ml_sets[utils.ML_Set.validation] = list(
ml_split_pd[ml_split_pd.split_index == val_k]["AWS_file"]
)
ml_sets[utils.ML_Set.testing] = list(
ml_split_pd[ml_split_pd.split_index == test_k]["AWS_file"]
)
print("ml_sets[utils.ML_Set....]")
for key in list(ml_sets.keys()):
ml_sets_V06[key] = []
for item in ml_sets[key]:
if int(item[-1]) >= 6:
ml_sets_V06[key].append(item)
np.random.shuffle(ml_sets[key])
np.random.shuffle(ml_sets_V06[key])
def get_batch_indices(self, ml_sets, ml_set, num_temporal_data=0):
indices = np.random.randint(
low=0, high=len(ml_sets[ml_set]), size=int(self.batch_size / 2)
)
return indices
def get_batch(self, ml_set, dualPol, radar_product=None):
ground_truths = []
train_data = []
filenames = []
roost_sets = self.roost_sets
no_roost_sets = self.no_roost_sets
if dualPol:
roost_sets = self.roost_sets_V06
no_roost_sets = self.no_roost_sets_V06
return train_data, ground_truths, filenames, roost_sets, no_roost_sets
def single_product_batch_param_helper(
self,
filename,
filenames,
radar_product,
problem,
model_type,
train_data,
ground_truths,
images,
):
is_roost = int(self.label_dict[filename][0].is_roost)
polar_radius = [
float(self.label_dict[filename][i].polar_radius)
for i in range(len(self.label_dict[filename]))
]
polar_theta = [
float(self.label_dict[filename][i].polar_theta)
for i in range(len(self.label_dict[filename]))
]
roost_size = [
float(self.label_dict[filename][i].radius)
for i in range(len(self.label_dict[filename]))
]
if images != []:
if problem == "detection":
if np.array(train_data).size == 0:
train_data = images
train_data = np.array(train_data)
else:
train_data = np.concatenate((train_data, np.array(images)), axis=0)
if np.array(ground_truths).size == 0:
ground_truths = [[is_roost, 1 - is_roost]] * np.array(images).shape[
0
]
else:
ground_truths = np.concatenate(
(
ground_truths,
[[is_roost, 1 - is_roost]] * np.array(images).shape[0],
),
axis=0,
)
print("ground truths shape")
print(np.array(ground_truths).shape)
else: # localization
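# Localization ground truths are 240x240 binary masks: each roost location is
# converted from polar to Cartesian coordinates and a disc of radius
# mask_roost_size is set to 1.0 around it.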
all_radii = np.array([])
all_thetas = np.array([])
for k in range(len(polar_radius)):
radii = np.array([polar_radius[k]] * np.array(images).shape[0])
if not np.isnan(np.sum(radii)):
mask_radii = [(radius / 300) * (240 / 2) for radius in radii]
thetas = []
for i in range(len(images)):
thetas.append(
adjustTheta(
self,
polar_theta[k],
self.label_dict[filename][0].images[radar_product][
i
],
)
)
all_radii = np.append(all_radii, np.array(mask_radii))
all_thetas = np.append(all_thetas, np.array(thetas))
all_radii = np.reshape(
all_radii,
(
len(self.label_dict[filename]),
int(len(all_radii) / len(self.label_dict[filename])),
),
)
all_thetas = np.reshape(
all_thetas,
(
len(self.label_dict[filename]),
int(len(all_thetas) / len(self.label_dict[filename])),
),
)
masks = np.zeros((len(all_radii[0]), 240, 240))
if type(roost_size) != float or math.isnan(roost_size):
roost_size = 28.0
else:
roost_size = roost_size / 1000 # convert to km
mask_roost_size = (roost_size / 300) * (240 / 2)
vconvert_to_cart = np.vectorize(convert_to_cart)
try:
cart_x, cart_y = vconvert_to_cart(all_radii, all_thetas)
except ValueError as e:
return train_data, ground_truths
for k in range(cart_x.shape[0]):
for j in range(cart_x.shape[1]):
try:
masks[j][
120 - int(round(cart_y[k][j])),
120 + int(round(cart_x[k][j])),
] = 1.0
color_pts = points_in_circle_np(
mask_roost_size,
y0=120 - int(round(cart_y[k][j])),
x0=120 + int(round(cart_x[k][j])),
)
for pt in color_pts:
masks[j][pt[0], pt[1]] = 1.0
except IndexError as e:
pass
if np.array(train_data).size == 0:
train_data = images
train_data = np.array(train_data)
else:
train_data = np.concatenate((train_data, np.array(images)), axis=0)
if np.array(ground_truths).size == 0:
ground_truths = masks
else:
ground_truths = np.concatenate((ground_truths, masks), axis=0)
train_data = np.array(train_data)
return train_data, ground_truths
def single_product_batch_params(
self,
ground_truths,
train_data,
filenames,
roost_sets,
no_roost_sets,
ml_set,
radar_product,
model_type,
problem,
is_eval=False,
):
extended_filenames = np.array([])
print("FILENAMES")
print(filenames)
if filenames == []:
for ml_sets in [roost_sets, no_roost_sets]:
if ml_sets[ml_set]: # in case you only train on true or false labels
indices = Batch_Generator.get_batch_indices(self, ml_sets, ml_set)
for i, index in enumerate(indices):
filename = ml_sets[ml_set][index]
print(filename)
if filename not in extended_filenames:
images = self.label_dict[filename][0].get_image(
radar_product
)
if images != []:
train_data, ground_truths = Batch_Generator.single_product_batch_param_helper(
self,
filename,
filenames,
radar_product,
problem,
model_type,
train_data,
ground_truths,
images,
)
# Record the filenames behind this batch: one entry per file when not
# evaluating, one entry per image when evaluating.
if is_eval == False:
extended_filenames = np.append(
extended_filenames, filename
)
else:
extended_filenames = np.append(
extended_filenames,
[filename]
* (len(train_data) - len(extended_filenames)),
)
else:
for filename in filenames:
images = self.label_dict[filename][0].get_image(radar_product)
if images != []:
train_data, ground_truths = Batch_Generator.single_product_batch_param_helper(
self,
filename,
filenames,
radar_product,
problem,
model_type,
train_data,
ground_truths,
images,
)
# Same filename bookkeeping as above.
if is_eval == False:
extended_filenames = np.append(extended_filenames, filename)
else:
extended_filenames = np.append(
extended_filenames,
[filename] * (len(train_data) - len(extended_filenames)),
)
truth_shape = np.array(ground_truths).shape
print("truth shape: ")
print(truth_shape)
try:
if problem == "detection":
ground_truths = np.array(ground_truths).reshape(
truth_shape[0], truth_shape[1]
)
train_data_np = np.array(train_data)
shape = train_data_np.shape
train_data_np = train_data_np.reshape(
shape[0], shape[1], shape[2], shape[3]
)
print("RETURN SHAPES")
print(train_data_np.shape)
print(ground_truths.shape)
print(extended_filenames.shape)
return train_data_np, np.array(ground_truths), np.array(extended_filenames)
except IndexError as e:
print(e)
return None, None, None
class Single_Product_Batch_Generator(Batch_Generator):
def __init__(
self,
ml_label_csv,
ml_split_csv,
validate_k_index=3,
test_k_index=4,
default_batch_size=settings.DEFAULT_BATCH_SIZE,
root_dir=utils.RADAR_IMAGE_DIR,
high_memory_mode=False,
):
Batch_Generator.__init__(
self,
ml_label_csv,
ml_split_csv,
validate_k_index,
test_k_index,
default_batch_size,
root_dir,
)
ml_label_pd = pandas.read_csv(ml_label_csv)
for _, row in ml_label_pd.iterrows():
if row["AWS_file"] not in self.label_dict:
self.label_dict[row["AWS_file"]] = []
self.label_dict[row["AWS_file"]].append(
Labels.ML_Label(row["AWS_file"], row, self.root_dir, high_memory_mode)
)
def get_batch(
self,
ml_set,
dualPol,
radar_product=None,
num_temporal_data=0,
model_type="cnn",
problem="detection",
filenames=[],
is_eval=False,
):
"""Get a batch of data for machine learning. As a default, a batch
contains data from a single radar product.
Args:
ml_set: ML_Set enum value, train, test, or validation.
radar_product: Radar_Product enum value, reflectivity, velocity,
zdr, or rho_hv.
Returns:
train_data, ground_truth, filenames:
The ground truth is an array of batch size, where each item
in the array contains a single ground truth label.
The train_data is an array of images, corresponding to the
ground truth values.
filenames is an array of filenames, corresponding to the
ground truth values.
"""
if len(filenames) == 0:
ground_truths, train_data, filenames, roost_sets, no_roost_sets = Batch_Generator.get_batch(
self, ml_set, dualPol, radar_product
)
else:
ground_truths, train_data, _, roost_sets, no_roost_sets = Batch_Generator.get_batch(
self, ml_set, dualPol, radar_product
)
return Batch_Generator.single_product_batch_params(
self,
ground_truths,
train_data,
filenames,
roost_sets,
no_roost_sets,
ml_set,
radar_product,
model_type,
problem,
is_eval,
)
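# A minimal usage sketch (the CSV paths are hypothetical, not from the source):
#
#   generator = Single_Product_Batch_Generator(
#       ml_label_csv="labels.csv", ml_split_csv="splits.csv"
#   )
#   train_data, ground_truths, filenames = generator.get_batch(
#       ml_set=utils.ML_Set.training,
#       dualPol=True,
#       radar_product=utils.Radar_Products.reflectivity,
#       problem="detection",
#   )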
class Multiple_Product_Batch_Generator(Batch_Generator):
def __init__(
self,
ml_label_csv,
ml_split_csv,
validate_k_index=3,
test_k_index=4,
default_batch_size=settings.DEFAULT_BATCH_SIZE,
root_dir=utils.RADAR_IMAGE_DIR,
high_memory_mode=False,
):
Batch_Generator.__init__(
self,
ml_label_csv,
ml_split_csv,
validate_k_index,
test_k_index,
default_batch_size,
root_dir,
)
print("after Batch_Generator.__init__")
print(ml_label_csv)
ml_label_pd = pandas.read_csv(ml_label_csv)
print(ml_label_pd.shape)
for _, row in ml_label_pd.iterrows():
if row["AWS_file"] not in self.label_dict:
self.label_dict[row["AWS_file"]] = [
Labels.ML_Label(
row["AWS_file"], row, self.root_dir, high_memory_mode
)
]
else:
self.label_dict[row["AWS_file"]].append(
Labels.ML_Label(
row["AWS_file"], row, self.root_dir, high_memory_mode
)
)
# channels will be RGB values, first dimension will be radar products
def get_batch(
self,
ml_set,
dualPol,
batch_size=settings.DEFAULT_BATCH_SIZE,
loaded_models=None,
num_temporal_data=0,
model_type="cnn",
problem="detection",
):
"""Get a batch of data for machine learning. This batch contains data
with four channels in it, one for each radar product. For dualPol data
this will be four radar products, and for legacy data this will be two
radar products.
Args:
ml_set: ML_Set enum value, train, test, or validation.
dualPol: Boolean, true if the data is dual pol, false if the radar
data is legacy.
Returns:
train_data, ground_truth, filenames:
The ground truth is an array of batch size, where each item
in the array contains a single ground truth label.
The train_data is an array of images, corresponding to the
ground truth values.
filenames is an array of filenames, corresponding to the
ground truth values.
"""
ground_truths, train_data, filenames, roost_sets, no_roost_sets = Batch_Generator.get_batch(
self, ml_set, dualPol, radar_product=None
)
train_list = []
truth_list = []
pred_list = []
file_list = []
radar_products = [
utils.Radar_Products.cc,
utils.Radar_Products.diff_reflectivity,
utils.Radar_Products.reflectivity,
utils.Radar_Products.velocity,
]
for k, product in enumerate(radar_products):
print(product)
print("BEFORE")
print(len(filenames))
train, truth, filenames = Batch_Generator.single_product_batch_params(
self,
ground_truths,
train_data,
filenames,
roost_sets,
no_roost_sets,
ml_set,
product,
model_type,
problem,
)
print("AFTER")
print(len(filenames))
print("train.shape")
print(np.array(train).shape)
print("truth.shape")
print(np.array(truth).shape)
print("filenames.shape")
print(np.array(filenames).shape)
predictions = np.array([])
for i in range(0, len(train), batch_size):
train_batch = []
for j in range(0, batch_size):
if (i + j) < len(train):
train_batch.append(train[i + j])
train_batch = np.array(train_batch)
if len(train_batch) > 0:
if problem == "detection":
pred = loaded_models[k].predict_proba(train_batch)
else:
pred = loaded_models[k].predict(train_batch)
predictions = np.append(predictions, np.array(pred))
predictions = np.reshape(predictions, (-1, 240, 240))
print("predictions.shape")
print(predictions.shape)
train_list.append(np.array(train))
truth_list.append(np.array(truth))
file_list.append(np.array(filenames))
print("train_list.shape")
print(np.array(train_list).shape)
print("truth_list.shape")
print(np.array(truth_list).shape)
print("file_list.shape")
print(np.array(file_list).shape)
print("predictions.shape")
print(np.array(predictions).shape)
# try:
if problem == "detection":
predictions = np.reshape(
predictions, (np.array(truth_list).shape[1], 2)
)
else:
predictions = np.reshape(predictions, (-1, 240, 240))
print(np.array(truth_list).shape)
pred_list.append(predictions)
print("train_list, truth_list, pred_list, file_list")
print(np.array(train_list).shape)
print(np.array(truth_list).shape)
print(np.array(pred_list).shape)
print(np.array(file_list).shape)
return (
np.array(train_list),
np.array(truth_list),
np.array(pred_list),
np.array(file_list),
)
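# A rough usage sketch for the multi-product generator (model variables are
# hypothetical; loaded_models is assumed to hold one trained Keras model per
# radar product, in the order cc, diff_reflectivity, reflectivity, velocity):
#
#   train, truth, pred, files = generator.get_batch(
#       ml_set=utils.ML_Set.testing,
#       dualPol=True,
#       loaded_models=[model_cc, model_zdr, model_refl, model_vel],
#       problem="detection",
#   )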
def normalize(self, x, maxi, mini):
if type(x) is list:
return [(y - mini) / (maxi - mini) for y in x]
else:
return (x - mini) / (maxi - mini)
def adjustTheta(self, theta, path):
filename = os.path.splitext(ntpath.basename(path))[0]
parts = filename.split("_")
if "flip" in parts:
if theta > 180.0:
theta = 540 - theta
else:
theta = 180 - theta
# rotation
try:
if "noise" in parts:
degree_offset = int(parts[-2])
else:
degree_offset = int(parts[-1])
theta += degree_offset
except ValueError:
return theta
return theta
def convert_to_cart(radius, theta):
return radius * math.cos(theta), radius * math.sin(theta)
def points_in_circle_np(radius, y0=0, x0=0):
x_ = np.arange(x0 - radius - 1, x0 + radius + 1, dtype=int)
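# points_in_circle_np is truncated above. A plausible completion, assuming the
# usual NumPy grid-mask approach (not taken from the source), could be:
#
#   def points_in_circle_np(radius, y0=0, x0=0):
#       x_ = np.arange(x0 - radius - 1, x0 + radius + 1, dtype=int)
#       y_ = np.arange(y0 - radius - 1, y0 + radius + 1, dtype=int)
#       # keep grid points whose distance from (x0, y0) is within the radius
#       ys, xs = np.where(
#           (x_[np.newaxis, :] - x0) ** 2 + (y_[:, np.newaxis] - y0) ** 2 <= radius ** 2
#       )
#       return list(zip(y_[ys], x_[xs]))  # (row, col) pairs, matching masks[j][pt[0], pt[1]]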
import pytest
import math
import numpy as np
import autograd.numpy as adnp
from autograd import grad
import cs107_salad.Forward.salad as ad
from cs107_salad.Forward.utils import check_list, compare_dicts, compare_dicts_multi
def test_add_radd():
x = ad.Variable(3)
y = x + 3
assert y.val == 6
assert list(y.der.values()) == np.array([1])
x = ad.Variable(3)
y = 3 + x
assert y.val == 6
assert list(y.der.values()) == np.array([1])
x = ad.Variable(3, {"x": 1})
y = ad.Variable(3, {"y": 1})
z = x + y
assert z.val == 6
assert z.der == {"x": 1, "y": 1}
x = ad.Variable(np.ones((5, 5)), label="x")
y = ad.Variable(np.ones((5, 5)), label="y")
z = x + y
assert np.array_equal(z.val, 2 * np.ones((5, 5)))
np.testing.assert_equal(z.der, {"x": np.ones((5, 5)), "y": np.ones((5, 5))})
z = x + x + y + y + 2
assert np.array_equal(z.val, 4 * np.ones((5, 5)) + 2)
np.testing.assert_equal(z.der, {"x": 2 * np.ones((5, 5)), "y": 2 * np.ones((5, 5))})
def test_sub_rsub():
x = ad.Variable(3)
y = x - 3
assert y.val == 0
assert list(y.der.values()) == np.array([1])
x = ad.Variable(3)
y = 3 - x
assert y.val == 0
assert list(y.der.values()) == np.array([-1])
x = ad.Variable(3, {"x": 1})
y = ad.Variable(3, {"y": 1})
z = x - y
assert z.val == 0
assert z.der == {"x": 1, "y": -1}
x = ad.Variable(np.ones((5, 5)), label="x")
y = ad.Variable(np.ones((5, 5)), label="y")
z = x - y
assert np.array_equal(z.val, np.zeros((5, 5)))
np.testing.assert_equal(z.der, {"x": np.ones((5, 5)), "y": -1 * np.ones((5, 5))})
z = x + x - y - y + 2
assert np.array_equal(z.val, 2 * np.ones((5, 5)))
np.testing.assert_equal(
z.der, {"x": 2 * np.ones((5, 5)), "y": -2 * np.ones((5, 5))}
)
def test_mul_rmul():
x = ad.Variable(3, label="x")
y = x * 2
assert y.val == 6
assert y.der == {"x": 2}
# y = 5x + x^2
y = x * 2 + 3 * x + x * x
assert y.val == 24
assert y.der == {"x": 11}
x = ad.Variable(3, label="x")
y = ad.Variable(2, label="y")
z = x * y
assert z.val == 6
assert z.der == {"x": 2, "y": 3}
z = 3 * z * 3
assert z.val == 54
assert z.der == {"x": 18, "y": 27}
x = ad.Variable(3, label="x")
y = ad.Variable(2, label="y")
z = x * y
z = y * z # y^2*x
assert z.val == 12
assert z.der == {"x": y.val ** 2, "y": 2 * y.val * x.val}
x = ad.Variable(2 * np.ones((5, 5)), label="x")
y = ad.Variable(3 * np.ones((5, 5)), label="y")
z = x * y
assert np.array_equal(z.val, 2 * 3 * np.ones((5, 5)))
np.testing.assert_equal(z.der, {"x": 3 * np.ones((5, 5)), "y": 2 * np.ones((5, 5))})
z = -1 * z * x # f = -(x^2) * y, dx = -2xy, dy = -x^2
assert np.array_equal(z.val, -12 * np.ones((5, 5)))
np.testing.assert_equal(
z.der, {"x": -2 * 2 * 3 * np.ones((5, 5)), "y": -1 * 2 * 2 * np.ones((5, 5))}
)
def test_truediv_rtruediv():
x = ad.Variable(3, label="x")
y = x / 2
assert y.val == 1.5
assert y.der == {"x": 1 / 2}
y = x / 2 + 3 / x + x / x
assert y.val == 3.5
assert y.der == {"x": 0.5 - 3 / 9}
x = ad.Variable(3, label="x")
y = ad.Variable(2, label="y")
z = x / y
assert z.val == 3 / 2
assert z.der == {"x": 1 / 2, "y": -3 / 4} # dx = 1/y, dy = -x/y^2
z = 2.4 / z / x / 8 # 2.4/(x/y)/x/8
assert z.val == 2.4 / (3 / 2) / 3 / 8
## Using this function because of rounding errors
assert compare_dicts(
z.der, {"x": (-0.6 * y.val) / (x.val ** 3), "y": (0.3 / (x.val ** 2))}
) # dx = -.6y/x^3 , dy = .3/x^2
x = ad.Variable(2 * np.ones((5, 5)), label="x")
y = ad.Variable(3 * np.ones((5, 5)), label="y")
z = x / y
assert np.array_equal(z.val, 2 / 3 * np.ones((5, 5)))
np.testing.assert_equal(z.der, {"x": 1 / y.val, "y": -1 * x.val / (y.val ** 2)})
z = -1 / z / x
assert np.array_equal(z.val, -1 / (2 / 3) / 2 * np.ones((5, 5)))
np.testing.assert_equal(
z.der, {"x": 2 * y.val / (x.val ** 3), "y": -1 / (x.val ** 2)}
)
def test_exp():
x = 3
ans = ad.exp(x)
sol = np.exp(x)
assert sol == ans
x = ad.Variable(3, label="x")
y = ad.exp(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = np.exp(3), np.exp(3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(3, label="x")
ans_val, ans_der = ad.exp(x).val, ad.exp(x).der["x"]
sol_val, sol_der = np.exp(3), np.exp(3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(3, label="x") + 3
y = ad.exp(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = np.exp(6), np.exp(6)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(3, label="x") + ad.Variable(4, label="y")
y = ad.exp(x)
ans_val, ans_der = y.val, [y.der["x"], y.der["y"]]
sol_val, sol_der = np.exp(7), [np.exp(7), np.exp(7)]
assert ans_val == sol_val
assert check_list(ans_der, sol_der)
x = ad.Variable([3, 4, 5], label="x")
y = ad.exp(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[np.exp(3), np.exp(4), np.exp(5)],
[np.exp(3), np.exp(4), np.exp(5),],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([3, 4, 5], label="x")
y = ad.exp(x) + ad.exp(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[2 * np.exp(3), 2 * np.exp(4), 2 * np.exp(5)],
[2 * np.exp(3), 2 * np.exp(4), 2 * np.exp(5),],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([3, 4, 5], label="x")
z = x + x
y = ad.exp(z)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[np.exp(2 * 3), np.exp(2 * 4), np.exp(2 * 5)],
[2 * np.exp(2 * 3), 2 * np.exp(2 * 4), 2 * np.exp(2 * 5),],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([3, 4, 5], label="x")
y = ad.Variable([6, 6, 6], label="y")
y = ad.exp(x + y)
ans_val, ans_der_x, ans_der_y = y.val, y.der["x"], y.der["y"]
sol_val, sol_der_x, sol_der_y = (
[np.exp(9), np.exp(10), np.exp(11)],
[
grad(lambda x, y: adnp.exp(x + y), 0)(3.0, 6.0),
grad(lambda x, y: adnp.exp(x + y), 0)(4.0, 6.0),
grad(lambda x, y: adnp.exp(x + y), 0)(5.0, 6.0),
],
[
grad(lambda x, y: adnp.exp(x + y), 1)(3.0, 6.0),
grad(lambda x, y: adnp.exp(x + y), 1)(4.0, 6.0),
grad(lambda x, y: adnp.exp(x + y), 1)(5.0, 6.0),
],
)
assert check_list(ans_val, sol_val)
assert check_list(sol_der_x, ans_der_x) & check_list(sol_der_y, ans_der_y)
def test_ln():
x = 3
ans = ad.ln(x)
sol = adnp.log(x)
assert sol == ans
x = ad.Variable(3, label="x")
y = ad.ln(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.log(3), grad(adnp.log)(3.0)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(3, label="x") + 3
y = ad.ln(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.log(6), grad(lambda x: adnp.log(x + 3.0))(3.0)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(3, label="x") + ad.Variable(4, label="y")
y = ad.ln(x)
ans_val, ans_der = y.val, [y.der["x"], y.der["y"]]
sol_val, sol_der = (
adnp.log(7),
[
grad(lambda x, y: adnp.log(x + y), 0)(3.0, 4.0),
grad(lambda x, y: adnp.log(x + y), 1)(3.0, 4.0),
],
)
assert ans_val == sol_val
assert check_list(ans_der, sol_der)
x = ad.Variable([3, 4, 5], label="x")
y = ad.ln(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[np.log(3), np.log(4), np.log(5)],
[
grad(lambda x: adnp.log(x))(3.0),
grad(lambda x: adnp.log(x))(4.0),
grad(lambda x: adnp.log(x))(5.0),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([3, 4, 5], label="x")
y = ad.ln(x + x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[adnp.log(3 * 2), adnp.log(4 * 2), adnp.log(5 * 2)],
[
grad(lambda x: adnp.log(x + x))(3.0),
grad(lambda x: adnp.log(x + x))(4.0),
grad(lambda x: adnp.log(x + x))(5.0),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([3, 4, 5], label="x")
y = ad.Variable([6, 6, 6], label="y")
y = ad.ln(x + y)
ans_val, ans_der_x, ans_der_y = y.val, y.der["x"], y.der["y"]
sol_val, sol_der_x, sol_der_y = (
[np.log(9), np.log(10), np.log(11)],
[
grad(lambda x, y: adnp.log(x + y), 0)(3.0, 6.0),
grad(lambda x, y: adnp.log(x + y), 0)(4.0, 6.0),
grad(lambda x, y: adnp.log(x + y), 0)(5.0, 6.0),
],
[
grad(lambda x, y: adnp.log(x + y), 1)(3.0, 6.0),
grad(lambda x, y: adnp.log(x + y), 1)(4.0, 6.0),
grad(lambda x, y: adnp.log(x + y), 1)(5.0, 6.0),
],
)
assert check_list(ans_val, sol_val)
assert check_list(sol_der_x, ans_der_x) & check_list(sol_der_y, ans_der_y)
def test_logistic():
def logistic(x):
return 1 / (1 + adnp.exp(-x))
x = 3
ans = ad.logistic(x)
sol = logistic(x)
assert sol == ans
x = ad.Variable(3, label="x")
y = ad.logistic(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = logistic(3), grad(logistic)(3.0)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(3, label="x") + 3
y = ad.logistic(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = logistic(6), grad(lambda x: logistic(x + 3.0))(3.0)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(3, label="x") + ad.Variable(4, label="y")
y = ad.logistic(x)
ans_val, ans_der = y.val, [y.der["x"], y.der["y"]]
sol_val, sol_der = (
logistic(7),
[
grad(lambda x, y: logistic(x + y), 0)(3.0, 4.0),
grad(lambda x, y: logistic(x + y), 1)(3.0, 4.0),
],
)
assert ans_val == sol_val
assert check_list(ans_der, sol_der)
x = ad.Variable([3, 4, 5], label="x")
y = ad.logistic(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[logistic(3), logistic(4), logistic(5)],
[
grad(lambda x: logistic(x))(3.0),
grad(lambda x: logistic(x))(4.0),
grad(lambda x: logistic(x))(5.0),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([3, 4, 5], label="x")
y = ad.logistic(x + x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[logistic(3 * 2), logistic(4 * 2), logistic(5 * 2)],
[
grad(lambda x: logistic(x + x))(3.0),
grad(lambda x: logistic(x + x))(4.0),
grad(lambda x: logistic(x + x))(5.0),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([3, 4, 5], label="x")
y = ad.Variable([6, 6, 6], label="y")
y = ad.logistic(x + y)
ans_val, ans_der_x, ans_der_y = y.val, y.der["x"], y.der["y"]
sol_val, sol_der_x, sol_der_y = (
[logistic(9), logistic(10), logistic(11)],
[
grad(lambda x, y: logistic(x + y), 0)(3.0, 6.0),
grad(lambda x, y: logistic(x + y), 0)(4.0, 6.0),
grad(lambda x, y: logistic(x + y), 0)(5.0, 6.0),
],
[
grad(lambda x, y: logistic(x + y), 1)(3.0, 6.0),
grad(lambda x, y: logistic(x + y), 1)(4.0, 6.0),
grad(lambda x, y: logistic(x + y), 1)(5.0, 6.0),
],
)
assert check_list(ans_val, sol_val)
assert check_list(sol_der_x, ans_der_x) & check_list(sol_der_y, ans_der_y)
def test_log10():
def log10(x):
return adnp.log(x) / adnp.log(10)
x = 3
ans = ad.log10(x)
sol = log10(x)
assert sol == ans
x = ad.Variable(3, label="x")
y = ad.log10(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = log10(3), grad(log10)(3.0)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(3, label="x") + 3
y = ad.log10(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = log10(6), grad(lambda x: log10(x + 3.0))(3.0)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(3, label="x") + ad.Variable(4, label="y")
y = ad.log10(x)
ans_val, ans_der = y.val, [y.der["x"], y.der["y"]]
sol_val, sol_der = (
log10(7),
[
grad(lambda x, y: log10(x + y), 0)(3.0, 4.0),
grad(lambda x, y: log10(x + y), 1)(3.0, 4.0),
],
)
assert ans_val == sol_val
assert check_list(ans_der, sol_der)
x = ad.Variable([3, 4, 5], label="x")
y = ad.log10(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[log10(3), log10(4), log10(5)],
[
grad(lambda x: log10(x))(3.0),
grad(lambda x: log10(x))(4.0),
grad(lambda x: log10(x))(5.0),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([3, 4, 5], label="x")
y = ad.log10(x + x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[log10(3 * 2), log10(4 * 2), log10(5 * 2)],
[
grad(lambda x: log10(x + x))(3.0),
grad(lambda x: log10(x + x))(4.0),
grad(lambda x: log10(x + x))(5.0),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([3, 4, 5], label="x")
y = ad.Variable([6, 6, 6], label="y")
y = ad.log10(x + y)
ans_val, ans_der_x, ans_der_y = y.val, y.der["x"], y.der["y"]
sol_val, sol_der_x, sol_der_y = (
[log10(9), log10(10), log10(11)],
[
grad(lambda x, y: log10(x + y), 0)(3.0, 6.0),
grad(lambda x, y: log10(x + y), 0)(4.0, 6.0),
grad(lambda x, y: log10(x + y), 0)(5.0, 6.0),
],
[
grad(lambda x, y: log10(x + y), 1)(3.0, 6.0),
grad(lambda x, y: log10(x + y), 1)(4.0, 6.0),
grad(lambda x, y: log10(x + y), 1)(5.0, 6.0),
],
)
assert check_list(ans_val, sol_val)
assert check_list(sol_der_x, ans_der_x) & check_list(sol_der_y, ans_der_y)
def test_sin():
x = 0.3
ans = ad.sin(x)
sol = adnp.sin(x)
assert sol == ans
x = ad.Variable(0.3, label="x")
y = ad.sin(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.sin(0.3), grad(adnp.sin)(0.3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(0.3, label="x") + 0.3
y = ad.sin(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.sin(0.6), grad(lambda x: adnp.sin(x + 0.3))(0.3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(0.3, label="x") + ad.Variable(0.4, label="y")
y = ad.sin(x)
ans_val, ans_der = y.val, [y.der["x"], y.der["y"]]
sol_val, sol_der = (
adnp.sin(0.7),
[
grad(lambda x, y: adnp.sin(x + y), 0)(0.3, 0.4),
grad(lambda x, y: adnp.sin(x + y), 1)(0.3, 0.4),
],
)
assert ans_val == sol_val
assert check_list(ans_der, sol_der)
x = ad.Variable([0.3, 0.4, 0.5], label="x")
y = ad.sin(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[adnp.sin(0.3), adnp.sin(0.4), adnp.sin(0.5)],
[
grad(lambda x: adnp.sin(x))(0.3),
grad(lambda x: adnp.sin(x))(0.4),
grad(lambda x: adnp.sin(x))(0.5),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([0.3, 0.4, 0.5], label="x")
y = ad.sin(x + x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[adnp.sin(0.3 * 2), adnp.sin(0.4 * 2), adnp.sin(0.5 * 2)],
[
grad(lambda x: adnp.sin(x + x))(0.3),
grad(lambda x: adnp.sin(x + x))(0.4),
grad(lambda x: adnp.sin(x + x))(0.5),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([0.03, 0.04, 0.05], label="x")
y = ad.Variable([0.06, 0.06, 0.06], label="y")
y = ad.sin(x + y)
ans_val, ans_der_x, ans_der_y = y.val, y.der["x"], y.der["y"]
sol_val, sol_der_x, sol_der_y = (
[adnp.sin(0.09), adnp.sin(0.10), adnp.sin(0.11)],
[
grad(lambda x, y: adnp.sin(x + y), 0)(0.030, 0.060),
grad(lambda x, y: adnp.sin(x + y), 0)(0.040, 0.060),
grad(lambda x, y: adnp.sin(x + y), 0)(0.050, 0.060),
],
[
grad(lambda x, y: adnp.sin(x + y), 1)(0.030, 0.060),
grad(lambda x, y: adnp.sin(x + y), 1)(0.040, 0.060),
grad(lambda x, y: adnp.sin(x + y), 1)(0.050, 0.060),
],
)
assert check_list(ans_val, sol_val)
assert check_list(sol_der_x, ans_der_x) & check_list(sol_der_y, ans_der_y)
def test_arcsin():
x = 0.3
ans = ad.arcsin(x)
sol = adnp.arcsin(x)
assert sol == ans
x = ad.Variable(0.3, label="x")
y = ad.arcsin(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.arcsin(0.3), grad(adnp.arcsin)(0.3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(0.3, label="x") + 0.3
y = ad.arcsin(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.arcsin(0.6), grad(lambda x: adnp.arcsin(x + 0.3))(0.3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(0.3, label="x") + ad.Variable(0.4, label="y")
y = ad.arcsin(x)
ans_val, ans_der = y.val, [y.der["x"], y.der["y"]]
sol_val, sol_der = (
adnp.arcsin(0.7),
[
grad(lambda x, y: adnp.arcsin(x + y), 0)(0.3, 0.4),
grad(lambda x, y: adnp.arcsin(x + y), 1)(0.3, 0.4),
],
)
assert ans_val == sol_val
assert check_list(ans_der, sol_der)
x = ad.Variable([0.3, 0.4, 0.5], label="x")
y = ad.arcsin(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[adnp.arcsin(0.3), adnp.arcsin(0.4), adnp.arcsin(0.5)],
[
grad(lambda x: adnp.arcsin(x))(0.3),
grad(lambda x: adnp.arcsin(x))(0.4),
grad(lambda x: adnp.arcsin(x))(0.5),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([0.3, 0.4, 0.5], label="x")
y = ad.arcsin(x + x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[adnp.arcsin(0.3 * 2), adnp.arcsin(0.4 * 2), adnp.arcsin(0.5 * 2)],
[
grad(lambda x: adnp.arcsin(x + x))(0.3),
grad(lambda x: adnp.arcsin(x + x))(0.4),
grad(lambda x: adnp.arcsin(x + x))(0.5),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([0.03, 0.04, 0.05], label="x")
y = ad.Variable([0.06, 0.06, 0.06], label="y")
y = ad.arcsin(x + y)
ans_val, ans_der_x, ans_der_y = y.val, y.der["x"], y.der["y"]
sol_val, sol_der_x, sol_der_y = (
[adnp.arcsin(0.09), adnp.arcsin(0.10), adnp.arcsin(0.11)],
[
grad(lambda x, y: adnp.arcsin(x + y), 0)(0.030, 0.060),
grad(lambda x, y: adnp.arcsin(x + y), 0)(0.040, 0.060),
grad(lambda x, y: adnp.arcsin(x + y), 0)(0.050, 0.060),
],
[
grad(lambda x, y: adnp.arcsin(x + y), 1)(0.030, 0.060),
grad(lambda x, y: adnp.arcsin(x + y), 1)(0.040, 0.060),
grad(lambda x, y: adnp.arcsin(x + y), 1)(0.050, 0.060),
],
)
assert check_list(ans_val, sol_val)
assert check_list(sol_der_x, ans_der_x) & check_list(sol_der_y, ans_der_y)
x = ad.Variable(2, label="x")
with pytest.raises(Exception):
y = ad.arcsin(x)
def test_sinh():
x = 0.3
ans = ad.sinh(x)
sol = adnp.sinh(x)
assert sol == ans
x = ad.Variable(0.3, label="x")
y = ad.sinh(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.sinh(0.3), grad(adnp.sinh)(0.3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(0.3, label="x") + 0.3
y = ad.sinh(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.sinh(0.6), grad(lambda x: adnp.sinh(x + 0.3))(0.3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(0.3, label="x") + ad.Variable(0.4, label="y")
y = ad.sinh(x)
ans_val, ans_der = y.val, [y.der["x"], y.der["y"]]
sol_val, sol_der = (
adnp.sinh(0.7),
[
grad(lambda x, y: adnp.sinh(x + y), 0)(0.3, 0.4),
grad(lambda x, y: adnp.sinh(x + y), 1)(0.3, 0.4),
],
)
assert ans_val == sol_val
assert check_list(ans_der, sol_der)
x = ad.Variable([0.3, 0.4, 0.5], label="x")
y = ad.sinh(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[adnp.sinh(0.3), adnp.sinh(0.4), adnp.sinh(0.5)],
[
grad(lambda x: adnp.sinh(x))(0.3),
grad(lambda x: adnp.sinh(x))(0.4),
grad(lambda x: adnp.sinh(x))(0.5),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([0.3, 0.4, 0.5], label="x")
y = ad.sinh(x + x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[adnp.sinh(0.3 * 2), adnp.sinh(0.4 * 2), adnp.sinh(0.5 * 2)],
[
grad(lambda x: adnp.sinh(x + x))(0.3),
grad(lambda x: adnp.sinh(x + x))(0.4),
grad(lambda x: adnp.sinh(x + x))(0.5),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([0.03, 0.04, 0.05], label="x")
y = ad.Variable([0.06, 0.06, 0.06], label="y")
y = ad.sinh(x + y)
ans_val, ans_der_x, ans_der_y = y.val, y.der["x"], y.der["y"]
sol_val, sol_der_x, sol_der_y = (
[adnp.sinh(0.09), adnp.sinh(0.10), adnp.sinh(0.11)],
[
grad(lambda x, y: adnp.sinh(x + y), 0)(0.030, 0.060),
grad(lambda x, y: adnp.sinh(x + y), 0)(0.040, 0.060),
grad(lambda x, y: adnp.sinh(x + y), 0)(0.050, 0.060),
],
[
grad(lambda x, y: adnp.sinh(x + y), 1)(0.030, 0.060),
grad(lambda x, y: adnp.sinh(x + y), 1)(0.040, 0.060),
grad(lambda x, y: adnp.sinh(x + y), 1)(0.050, 0.060),
],
)
assert check_list(ans_val, sol_val)
assert check_list(sol_der_x, ans_der_x) & check_list(sol_der_y, ans_der_y)
def test_cos():
x = 0.3
ans = ad.cos(x)
sol = adnp.cos(x)
assert sol == ans
x = ad.Variable(0.3, label="x")
y = ad.cos(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.cos(0.3), grad(adnp.cos)(0.3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(0.3, label="x") + 0.3
y = ad.cos(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.cos(0.6), grad(lambda x: adnp.cos(x + 0.3))(0.3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(0.3, label="x") + ad.Variable(0.4, label="y")
y = ad.cos(x)
ans_val, ans_der = y.val, [y.der["x"], y.der["y"]]
sol_val, sol_der = (
adnp.cos(0.7),
[
grad(lambda x, y: adnp.cos(x + y), 0)(0.3, 0.4),
grad(lambda x, y: adnp.cos(x + y), 1)(0.3, 0.4),
],
)
assert ans_val == sol_val
assert check_list(ans_der, sol_der)
x = ad.Variable([0.3, 0.4, 0.5], label="x")
y = ad.cos(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[adnp.cos(0.3), adnp.cos(0.4), adnp.cos(0.5)],
[
grad(lambda x: adnp.cos(x))(0.3),
grad(lambda x: adnp.cos(x))(0.4),
grad(lambda x: adnp.cos(x))(0.5),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([0.3, 0.4, 0.5], label="x")
y = ad.cos(x + x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[adnp.cos(0.3 * 2), adnp.cos(0.4 * 2), adnp.cos(0.5 * 2)],
[
grad(lambda x: adnp.cos(x + x))(0.3),
grad(lambda x: adnp.cos(x + x))(0.4),
grad(lambda x: adnp.cos(x + x))(0.5),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([0.03, 0.04, 0.05], label="x")
y = ad.Variable([0.06, 0.06, 0.06], label="y")
y = ad.cos(x + y)
ans_val, ans_der_x, ans_der_y = y.val, y.der["x"], y.der["y"]
sol_val, sol_der_x, sol_der_y = (
[adnp.cos(0.09), adnp.cos(0.10), adnp.cos(0.11)],
[
grad(lambda x, y: adnp.cos(x + y), 0)(0.030, 0.060),
grad(lambda x, y: adnp.cos(x + y), 0)(0.040, 0.060),
grad(lambda x, y: adnp.cos(x + y), 0)(0.050, 0.060),
],
[
grad(lambda x, y: adnp.cos(x + y), 1)(0.030, 0.060),
grad(lambda x, y: adnp.cos(x + y), 1)(0.040, 0.060),
grad(lambda x, y: adnp.cos(x + y), 1)(0.050, 0.060),
],
)
assert check_list(ans_val, sol_val)
assert check_list(sol_der_x, ans_der_x) & check_list(sol_der_y, ans_der_y)
def test_arccos():
x = 0.3
ans = ad.arccos(x)
sol = adnp.arccos(x)
assert sol == ans
x = ad.Variable(0.3, label="x")
y = ad.arccos(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.arccos(0.3), grad(adnp.arccos)(0.3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(0.3, label="x") + 0.3
y = ad.arccos(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.arccos(0.6), grad(lambda x: adnp.arccos(x + 0.3))(0.3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(0.3, label="x") + ad.Variable(0.4, label="y")
y = ad.arccos(x)
ans_val, ans_der = y.val, [y.der["x"], y.der["y"]]
sol_val, sol_der = (
adnp.arccos(0.7),
[
grad(lambda x, y: adnp.arccos(x + y), 0)(0.3, 0.4),
grad(lambda x, y: adnp.arccos(x + y), 1)(0.3, 0.4),
],
)
assert ans_val == sol_val
assert check_list(ans_der, sol_der)
x = ad.Variable([0.3, 0.4, 0.5], label="x")
y = ad.arccos(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[adnp.arccos(0.3), adnp.arccos(0.4), adnp.arccos(0.5)],
[
grad(lambda x: adnp.arccos(x))(0.3),
grad(lambda x: adnp.arccos(x))(0.4),
grad(lambda x: adnp.arccos(x))(0.5),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([0.3, 0.4, 0.5], label="x")
y = ad.arccos(x + x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[adnp.arccos(0.3 * 2), adnp.arccos(0.4 * 2), adnp.arccos(0.5 * 2)],
[
grad(lambda x: adnp.arccos(x + x))(0.3),
grad(lambda x: adnp.arccos(x + x))(0.4),
grad(lambda x: adnp.arccos(x + x))(0.5),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([0.03, 0.04, 0.05], label="x")
y = ad.Variable([0.06, 0.06, 0.06], label="y")
y = ad.arccos(x + y)
ans_val, ans_der_x, ans_der_y = y.val, y.der["x"], y.der["y"]
sol_val, sol_der_x, sol_der_y = (
[adnp.arccos(0.09), adnp.arccos(0.10), adnp.arccos(0.11)],
[
grad(lambda x, y: adnp.arccos(x + y), 0)(0.030, 0.060),
grad(lambda x, y: adnp.arccos(x + y), 0)(0.040, 0.060),
grad(lambda x, y: adnp.arccos(x + y), 0)(0.050, 0.060),
],
[
grad(lambda x, y: adnp.arccos(x + y), 1)(0.030, 0.060),
grad(lambda x, y: adnp.arccos(x + y), 1)(0.040, 0.060),
grad(lambda x, y: adnp.arccos(x + y), 1)(0.050, 0.060),
],
)
assert check_list(ans_val, sol_val)
assert check_list(sol_der_x, ans_der_x) & check_list(sol_der_y, ans_der_y)
x = ad.Variable(2, label="x")
with pytest.raises(Exception):
y = ad.arccos(x)
def test_cosh():
x = 0.3
ans = ad.cosh(x)
sol = adnp.cosh(x)
assert sol == ans
x = ad.Variable(0.3, label="x")
y = ad.cosh(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.cosh(0.3), grad(adnp.cosh)(0.3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(0.3, label="x") + 0.3
y = ad.cosh(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.cosh(0.6), grad(lambda x: adnp.cosh(x + 0.3))(0.3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(0.3, label="x") + ad.Variable(0.4, label="y")
y = ad.cosh(x)
ans_val, ans_der = y.val, [y.der["x"], y.der["y"]]
sol_val, sol_der = (
adnp.cosh(0.7),
[
grad(lambda x, y: adnp.cosh(x + y), 0)(0.3, 0.4),
grad(lambda x, y: adnp.cosh(x + y), 1)(0.3, 0.4),
],
)
assert ans_val == sol_val
assert check_list(ans_der, sol_der)
x = ad.Variable([0.3, 0.4, 0.5], label="x")
y = ad.cosh(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[adnp.cosh(0.3), adnp.cosh(0.4), adnp.cosh(0.5)],
[
grad(lambda x: adnp.cosh(x))(0.3),
grad(lambda x: adnp.cosh(x))(0.4),
grad(lambda x: adnp.cosh(x))(0.5),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([0.3, 0.4, 0.5], label="x")
y = ad.cosh(x + x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[adnp.cosh(0.3 * 2), adnp.cosh(0.4 * 2), adnp.cosh(0.5 * 2)],
[
grad(lambda x: adnp.cosh(x + x))(0.3),
grad(lambda x: adnp.cosh(x + x))(0.4),
grad(lambda x: adnp.cosh(x + x))(0.5),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([0.03, 0.04, 0.05], label="x")
y = ad.Variable([0.06, 0.06, 0.06], label="y")
y = ad.cosh(x + y)
ans_val, ans_der_x, ans_der_y = y.val, y.der["x"], y.der["y"]
sol_val, sol_der_x, sol_der_y = (
[adnp.cosh(0.09), adnp.cosh(0.10), adnp.cosh(0.11)],
[
grad(lambda x, y: adnp.cosh(x + y), 0)(0.030, 0.060),
grad(lambda x, y: adnp.cosh(x + y), 0)(0.040, 0.060),
grad(lambda x, y: adnp.cosh(x + y), 0)(0.050, 0.060),
],
[
grad(lambda x, y: adnp.cosh(x + y), 1)(0.030, 0.060),
grad(lambda x, y: adnp.cosh(x + y), 1)(0.040, 0.060),
grad(lambda x, y: adnp.cosh(x + y), 1)(0.050, 0.060),
],
)
assert check_list(ans_val, sol_val)
assert check_list(sol_der_x, ans_der_x) & check_list(sol_der_y, ans_der_y)
def test_tan():
x = 0.3
ans = ad.tan(x)
sol = adnp.tan(x)
assert sol == ans
x = ad.Variable(0.3, label="x")
y = ad.tan(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.tan(0.3), grad(adnp.tan)(0.3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(0.3, label="x") + 0.3
y = ad.tan(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.tan(0.6), grad(lambda x: adnp.tan(x + 0.3))(0.3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(0.3, label="x") + ad.Variable(0.4, label="y")
y = ad.tan(x)
ans_val, ans_der = y.val, [y.der["x"], y.der["y"]]
sol_val, sol_der = (
adnp.tan(0.7),
[
grad(lambda x, y: adnp.tan(x + y), 0)(0.3, 0.4),
grad(lambda x, y: adnp.tan(x + y), 1)(0.3, 0.4),
],
)
assert ans_val == sol_val
assert check_list(ans_der, sol_der)
x = ad.Variable([0.3, 0.4, 0.5], label="x")
y = ad.tan(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[adnp.tan(0.3), adnp.tan(0.4), adnp.tan(0.5)],
[
grad(lambda x: adnp.tan(x))(0.3),
grad(lambda x: adnp.tan(x))(0.4),
grad(lambda x: adnp.tan(x))(0.5),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([0.3, 0.4, 0.5], label="x")
y = ad.tan(x + x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[adnp.tan(0.3 * 2), adnp.tan(0.4 * 2), adnp.tan(0.5 * 2)],
[
grad(lambda x: adnp.tan(x + x))(0.3),
grad(lambda x: adnp.tan(x + x))(0.4),
grad(lambda x: adnp.tan(x + x))(0.5),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([0.03, 0.04, 0.05], label="x")
y = ad.Variable([0.06, 0.06, 0.06], label="y")
y = ad.tan(x + y)
ans_val, ans_der_x, ans_der_y = y.val, y.der["x"], y.der["y"]
sol_val, sol_der_x, sol_der_y = (
[adnp.tan(0.09), adnp.tan(0.10), adnp.tan(0.11)],
[
grad(lambda x, y: adnp.tan(x + y), 0)(0.030, 0.060),
grad(lambda x, y: adnp.tan(x + y), 0)(0.040, 0.060),
grad(lambda x, y: adnp.tan(x + y), 0)(0.050, 0.060),
],
[
grad(lambda x, y: adnp.tan(x + y), 1)(0.030, 0.060),
grad(lambda x, y: adnp.tan(x + y), 1)(0.040, 0.060),
grad(lambda x, y: adnp.tan(x + y), 1)(0.050, 0.060),
],
)
assert check_list(ans_val, sol_val)
assert check_list(sol_der_x, ans_der_x) & check_list(sol_der_y, ans_der_y)
def test_arctan():
x = 0.3
ans = ad.arctan(x)
sol = adnp.arctan(x)
assert sol == ans
x = ad.Variable(0.3, label="x")
y = ad.arctan(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.arctan(0.3), grad(adnp.arctan)(0.3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(0.3, label="x") + 0.3
y = ad.arctan(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.arctan(0.6), grad(lambda x: adnp.arctan(x + 0.3))(0.3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(0.3, label="x") + ad.Variable(0.4, label="y")
y = ad.arctan(x)
ans_val, ans_der = y.val, [y.der["x"], y.der["y"]]
sol_val, sol_der = (
adnp.arctan(0.7),
[
grad(lambda x, y: adnp.arctan(x + y), 0)(0.3, 0.4),
grad(lambda x, y: adnp.arctan(x + y), 1)(0.3, 0.4),
],
)
assert ans_val == sol_val
assert check_list(ans_der, sol_der)
x = ad.Variable([0.3, 0.4, 0.5], label="x")
y = ad.arctan(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[adnp.arctan(0.3), adnp.arctan(0.4), adnp.arctan(0.5)],
[
grad(lambda x: adnp.arctan(x))(0.3),
grad(lambda x: adnp.arctan(x))(0.4),
grad(lambda x: adnp.arctan(x))(0.5),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([0.3, 0.4, 0.5], label="x")
y = ad.arctan(x + x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[adnp.arctan(0.3 * 2), adnp.arctan(0.4 * 2), adnp.arctan(0.5 * 2)],
[
grad(lambda x: adnp.arctan(x + x))(0.3),
grad(lambda x: adnp.arctan(x + x))(0.4),
grad(lambda x: adnp.arctan(x + x))(0.5),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([0.03, 0.04, 0.05], label="x")
y = ad.Variable([0.06, 0.06, 0.06], label="y")
y = ad.arctan(x + y)
ans_val, ans_der_x, ans_der_y = y.val, y.der["x"], y.der["y"]
sol_val, sol_der_x, sol_der_y = (
[adnp.arctan(0.09), adnp.arctan(0.10), adnp.arctan(0.11)],
[
grad(lambda x, y: adnp.arctan(x + y), 0)(0.030, 0.060),
grad(lambda x, y: adnp.arctan(x + y), 0)(0.040, 0.060),
grad(lambda x, y: adnp.arctan(x + y), 0)(0.050, 0.060),
],
[
grad(lambda x, y: adnp.arctan(x + y), 1)(0.030, 0.060),
grad(lambda x, y: adnp.arctan(x + y), 1)(0.040, 0.060),
grad(lambda x, y: adnp.arctan(x + y), 1)(0.050, 0.060),
],
)
assert check_list(ans_val, sol_val)
assert check_list(sol_der_x, ans_der_x) & check_list(sol_der_y, ans_der_y)
def test_tanh():
x = 0.3
ans = ad.tanh(x)
sol = adnp.tanh(x)
assert sol == ans
x = ad.Variable(0.3, label="x")
y = ad.tanh(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.tanh(0.3), grad(adnp.tanh)(0.3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(0.3, label="x") + 0.3
y = ad.tanh(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.tanh(0.6), grad(lambda x: adnp.tanh(x + 0.3))(0.3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(0.3, label="x") + ad.Variable(0.4, label="y")
y = ad.tanh(x)
ans_val, ans_der = y.val, [y.der["x"], y.der["y"]]
sol_val, sol_der = (
adnp.tanh(0.7),
[
grad(lambda x, y: adnp.tanh(x + y), 0)(0.3, 0.4),
grad(lambda x, y: adnp.tanh(x + y), 1)(0.3, 0.4),
],
)
assert ans_val == sol_val
assert check_list(ans_der, sol_der)
x = ad.Variable([0.3, 0.4, 0.5], label="x")
y = ad.tanh(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[adnp.tanh(0.3), adnp.tanh(0.4), adnp.tanh(0.5)],
[
grad(lambda x: adnp.tanh(x))(0.3),
grad(lambda x: adnp.tanh(x))(0.4),
grad(lambda x: adnp.tanh(x))(0.5),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([0.3, 0.4, 0.5], label="x")
y = ad.tanh(x + x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[adnp.tanh(0.3 * 2), adnp.tanh(0.4 * 2), adnp.tanh(0.5 * 2)],
[
grad(lambda x: adnp.tanh(x + x))(0.3),
grad(lambda x: adnp.tanh(x + x))(0.4),
grad(lambda x: adnp.tanh(x + x))(0.5),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([0.03, 0.04, 0.05], label="x")
y = ad.Variable([0.06, 0.06, 0.06], label="y")
y = ad.tanh(x + y)
ans_val, ans_der_x, ans_der_y = y.val, y.der["x"], y.der["y"]
sol_val, sol_der_x, sol_der_y = (
[adnp.tanh(0.09), adnp.tanh(0.10), adnp.tanh(0.11)],
[
grad(lambda x, y: adnp.tanh(x + y), 0)(0.030, 0.060),
grad(lambda x, y: adnp.tanh(x + y), 0)(0.040, 0.060),
grad(lambda x, y: adnp.tanh(x + y), 0)(0.050, 0.060),
],
[
grad(lambda x, y: adnp.tanh(x + y), 1)(0.030, 0.060),
grad(lambda x, y: adnp.tanh(x + y), 1)(0.040, 0.060),
grad(lambda x, y: adnp.tanh(x + y), 1)(0.050, 0.060),
],
)
assert check_list(ans_val, sol_val)
assert check_list(sol_der_x, ans_der_x) & check_list(sol_der_y, ans_der_y)
def test_neg():
x = ad.Variable(3, label="x")
y = -x
assert y.val == -3
assert y.der == {"x": -1}
x = ad.Variable(3, label="x", der={"x": 2})
y = -x
assert y.val == -3
assert y.der == {"x": -2}
x = ad.Variable(np.arange(3), label="x")
y = -x
assert np.all(y.val == [0, -1, -2])
assert y.der == {"x": [-1, -1, -1]}
x = ad.Variable(0, label="x")
y = ad.Variable(3, label="y")
z = x + 2 * y
z2 = -z
assert z2.val == -6
assert z2.der == {"x": -1, "y": -2}
x = ad.Variable(np.arange(3), label="x")
y = ad.Variable(3 + np.arange(3), label="y")
z = x + 2 * y
z2 = -z
assert np.all(z2.val == [-6, -9, -12])
assert z2.der == {"x": [-1, -1, -1], "y": [-2, -2, -2]}
def test_pow():
x = ad.Variable(3, label="x")
z = x ** 2
assert z.val == 9
assert z.der == {"x": 6}
x = ad.Variable(0, label="x")
z = x ** 2
assert z.val == 0
assert z.der == {"x": 0}
x = ad.Variable([3, 2], label="x")
z = x ** 2
assert np.all(z.val == [9, 4])
assert np.all(z.der == {"x": [6, 4]})
x = ad.Variable(3, label="x")
y = ad.Variable(2, label="y")
z = x ** y
assert z.val == 9
assert z.der == {"x": 6, "y": 9 * np.log(3)}
x = ad.Variable([3, 2], label="x")
y = ad.Variable([2, 3], label="y")
z = x ** y
assert np.all(z.val == [9, 8])
assert (
compare_dicts_multi(z.der, {"x": [6, 12], "y": [9 * np.log(3), 8 * np.log(2)]})
== True
)
x = ad.Variable([np.e - 1, np.e - 1], label="x")
y = ad.Variable([1, 1], label="y")
z = x + y
z2 = z ** y
assert np.all(z2.val == [np.e, np.e])
assert compare_dicts_multi(z2.der, {"x": [1, 1], "y": [np.e + 1, np.e + 1]}) == True
x = ad.Variable([0, 0], label="x")
y = ad.Variable([1, 2], label="y")
z = x ** y
assert np.all(z.val == [0, 0])
assert compare_dicts_multi(z.der, {"x": [1, 0], "y": [0, 0]}) == True
def test_rpow():
x = ad.Variable(1, label="x")
z = np.e ** x
assert z.val == np.e
assert z.der == {"x": np.e}
x = ad.Variable(1, label="x")
z = 0 ** x
assert z.val == 0
assert z.der == {"x": 0}
x = ad.Variable([1, 2], label="x")
z = np.e ** x
assert np.all(z.val == [np.e, np.e ** 2])
assert np.all(z.der == {"x": [np.e, np.e ** 2]})
x = ad.Variable(2, label="x")
y = ad.Variable(-1, label="y")
z = np.e ** (x + 2 * y)
assert z.val == 1
assert z.der == {"x": 1, "y": 2}
x = ad.Variable([2, -2], label="x")
y = ad.Variable([-1, 1], label="y")
z = np.e ** (x + 2 * y)
assert np.all(z.val == [1, 1])
assert np.all(z.der == {"x": [1, 1], "y": [2, 2]})
def test_ne():
x = ad.Variable(1, label="x")
y = ad.Variable(1, label="y")
assert (x != x) == False
assert (x != y) == True
z1 = ad.Variable([1, 2], der={"x": [1, 2], "y": [1, 2]}, label="z1")
z2 = ad.Variable([1, 2], der={"x": [1, 2], "y": [1, 2]}, label="z2")
assert (z1 != z2) == False
z1 = ad.Variable(1, der={"x": 2, "y": 3}, label="z1")
z2 = ad.Variable(1, der={"x": 2, "y": 3}, label="z2")
assert (z1 != z2) == False
z1 = ad.Variable([1, 2], der={"x": [1, 2], "y": [1, 2]}, label="z1")
z2 = ad.Variable([1, 2], der={"x": [1, 2], "y": [1, 3]}, label="z2")
assert (z1 != z2) == True
x = ad.Variable(1, label="x")
y = ad.Variable(1, label="y")
z1 = ad.exp(x) + np.e * y
z2 = ad.exp(y) + np.e * x
assert (z1 != z2) == False
x = ad.Variable([1, 2, 3], label="x")
y = ad.Variable([2, 3], label="y")
assert (x != y) == True
z = 1
assert (x != z) == True
def test_lt():
x = ad.Variable(1, label="x")
y = ad.Variable(2, label="y")
assert (x < y) == True
x = ad.Variable([1, 2], label="x")
y = ad.Variable([2, 2], label="y")
assert np.all((x < y) == [True, False])
x = ad.Variable([1, 1, 1], label="x")
y = ad.Variable([2, 2], label="y")
with pytest.raises(Exception):
print(x < y)
def test_le():
x = ad.Variable(1, label="x")
y = ad.Variable(2, label="y")
assert (x <= y) == True
x = ad.Variable([1, 2], label="x")
y = ad.Variable([2, 2], label="y")
assert np.all((x <= y) == [True, True])
x = ad.Variable([1, 1, 1], label="x")
y = ad.Variable([2, 2], label="y")
with pytest.raises(Exception):
print(x <= y)
def test_gt():
x = ad.Variable(3, label="x")
y = ad.Variable(2, label="y")
assert (x > y) == True
x = ad.Variable([3, 2], label="x")
y = ad.Variable([2, 2], label="y")
assert np.all((x > y) == [True, False])
x = ad.Variable([1, 1, 1], label="x")
y = ad.Variable([2, 2], label="y")
with pytest.raises(Exception):
print(x > y)
def test_ge():
x = ad.Variable(3, label="x")
y = ad.Variable(2, label="y")
assert (x >= y) == True
x = ad.Variable([3, 2], label="x")
y = ad.Variable([2, 2], label="y")
assert np.all((x >= y) == [True, True])
x = ad.Variable([1, 1, 1], label="x")
y = ad.Variable([2, 2], label="y")
with pytest.raises(Exception):
print(x >= y)
def test_complicated_functions():
## Function 1
## sin(x) + cos(x) * 3*y - x^4 + ln(x*y)
x = np.random.rand(5, 4)
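# test_complicated_functions is truncated above. A plausible continuation for
# "Function 1", following the pattern of the earlier tests (variable names are
# hypothetical), might look like:
#
#   y = np.random.rand(5, 4)
#   x_var = ad.Variable(x, label="x")
#   y_var = ad.Variable(y, label="y")
#   f = ad.sin(x_var) + ad.cos(x_var) * 3 * y_var - x_var ** 4 + ad.ln(x_var * y_var)
#   sol_val = adnp.sin(x) + adnp.cos(x) * 3 * y - x ** 4 + adnp.log(x * y)
#   assert check_list(f.val.flatten(), sol_val.flatten())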
"""
Copyright (c) 2010-2018 CNRS / Centre de Recherche Astrophysique de Lyon
Copyright (c) 2012-2017 <NAME> <<EMAIL>>
Copyright (c) 2014-2019 <NAME> <<EMAIL>>
Copyright (c) 2016 <NAME> <<EMAIL>>
Copyright (c) 2016-2019 <NAME> <<EMAIL>>
Copyright (c) 2018-2019 <NAME> <<EMAIL>>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import numpy as np
import types
import astropy.units as u
from astropy.io import fits
from astropy.stats import gaussian_sigma_to_fwhm, gaussian_fwhm_to_sigma
from astropy.convolution import convolve, Box1DKernel
from os.path import join, abspath, dirname
from scipy import interpolate, signal
from scipy.optimize import leastsq
from . import ABmag_filters, wavelet1D
from .arithmetic import ArithmeticMixin
from .data import DataArray
from .fitting import Gauss1D
from .objs import flux2mag
__all__ = ('Spectrum', 'vactoair', 'airtovac')
def vactoair(vacwl):
"""Calculate the approximate wavelength in air for vacuum wavelengths.
Parameters
----------
vacwl : ndarray
Vacuum wavelengths.
This uses an approximate formula from the IDL astronomy library
https://idlastro.gsfc.nasa.gov/ftp/pro/astro/vactoair.pro
"""
wave2 = vacwl * vacwl
n = 1.0 + 2.735182e-4 + 131.4182 / wave2 + 2.76249e8 / (wave2 * wave2)
# Do not extrapolate to very short wavelengths.
if not isinstance(vacwl, np.ndarray):
if vacwl < 2000:
n = 1.0
else:
ignore = np.where(vacwl < 2000)
n[ignore] = 1.0
return vacwl / n
def airtovac(airwl):
"""Convert air wavelengths to vacuum wavelengths.
Parameters
----------
airwl : ndarray
Air wavelengths.
This uses the IAU standard as implemented in the IDL astronomy library
https://idlastro.gsfc.nasa.gov/ftp/pro/astro/airtovac.pro
"""
sigma2 = (1e4 / airwl)**2. # Convert to wavenumber squared
n = 1.0 + (6.4328e-5 + 2.94981e-2 / (146. - sigma2) +
2.5540e-4 / (41. - sigma2))
if not isinstance(airwl, np.ndarray):
if airwl < 2000:
n = 1.0
else:
ignore = np.where(airwl < 2000)
n[ignore] = 1.0
return airwl * n
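# Illustrative sketch (not part of the original module): for optical
# wavelengths the two conversions approximately invert each other.
#
#     >>> wl_vac = np.array([4000.0, 5000.0, 6500.0])
#     >>> wl_air = vactoair(wl_vac)
#     >>> np.allclose(airtovac(wl_air), wl_vac, atol=0.01)
#     True
#
# The residual of a few milli-Angstrom comes from vactoair() using an
# approximate refraction formula while airtovac() uses the IAU standard one.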
class Spectrum(ArithmeticMixin, DataArray):
"""Spectrum objects contain 1D arrays of numbers, optionally
accompanied by corresponding variances. These numbers represent
sample fluxes along a regularly spaced grid of wavelengths.
The spectral pixel values and their variances, if any, are
available as arrays that can be accessed via properties of the
Spectrum object called .data and .var, respectively. These arrays
are usually masked arrays, which share a boolean masking array
that can be accessed via a property called .mask. In principle,
these arrays can also be normal numpy arrays without masks, in
which case the .mask property holds the value,
numpy.ma.nomask. However non-masked arrays are only supported by a
subset of mpdaf functions at this time, so masked arrays should be
used where possible.
When a new Spectrum object is created, the data, variance and mask
arrays can either be specified as arguments, or the name of a FITS
file can be provided to load them from.
Parameters
----------
filename : string
An optional FITS file name from which to load the spectrum.
None by default. This argument is ignored if the data
argument is not None.
ext : int or (int,int) or string or (string,string)
The optional number/name of the data extension
or the numbers/names of the data and variance extensions.
wave : `mpdaf.obj.WaveCoord`
The wavelength coordinates of the spectrum.
unit : str or `astropy.units.Unit`
The physical units of the data values. Defaults to
`astropy.units.dimensionless_unscaled`.
data : float array
An optional 1 dimensional array containing the values of each
pixel of the spectrum, stored in ascending order of wavelength
(None by default). Where given, this array should be 1
dimensional.
var : float array
An optional 1 dimensional array containing the estimated
variances of each pixel of the spectrum, stored in ascending
order of wavelength (None by default).
Attributes
----------
filename : string
The name of the originating FITS file, if any. Otherwise None.
unit : `astropy.units.Unit`
The physical units of the data values.
primary_header : `astropy.io.fits.Header`
The FITS primary header instance, if a FITS file was provided.
data_header : `astropy.io.fits.Header`
The FITS header of the DATA extension.
wave : `mpdaf.obj.WaveCoord`
The wavelength coordinates of the spectrum.
"""
# Tell the DataArray base-class that Spectrum objects require 1 dimensional
# data arrays and wavelength coordinates.
_ndim_required = 1
_has_wave = True
def subspec(self, lmin, lmax=None, unit=u.angstrom):
"""Return the flux at a given wavelength, or the sub-spectrum
of a specified wavelength range.
A single flux value is returned if the lmax argument is None
(the default), or if the wavelengths assigned to the lmin and
lmax arguments are both within the same pixel. The value that
is returned is the value of the pixel whose wavelength is
closest to the wavelength specified by the lmin argument.
Note that if a wavelength range is asked for, a view on the original
spectrum is returned and both will be modified at the same time. If
you need to modify only the sub-spectrum, you'll need to copy() it
before.
Parameters
----------
lmin : float
The minimum wavelength of a wavelength range, or the wavelength
of a single pixel if lmax is None.
lmax : float or None
The maximum wavelength of the wavelength range.
unit : `astropy.units.Unit`
The wavelength units of the lmin and lmax arguments. The
default is angstroms. If unit is None, then lmin and lmax
are interpreted as array indexes within the spectrum.
Returns
-------
out : float or `~mpdaf.obj.Spectrum`
"""
if self.wave is None:
raise ValueError('Operation forbidden without world coordinates '
'along the spectral direction')
if lmax is None:
lmax = lmin
# Are lmin and lmax array indexes?
if unit is None:
pix_min = max(0, int(lmin + 0.5))
pix_max = min(self.shape[0], int(lmax + 0.5))
# Convert wavelengths to the nearest spectrum array indexes.
else:
pix_min = max(0, self.wave.pixel(lmin, nearest=True, unit=unit))
pix_max = min(self.shape[0],
self.wave.pixel(lmax, nearest=True, unit=unit) + 1)
# If the start and end of the wavelength range select the same pixel,
# return just the value of that pixel.
if (pix_min + 1) == pix_max:
return self[pix_min]
# Otherwise return a sub-spectrum.
else:
return self[pix_min:pix_max]
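# Illustrative usage sketch (assumes an existing Spectrum instance `sp` with
# wavelength coordinates in Angstrom):
#
#     >>> flux = sp.subspec(6562.8)            # single flux value (float)
#     >>> sub = sp.subspec(6540.0, 6580.0)     # Spectrum view over a range
#     >>> sub = sub.copy()                     # detach before modifying
#
# The range form returns a view, so copy() it first if the original spectrum
# must stay untouched.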
def get_step(self, unit=None):
"""Return the wavelength step size.
Parameters
----------
unit : `astropy.units.Unit`
The units of the returned step-size.
Returns
-------
out : float
The width of a spectrum pixel.
"""
if self.wave is not None:
return self.wave.get_step(unit)
def get_start(self, unit=None):
"""Return the wavelength value of the first pixel of the spectrum.
Parameters
----------
unit : `astropy.units.Unit`
The units of the returned wavelength.
Returns
-------
out : float
The wavelength of the first pixel of the spectrum.
"""
if self.wave is not None:
return self.wave.get_start(unit)
def get_end(self, unit=None):
"""Return the wavelength of the last pixel of the spectrum.
Parameters
----------
unit : `astropy.units.Unit`
The units of the returned wavelength.
Returns
-------
out : float
The wavelength of the final pixel of the spectrum.
"""
if self.wave is not None:
return self.wave.get_end(unit)
def get_range(self, unit=None):
"""Return the wavelength range (Lambda_min, Lambda_max) of the spectrum.
Parameters
----------
unit : `astropy.units.Unit`
The units of the returned wavelengths.
Returns
-------
out : float array
The minimum and maximum wavelengths.
"""
if self.wave is not None:
return self.wave.get_range(unit)
def mask_region(self, lmin=None, lmax=None, inside=True, unit=u.angstrom):
"""Mask spectrum pixels inside or outside a wavelength range, [lmin,lmax].
Parameters
----------
lmin : float
The minimum wavelength of the range, or None to choose the
wavelength of the first pixel in the spectrum.
lmax : float
The maximum wavelength of the range, or None to choose the
wavelength of the last pixel in the spectrum.
unit : `astropy.units.Unit`
The wavelength units of lmin and lmax. If None, lmin and
lmax are assumed to be pixel indexes.
inside : bool
If True, pixels inside the range [lmin,lmax] are masked.
If False, pixels outside the range [lmin,lmax] are masked.
"""
if self.wave is None:
raise ValueError('Operation forbidden without world coordinates '
'along the spectral direction')
else:
if lmin is None:
pix_min = 0
else:
if unit is None:
pix_min = max(0, int(lmin + 0.5))
else:
pix_min = max(0, self.wave.pixel(lmin, nearest=True,
unit=unit))
if lmax is None:
pix_max = self.shape[0]
else:
if unit is None:
pix_max = min(self.shape[0], int(lmax + 0.5))
else:
pix_max = min(self.shape[0],
self.wave.pixel(lmax, nearest=True,
unit=unit) + 1)
if inside:
self.data[pix_min:pix_max] = np.ma.masked
else:
self.data[:pix_min] = np.ma.masked
self.data[pix_max + 1:] = np.ma.masked
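# Illustrative usage sketch (assumes a Spectrum `sp` with Angstrom coordinates;
# the wavelength limits below are arbitrary examples):
#
#     >>> sp.mask_region(5570, 5590)                  # mask pixels inside the window
#     >>> sp.mask_region(4800, 9300, inside=False)    # mask everything outside it
#
# With unit=None the limits would instead be interpreted as pixel indexes.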
def _wavelengths_to_slice(self, lmin, lmax, unit):
"""Return the slice that selects a specified wavelength range.
Parameters
----------
lmin : float
The minimum wavelength of a wavelength range, or the wavelength
of a single pixel if lmax is None.
lmax : float or None
The maximum wavelength of the wavelength range.
unit : `astropy.units.Unit`
The wavelength units of the lmin and lmax arguments. The
default is angstroms. If unit is None, then lmin and lmax
are interpreted as array indexes within the spectrum.
Returns
-------
out : slice
The slice needed to select pixels within the specified wavelength
range.
"""
if unit is not None and self.wave is None:
raise ValueError('Operation forbidden without world coordinates '
'along the spectral direction')
# Get the pixel index that corresponds to the minimum wavelength.
if lmin is None:
i1 = 0
else:
if unit is None:
if lmin > self.shape[0]:
raise ValueError('Minimum and maximum wavelengths '
'are outside the spectrum range')
i1 = max(0, int(lmin + 0.5))
else:
i1 = self.wave.pixel(lmin, nearest=False, unit=unit)
if i1 > self.shape[0]:
raise ValueError('Minimum and maximum wavelengths '
'are outside the spectrum range')
i1 = self.wave.pixel(lmin, nearest=True, unit=unit)
# Get the pixel index that corresponds to the maximum wavelength.
if lmax is None:
i2 = self.shape[0]
else:
if unit is None:
if lmax < 0:
raise ValueError('Minimum and maximum wavelengths '
'are outside the spectrum range')
i2 = min(self.shape[0], int(lmax + 0.5))
else:
i2 = self.wave.pixel(lmax, nearest=False, unit=unit)
if i2 < 0:
raise ValueError('Minimum and maximum wavelengths '
'are outside the spectrum range')
i2 = self.wave.pixel(lmax, nearest=True, unit=unit) + 1
return slice(i1, i2)
def _interp(self, wavelengths, spline=False):
"""return the interpolated values corresponding to the wavelength
array.
Parameters
----------
wavelengths : array of float
Wavelength values at which to interpolate.
spline : bool
False: linear interpolation (use `scipy.interpolate.interp1d`),
True: spline interpolation (use `scipy.interpolate.splrep`
and `scipy.interpolate.splev`).
"""
lbda = self.wave.coord()
data = np.pad(self.data.compressed(), 1, 'edge')
w = np.concatenate(([self.get_start() - 0.5 * self.get_step()],
np.compress(~self._mask, lbda),
[self.get_end() + 0.5 * self.get_step()]))
if spline:
if self._var is not None:
_weight = 1. / np.sqrt(np.abs(self.var.filled(np.inf)))
if self.mask is np.ma.nomask:
weight = np.empty(self.shape[0] + 2, dtype=float)
weight[1:-1] = _weight
else:
ksel = np.where(self.mask == False)
weight = np.empty(np.shape(ksel)[1] + 2)
weight[1:-1] = _weight[ksel]
weight[0] = weight[1]
weight[-1] = weight[-2]
else:
weight = None
tck = interpolate.splrep(w, data, w=weight)
return interpolate.splev(wavelengths, tck, der=0)
else:
f = interpolate.interp1d(w, data)
return f(wavelengths)
def _interp_data(self, spline=False):
"""Return data array with interpolated values for masked pixels.
Parameters
----------
spline : bool
False: linear interpolation (use `scipy.interpolate.interp1d`),
True: spline interpolation (use `scipy.interpolate.splrep`
and `scipy.interpolate.splev`).
"""
if np.count_nonzero(self._mask) in (0, self.shape[0]):
return self._data
lbda = self.wave.coord()
wnew = lbda[self._mask]
data = self._data.copy()
data[self._mask] = self._interp(wnew, spline)
return data
def interp_mask(self, spline=False):
"""Interpolate masked pixels.
Parameters
----------
spline : bool
False: linear interpolation (use `scipy.interpolate.interp1d`),
True: spline interpolation (use `scipy.interpolate.splrep`
and `scipy.interpolate.splev`).
"""
self.data = np.ma.masked_invalid(self._interp_data(spline))
def rebin(self, factor, margin='center', inplace=False):
"""Combine neighboring pixels to reduce the size of a spectrum by an
integer factor.
Each output pixel is the mean of n pixels, where n is the
specified reduction factor.
Parameters
----------
factor : int
The integer reduction factor by which the spectrum should
be shrunk.
margin : string in 'center'|'right'|'left'|'origin'
When the dimension of the input spectrum is not an integer
multiple of the reduction factor, the spectrum is
truncated to remove just enough pixels that its length is
a multiple of the reduction factor. This sub-spectrum is
then rebinned in place of the original spectrum. The
margin parameter determines which pixels of the input
spectrum are truncated, and which remain.
The options are:
'origin' or 'left':
The start of the output spectrum is coincident
with the start of the input spectrum.
'center':
The center of the output spectrum is aligned
with the center of the input spectrum, within
one pixel.
'right':
The end of the output spectrum is coincident
with the end of the input spectrum.
inplace : bool
If False, return a rebinned copy of the spectrum (the default).
If True, rebin the original spectrum in-place, and return that.
Returns
-------
out : Spectrum
"""
# Delegate the rebinning to the generic DataArray function.
return self._rebin(factor, margin, inplace)
def _decimation_filter(self, newstep, atten, unit=None):
"""This is a private function Spectrum.resample(), used to apply
a decimation filter prior to resampling.
Parameters
----------
step : float
The new pixel size along the wavelength axis of the spectrum.
atten : float
The minimum attenuation (dB), of the antialiasing
decimation filter at the Nyquist folding frequency of the
new pixel size. Larger attenuations suppress aliasing
better at the expense of worsened resolution. A good value
to choose is 40dB, which produces a response that is very
similar to a blackman filter applied within the Fourier
plane, but with less ringing in the image plane.
unit : `astropy.units.Unit`
The wavelength units of the newstep argument. A value of None
is equivalent to specifying self.wave.unit.
"""
# Convert the attenuation from dB to a linear scale factor.
gcut = 10.0**(-atten / 20.0)
# Calculate the Nyquist folding frequency of the new pixel size.
nyquist_folding_freq = 0.5 / newstep
# Calculate the standard deviation of a Gaussian whose Fourier
# transform drops from unity at the center to gcut at the Nyquist
# folding frequency.
sigma = (0.5 / np.pi / nyquist_folding_freq *
np.sqrt(-2.0 * np.log(gcut)))
# Convert the standard deviation from wavelength units to input pixels.
sigma /= self.get_step(unit=unit)
# Choose dimensions for the gaussian filtering kernel. Choose an
# extent from -4*sigma to +4*sigma. This truncates the gaussian
# where it drops to about 3e-4 of its peak. The following
# calculation ensures that the dimensions of the array are odd, so
# that the gaussian will be symmetrically sampled either side of a
# central pixel. This prevents spectral shifts.
gshape = int(np.ceil(4.0 * sigma)) * 2 + 1
# fftconvolve requires that the kernel be no larger than the array
# that it is convolving, so reduce the size of the kernel array if
# needed. Be careful to choose an odd sized array.
n = self.shape[0]
if gshape > n:
gshape = n if n % 2 != 0 else (n - 1)
# Sample the gaussian filter symmetrically around the central pixel.
gx = np.arange(gshape, dtype=float) - gshape // 2
gy = np.exp(-0.5 * (gx / sigma)**2)
# Area-normalize the gaussian profile.
gy /= gy.sum()
# Filter the spectrum with the gaussian filter.
self.fftconvolve(gy, inplace=True)
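# Worked sizing example for the filter above (illustrative, default atten=40 dB):
#   gcut  = 10**(-40/20)              = 0.01
#   f_nyq = 0.5 / newstep
#   sigma = 0.5/pi/f_nyq * sqrt(-2*ln(0.01)) = (newstep/pi) * 3.035 ~ 0.97 * newstep
# i.e. the Gaussian standard deviation is roughly one new pixel in wavelength
# units, before it is converted to input pixels by dividing by the old step.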
def resample(self, step, start=None, shape=None, unit=u.angstrom,
inplace=False, atten=40.0, cutoff=0.25):
"""Resample a spectrum to have a different wavelength interval.
Parameters
----------
step : float
The new pixel size along the wavelength axis of the spectrum.
start : float
The wavelength at the center of the first pixel of the resampled
spectrum. If None (the default) the center of the first pixel
has the same wavelength before and after resampling.
unit : `astropy.units.Unit`
The wavelength units of the step and start arguments.
The default is u.angstrom.
shape : int
The dimension of the array of the new spectrum (ie. the number
of spectral pixels). If this is not specified, the shape is
selected to encompass the wavelength range from the chosen
start wavelength to the ending wavelength of the input spectrum.
inplace : bool
If False, return a resampled copy of the spectrum (the default).
If True, resample the original spectrum in-place, and return that.
atten : float
The minimum attenuation (dB), of the antialiasing
decimation filter at the Nyquist folding frequency of the
new pixel size. Larger attenuations suppress aliasing
better at the expense of worsened resolution. The default
attenuation is 40.0 dB. To disable antialiasing, specify
atten=0.0.
cutoff : float
Mask each output pixel for which at least this fraction of the
pixel was interpolated from masked input pixels.
Returns
-------
out : Spectrum
"""
out = self if inplace else self.copy()
# Don't allow the spectrum to be started beyond the far end of
# the spectrum, because this would result in an empty spectrum.
if start is not None and start > self.get_end(unit):
raise ValueError('The start value is past the end of the '
'spectrum range')
# Get wavelength world coordinates of the output spectrum.
newwave = self.wave.resample(step, start, unit)
# How many pixels should there be in the resampled spectrum?
# If the user didn't specify this, use newwave.shape, which
# holds the number of pixels of size 'step' needed to sample
# from 'start' to the end of the current wavelength range.
if shape is not None:
newwave.shape = shape
# Get the existing wavelength step size in the new units.
oldstep = self.wave.get_step(unit)
# If the spectrum is being resampled to a larger pixel size,
# then a decimation filter should be applied before
# resampling, to ensure that the new pixel size doesn't
# undersample rapidly changing features in the spectrum.
if step > oldstep and atten > 0.0:
out._decimation_filter(step, atten, unit=unit)
# Get the data, mask (and variance) arrays, and replace bad pixels with
# zeros.
if out._mask is not None: # Is out.data a masked array?
data = out.data.filled(0.0)
if out._var is not None:
var = out.var.filled(0.0)
else:
var = None
mask = out._mask
else: # Is out.data just a numpy array?
mask = ~np.isfinite(out._data)
import os
import glob
import time
import tensorflow as tf
import numpy as np
import tensorflow.contrib.slim as slim
import subprocess
from datetime import datetime
from tensorflow.python.ops import control_flow_ops
from modules.videosr_ops import *
class EASYFLOW(object):
def __init__(self):
self.num_frames = 7
self.crop_size = 100
self.max_steps = int(1e6)
self.batch_size = 20
self.learning_rate = 1e-4
self.train_dir = './easyflow_log/model1'
self.pathlist = open('./data/train/filelist_train.txt', 'rt').read().splitlines()
def input_producer(self, batch_size=10):
def read_data():
data_seq = tf.random_crop(self.data_queue, [1, self.num_frames])
input = tf.stack([tf.image.decode_png(tf.read_file(data_seq[0][i]), channels=3) for i in range(self.num_frames)])
input = preprocessing(input)
print('Input producer shape: ', input.get_shape())
return input
def preprocessing(input):
input = tf.cast(input, tf.float32) / 255.0
shape = tf.shape(input)[1:]
size = tf.convert_to_tensor([self.crop_size, self.crop_size, 3], dtype=tf.int32, name="size")
check = tf.Assert(tf.reduce_all(shape >= size), ["Need value.shape >= size, got ", shape, size])
shape = control_flow_ops.with_dependencies([check], shape)
limit = shape - size + 1
offset = tf.random_uniform(tf.shape(shape), dtype=size.dtype, maxval=size.dtype.max, seed=None) % limit
offset_in = tf.concat([[0], offset], axis=-1)
size_in = tf.concat([[self.num_frames], size], axis=-1)
input = tf.slice(input, offset_in, size_in)
input.set_shape([self.num_frames, self.crop_size, self.crop_size, 3])
return input
with tf.variable_scope('input'):
inList_all = []
for dataPath in self.pathlist:
inList = sorted(glob.glob(os.path.join(dataPath, 'input/*.png')))
inList_all.append(inList)
inList_all = tf.convert_to_tensor(inList_all, dtype=tf.string)
self.data_queue = tf.train.slice_input_producer([inList_all], capacity=40)
input = read_data()
batch_in = tf.train.batch([input], batch_size=batch_size, num_threads=3, capacity=40)
return batch_in
#
def forward(self, imga, imgb, scope='easyflow', reuse=False):
dims = len(imga.get_shape())
if dims == 5:
n, num_frame, height, width, num_channels = imga.get_shape().as_list()
imga = tf.reshape(imga, [n * num_frame, height, width, num_channels])
imgb = tf.reshape(imgb, [n * num_frame, height, width, num_channels])
n, h, w, c = imga.get_shape().as_list()
with tf.variable_scope(scope, reuse=reuse):
with slim.arg_scope([slim.conv2d], activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer(uniform=True),
biases_initializer=tf.constant_initializer(0.0)), \
slim.arg_scope([slim.conv2d_transpose], activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer(uniform=True),
biases_initializer=tf.constant_initializer(0.0)):
inputs = tf.concat([imga, imgb], 3, name='flow_inp')
c1 = slim.conv2d(inputs, 24, [5, 5], stride=2, scope='c1')
c2 = slim.conv2d(c1, 24, [3, 3], scope='c2')
c3 = slim.conv2d(c2, 24, [5, 5], stride=2, scope='c3')
c4 = slim.conv2d(c3, 24, [3, 3], scope='c4')
c5 = slim.conv2d(c4, 32, [3, 3], activation_fn=tf.nn.tanh, scope='c5')
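# The 32 channels of c5 are rearranged depth-to-space (a pixel-shuffle style
# reshape/transpose): (n, h/4, w/4, 2*4*4) becomes a full-resolution coarse
# flow field of shape (n, h, w, 2).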
c5_hr = tf.reshape(c5, [n, h//4, w//4, 2, 4, 4])
c5_hr = tf.transpose(c5_hr, [0, 1, 4, 2, 5, 3])
c5_hr = tf.reshape(c5_hr, [n, h, w, 2])
img_warp = imwarp_backward(c5_hr, imgb, [h, w])
c5_pack = tf.concat([inputs, c5_hr, img_warp], 3, name='cat')
s1 = slim.conv2d(c5_pack, 24, [5, 5], stride=2, scope='s1')
s2 = slim.conv2d(s1, 24, [3, 3], scope='s2')
s3 = slim.conv2d(s2, 24, [3, 3], scope='s3')
s4 = slim.conv2d(s3, 24, [3, 3], scope='s4')
s5 = slim.conv2d(s4, 8, [3, 3], activation_fn=tf.nn.tanh, scope='s5')
s5_hr = tf.reshape(s5, [n, h // 2, w //2, 2, 2, 2])
s5_hr = tf.transpose(s5_hr, [0, 1, 4, 2, 5, 3])
s5_hr = tf.reshape(s5_hr, [n, h, w, 2])
uv = c5_hr + s5_hr
if dims == 5:
uv = tf.reshape(uv, [self.batch_size, num_frame, height, width, 2])
return uv
def build_model(self):
frames_lr = self.input_producer(batch_size=self.batch_size)
n, t, h, w, c = frames_lr.get_shape().as_list()
idx0 = self.num_frames // 2
frames_y = rgb2y(frames_lr)
frames_ref_y = frames_y[:, idx0:idx0 + 1, :, :, :]
frames_ref_y = tf.tile(frames_ref_y, [1, self.num_frames, 1, 1, 1])
uv = self.forward(frames_y, frames_ref_y)
frames_ref_warp = imwarp_backward(uv, frames_ref_y, [h, w])
loss_data = tf.reduce_mean(tf.abs(frames_y - frames_ref_warp))
loss_tv = tf.reduce_sum(tf.image.total_variation(uv)) / uv.shape.num_elements()
self.loss = loss_data + 0.01 * loss_tv
def train(self):
def train_op_func(loss, var_list, is_gradient_clip=False):
if is_gradient_clip:
train_op = tf.train.AdamOptimizer(lr)
grads_and_vars = train_op.compute_gradients(loss, var_list=var_list)
unchanged_gvs = [(grad, var) for grad, var in grads_and_vars if not 'LSTM' in var.name]
rnn_grad = [grad for grad, var in grads_and_vars if 'LSTM' in var.name]
rnn_var = [var for grad, var in grads_and_vars if 'LSTM' in var.name]
capped_grad, _ = tf.clip_by_global_norm(rnn_grad, clip_norm=3)
capped_gvs = list(zip(capped_grad, rnn_var))
train_op = train_op.apply_gradients(grads_and_vars=capped_gvs + unchanged_gvs, global_step=global_step)
else:
train_op = tf.train.AdamOptimizer(lr).minimize(loss, var_list=var_list, global_step=global_step)
return train_op
"""Train easyflow network"""
global_step = tf.Variable(initial_value=0, trainable=False)
# Create folder for logs
if not tf.gfile.Exists(self.train_dir):
tf.gfile.MakeDirs(self.train_dir)
self.build_model()
decay_steps = 3e5
lr = tf.train.polynomial_decay(self.learning_rate, global_step, decay_steps, end_learning_rate=1e-6, power=0.9)
vars_all = tf.trainable_variables()
vars_sr = [v for v in vars_all if 'srmodel' in v.name]
vars_srcnn = [v for v in vars_all if 'srcnn' in v.name]
vars_flownet = [v for v in vars_all if 'flownet' in v.name]
train_all = train_op_func(self.loss, vars_all, is_gradient_clip=True)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
self.saver = tf.train.Saver(max_to_keep=50, keep_checkpoint_every_n_hours=1)
# self.load(sess, os.path.join(self.train_dir, 'checkpoints'))
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
summary_op = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(self.train_dir, sess.graph, flush_secs=30)
for step in range(sess.run(global_step), self.max_steps):
start_time = time.time()
_, loss_value = sess.run([train_all, self.loss])
duration = time.time() - start_time
# print loss_value
assert not np.isnan(loss_value)
# -*- coding: utf-8 -*-
import numpy as np
from .base import Layer
from ztlearn.utils import one_hot
from ztlearn.utils import get_sentence_tokens
from ztlearn.initializers import InitializeWeights as init
from ztlearn.optimizers import OptimizationFunction as optimizer
class Embedding(Layer):
def __init__(self,
input_dim, # number of unique words in the text dataset
output_dim, # size of the embedding vectors
embeddings_init = 'uniform', # init type for the embedding matrix (weights)
input_length = 10): # size of input sentences
self.input_dim = input_dim
self.output_dim = output_dim
self.input_length = input_length
self.input_shape = None # required by the base class
self.init_method = None
self.optimizer_kwargs = None
self.is_trainable = True
@property
def trainable(self):
return self.is_trainable
@trainable.setter
def trainable(self, is_trainable):
self.is_trainable = is_trainable
@property
def weight_initializer(self):
return self.init_method
@weight_initializer.setter
def weight_initializer(self, init_method):
self.init_method = init_method
@property
def weight_optimizer(self):
return self.optimizer_kwargs
@weight_optimizer.setter
def weight_optimizer(self, optimizer_kwargs = {}):
self.optimizer_kwargs = optimizer_kwargs
@property
def layer_parameters(self):
return sum([np.prod(param.shape) for param in [self.weights]])
@property
def output_shape(self):
return (self.input_length, self.output_dim)
def prep_layer(self):
self.uniques_one_hot = one_hot(np.arange(self.input_dim)) # master one hot matrix
self.kernel_shape = (self.input_dim, self.output_dim)
self.weights = init(self.weight_initializer).initialize_weights(self.kernel_shape) # embeddings
# inputs should be gotten from sentences_tokens = get_sentence_tokens(text_input)
def pass_forward(self, inputs, train_mode = True, **kwargs):
self.inputs = inputs # tokenized inputs
embeded_inputs = []
for _, tokens in enumerate(self.inputs.tolist()):
for i, word_index in enumerate(tokens):
embed = np.expand_dims(self.uniques_one_hot[word_index,:], 1)
import torch
import numpy as np
from itertools import cycle
import cv2
cycol = cycle('bgrcmk')
def _gaussian_dist(xs, mean, var):
prob = torch.exp(-(xs-mean)**2 / (2*var))
return prob
def within(x, y, xlim, ylim):
val_inds = (x >= 0) & (y >= 0)
val_inds = val_inds & (x <= xlim) & (y <= ylim)
return val_inds
def identity_Rt(dtype=np.float32):
return np.hstack((np.eye(3, dtype=dtype), np.zeros((3, 1), dtype=dtype)))
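# Illustrative example: identity_Rt() returns the 3x4 pose matrix [I | 0].
#
#     >>> identity_Rt()
#     array([[1., 0., 0., 0.],
#            [0., 1., 0., 0.],
#            [0., 0., 1., 0.]], dtype=float32)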
import math
import warnings
from copy import copy, deepcopy
from datetime import datetime
from typing import Mapping, MutableMapping, MutableSequence, Optional
import numpy as np # type: ignore
import pytest # type: ignore
from rads.rpn import (
ABS,
ACOS,
ACOSD,
ACOSH,
ADD,
AND,
ASIN,
ASIND,
ASINH,
ATAN,
ATAN2,
ATAND,
ATANH,
AVG,
BOXCAR,
BTEST,
CEIL,
CEILING,
COS,
COSD,
COSH,
D2R,
DIF,
DIV,
DUP,
DXDY,
EQ,
EXCH,
EXP,
FLOOR,
FMOD,
GAUSS,
GE,
GT,
HYPOT,
IAND,
INRANGE,
INV,
IOR,
ISAN,
ISNAN,
LE,
LOG,
LOG10,
LT,
MAX,
MIN,
MUL,
NAN,
NE,
NEG,
NINT,
OR,
PI,
POP,
POW,
R2,
R2D,
RINT,
SIN,
SIND,
SINH,
SQR,
SQRT,
SUB,
SUM,
TAN,
TAND,
TANH,
YMDHMS,
CompleteExpression,
E,
Expression,
Literal,
StackUnderflowError,
Token,
Variable,
token,
)
from rads.typing import FloatOrArray
GOLDEN_RATIO = math.log((1 + math.sqrt(5)) / 2)
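# Note: GOLDEN_RATIO here is ln(phi), i.e. the inverse hyperbolic sine of 1/2.
# With phi = (1 + sqrt(5)) / 2, phi - 1/phi = 1 and phi + 1/phi = sqrt(5), so
#   sinh(ln(phi)) = (phi - 1/phi) / 2 = 1/2
#   cosh(ln(phi)) = (phi + 1/phi) / 2 = sqrt(5)/2
#   tanh(ln(phi)) = 1/sqrt(5)         = sqrt(5)/5
# which is why it serves as the reference value in the hyperbolic tests below.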
class TestLiteral:
def test_init(self):
Literal(3)
Literal(3.14)
with pytest.raises(TypeError):
Literal("not a number") # type: ignore
def test_pops(self):
assert Literal(3).pops == 0
def test_puts(self):
assert Literal(3).puts == 1
def test_value(self):
assert Literal(3).value == 3
assert Literal(3.14).value == 3.14
def test_call(self):
stack: MutableSequence[FloatOrArray] = []
environment: MutableMapping[str, FloatOrArray] = {}
assert Literal(3.14)(stack, environment) is None
assert Literal(2.71)(stack, environment) is None
assert stack == [3.14, 2.71]
assert environment == {}
def test_eq(self):
assert Literal(3.14) == Literal(3.14)
assert not Literal(3.14) == Literal(2.71)
assert not Literal(3.14) == 3.14
def test_ne(self):
assert Literal(3.14) != Literal(2.71)
assert not Literal(3.14) != Literal(3.14)
assert Literal(3.14) != 3.14
def test_lt(self):
assert Literal(2.71) < Literal(3.14)
assert not Literal(3.14) < Literal(2.71)
with pytest.raises(TypeError):
Literal(2.71) < 3.14
with pytest.raises(TypeError):
2.71 < Literal(3.14)
def test_le(self):
assert Literal(2.71) <= Literal(3.14)
assert Literal(3.14) <= Literal(3.14)
assert not Literal(3.14) <= Literal(2.71)
with pytest.raises(TypeError):
Literal(2.71) <= 3.14
with pytest.raises(TypeError):
2.71 <= Literal(3.14)
def test_gt(self):
assert Literal(3.14) > Literal(2.71)
assert not Literal(2.71) > Literal(3.14)
with pytest.raises(TypeError):
Literal(3.14) > 2.71
with pytest.raises(TypeError):
3.14 > Literal(2.71)
def test_ge(self):
assert Literal(3.14) >= Literal(2.71)
assert Literal(3.14) >= Literal(3.14)
assert not Literal(2.71) >= Literal(3.14)
with pytest.raises(TypeError):
Literal(3.14) >= 2.71
with pytest.raises(TypeError):
3.14 >= Literal(2.71)
def test_repr(self):
assert repr(Literal(3)) == "Literal(3)"
assert repr(Literal(3.14)) == "Literal(3.14)"
def test_str(self):
assert str(Literal(3)) == "3"
assert str(Literal(3.14)) == "3.14"
def test_pi(self):
assert PI.value == pytest.approx(np.pi)
def test_e(self):
assert E.value == pytest.approx(np.e)
class TestVariable:
def test_init(self):
Variable("alt")
with pytest.raises(ValueError):
Variable("3")
with pytest.raises(ValueError):
Variable("3name")
with pytest.raises(TypeError):
Variable(3) # type: ignore
with pytest.raises(TypeError):
Variable(3.14) # type: ignore
def test_pops(self):
assert Variable("alt").pops == 0
def test_puts(self):
assert Variable("alt").puts == 1
def test_name(self):
assert Variable("alt").name == "alt"
def test_call(self):
stack: MutableSequence[FloatOrArray] = []
environment = {"alt": np.array([1, 2, 3]), "dry_tropo": 4, "wet_tropo": 5}
assert Variable("wet_tropo")(stack, environment) is None
assert Variable("alt")(stack, environment) is None
assert len(stack) == 2
assert stack[0] == 5
assert np.all(stack[1] == np.array([1, 2, 3]))
assert len(environment) == 3
assert "alt" in environment
assert "dry_tropo" in environment
assert "wet_tropo" in environment
assert np.all(environment["alt"] == np.array([1, 2, 3]))
assert environment["dry_tropo"] == 4
assert environment["wet_tropo"] == 5
with pytest.raises(KeyError):
assert Variable("alt")(stack, {}) is None
assert len(stack) == 2
assert stack[0] == 5
assert np.all(stack[1] == np.array([1, 2, 3]))
def test_eq(self):
assert Variable("alt") == Variable("alt")
assert not Variable("alt") == Variable("dry_tropo")
assert not Variable("alt") == "alt"
def test_ne(self):
assert Variable("alt") != Variable("dry_tropo")
assert not Variable("alt") != Variable("alt")
assert Variable("alt") != "alt"
def test_repr(self):
assert repr(Variable("alt")) == "Variable('alt')"
def test_str(self):
assert str(Variable("alt")) == "alt"
def contains_array(stack: MutableSequence[FloatOrArray]) -> bool:
for item in stack:
if isinstance(item, np.ndarray):
return True
return False
def contains_nan(stack: MutableSequence[FloatOrArray]) -> bool:
for item in stack:
try:
if math.isnan(item):
return True
except TypeError:
pass
return False
def assert_token(
operator: Token,
pre_stack: MutableSequence[FloatOrArray],
post_stack: MutableSequence[FloatOrArray],
environment: Optional[Mapping[str, FloatOrArray]] = None,
*,
approx: bool = False,
rtol: float = 1e-15,
atol: float = 1e-16,
) -> None:
"""Assert that a token modifies the stack properly.
Parameters
----------
operator
Operator to test.
pre_stack
Stack state before calling the operator.
post_stack
Desired stack state after calling the operator.
environment
Optional dictionary like object providing the environment for
variable lookup.
approx
Set to true to use approximate equality instead of exact.
rtol
Relative tolerance. Only used if :paramref:`approx` is True.
atol
Absolute tolerance. Only used if :paramref:`approx` is True.
Raises
------
AssertionError
If the operator does not produce the proper post stack state or the
environment parameter is changed.
"""
if not environment:
environment = {"dont_touch": 5}
original_environment = deepcopy(environment)
stack = pre_stack
operator(stack, environment)
# environment should be unchanged
assert environment == original_environment
# check stack
if approx or contains_nan(post_stack) or contains_array(post_stack):
assert len(stack) == len(post_stack)
for a, b in zip(stack, post_stack):
if isinstance(a, np.ndarray) or isinstance(b, np.ndarray):
if approx:
np.testing.assert_allclose(
a, b, rtol=rtol, atol=atol, equal_nan=True
)
else:
np.testing.assert_equal(a, b)
else:
if math.isnan(b):
assert math.isnan(a)
elif approx:
assert a == pytest.approx(b, rel=rtol, abs=atol)
else:
assert a == b
else:
assert stack == post_stack
class TestSUBOperator:
def test_repr(self):
assert repr(SUB) == "SUB"
def test_pops(self):
assert SUB.pops == 2
def test_puts(self):
assert SUB.puts == 1
def test_no_copy(self):
assert copy(SUB) is SUB
assert deepcopy(SUB) is SUB
def test_call(self):
assert_token(SUB, [2, 4], [-2])
assert_token(SUB, [2, np.array([4, 1])], [np.array([-2, 1])])
assert_token(SUB, [np.array([4, 1]), 2], [np.array([2, -1])])
assert_token(SUB, [np.array([4, 1]), np.array([1, 4])], [np.array([3, -3])])
# extra stack elements
assert_token(SUB, [0, 2, 4], [0, -2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
SUB([], {})
with pytest.raises(StackUnderflowError):
SUB([1], {})
class TestADDOperator:
def test_repr(self):
assert repr(ADD) == "ADD"
def test_pops(self):
assert ADD.pops == 2
def test_puts(self):
assert ADD.puts == 1
def test_no_copy(self):
assert copy(ADD) is ADD
assert deepcopy(ADD) is ADD
def test_call(self):
assert_token(ADD, [2, 4], [6])
assert_token(ADD, [2, np.array([4, 1])], [np.array([6, 3])])
assert_token(ADD, [np.array([4, 1]), 2], [np.array([6, 3])])
assert_token(ADD, [np.array([4, 1]), np.array([1, 4])], [np.array([5, 5])])
# extra stack elements
assert_token(ADD, [0, 2, 4], [0, 6])
# not enough stack elements
with pytest.raises(StackUnderflowError):
ADD([], {})
with pytest.raises(StackUnderflowError):
ADD([1], {})
class TestMULOperator:
def test_repr(self):
assert repr(MUL) == "MUL"
def test_pops(self):
assert MUL.pops == 2
def test_puts(self):
assert MUL.puts == 1
def test_no_copy(self):
assert copy(MUL) is MUL
assert deepcopy(MUL) is MUL
def test_call(self):
assert_token(MUL, [2, 4], [8])
assert_token(MUL, [2, np.array([4, 1])], [np.array([8, 2])])
assert_token(MUL, [np.array([4, 1]), 2], [np.array([8, 2])])
assert_token(MUL, [np.array([4, 1]), np.array([1, 4])], [np.array([4, 4])])
# extra stack elements
assert_token(MUL, [0, 2, 4], [0, 8])
# not enough stack elements
with pytest.raises(StackUnderflowError):
MUL([], {})
with pytest.raises(StackUnderflowError):
MUL([1], {})
class TestPOPOperator:
def test_repr(self):
assert repr(POP) == "POP"
def test_pops(self):
assert POP.pops == 1
def test_puts(self):
assert POP.puts == 0
def test_no_copy(self):
assert copy(POP) is POP
assert deepcopy(POP) is POP
def test_call(self):
assert_token(POP, [1], [])
assert_token(POP, [1, 2], [1])
# not enough stack elements
with pytest.raises(StackUnderflowError):
POP([], {})
class TestNEGOperator:
def test_repr(self):
assert repr(NEG) == "NEG"
def test_pops(self):
assert NEG.pops == 1
def test_puts(self):
assert NEG.puts == 1
def test_no_copy(self):
assert copy(NEG) is NEG
assert deepcopy(NEG) is NEG
def test_call(self):
assert_token(NEG, [2], [-2])
assert_token(NEG, [-2], [2])
assert_token(NEG, [np.array([4, -1])], [np.array([-4, 1])])
assert_token(NEG, [np.array([-4, 1])], [np.array([4, -1])])
# extra stack elements
assert_token(NEG, [0, 2], [0, -2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
NEG([], {})
class TestABSOperator:
def test_repr(self):
assert repr(ABS) == "ABS"
def test_pops(self):
assert ABS.pops == 1
def test_puts(self):
assert ABS.puts == 1
def test_no_copy(self):
assert copy(ABS) is ABS
assert deepcopy(ABS) is ABS
def test_call(self):
assert_token(ABS, [2], [2])
assert_token(ABS, [-2], [2])
assert_token(ABS, [np.array([4, -1])], [np.array([4, 1])])
assert_token(ABS, [np.array([-4, 1])], [np.array([4, 1])])
# extra stack elements
assert_token(ABS, [0, -2], [0, 2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
ABS([], {})
class TestINVOperator:
def test_repr(self):
assert repr(INV) == "INV"
def test_pops(self):
assert INV.pops == 1
def test_puts(self):
assert INV.puts == 1
def test_no_copy(self):
assert copy(INV) is INV
assert deepcopy(INV) is INV
def test_call(self):
assert_token(INV, [2], [0.5])
assert_token(INV, [-2], [-0.5])
assert_token(INV, [np.array([4, -1])], [np.array([0.25, -1])])
assert_token(INV, [np.array([-4, 1])], [np.array([-0.25, 1])])
# extra stack elements
assert_token(INV, [0, 2], [0, 0.5])
# not enough stack elements
with pytest.raises(StackUnderflowError):
INV([], {})
class TestSQRTOperator:
def test_repr(self):
assert repr(SQRT) == "SQRT"
def test_pops(self):
assert SQRT.pops == 1
def test_puts(self):
assert SQRT.puts == 1
def test_no_copy(self):
assert copy(SQRT) is SQRT
assert deepcopy(SQRT) is SQRT
def test_call(self):
assert_token(SQRT, [4], [2])
assert_token(SQRT, [np.array([4, 16])], [np.array([2, 4])])
# extra stack elements
assert_token(SQRT, [0, 4], [0, 2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
SQRT([], {})
class TestSQROperator:
def test_repr(self):
assert repr(SQR) == "SQR"
def test_pops(self):
assert SQR.pops == 1
def test_puts(self):
assert SQR.puts == 1
def test_no_copy(self):
assert copy(SQR) is SQR
assert deepcopy(SQR) is SQR
def test_call(self):
assert_token(SQR, [2], [4])
assert_token(SQR, [-2], [4])
assert_token(SQR, [np.array([4, -1])], [np.array([16, 1])])
assert_token(SQR, [np.array([-4, 1])], [np.array([16, 1])])
# extra stack elements
assert_token(SQR, [0, -2], [0, 4])
# not enough stack elements
with pytest.raises(StackUnderflowError):
SQR([], {})
class TestEXPOperator:
def test_repr(self):
assert repr(EXP) == "EXP"
def test_pops(self):
assert EXP.pops == 1
def test_puts(self):
assert EXP.puts == 1
def test_no_copy(self):
assert copy(EXP) is EXP
assert deepcopy(EXP) is EXP
def test_call(self):
assert_token(EXP, [math.log(1)], [1.0], approx=True)
assert_token(EXP, [math.log(2)], [2.0], approx=True)
assert_token(
EXP, [np.array([np.log(4), np.log(1)])], [np.array([4.0, 1.0])], approx=True
)
# extra stack elements
assert_token(EXP, [0, np.log(1)], [0, 1.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
EXP([], {})
class TestLOGOperator:
def test_repr(self):
assert repr(LOG) == "LOG"
def test_pops(self):
assert LOG.pops == 1
def test_puts(self):
assert LOG.puts == 1
def test_no_copy(self):
assert copy(LOG) is LOG
assert deepcopy(LOG) is LOG
def test_call(self):
assert_token(LOG, [math.e], [1.0], approx=True)
assert_token(LOG, [math.e ** 2], [2.0], approx=True)
assert_token(LOG, [math.e ** -2], [-2.0], approx=True)
assert_token(
LOG,
[np.array([np.e ** 4, np.e ** -1])],
[np.array([4.0, -1.0])],
approx=True,
)
assert_token(
LOG,
[np.array([np.e ** -4, np.e ** 1])],
[np.array([-4.0, 1.0])],
approx=True,
)
# extra stack elements
assert_token(LOG, [0, np.e], [0, 1.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
LOG([], {})
class TestLOG10Operator:
def test_repr(self):
assert repr(LOG10) == "LOG10"
def test_pops(self):
assert LOG10.pops == 1
def test_puts(self):
assert LOG10.puts == 1
def test_no_copy(self):
assert copy(LOG10) is LOG10
assert deepcopy(LOG10) is LOG10
def test_call(self):
assert_token(LOG10, [10], [1.0], approx=True)
assert_token(LOG10, [10 ** 2], [2.0], approx=True)
assert_token(LOG10, [10 ** -2], [-2.0], approx=True)
assert_token(
LOG10, [np.array([10 ** 4, 10 ** -1])], [np.array([4.0, -1.0])], approx=True
)
assert_token(
LOG10, [np.array([10 ** -4, 10 ** 1])], [np.array([-4.0, 1.0])], approx=True
)
# extra stack elements
assert_token(LOG10, [0, 10], [0, 1.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
LOG10([], {})
class TestSINOperator:
def test_repr(self):
assert repr(SIN) == "SIN"
def test_pops(self):
assert SIN.pops == 1
def test_puts(self):
assert SIN.puts == 1
def test_no_copy(self):
assert copy(SIN) is SIN
assert deepcopy(SIN) is SIN
def test_call(self):
assert_token(SIN, [0.0], [0.0], approx=True)
assert_token(SIN, [math.pi / 6], [1 / 2], approx=True)
assert_token(SIN, [math.pi / 4], [1 / math.sqrt(2)], approx=True)
assert_token(SIN, [math.pi / 3], [math.sqrt(3) / 2], approx=True)
assert_token(SIN, [math.pi / 2], [1.0], approx=True)
assert_token(
SIN,
[np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
[np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
approx=True,
)
assert_token(
SIN,
[-np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
[-np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
approx=True,
)
# extra stack elements
assert_token(SIN, [0, math.pi / 2], [0, 1.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
SIN([], {})
class TestCOSOperator:
def test_repr(self):
assert repr(COS) == "COS"
def test_pops(self):
assert COS.pops == 1
def test_puts(self):
assert COS.puts == 1
def test_no_copy(self):
assert copy(COS) is COS
assert deepcopy(COS) is COS
def test_call(self):
assert_token(COS, [0.0], [1.0], approx=True)
assert_token(COS, [math.pi / 6], [math.sqrt(3) / 2], approx=True)
assert_token(COS, [math.pi / 4], [1 / math.sqrt(2)], approx=True)
assert_token(COS, [math.pi / 3], [1 / 2], approx=True)
assert_token(COS, [math.pi / 2], [0.0], approx=True)
assert_token(
COS,
[np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
[np.array([1.0, np.sqrt(3) / 2, 1 / np.sqrt(2), 1 / 2, 0.0])],
approx=True,
)
assert_token(
COS,
[-np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
[np.array([1.0, np.sqrt(3) / 2, 1 / np.sqrt(2), 1 / 2, 0.0])],
approx=True,
)
# extra stack elements
assert_token(COS, [0, math.pi / 2], [0, 0.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
COS([], {})
class TestTANOperator:
def test_repr(self):
assert repr(TAN) == "TAN"
def test_pops(self):
assert TAN.pops == 1
def test_puts(self):
assert TAN.puts == 1
def test_no_copy(self):
assert copy(TAN) is TAN
assert deepcopy(TAN) is TAN
def test_call(self):
assert_token(TAN, [0.0], [0.0], approx=True)
assert_token(TAN, [math.pi / 6], [1 / math.sqrt(3)], approx=True)
assert_token(TAN, [math.pi / 4], [1.0], approx=True)
assert_token(TAN, [math.pi / 3], [math.sqrt(3)], approx=True)
assert_token(
TAN,
[np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3])],
[np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
approx=True,
)
assert_token(
TAN,
[-np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3])],
[-np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
approx=True,
)
# extra stack elements
assert_token(TAN, [0, math.pi / 4], [0, 1.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
TAN([], {})
class TestSINDOperator:
def test_repr(self):
assert repr(SIND) == "SIND"
def test_pops(self):
assert SIND.pops == 1
def test_puts(self):
assert SIND.puts == 1
def test_no_copy(self):
assert copy(SIND) is SIND
assert deepcopy(SIND) is SIND
def test_call(self):
assert_token(SIND, [0], [0.0], approx=True)
assert_token(SIND, [30], [1 / 2], approx=True)
assert_token(SIND, [45], [1 / math.sqrt(2)], approx=True)
assert_token(SIND, [60], [math.sqrt(3) / 2], approx=True)
assert_token(SIND, [90], [1.0], approx=True)
assert_token(
SIND,
[np.array([0, 30, 45, 60, 90])],
[np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
approx=True,
)
assert_token(
SIND,
[-np.array([0, 30, 45, 60, 90])],
[-np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
approx=True,
)
# extra stack elements
assert_token(SIND, [0, 90], [0, 1.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
SIND([], {})
class TestCOSDOperator:
def test_repr(self):
assert repr(COSD) == "COSD"
def test_pops(self):
assert COSD.pops == 1
def test_puts(self):
assert COSD.puts == 1
def test_no_copy(self):
assert copy(COSD) is COSD
assert deepcopy(COSD) is COSD
def test_call(self):
assert_token(COSD, [0], [1.0], approx=True)
assert_token(COSD, [30], [math.sqrt(3) / 2], approx=True)
assert_token(COSD, [45], [1 / math.sqrt(2)], approx=True)
assert_token(COSD, [60], [1 / 2], approx=True)
assert_token(COSD, [90], [0.0], approx=True)
assert_token(
COSD,
[np.array([0, 30, 45, 60, 90])],
[np.array([1.0, np.sqrt(3) / 2, 1 / np.sqrt(2), 1 / 2, 0.0])],
approx=True,
)
assert_token(
COSD,
[-np.array([0, 30, 45, 60, 90])],
[np.array([1.0, np.sqrt(3) / 2, 1 / np.sqrt(2), 1 / 2, 0.0])],
approx=True,
)
# extra stack elements
assert_token(COSD, [0, 90], [0, 0.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
COSD([], {})
class TestTANDOperator:
def test_repr(self):
assert repr(TAND) == "TAND"
def test_pops(self):
assert TAND.pops == 1
def test_puts(self):
assert TAND.puts == 1
def test_no_copy(self):
assert copy(TAND) is TAND
assert deepcopy(TAND) is TAND
def test_call(self):
assert_token(TAND, [0], [0], approx=True)
assert_token(TAND, [30], [1 / math.sqrt(3)], approx=True)
assert_token(TAND, [45], [1.0], approx=True)
assert_token(TAND, [60], [math.sqrt(3)], approx=True)
assert_token(
TAND,
[np.array([0, 30, 45, 60])],
[np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
approx=True,
)
assert_token(
TAND,
[-np.array([0, 30, 45, 60])],
[-np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
approx=True,
)
# extra stack elements
assert_token(TAND, [0, 45], [0, 1.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
TAND([], {})
class TestSINHOperator:
def test_repr(self):
assert repr(SINH) == "SINH"
def test_pops(self):
assert SINH.pops == 1
def test_puts(self):
assert SINH.puts == 1
def test_no_copy(self):
assert copy(SINH) is SINH
assert deepcopy(SINH) is SINH
def test_call(self):
assert_token(SINH, [0.0], [0.0], approx=True)
assert_token(SINH, [GOLDEN_RATIO], [0.5], approx=True)
assert_token(
SINH, [np.array([0.0, GOLDEN_RATIO])], [np.array([0.0, 0.5])], approx=True
)
# extra stack elements
assert_token(SINH, [0, GOLDEN_RATIO], [0, 0.5], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
SINH([], {})
class TestCOSHOperator:
def test_repr(self):
assert repr(COSH) == "COSH"
def test_pops(self):
assert COSH.pops == 1
def test_puts(self):
assert COSH.puts == 1
def test_no_copy(self):
assert copy(COSH) is COSH
assert deepcopy(COSH) is COSH
def test_call(self):
assert_token(COSH, [0.0], [1.0], approx=True)
assert_token(COSH, [GOLDEN_RATIO], [math.sqrt(5) / 2], approx=True)
assert_token(
COSH,
[np.array([0.0, GOLDEN_RATIO])],
[np.array([1.0, np.sqrt(5) / 2])],
approx=True,
)
# extra stack elements
assert_token(COSH, [0, GOLDEN_RATIO], [0, math.sqrt(5) / 2], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
COSH([], {})
class TestTANHOperator:
def test_repr(self):
assert repr(TANH) == "TANH"
def test_pops(self):
assert TANH.pops == 1
def test_puts(self):
assert TANH.puts == 1
def test_no_copy(self):
assert copy(TANH) is TANH
assert deepcopy(TANH) is TANH
def test_call(self):
assert_token(TANH, [0.0], [0.0], approx=True)
assert_token(TANH, [GOLDEN_RATIO], [math.sqrt(5) / 5], approx=True)
assert_token(
TANH,
[np.array([0.0, GOLDEN_RATIO])],
[np.array([0.0, np.sqrt(5) / 5])],
approx=True,
)
# extra stack elements
assert_token(TANH, [0, GOLDEN_RATIO], [0, math.sqrt(5) / 5], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
TANH([], {})
class TestASINOperator:
def test_repr(self):
assert repr(ASIN) == "ASIN"
def test_pops(self):
assert ASIN.pops == 1
def test_puts(self):
assert ASIN.puts == 1
def test_no_copy(self):
assert copy(ASIN) is ASIN
assert deepcopy(ASIN) is ASIN
def test_call(self):
assert_token(ASIN, [0.0], [0.0], approx=True)
assert_token(ASIN, [1 / 2], [math.pi / 6], approx=True)
assert_token(ASIN, [1 / math.sqrt(2)], [math.pi / 4], approx=True)
assert_token(ASIN, [math.sqrt(3) / 2], [math.pi / 3], approx=True)
assert_token(ASIN, [1.0], [math.pi / 2], approx=True)
assert_token(
ASIN,
[np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
[np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
approx=True,
)
assert_token(
ASIN,
[-np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
[-np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
approx=True,
)
# extra stack elements
assert_token(ASIN, [0, 1.0], [0, math.pi / 2], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ASIN([], {})
class TestACOSOperator:
def test_repr(self):
assert repr(ACOS) == "ACOS"
def test_pops(self):
assert ACOS.pops == 1
def test_puts(self):
assert ACOS.puts == 1
def test_no_copy(self):
assert copy(ACOS) is ACOS
assert deepcopy(ACOS) is ACOS
def test_call(self):
assert_token(ACOS, [1.0], [0.0], approx=True)
assert_token(ACOS, [math.sqrt(3) / 2], [math.pi / 6], approx=True)
assert_token(ACOS, [1 / math.sqrt(2)], [math.pi / 4], approx=True)
assert_token(ACOS, [1 / 2], [math.pi / 3], approx=True)
assert_token(ACOS, [0.0], [math.pi / 2], approx=True)
assert_token(
ACOS,
[np.array([1.0, np.sqrt(3) / 2, 1 / np.sqrt(2), 1 / 2, 0.0])],
[np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
approx=True,
)
# extra stack elements
assert_token(ACOS, [0, 0.0], [0, math.pi / 2], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ACOS([], {})
class TestATANOperator:
def test_repr(self):
assert repr(ATAN) == "ATAN"
def test_pops(self):
assert ATAN.pops == 1
def test_puts(self):
assert ATAN.puts == 1
def test_no_copy(self):
assert copy(ATAN) is ATAN
assert deepcopy(ATAN) is ATAN
def test_call(self):
assert_token(ATAN, [0.0], [0.0], approx=True)
assert_token(ATAN, [1 / math.sqrt(3)], [math.pi / 6], approx=True)
assert_token(ATAN, [1.0], [math.pi / 4], approx=True)
assert_token(ATAN, [math.sqrt(3)], [math.pi / 3], approx=True)
assert_token(
ATAN,
[np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
[np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3])],
approx=True,
)
assert_token(
ATAN,
[-np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
[-np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3])],
approx=True,
)
# extra stack elements
assert_token(ATAN, [0, 1.0], [0, math.pi / 4], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ATAN([], {})
class TestASINDOperator:
def test_repr(self):
assert repr(ASIND) == "ASIND"
def test_pops(self):
assert ASIND.pops == 1
def test_puts(self):
assert ASIND.puts == 1
def test_no_copy(self):
assert copy(ASIND) is ASIND
assert deepcopy(ASIND) is ASIND
def test_call(self):
assert_token(ASIND, [0.0], [0], approx=True)
assert_token(ASIND, [1 / 2], [30], approx=True)
assert_token(ASIND, [1 / math.sqrt(2)], [45], approx=True)
assert_token(ASIND, [math.sqrt(3) / 2], [60], approx=True)
assert_token(ASIND, [1.0], [90], approx=True)
assert_token(
ASIND,
[np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
[np.array([0, 30, 45, 60, 90])],
approx=True,
)
assert_token(
ASIND,
[-np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
[-np.array([0, 30, 45, 60, 90])],
approx=True,
)
# extra stack elements
assert_token(ASIND, [0, 1.0], [0, 90], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ASIND([], {})
class TestACOSDOperator:
def test_repr(self):
assert repr(ACOSD) == "ACOSD"
def test_pops(self):
assert ACOSD.pops == 1
def test_puts(self):
assert ACOSD.puts == 1
def test_no_copy(self):
assert copy(ACOSD) is ACOSD
assert deepcopy(ACOSD) is ACOSD
def test_call(self):
assert_token(ACOSD, [1.0], [0], approx=True)
assert_token(ACOSD, [math.sqrt(3) / 2], [30], approx=True)
assert_token(ACOSD, [1 / math.sqrt(2)], [45], approx=True)
assert_token(ACOSD, [1 / 2], [60], approx=True)
assert_token(ACOSD, [0.0], [90], approx=True)
assert_token(
ACOSD,
[np.array([1.0, np.sqrt(3) / 2, 1 / np.sqrt(2), 1 / 2, 0.0])],
[np.array([0, 30, 45, 60, 90])],
approx=True,
)
# extra stack elements
assert_token(ACOSD, [0, 0.0], [0, 90], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ACOSD([], {})
class TestATANDOperator:
def test_repr(self):
assert repr(ATAND) == "ATAND"
def test_pops(self):
assert ATAND.pops == 1
def test_puts(self):
assert ATAND.puts == 1
def test_no_copy(self):
assert copy(ATAND) is ATAND
assert deepcopy(ATAND) is ATAND
def test_call(self):
assert_token(ATAND, [0.0], [0], approx=True)
assert_token(ATAND, [1 / math.sqrt(3)], [30], approx=True)
assert_token(ATAND, [1.0], [45], approx=True)
assert_token(ATAND, [math.sqrt(3)], [60], approx=True)
assert_token(
ATAND,
[np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
[np.array([0, 30, 45, 60])],
approx=True,
)
assert_token(
ATAND,
[-np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
[-np.array([0, 30, 45, 60])],
approx=True,
)
# extra stack elements
assert_token(ATAND, [0, 1.0], [0, 45], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ATAND([], {})
class TestASINHOperator:
def test_repr(self):
assert repr(ASINH) == "ASINH"
def test_pops(self):
assert ASINH.pops == 1
def test_puts(self):
assert ASINH.puts == 1
def test_no_copy(self):
assert copy(ASINH) is ASINH
assert deepcopy(ASINH) is ASINH
def test_call(self):
assert_token(ASINH, [0.0], [0.0], approx=True)
assert_token(ASINH, [0.5], [GOLDEN_RATIO], approx=True)
assert_token(
ASINH, [np.array([0.0, 0.5])], [np.array([0.0, GOLDEN_RATIO])], approx=True
)
# extra stack elements
assert_token(ASINH, [0, 0.5], [0, GOLDEN_RATIO], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ASINH([], {})
class TestACOSHOperator:
def test_repr(self):
assert repr(ACOSH) == "ACOSH"
def test_pops(self):
assert ACOSH.pops == 1
def test_puts(self):
assert ACOSH.puts == 1
def test_no_copy(self):
assert copy(ACOSH) is ACOSH
assert deepcopy(ACOSH) is ACOSH
def test_call(self):
assert_token(ACOSH, [1.0], [0.0], approx=True)
assert_token(ACOSH, [math.sqrt(5) / 2], [GOLDEN_RATIO], approx=True)
assert_token(
ACOSH,
[np.array([1.0, np.sqrt(5) / 2])],
[np.array([0.0, GOLDEN_RATIO])],
approx=True,
)
# extra stack elements
assert_token(ACOSH, [0, math.sqrt(5) / 2], [0, GOLDEN_RATIO], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ACOSH([], {})
class TestATANHOperator:
def test_repr(self):
assert repr(ATANH) == "ATANH"
def test_pops(self):
assert ATANH.pops == 1
def test_puts(self):
assert ATANH.puts == 1
def test_no_copy(self):
assert copy(ATANH) is ATANH
assert deepcopy(ATANH) is ATANH
def test_call(self):
assert_token(ATANH, [0.0], [0.0], approx=True)
assert_token(ATANH, [math.sqrt(5) / 5], [GOLDEN_RATIO], approx=True)
assert_token(
ATANH,
[np.array([0.0, np.sqrt(5) / 5])],
[np.array([0.0, GOLDEN_RATIO])],
approx=True,
)
# extra stack elements
assert_token(ATANH, [0, math.sqrt(5) / 5], [0, GOLDEN_RATIO], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ATANH([], {})
class TestISNANOperator:
def test_repr(self):
assert repr(ISNAN) == "ISNAN"
def test_pops(self):
assert ISNAN.pops == 1
def test_puts(self):
assert ISNAN.puts == 1
def test_no_copy(self):
assert copy(ISNAN) is ISNAN
assert deepcopy(ISNAN) is ISNAN
def test_call(self):
assert_token(ISNAN, [2], [False])
assert_token(ISNAN, [float("nan")], [True])
assert_token(ISNAN, [np.array([4, np.nan])], [np.array([False, True])])
assert_token(ISNAN, [np.array([np.nan, 1])], [np.array([True, False])])
# extra stack elements
assert_token(ISNAN, [0, float("nan")], [0, True])
# not enough stack elements
with pytest.raises(StackUnderflowError):
ISNAN([], {})
class TestISANOperator:
def test_repr(self):
assert repr(ISAN) == "ISAN"
def test_pops(self):
assert ISAN.pops == 1
def test_puts(self):
assert ISAN.puts == 1
def test_no_copy(self):
assert copy(ISAN) is ISAN
assert deepcopy(ISAN) is ISAN
def test_call(self):
assert_token(ISAN, [2], [True])
assert_token(ISAN, [float("nan")], [False])
assert_token(ISAN, [np.array([4, np.nan])], [np.array([True, False])])
assert_token(ISAN, [np.array([np.nan, 1])], [np.array([False, True])])
# extra stack elements
assert_token(ISAN, [0, 2], [0, True])
# not enough stack elements
with pytest.raises(StackUnderflowError):
ISAN([], {})
class TestRINTOperator:
def test_repr(self):
assert repr(RINT) == "RINT"
def test_pops(self):
assert RINT.pops == 1
def test_puts(self):
assert RINT.puts == 1
def test_no_copy(self):
assert copy(RINT) is RINT
assert deepcopy(RINT) is RINT
def test_call(self):
assert_token(RINT, [1.6], [2])
assert_token(RINT, [2.4], [2])
assert_token(RINT, [-1.6], [-2])
assert_token(RINT, [-2.4], [-2])
assert_token(RINT, [np.array([1.6, 2.4])], [np.array([2, 2])])
assert_token(RINT, [np.array([-1.6, -2.4])], [np.array([-2, -2])])
# extra stack elements
assert_token(RINT, [0, 1.6], [0, 2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
RINT([], {})
class TestNINTOperator:
def test_repr(self):
assert repr(NINT) == "NINT"
def test_pops(self):
assert NINT.pops == 1
def test_puts(self):
assert NINT.puts == 1
def test_no_copy(self):
assert copy(NINT) is NINT
assert deepcopy(NINT) is NINT
def test_call(self):
assert_token(NINT, [1.6], [2])
assert_token(NINT, [2.4], [2])
assert_token(NINT, [-1.6], [-2])
assert_token(NINT, [-2.4], [-2])
assert_token(NINT, [np.array([1.6, 2.4])], [np.array([2, 2])])
assert_token(NINT, [np.array([-1.6, -2.4])], [np.array([-2, -2])])
# extra stack elements
assert_token(NINT, [0, 1.6], [0, 2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
NINT([], {})
class TestCEILOperator:
def test_repr(self):
assert repr(CEIL) == "CEIL"
def test_pops(self):
assert CEIL.pops == 1
def test_puts(self):
assert CEIL.puts == 1
def test_no_copy(self):
assert copy(CEIL) is CEIL
assert deepcopy(CEIL) is CEIL
def test_call(self):
assert_token(CEIL, [1.6], [2])
assert_token(CEIL, [2.4], [3])
assert_token(CEIL, [-1.6], [-1])
assert_token(CEIL, [-2.4], [-2])
assert_token(CEIL, [np.array([1.6, 2.4])], [np.array([2, 3])])
assert_token(CEIL, [np.array([-1.6, -2.4])], [np.array([-1, -2])])
# extra stack elements
assert_token(CEIL, [0, 1.2], [0, 2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
CEIL([], {})
class TestCEILINGOperator:
def test_repr(self):
assert repr(CEILING) == "CEILING"
def test_pops(self):
assert CEILING.pops == 1
def test_puts(self):
assert CEILING.puts == 1
def test_no_copy(self):
assert copy(CEILING) is CEILING
assert deepcopy(CEILING) is CEILING
def test_call(self):
assert_token(CEILING, [1.6], [2])
assert_token(CEILING, [2.4], [3])
assert_token(CEILING, [-1.6], [-1])
assert_token(CEILING, [-2.4], [-2])
assert_token(CEILING, [np.array([1.6, 2.4])], [np.array([2, 3])])
assert_token(CEILING, [np.array([-1.6, -2.4])], [np.array([-1, -2])])
# extra stack elements
assert_token(CEILING, [0, 1.2], [0, 2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
CEILING([], {})
class TestFLOOROperator:
def test_repr(self):
assert repr(FLOOR) == "FLOOR"
def test_pops(self):
assert FLOOR.pops == 1
def test_puts(self):
assert FLOOR.puts == 1
def test_no_copy(self):
assert copy(FLOOR) is FLOOR
assert deepcopy(FLOOR) is FLOOR
def test_call(self):
assert_token(FLOOR, [1.6], [1])
assert_token(FLOOR, [2.4], [2])
assert_token(FLOOR, [-1.6], [-2])
assert_token(FLOOR, [-2.4], [-3])
assert_token(FLOOR, [np.array([1.6, 2.4])], [np.array([1, 2])])
assert_token(FLOOR, [np.array([-1.6, -2.4])], [np.array([-2, -3])])
# extra stack elements
assert_token(FLOOR, [0, 1.8], [0, 1])
# not enough stack elements
with pytest.raises(StackUnderflowError):
FLOOR([], {})
class TestD2ROperator:
def test_repr(self):
assert repr(D2R) == "D2R"
def test_pops(self):
assert D2R.pops == 1
def test_puts(self):
assert D2R.puts == 1
def test_no_copy(self):
assert copy(D2R) is D2R
assert deepcopy(D2R) is D2R
def test_call(self):
assert_token(D2R, [0], [0.0], approx=True)
assert_token(D2R, [30], [math.pi / 6], approx=True)
assert_token(D2R, [45], [math.pi / 4], approx=True)
assert_token(D2R, [60], [math.pi / 3], approx=True)
assert_token(D2R, [90], [math.pi / 2], approx=True)
assert_token(
D2R,
[np.array([0, 30, 45, 60, 90])],
[np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
approx=True,
)
assert_token(
D2R,
[-np.array([0, 30, 45, 60, 90])],
[-np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
approx=True,
)
# extra stack elements
assert_token(D2R, [0, 90], [0, math.pi / 2], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
D2R([], {})
class TestR2DOperator:
def test_repr(self):
assert repr(R2D) == "R2D"
def test_pops(self):
assert R2D.pops == 1
def test_puts(self):
assert R2D.puts == 1
def test_no_copy(self):
assert copy(R2D) is R2D
assert deepcopy(R2D) is R2D
def test_call(self):
assert_token(R2D, [0.0], [0], approx=True)
assert_token(R2D, [math.pi / 6], [30], approx=True)
assert_token(R2D, [math.pi / 4], [45], approx=True)
assert_token(R2D, [math.pi / 3], [60], approx=True)
assert_token(R2D, [math.pi / 2], [90], approx=True)
assert_token(
R2D,
[np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
[np.array([0, 30, 45, 60, 90])],
approx=True,
)
assert_token(
R2D,
[-np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
[-np.array([0, 30, 45, 60, 90])],
approx=True,
)
# extra stack elements
assert_token(R2D, [0, math.pi / 2], [0, 90], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
R2D([], {})
class TestYMDHMSOperator:
def test_repr(self):
assert repr(YMDHMS) == "YMDHMS"
def test_pops(self):
assert YMDHMS.pops == 1
def test_puts(self):
assert YMDHMS.puts == 1
def test_no_copy(self):
assert copy(YMDHMS) is YMDHMS
assert deepcopy(YMDHMS) is YMDHMS
def test_call(self):
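# YMDHMS packs a timestamp, given as seconds since the 1985-01-01 epoch,
# into the form YYMMDDHHMMSS.ffffff (e.g. 2008-07-04 12:19:19.570865
# becomes 80704121919.570865).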
epoch = datetime(1985, 1, 1, 0, 0, 0, 0)
date1 = datetime(2008, 7, 4, 12, 19, 19, 570865)
date2 = datetime(2019, 6, 26, 12, 31, 6, 930575)
seconds1 = (date1 - epoch).total_seconds()
seconds2 = (date2 - epoch).total_seconds()
assert_token(YMDHMS, [seconds1], [80704121919.570865], approx=True)
assert_token(YMDHMS, [seconds2], [190626123106.930575], approx=True)
assert_token(
YMDHMS,
[np.array([seconds1, seconds2])],
[np.array([80704121919.570865, 190626123106.930575])],
approx=True,
)
# extra stack elements
assert_token(YMDHMS, [0, seconds1], [0, 80704121919.570865], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
YMDHMS([], {})
class TestSUMOperator:
def test_repr(self):
assert repr(SUM) == "SUM"
def test_pops(self):
assert SUM.pops == 1
def test_puts(self):
assert SUM.puts == 1
def test_no_copy(self):
assert copy(SUM) is SUM
assert deepcopy(SUM) is SUM
def test_call(self):
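# SUM collapses the top stack element to a scalar total, ignoring NaNs;
# an all-NaN (or scalar NaN) input sums to 0.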
assert_token(SUM, [2], [2])
assert_token(SUM, [-2], [-2])
assert_token(SUM, [float("nan")], [0])
assert_token(SUM, [np.array([4, -1])], [3])
assert_token(SUM, [np.array([-4, 1])], [-3])
assert_token(SUM, [np.array([1, np.nan, 3])], [4])
assert_token(SUM, [np.array([np.nan])], [0])
# extra stack elements
assert_token(SUM, [0, 2], [0, 2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
SUM([], {})
class TestDIFFOperator:
def test_repr(self):
assert repr(DIF) == "DIF"
def test_pops(self):
assert DIF.pops == 1
def test_puts(self):
assert DIF.puts == 1
def test_no_copy(self):
assert copy(DIF) is DIF
assert deepcopy(DIF) is DIF
def test_call(self):
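# DIF returns element-wise first differences, padding the first slot with
# NaN so the output has the same length as the input.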
assert_token(DIF, [2], [np.array([np.nan])])
assert_token(DIF, [np.array([1, 2])], [np.array([np.nan, 1])])
assert_token(DIF, [np.array([1, 2, 5])], [np.array([np.nan, 1, 3])])
assert_token(
DIF, [np.array([1, np.nan, 5])], [np.array([np.nan, np.nan, np.nan])]
)
# extra stack elements
assert_token(DIF, [0, 2], [0, np.array([np.nan])])
with pytest.raises(StackUnderflowError):
DIF([], {})
class TestDUPOperator:
def test_repr(self):
assert repr(DUP) == "DUP"
def test_pops(self):
assert DUP.pops == 1
def test_puts(self):
assert DUP.puts == 2
def test_no_copy(self):
assert copy(DUP) is DUP
assert deepcopy(DUP) is DUP
def test_call(self):
assert_token(DUP, [2], [2, 2])
assert_token(DUP, [np.array([4, -1])], [np.array([4, -1]), np.array([4, -1])])
# extra stack elements
assert_token(DUP, [0, 2], [0, 2, 2])
with pytest.raises(StackUnderflowError):
DUP([], {})
class TestDIVOperator:
def test_repr(self):
assert repr(DIV) == "DIV"
def test_pops(self):
assert DIV.pops == 2
def test_puts(self):
assert DIV.puts == 1
def test_no_copy(self):
assert copy(DIV) is DIV
assert deepcopy(DIV) is DIV
def test_call(self):
assert_token(DIV, [10, 2], [5])
assert_token(DIV, [10, np.array([2, 5])], [np.array([5, 2])])
assert_token(DIV, [np.array([10, 4]), 2], [np.array([5, 2])])
assert_token(DIV, [np.array([8, 16]), np.array([2, 4])], [np.array([4, 4])])
# extra stack elements
assert_token(DIV, [0, 10, 2], [0, 5])
# not enough stack elements
with pytest.raises(StackUnderflowError):
DIV([], {})
with pytest.raises(StackUnderflowError):
DIV([1], {})
class TestPOWOperator:
def test_repr(self):
assert repr(POW) == "POW"
def test_pops(self):
assert POW.pops == 2
def test_puts(self):
assert POW.puts == 1
def test_no_copy(self):
assert copy(POW) is POW
assert deepcopy(POW) is POW
def test_call(self):
assert_token(POW, [1, 2], [1])
assert_token(POW, [2, 2], [4])
assert_token(POW, [2, 4], [16])
assert_token(POW, [2, np.array([1, 2, 3])], [np.array([2, 4, 8])])
assert_token(POW, [np.array([1, 2, 3]), 2], [np.array([1, 4, 9])])
assert_token(POW, [np.array([2, 3]), np.array([5, 6])], [np.array([32, 729])])
# extra stack elements
assert_token(POW, [0, 2, 4], [0, 16])
# not enough stack elements
with pytest.raises(StackUnderflowError):
POW([], {})
with pytest.raises(StackUnderflowError):
POW([1], {})
class TestFMODOperator:
def test_repr(self):
assert repr(FMOD) == "FMOD"
def test_pops(self):
assert FMOD.pops == 2
def test_puts(self):
assert FMOD.puts == 1
def test_no_copy(self):
assert copy(FMOD) is FMOD
assert deepcopy(FMOD) is FMOD
def test_call(self):
assert_token(FMOD, [1, 2], [1])
assert_token(FMOD, [2, 10], [2])
assert_token(FMOD, [12, 10], [2])
assert_token(FMOD, [13, np.array([10, 100])], [np.array([3, 13])])
assert_token(FMOD, [np.array([7, 15]), 10], [np.array([7, 5])])
assert_token(FMOD, [np.array([7, 15]), np.array([10, 5])], [np.array([7, 0])])
# extra stack elements
assert_token(FMOD, [0, 12, 10], [0, 2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
FMOD([], {})
with pytest.raises(StackUnderflowError):
FMOD([1], {})
class TestMINOperator:
def test_repr(self):
assert repr(MIN) == "MIN"
def test_pops(self):
assert MIN.pops == 2
def test_puts(self):
assert MIN.puts == 1
def test_no_copy(self):
assert copy(MIN) is MIN
assert deepcopy(MIN) is MIN
def test_call(self):
assert_token(MIN, [2, 3], [2])
assert_token(MIN, [3, 2], [2])
assert_token(MIN, [2, np.array([1, 3])], [np.array([1, 2])])
assert_token(MIN, [np.array([1, 3]), 2], [np.array([1, 2])])
assert_token(MIN, [np.array([2, 3]), np.array([3, 2])], [np.array([2, 2])])
# extra stack elements
assert_token(MIN, [0, 2, 3], [0, 2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
MIN([], {})
with pytest.raises(StackUnderflowError):
MIN([1], {})
class TestMAXOperator:
def test_repr(self):
assert repr(MAX) == "MAX"
def test_pops(self):
assert MAX.pops == 2
def test_puts(self):
assert MAX.puts == 1
def test_no_copy(self):
assert copy(MAX) is MAX
assert deepcopy(MAX) is MAX
def test_call(self):
assert_token(MAX, [2, 3], [3])
assert_token(MAX, [3, 2], [3])
assert_token(MAX, [2, np.array([1, 3])], [np.array([2, 3])])
assert_token(MAX, [np.array([1, 3]), 2], [np.array([2, 3])])
assert_token(MAX, [np.array([2, 3]), np.array([3, 2])], [np.array([3, 3])])
# extra stack elements
assert_token(MAX, [0, 2, 3], [0, 3])
# not enough stack elements
with pytest.raises(StackUnderflowError):
MAX([], {})
with pytest.raises(StackUnderflowError):
MAX([1], {})
class TestATAN2Operator:
def test_repr(self):
assert repr(ATAN2) == "ATAN2"
def test_pops(self):
assert ATAN2.pops == 2
def test_puts(self):
assert ATAN2.puts == 1
def test_no_copy(self):
assert copy(ATAN2) is ATAN2
assert deepcopy(ATAN2) is ATAN2
def test_call(self):
# NOTE: second parameter is x, first is y
assert_token(ATAN2, [0, 1], [0], approx=True)
assert_token(ATAN2, [1, math.sqrt(3)], [math.pi / 6], approx=True)
assert_token(ATAN2, [1, 1], [math.pi / 4], approx=True)
assert_token(ATAN2, [math.sqrt(3), 1], [math.pi / 3], approx=True)
assert_token(ATAN2, [1, 0], [math.pi / 2], approx=True)
assert_token(
ATAN2, [math.sqrt(3), -1], [math.pi / 2 + math.pi / 6], approx=True
)
assert_token(ATAN2, [1, -1], [math.pi / 2 + math.pi / 4], approx=True)
assert_token(
ATAN2, [1, -math.sqrt(3)], [math.pi / 2 + math.pi / 3], approx=True
)
assert_token(ATAN2, [0, -1], [math.pi / 2 + math.pi / 2], approx=True)
assert_token(
ATAN2,
[
np.array([0, 1, 1, np.sqrt(3), 1, np.sqrt(3), 1, 1, 0]),
np.array([1, np.sqrt(3), 1, 1, 0, -1, -1, -np.sqrt(3), -1]),
],
[
np.array(
[
0.0,
np.pi / 6,
np.pi / 4,
np.pi / 3,
np.pi / 2,
np.pi / 2 + np.pi / 6,
np.pi / 2 + np.pi / 4,
np.pi / 2 + np.pi / 3,
np.pi / 2 + np.pi / 2,
]
)
],
approx=True,
)
# extra stack elements
assert_token(ATAN2, [0, 1, 1], [0, math.pi / 4], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ATAN2([], {})
class TestHYPOTOperator:
def test_repr(self):
assert repr(HYPOT) == "HYPOT"
def test_pops(self):
assert HYPOT.pops == 2
def test_puts(self):
assert HYPOT.puts == 1
def test_no_copy(self):
assert copy(HYPOT) is HYPOT
assert deepcopy(HYPOT) is HYPOT
def test_call(self):
assert_token(HYPOT, [1, 1], [math.sqrt(2)], approx=True)
assert_token(HYPOT, [math.sqrt(3), 1], [2], approx=True)
assert_token(
HYPOT,
[1, np.array([np.sqrt(3), 1])],
[np.array([2, np.sqrt(2)])],
approx=True,
)
assert_token(
HYPOT,
[np.array([np.sqrt(3), 1]), 1],
[np.array([2, np.sqrt(2)])],
approx=True,
)
assert_token(
HYPOT,
[np.array([np.sqrt(3), 1]), np.array([1, 1])],
[np.array([2, np.sqrt(2)])],
approx=True,
)
# extra stack elements
assert_token(HYPOT, [0, math.sqrt(3), 1], [0, 2], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
HYPOT([], {})
with pytest.raises(StackUnderflowError):
HYPOT([1], {})
class TestR2Operator:
def test_repr(self):
assert repr(R2) == "R2"
def test_pops(self):
assert R2.pops == 2
def test_puts(self):
assert R2.puts == 1
def test_no_copy(self):
assert copy(R2) is R2
assert deepcopy(R2) is R2
def test_call(self):
assert_token(R2, [2, 3], [13])
assert_token(R2, [2, np.array([3, 4])], [np.array([13, 20])])
assert_token(R2, [np.array([3, 4]), 2], [np.array([13, 20])])
assert_token(R2, [np.array([1, 2]), np.array([3, 4])], [np.array([10, 20])])
# extra stack elements
assert_token(R2, [0, 2, 3], [0, 13], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
R2([], {})
with pytest.raises(StackUnderflowError):
R2([1], {})
class TestEQOperator:
def test_repr(self):
assert repr(EQ) == "EQ"
def test_pops(self):
assert EQ.pops == 2
def test_puts(self):
assert EQ.puts == 1
def test_no_copy(self):
assert copy(EQ) is EQ
assert deepcopy(EQ) is EQ
def test_call(self):
assert_token(EQ, [2, 2], [True])
assert_token(EQ, [2, 3], [False])
assert_token(
EQ, [2, np.array([1, np.nan, 2])], [np.array([False, False, True])]
)
assert_token(
EQ, [np.array([1, np.nan, 2]), 2], [np.array([False, False, True])]
)
assert_token(
EQ,
[np.array([1, np.nan, 3, 3]), np.array([1, np.nan, 2, 3])],
[np.array([True, False, False, True])],
)
# extra stack elements
assert_token(EQ, [0, 2, 2], [0, True])
# not enough stack elements
with pytest.raises(StackUnderflowError):
EQ([], {})
with pytest.raises(StackUnderflowError):
EQ([1], {})
class TestNEOperator:
def test_repr(self):
assert repr(NE) == "NE"
def test_pops(self):
assert NE.pops == 2
def test_puts(self):
assert NE.puts == 1
def test_no_copy(self):
assert copy(NE) is NE
assert deepcopy(NE) is NE
def test_call(self):
assert_token(NE, [2, 2], [False])
assert_token(NE, [2, 3], [True])
assert_token(NE, [2, np.array([1, np.nan, 2])], [np.array([True, True, False])])
assert_token(NE, [np.array([1, np.nan, 2]), 2], [np.array([True, True, False])])
assert_token(
NE,
[np.array([1, np.nan, 3, 3]), np.array([1, np.nan, 2, 3])],
[np.array([False, True, True, False])],
)
# extra stack elements
assert_token(NE, [0, 2, 2], [0, False])
# not enough stack elements
with pytest.raises(StackUnderflowError):
NE([], {})
with pytest.raises(StackUnderflowError):
NE([1], {})
class TestLTOperator:
def test_repr(self):
assert repr(LT) == "LT"
def test_pops(self):
assert LT.pops == 2
def test_puts(self):
assert LT.puts == 1
def test_no_copy(self):
assert copy(LT) is LT
assert deepcopy(LT) is LT
def test_call(self):
assert_token(LT, [2, 3], [True])
assert_token(LT, [2, 2], [False])
assert_token(LT, [3, 2], [False])
assert_token(LT, [2, np.array([1, 2, 3])], [np.array([False, False, True])])
assert_token(LT, [np.array([1, 2, 3]), 2], [np.array([True, False, False])])
assert_token(
LT,
[np.array([1, 2, 3]), np.array([3, 2, 1])],
[np.array([True, False, False])],
)
# extra stack elements
assert_token(LT, [0, 2, 3], [0, True])
# not enough stack elements
with pytest.raises(StackUnderflowError):
LT([], {})
with pytest.raises(StackUnderflowError):
LT([1], {})
class TestLEOperator:
def test_repr(self):
assert repr(LE) == "LE"
def test_pops(self):
assert LE.pops == 2
def test_puts(self):
assert LE.puts == 1
def test_no_copy(self):
assert copy(LE) is LE
assert deepcopy(LE) is LE
def test_call(self):
assert_token(LE, [2, 3], [True])
assert_token(LE, [2, 2], [True])
assert_token(LE, [3, 2], [False])
assert_token(LE, [2, np.array([1, 2, 3])], [np.array([False, True, True])])
assert_token(LE, [np.array([1, 2, 3]), 2], [np.array([True, True, False])])
assert_token(
LE,
[np.array([1, 2, 3]), np.array([3, 2, 1])],
[np.array([True, True, False])],
)
import numpy as np
from os import listdir
from matplotlib import pyplot as plt
def apply_spectrum(data, pca, numinput=256, power=1.0):
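"""Scale each column of `data` by the matching PCA singular value raised to
`power` (using the first `numinput` values), then rescale the result to unit
standard deviation."""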
colored = data.dot(np.diag(np.power(pca.sValues[:numinput], power)))
return colored/colored.std()
def get_params_and_errors(net, toy, nunits=256, folder='.',
filestart='toy', ds=1.0):
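"""Collect hyperparameters and error metrics from saved network pickles.
Scans `folder` for pickle files whose names start with `filestart` and carry
the downsampling tag `ds`, loads each into `net`, keeps runs with `nunits`
units whose stored fit agrees with toy.test_fit(net.Q), and returns
(goodfiles, firing_rates, gains, errors, peaks, modfit, peakmodfits).
Files that fail to load or validate are counted and reported as errors."""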
filelist = listdir(folder)
goodfiles = []
firing_rates = []
gains = []
errors = []
modfit = []
peaks = []
peakmodfits = []
exceptions = []
for file in filelist:
dsflag = False
if 'ds' in file:
dsflag = file.split('ds')[1].startswith(str(ds))
if file.endswith('.pickle') and file.startswith(filestart) and dsflag:
file = folder+file
try:
net.load(file)
except BaseException as ee:
exceptions.append(file)
continue
try:
fit = np.load(file + 'fit.npy')
except FileNotFoundError:
fit = net.modfits
ok = net.nunits == nunits
directtest = toy.test_fit(net.Q)
ok = ok and not (directtest - fit[-1]) > 0.01 and fit[-1] != 0
if ok:
modfit.append(fit[-1])
err = np.mean(net.errorhist[-1000:])
goodfiles.append(file)
errors.append(err)
firing_rates.append(net.p)
gains.append(net.gain)
peaks.append(np.min(net.errorhist))
peakmodfits.append(np.max(fit))
else:
exceptions.append(file)
print('Errors on ', str(len(exceptions)), ' files')
if len(goodfiles) == 0:
if len(exceptions) == 0:
raise FileNotFoundError('No valid files found.')
raise RuntimeError('Failed to load any files; first failure in ' + str(exceptions[0]))
return (goodfiles, firing_rates, gains, errors, peaks, modfit, peakmodfits)
def hp_scatter(firing_rates, gains, modfits, fig=None, ax=None):
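"""Scatter plot of model fit over the (firing rate, gain) hyperparameter grid,
with NaN fits shown as 0 and both axes logarithmic."""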
if fig is None:
fig = plt.figure()
if ax is None:
ax = fig.add_subplot(111)
modfits = [0 if np.isnan(mf) else mf for mf in modfits]
sc = ax.scatter(firing_rates, gains, c=modfits, cmap='viridis', s=200)
ax.set_xlabel('Firing rate p')
ax.set_ylabel('Gain')
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlim([np.min(firing_rates)*0.8, np.max(firing_rates)*1.2])
ax.set_ylim([np.min(gains)*0.8, np.max(gains)*1.2])
fig.colorbar(sc, ax=ax)
def err_hp_scatter(firing_rates, gains, errors, fig=None, ax=None):
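"""Scatter plot of reconstruction error over the (firing rate, gain) grid.
Runs with error >= 1 or NaN error are marked with red crosses instead of
being colored; both axes are logarithmic."""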
if fig is None:
fig = plt.figure()
if ax is None:
ax = fig.add_subplot(111)
goodfr = [firing_rates[ii] for ii in range(len(errors)) if errors[ii] < 1.0]
goodg = [gains[ii] for ii in range(len(errors)) if errors[ii] < 1.0]
badfr = [firing_rates[ii] for ii in range(len(errors)) if errors[ii] >= 1.0 or np.isnan(errors[ii])]
badg = [gains[ii] for ii in range(len(errors)) if errors[ii] >= 1.0 or np.isnan(errors[ii])]
errors = [er for er in errors if er < 1.0]
sc = ax.scatter(goodfr, goodg, c=errors, cmap='viridis_r', s=200)
fig.colorbar(sc, ax=ax)
ax.set_xlabel('Firing rate p')
ax.set_ylabel('Gain')
ax.scatter(badfr, badg, c='r', s=50, marker='x')
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlim([np.min(firing_rates)*0.8, np.max(firing_rates)*1.2])
import Globals
import tkinter as tk
from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog,\
PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, \
FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT
import os
from os.path import normpath, basename
from PIL import Image, ImageTk
import cv2
from cv2 import imread, IMREAD_ANYCOLOR, IMREAD_ANYDEPTH, imwrite
import pydicom
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
import matplotlib as mpl
from matplotlib import cm
import matplotlib.pyplot as plt
import numpy as np
def clearAll():
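"""Reset the profile-tab state in Globals (isocenter, ROI, markers, loaded
film/doseplan/rtplan data) and restore the upload buttons and key bindings
to their initial state."""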
Globals.profiles_film_orientation.set('-')
Globals.profiles_film_orientation_menu.config(state=ACTIVE, bg = '#ffffff', width=15, relief=FLAT)
#Globals.profiles_depth.config(state=NORMAL, fg='black')
#Globals.profiles_depth.delete('1.0', END)
#Globals.profiles_depth.insert(INSERT, " ")
Globals.profiles_iscoenter_coords = []
Globals.profiles_film_isocenter = None
Globals.profiles_film_reference_point = None
Globals.profiles_mark_isocenter_up_down_line = []
Globals.profiles_mark_isocenter_right_left_line = []
Globals.profiles_mark_isocenter_oval = []
Globals.profiles_mark_reference_point_oval = []
Globals.profiles_mark_ROI_rectangle = []
Globals.profiles_ROI_coords = []
#if(Globals.profiles_isocenter_check and Globals.profiles_ROI_check):
# Globals.profiles_done_button.config(state=DISABLED)
Globals.profiles_isocenter_check = False
Globals.profiles_ROI_check = False
Globals.profiles_reference_point_check = False
Globals.profiles_ROI_reference_point_check = False
#if(Globals.profiles_film_window_open):
# Globals.profiles_film_window.destroy()
# Globals.profiles_film_window_open = False
Globals.profiles_upload_button_film.config(state=ACTIVE)
Globals.profiles_upload_button_doseplan.config(state=DISABLED)
Globals.profiles_upload_button_rtplan.config(state=DISABLED)
Globals.profiles_distance_isocenter_ROI = []
Globals.profiles_film_dataset = None
Globals.profiles_film_dataset_red_channel = None
Globals.profiles_film_dataset_ROI = None
Globals.profiles_film_dataset_ROI_red_channel = None
Globals.profiles_film_match_isocenter_dataset = np.zeros((7,7))
Globals.profiles_dataset_doseplan = None
Globals.profiles_dataset_rtplan = None
Globals.profiles_isocenter_mm = None
Globals.profiles_test_if_added_rtplan = False
Globals.profiles_test_if_added_doseplan = False
Globals.tab4_canvas.unbind("<Up>")
Globals.tab4_canvas.unbind("<Down>")
return
def getCoordsInRandomLine(x1,y1,x2,y2):
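"""Return the integer grid points on the segment from (x1, y1) to (x2, y2)
using Bresenham's line algorithm, e.g. getCoordsInRandomLine(0, 0, 3, 1)
gives [(0, 0), (1, 0), (2, 1), (3, 1)]."""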
points = []
issteep = abs(y2 - y1) > abs(x2 - x1)
if issteep:
x1, y1 = y1, x1
x2, y2 = y2, x2
rev = False
if x1 > x2:
x1, x2 = x2, x1
y1, y2 = y2, y1
rev = True
deltax = x2 - x1
deltay = abs(y2-y1)
error = int(deltax / 2)
y = y1
ystep = None
if y1 < y2:
ystep = 1
else:
ystep = -1
for x in range(x1, x2 + 1):
if issteep:
points.append((y, x))
else:
points.append((x, y))
error -= deltay
if error < 0:
y += ystep
error += deltax
# Reverse the list if the coordinates were reversed
if rev:
points.reverse()
return points
def drawProfiles(even):
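"""Plot film and doseplan dose profiles along the selected line.
Depending on Globals.profiles_choice_of_profile_line_type ('h', 'v' or 'd'),
a horizontal, vertical or freely drawn diagonal profile line is drawn on the
film/doseplan canvases, arrow keys or the mouse are bound to move it, and the
matplotlib comparison figure is redrawn. When `even` is True only an already
defined diagonal profile is redrawn."""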
if Globals.profiles_choice_of_profile_line_type.get() == 'h' or Globals.profiles_choice_of_profile_line_type.get() == 'v':
Globals.profiles_lines = []
if Globals.profiles_dataset_doseplan is None:
return
Globals.profiles_adjust_button_right.config(state=ACTIVE)
Globals.profiles_adjust_button_left.config(state=ACTIVE)
Globals.profiles_adjust_button_down.config(state=ACTIVE)
Globals.profiles_adjust_button_up.config(state=ACTIVE)
Globals.profiles_adjust_button_return.config(state=ACTIVE)
def draw(line_orient, dataset_film, dataset_doseplan):
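# line_orient is 'h' (horizontal), 'v' (vertical) or 'd' (user-drawn diagonal).
# Film dose values are divided by 100 before plotting, presumably to convert
# cGy to the Gy scale used by the doseplan.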
Globals.profile_plot_canvas.delete('all')
fig= Figure(figsize=(5,3))
a = fig.add_subplot(111)
plot_canvas = FigureCanvasTkAgg(fig, master=Globals.profile_plot_canvas)
plot_canvas.get_tk_widget().grid(row=0,column=0,columnspan=4, sticky=N+E+W+S, padx=(5,0), pady=(0,0))
#annotation = a.annotate("HEI", xy=(0,0), xytext=(0,20))
#annotation.set_visible(False)
#txt = tk.Text(Globals.profile_plot_canvas, width=50, height=6)
#txt.insert(INSERT, " ")
#txt.grid(row=1, column = 1, sticky=N+E+W+S, pady=(5,0), padx=(5,0))
#txt.config(bg='#ffffff', font=('calibri', '10'), state=DISABLED, relief=FLAT, bd= 0)
cols = (' ', 'Point match', 'Distance', 'Dose', 'Rel. to max', 'Rel. to target')
listBox = ttk.Treeview(Globals.profile_plot_canvas, columns=cols, show='headings')
for col in cols:
listBox.heading(col, text=col, anchor=W)
listBox.column(col ,width=84, stretch=False, anchor=W)
listBox.grid(row=1, column=0, columnspan=4)
lst = [['Film: ', ' ', ' ', ' ', ' ', ' '],\
['Doseplan: ', ' ', ' ', ' ', ' ', ' ']]
for i, (name, m, dis, d, rdROI, rdTarget) in enumerate(lst):
listBox.insert("", "end", values=(name, m, dis, d, rdROI, rdTarget))
#a.text(0,0, "", fontsize=7, bbox=dict(facecolor='gray', alpha=0.1))
#txt.set_visible(False)
v_line = a.axvline(x=0, ymin=0, ymax=50, c='gray')
#v_line.set_visible(False)
if line_orient == 'h':
if(Globals.profiles_dataset_doseplan.PixelSpacing==[1, 1]):
dy = Globals.profiles_doseplan_dataset_ROI.shape[1]/2
elif(Globals.profiles_dataset_doseplan.PixelSpacing==[2, 2]):
dy = Globals.profiles_doseplan_dataset_ROI.shape[1]*2/2
else:
dy = Globals.profiles_doseplan_dataset_ROI.shape[1]*3/2
dx = dataset_film.shape[1]*0.2/2
x = np.linspace(-dx,dx, dataset_film.shape[1])
y = np.linspace(-dy,dy, Globals.profiles_doseplan_dataset_ROI.shape[1])
plot_film = dataset_film[Globals.profiles_coordinate_in_dataset,:]/100
plot_doseplan = dataset_doseplan[Globals.profiles_coordinate_in_dataset, :]
film = a.plot(x,plot_film, color='r', label='Film')
dose = a.plot(y,plot_doseplan, color='b', label='Doseplan')
elif line_orient == 'v':
if(Globals.profiles_dataset_doseplan.PixelSpacing==[1, 1]):
dy = Globals.profiles_doseplan_dataset_ROI.shape[0]/2
elif(Globals.profiles_dataset_doseplan.PixelSpacing==[2, 2]):
dy = Globals.profiles_doseplan_dataset_ROI.shape[0]*2/2
else:
dy = Globals.profiles_doseplan_dataset_ROI.shape[0]*3/2
dx = dataset_film.shape[0]*0.2/2
x = np.linspace(-dx,dx, dataset_film.shape[0])
y = np.linspace(-dy,dy, Globals.profiles_doseplan_dataset_ROI.shape[0])
plot_film = dataset_film[:,Globals.profiles_coordinate_in_dataset]/100
plot_doseplan = dataset_doseplan[:, Globals.profiles_coordinate_in_dataset] #Globals.profiles_doseplan_dataset_ROI
film=a.plot(x,plot_film, color='r', label='Film')
dose=a.plot(y,plot_doseplan, color='b', label='Doseplan')
elif line_orient == 'd':
start_f_x, start_f_y = Globals.profiles_line_coords_film[0]
end_f_x, end_f_y = Globals.end_point
dx=np.sqrt(((end_f_x-start_f_x)*0.2)**2 + ((end_f_y-start_f_y)*0.2)**2)/2
if(Globals.profiles_dataset_doseplan.PixelSpacing==[1, 1]):
start_d_x, start_d_y = Globals.profiles_line_coords_doseplan[0]
end_d_x, end_d_y = Globals.end_point
end_d_x=end_d_x/5; end_d_y=end_d_y/5
dy=np.sqrt(((end_d_x-start_d_x))**2 + ((end_d_y-start_d_y))**2)/2
elif(Globals.profiles_dataset_doseplan.PixelSpacing==[2, 2]):
start_d_x, start_d_y = Globals.profiles_line_coords_doseplan[0]
end_d_x, end_d_y = Globals.end_point
end_d_x=end_d_x/10; end_d_y=end_d_y/10
dy=np.sqrt(((end_d_x-start_d_x)*2)**2 + ((end_d_y-start_d_y)*2)**2)/2
else:
start_d_x, start_d_y = Globals.profiles_line_coords_doseplan[0]
end_d_x, end_d_y = Globals.end_point
end_d_x=end_d_x/15; end_d_y=end_d_y/15
dy=np.sqrt(((end_d_x-start_d_x)*3)**2 + ((end_d_y-start_d_y)*3)**2)/2
print(dx, dy)
x = np.linspace(-dx,dx,len(dataset_film))
y = np.linspace(-dy,dy,len(dataset_doseplan))
plot_film=dataset_film/100
plot_doseplan=dataset_doseplan
film = a.plot(x,plot_film, color='r', label='Film')
dose= a.plot(y,plot_doseplan, 'b', label='Doseplan')
else:
messagebox.showerror("Error", "Fatal error. Something has gone wrong, try again \n(Code: draw")
return
a.legend()
a.set_title("Profiles", fontsize=12)
a.set_ylabel("Dose (Gy)", fontsize=12)
a.set_xlabel("Distance (mm)", fontsize=12)
def mouseMove(event):
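# Find the film and doseplan samples nearest to the cursor position, refresh
# the comparison table and move the vertical crosshair to the cursor.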
if event.inaxes == a:
dist = event.xdata
idx_film = np.searchsorted(x, dist)
idx_doseplan = np.searchsorted(y, dist)
if idx_film == 0:
idx_film = 0
elif idx_film == len(x):
idx_film = len(x)-1
else:
if abs(x[idx_film-1]-dist) < abs(x[idx_film]-dist):
idx_film = idx_film-1
else:
idx_film = idx_film
if idx_doseplan == 0:
idx_doseplan = 0
elif idx_doseplan == len(y):
idx_doseplan = len(y)-1
else:
if abs(y[idx_doseplan-1]-dist) < abs(y[idx_doseplan]-dist):
idx_doseplan = idx_doseplan-1
else:
idx_doseplan = idx_doseplan
idx_film = int(np.round(idx_film))
if idx_film < 0:
idx_film = 0
if idx_film >= len(plot_film):
idx_film = len(plot_film) - 1
#if Globals.profiles_dataset_doseplan.PixelSpacing == [1, 1]:
# idx_doseplan = int(np.round(idx_doseplan/1))
#elif Globals.profiles_dataset_doseplan.PixelSpacing == [2, 2]:
# idx_doseplan = int(np.round(idx_doseplan/2))
##else:
# idx_doseplan = np.round(idx_doseplan/3)
idx_doseplan = int(np.round(idx_doseplan))
if idx_doseplan < 0:
idx_doseplan = 0
if idx_doseplan >= len(plot_doseplan):
idx_doseplan = len(plot_doseplan) - 1
match_text = "\tGraph match: \t"
match = str(np.round(min(plot_film[idx_film], plot_doseplan[idx_doseplan])/max(plot_film[idx_film], plot_doseplan[idx_doseplan])*100, 2)) + "\n"
distance_text = "Distance:\t "
dose_text = "Dose: \t"
rel_target_dose_text = "Relative to target dose: \t "
rel_mx_dose_ROI_text = "Relative to max dose in ROI: \n"
distance = str(np.round(dist,2)) + "\n"
film = "FILM: \t"
dose_film = str(np.round(plot_film[idx_film],2)) + "\t"
rel_target_dose_film = str(np.round(100*plot_film[idx_film]/Globals.max_dose_doseplan,2)) + "\t\t\t"
rel_mx_dose_ROI_film = str(np.round(100*plot_film[idx_film]/np.max(plot_film),2)) + "\n"
doseplan = "DOSEPLAN: \t"
dose_doseplan = str(np.round(plot_doseplan[idx_doseplan],2)) + "\t"
rel_target_dose_doseplan = str(np.round(100*plot_doseplan[idx_doseplan]/Globals.max_dose_doseplan,2)) + "\t\t\t"
rel_mx_dose_ROI_doseplan = str(np.round(100*plot_doseplan[idx_doseplan]/np.max(plot_doseplan),2))
notation = match_text + distance_text + dose_text + rel_target_dose_text + rel_mx_dose_ROI_text +\
film + dose_film + rel_target_dose_film + rel_mx_dose_ROI_film+\
doseplan + dose_doseplan + rel_target_dose_doseplan + rel_mx_dose_ROI_doseplan
children = listBox.get_children()
for item in children:
listBox.delete(item)
lst = [['Film: ', match, distance, dose_film, rel_mx_dose_ROI_film, rel_target_dose_film],\
['Doseplan: ', match, distance, dose_doseplan, rel_mx_dose_ROI_doseplan, rel_target_dose_doseplan]]
for i, (name, m, dis, d, rdROI, rdTarget) in enumerate(lst):
listBox.insert("", "end", values=(name, m, dis, d, rdROI, rdTarget))
y_min = max(plot_film[idx_film], plot_doseplan[idx_doseplan])-0.3*max(np.max(plot_film), np.max(plot_doseplan))
if y_min < 0:
y_min = 0
y_max = max(plot_film[idx_film], plot_doseplan[idx_doseplan])+0.3*max(np.max(plot_film), np.max(plot_doseplan))
if y_max > max(np.max(plot_film), np.max(plot_doseplan)):
y_max = max(np.max(plot_film), np.max(plot_doseplan))
v_line.set_xdata([dist, dist])
#v_line.set_ylim(y_min,y_max)
#v_line.set_ymax = y
#v_line.set_ymax = y_max # =
#v_line = a.axvline(x=dist, ymin=0, ymax=40, c='gray')
v_line.set_visible(True)
fig.canvas.draw_idle()
def freezeData(event):
fig.canvas.mpl_disconnect(cid)
v_line.set_visible(False)
fig.canvas.draw_idle()
def startData(event):
fig.canvas.mpl_disconnect(cid2)
fig.canvas.mpl_disconnect(cid3)
draw(line_orient, dataset_film, dataset_doseplan)
cid3 = fig.canvas.mpl_connect('button_press_event', startData)
cid2 = fig.canvas.mpl_connect('button_press_event', freezeData)
else:
return
cid3 = None
cid = fig.canvas.mpl_connect('motion_notify_event', mouseMove)
fig.tight_layout()
if even:
draw('d', Globals.profiles_dataset_film_variable_draw, Globals.profiles_dataset_doesplan_variable_draw)
return
if(Globals.profiles_choice_of_profile_line_type.get() == 'h' and Globals.profiles_dataset_doseplan.PixelSpacing == [1, 1]):
dataset_film = np.zeros(\
(Globals.profiles_doseplan_dataset_ROI.shape[0], Globals.profiles_film_dataset_ROI_red_channel_dose.shape[1]))
for i in range(dataset_film.shape[0]-1):
dataset_film[i,:] = Globals.profiles_film_dataset_ROI_red_channel_dose[int((i*5)+2),:]
try:
dataset_film[dataset_film.shape[0]-1,:] = Globals.profiles_film_dataset_ROI_red_channel_dose[int((dataset_film.shape[0]-1)*5+2), :]
except:
dataset_film[dataset_film.shape[0]-1,:] = \
Globals.profiles_film_dataset_ROI_red_channel_dose[Globals.profiles_film_dataset_ROI_red_channel_dose.shape[0]-1,:]
line_doseplan = Globals.doseplan_write_image.create_line(0,Globals.doseplan_write_image_var_x,\
Globals.doseplan_write_image_width,Globals.doseplan_write_image_var_x, fill='red')
line_film_dosemap = Globals.film_dose_write_image.create_line(0,Globals.doseplan_write_image_var_x,\
Globals.doseplan_write_image_width,Globals.doseplan_write_image_var_x, fill='red')
line_film = Globals.film_write_image.create_line(0,Globals.doseplan_write_image_var_x,\
Globals.doseplan_write_image_width,Globals.doseplan_write_image_var_x, fill='red')
Globals.profiles_lines.append(line_doseplan)
Globals.profiles_lines.append(line_film_dosemap)
Globals.profiles_lines.append(line_film)
def up_button_pressed(event):
temp_x = Globals.doseplan_write_image_var_x - 5
if(temp_x < 0):
#Outside the frame
return
#inside the frame
Globals.doseplan_write_image_var_x = temp_x
Globals.profiles_coordinate_in_dataset = int(temp_x/5)
Globals.doseplan_write_image.coords(line_doseplan,0,Globals.doseplan_write_image_var_x,\
Globals.doseplan_write_image_width,Globals.doseplan_write_image_var_x)
Globals.film_dose_write_image.coords(line_film_dosemap, 0,Globals.doseplan_write_image_var_x,\
Globals.doseplan_write_image_width,Globals.doseplan_write_image_var_x)
Globals.film_write_image.coords(line_film, 0,Globals.doseplan_write_image_var_x,\
Globals.doseplan_write_image_width,Globals.doseplan_write_image_var_x)
draw('h', dataset_film, Globals.profiles_doseplan_dataset_ROI)
def down_button_pressed(event):
temp_x = Globals.doseplan_write_image_var_x + 5
if(temp_x >= Globals.doseplan_write_image_height):
#Outside the frame
return
#Inside the frame
Globals.profiles_coordinate_in_dataset = int(temp_x/5)
Globals.doseplan_write_image_var_x = temp_x
Globals.doseplan_write_image.coords(line_doseplan,0,Globals.doseplan_write_image_var_x,\
Globals.doseplan_write_image_width,Globals.doseplan_write_image_var_x)
Globals.film_dose_write_image.coords(line_film_dosemap,0,Globals.doseplan_write_image_var_x,\
Globals.doseplan_write_image_width,Globals.doseplan_write_image_var_x)
Globals.film_write_image.coords(line_film, 0,Globals.doseplan_write_image_var_x,\
Globals.doseplan_write_image_width,Globals.doseplan_write_image_var_x)
draw('h', dataset_film, Globals.profiles_doseplan_dataset_ROI)
Globals.form.bind("<Up>", up_button_pressed)
Globals.form.bind("<Down>", down_button_pressed)
if Globals.profiles_first_time_in_drawProfiles:
Globals.profiles_first_time_in_drawProfiles = False
draw('h', dataset_film, Globals.profiles_doseplan_dataset_ROI)
elif(Globals.profiles_choice_of_profile_line_type.get()=='h' and Globals.profiles_dataset_doseplan.PixelSpacing==[2, 2]):
dataset_film = np.zeros(\
(Globals.profiles_doseplan_dataset_ROI.shape[0], Globals.profiles_film_dataset_ROI_red_channel_dose.shape[1]))
for i in range(dataset_film.shape[0]-1):
dataset_film[i,:] = Globals.profiles_film_dataset_ROI_red_channel_dose[int((i*10)+5),:]
try:
dataset_film[dataset_film.shape[0]-1,:] = Globals.profiles_film_dataset_ROI_red_channel_dose[int((dataset_film.shape[0]-1)*10+5), :]
except:
dataset_film[dataset_film.shape[0]-1,:] = \
Globals.profiles_film_dataset_ROI_red_channel_dose[Globals.profiles_film_dataset_ROI_red_channel_dose.shape[0]-1,:]
line_doseplan = Globals.doseplan_write_image.create_line(0,Globals.doseplan_write_image_var_x,\
Globals.doseplan_write_image_width,Globals.doseplan_write_image_var_x, fill='red')
line_film_dosemap = Globals.film_dose_write_image.create_line(0,Globals.doseplan_write_image_var_x,\
Globals.doseplan_write_image_width,Globals.doseplan_write_image_var_x, fill='red')
line_film = Globals.film_write_image.create_line(0,Globals.doseplan_write_image_var_x,\
Globals.doseplan_write_image_width,Globals.doseplan_write_image_var_x, fill='red')
Globals.profiles_lines.append(line_doseplan)
Globals.profiles_lines.append(line_film_dosemap)
Globals.profiles_lines.append(line_film)
def up_button_pressed(event):
temp_x = Globals.doseplan_write_image_var_x - 10
if(temp_x < 0):
#Outside the frame
return
#inside the frame
Globals.doseplan_write_image_var_x = temp_x
Globals.profiles_coordinate_in_dataset = int(temp_x/10)
Globals.doseplan_write_image.coords(line_doseplan,0,Globals.doseplan_write_image_var_x,\
Globals.doseplan_write_image_width,Globals.doseplan_write_image_var_x)
Globals.film_dose_write_image.coords(line_film_dosemap, 0,Globals.doseplan_write_image_var_x,\
Globals.doseplan_write_image_width,Globals.doseplan_write_image_var_x)
Globals.film_write_image.coords(line_film, 0,Globals.doseplan_write_image_var_x,\
Globals.doseplan_write_image_width,Globals.doseplan_write_image_var_x)
draw('h', dataset_film, Globals.profiles_doseplan_dataset_ROI)
def down_button_pressed(event):
temp_x = Globals.doseplan_write_image_var_x + 10
if(temp_x >= Globals.doseplan_write_image_height):
#Outside the frame
return
#Inside the frame
Globals.profiles_coordinate_in_dataset = int(temp_x/10)
Globals.doseplan_write_image_var_x = temp_x
Globals.doseplan_write_image.coords(line_doseplan,0,Globals.doseplan_write_image_var_x,\
Globals.doseplan_write_image_width,Globals.doseplan_write_image_var_x)
Globals.film_dose_write_image.coords(line_film_dosemap,0,Globals.doseplan_write_image_var_x,\
Globals.doseplan_write_image_width,Globals.doseplan_write_image_var_x)
Globals.film_write_image.coords(line_film, 0,Globals.doseplan_write_image_var_x,\
Globals.doseplan_write_image_width,Globals.doseplan_write_image_var_x)
draw('h', dataset_film, Globals.profiles_doseplan_dataset_ROI)
Globals.form.bind("<Up>", up_button_pressed)
Globals.form.bind("<Down>", down_button_pressed)
if Globals.profiles_first_time_in_drawProfiles:
Globals.profiles_first_time_in_drawProfiles = False
draw('h', dataset_film, Globals.profiles_doseplan_dataset_ROI)
elif(Globals.profiles_choice_of_profile_line_type.get() == 'h' and Globals.profiles_dataset_doseplan.PixelSpacing==[3, 3]):
dataset_film = np.zeros(\
(Globals.profiles_doseplan_dataset_ROI.shape[0], Globals.profiles_film_dataset_ROI_red_channel_dose.shape[1]))
for i in range(dataset_film.shape[0]-1):
dataset_film[i,:] = Globals.profiles_film_dataset_ROI_red_channel_dose[int((i*15)+7),:]
try:
dataset_film[dataset_film.shape[0]-1,:] = Globals.profiles_film_dataset_ROI_red_channel_dose[int((dataset_film.shape[0]-1)*15+7), :]
except:
dataset_film[dataset_film.shape[0]-1,:] = \
Globals.profiles_film_dataset_ROI_red_channel_dose[Globals.profiles_film_dataset_ROI_red_channel_dose.shape[0]-1,:]
line_doseplan = Globals.doseplan_write_image.create_line(0,Globals.doseplan_write_image_var_x,\
Globals.doseplan_write_image_width,Globals.doseplan_write_image_var_x, fill='red')
line_film_dosemap = Globals.film_dose_write_image.create_line(0,Globals.doseplan_write_image_var_x,\
Globals.doseplan_write_image_width,Globals.doseplan_write_image_var_x, fill='red')
line_film = Globals.film_write_image.create_line(0,Globals.doseplan_write_image_var_x,\
Globals.doseplan_write_image_width,Globals.doseplan_write_image_var_x, fill='red')
Globals.profiles_lines.append(line_doseplan)
Globals.profiles_lines.append(line_film_dosemap)
Globals.profiles_lines.append(line_film)
def up_button_pressed(event):
temp_x = Globals.doseplan_write_image_var_x - 15
if(temp_x < 0):
#Outside the frame
return
#inside the frame
Globals.doseplan_write_image_var_x = temp_x
Globals.profiles_coordinate_in_dataset = int(temp_x/15)
Globals.doseplan_write_image.coords(line_doseplan,0,Globals.doseplan_write_image_var_x,\
Globals.doseplan_write_image_width,Globals.doseplan_write_image_var_x)
Globals.film_dose_write_image.coords(line_film_dosemap, 0,Globals.doseplan_write_image_var_x,\
Globals.doseplan_write_image_width,Globals.doseplan_write_image_var_x)
Globals.film_write_image.coords(line_film, 0,Globals.doseplan_write_image_var_x,\
Globals.doseplan_write_image_width,Globals.doseplan_write_image_var_x)
draw('h', dataset_film, Globals.profiles_doseplan_dataset_ROI)
def down_button_pressed(event):
temp_x = Globals.doseplan_write_image_var_x + 15
if(temp_x >= Globals.doseplan_write_image_height):
#Outside the frame
return
#Inside the frame
Globals.profiles_coordinate_in_dataset = int(temp_x/15)
Globals.doseplan_write_image_var_x = temp_x
Globals.doseplan_write_image.coords(line_doseplan,0,Globals.doseplan_write_image_var_x,\
Globals.doseplan_write_image_width,Globals.doseplan_write_image_var_x)
Globals.film_dose_write_image.coords(line_film_dosemap,0,Globals.doseplan_write_image_var_x,\
Globals.doseplan_write_image_width,Globals.doseplan_write_image_var_x)
Globals.film_write_image.coords(line_film, 0,Globals.doseplan_write_image_var_x,\
Globals.doseplan_write_image_width,Globals.doseplan_write_image_var_x)
draw('h', dataset_film,Globals.profiles_doseplan_dataset_ROI)
Globals.form.bind("<Up>", up_button_pressed)
Globals.form.bind("<Down>", down_button_pressed)
if Globals.profiles_first_time_in_drawProfiles:
Globals.profiles_first_time_in_drawProfiles = False
draw('h', dataset_film, Globals.profiles_doseplan_dataset_ROI)
elif(Globals.profiles_choice_of_profile_line_type.get() == 'v' and Globals.profiles_dataset_doseplan.PixelSpacing == [1, 1]):
dataset_film = np.zeros(\
(Globals.profiles_film_dataset_ROI_red_channel_dose.shape[0], Globals.profiles_doseplan_dataset_ROI.shape[1]))
for i in range(dataset_film.shape[1]-1):
dataset_film[:,i] = Globals.profiles_film_dataset_ROI_red_channel_dose[:,int((i*5)+2)]
try:
dataset_film[:,dataset_film.shape[1]-1] = Globals.profiles_film_dataset_ROI_red_channel_dose[:,int((dataset_film.shape[1]-1)*5+2)]
except:
dataset_film[:,dataset_film.shape[1]-1] = \
Globals.profiles_film_dataset_ROI_red_channel_dose[:,Globals.profiles_film_dataset_ROI_red_channel_dose.shape[1]-1]
line_doseplan = Globals.doseplan_write_image.create_line(Globals.doseplan_write_image_var_y, 0,\
Globals.doseplan_write_image_var_y, Globals.doseplan_write_image_height, fill='red')
line_film_dosemap = Globals.film_dose_write_image.create_line(Globals.doseplan_write_image_var_y,0,\
Globals.doseplan_write_image_var_y, Globals.doseplan_write_image_height, fill='red')
line_film = Globals.film_write_image.create_line(Globals.doseplan_write_image_var_y,0,\
Globals.doseplan_write_image_var_y,Globals.doseplan_write_image_height, fill='red')
Globals.profiles_lines.append(line_doseplan)
Globals.profiles_lines.append(line_film_dosemap)
Globals.profiles_lines.append(line_film)
def left_button_pressed(event):
temp_y = Globals.doseplan_write_image_var_y - 5
if(temp_y < 0):
#Outside the frame
return
#inside the frame
Globals.doseplan_write_image_var_y = temp_y
Globals.profiles_coordinate_in_dataset = int(temp_y/5)
Globals.doseplan_write_image.coords(line_doseplan,Globals.doseplan_write_image_var_y, 0,\
Globals.doseplan_write_image_var_y, Globals.doseplan_write_image_height)
Globals.film_dose_write_image.coords(line_film_dosemap, Globals.doseplan_write_image_var_y, 0,\
Globals.doseplan_write_image_var_y, Globals.doseplan_write_image_height)
Globals.film_write_image.coords(line_film, Globals.doseplan_write_image_var_y, 0,\
Globals.doseplan_write_image_var_y, Globals.doseplan_write_image_height)
draw('v', dataset_film, Globals.profiles_doseplan_dataset_ROI)
def right_button_pressed(event):
temp_y = Globals.doseplan_write_image_var_y + 5
if(temp_y >= Globals.doseplan_write_image_width):
#Outside the frame
return
#Inside the frame
Globals.profiles_coordinate_in_dataset = int(temp_y/5)
Globals.doseplan_write_image_var_y = temp_y
Globals.doseplan_write_image.coords(line_doseplan,Globals.doseplan_write_image_var_y, 0,\
Globals.doseplan_write_image_var_y, Globals.doseplan_write_image_height)
Globals.film_dose_write_image.coords(line_film_dosemap,Globals.doseplan_write_image_var_y, 0,\
Globals.doseplan_write_image_var_y, Globals.doseplan_write_image_height)
Globals.film_write_image.coords(line_film, Globals.doseplan_write_image_var_y, 0,\
Globals.doseplan_write_image_var_y, Globals.doseplan_write_image_height)
draw('v', dataset_film, Globals.profiles_doseplan_dataset_ROI)
Globals.form.bind("<Left>", left_button_pressed)
Globals.form.bind("<Right>", right_button_pressed)
if Globals.profiles_first_time_in_drawProfiles:
Globals.profiles_first_time_in_drawProfiles = False
draw('v', dataset_film, Globals.profiles_doseplan_dataset_ROI)
elif(Globals.profiles_choice_of_profile_line_type.get() == 'v' and Globals.profiles_dataset_doseplan.PixelSpacing == [2, 2]):
dataset_film = np.zeros(\
(Globals.profiles_film_dataset_ROI_red_channel_dose.shape[0], Globals.profiles_doseplan_dataset_ROI.shape[1]))
for i in range(dataset_film.shape[1]-1):
dataset_film[:,i] = Globals.profiles_film_dataset_ROI_red_channel_dose[:,int((i*10)+5)]
try:
dataset_film[:,dataset_film.shape[1]-1] = Globals.profiles_film_dataset_ROI_red_channel_dose[:,int((dataset_film.shape[1]-1)*10+5)]
except:
dataset_film[:,dataset_film.shape[1]-1] = \
Globals.profiles_film_dataset_ROI_red_channel_dose[:,Globals.profiles_film_dataset_ROI_red_channel_dose.shape[1]-1]
line_doseplan = Globals.doseplan_write_image.create_line(Globals.doseplan_write_image_var_y, 0,\
Globals.doseplan_write_image_var_y, Globals.doseplan_write_image_height, fill='red')
line_film_dosemap = Globals.film_dose_write_image.create_line(Globals.doseplan_write_image_var_y,0,\
Globals.doseplan_write_image_var_y, Globals.doseplan_write_image_height, fill='red')
line_film = Globals.film_write_image.create_line(Globals.doseplan_write_image_var_y,0,\
Globals.doseplan_write_image_var_y,Globals.doseplan_write_image_height, fill='red')
Globals.profiles_lines.append(line_doseplan)
Globals.profiles_lines.append(line_film_dosemap)
Globals.profiles_lines.append(line_film)
def left_button_pressed(event):
temp_y = Globals.doseplan_write_image_var_y - 10
if(temp_y < 0):
#Outside the frame
return
#inside the frame
Globals.doseplan_write_image_var_y = temp_y
Globals.profiles_coordinate_in_dataset = int(temp_y/10)
Globals.doseplan_write_image.coords(line_doseplan,Globals.doseplan_write_image_var_y, 0,\
Globals.doseplan_write_image_var_y, Globals.doseplan_write_image_height)
Globals.film_dose_write_image.coords(line_film_dosemap, Globals.doseplan_write_image_var_y, 0,\
Globals.doseplan_write_image_var_y, Globals.doseplan_write_image_height)
Globals.film_write_image.coords(line_film, Globals.doseplan_write_image_var_y, 0,\
Globals.doseplan_write_image_var_y, Globals.doseplan_write_image_height)
draw('v', dataset_film, Globals.profiles_doseplan_dataset_ROI)
def right_button_pressed(event):
temp_y = Globals.doseplan_write_image_var_y + 10
if(temp_y >= Globals.doseplan_write_image_width):
#Outside the frame
return
#Inside the frame
Globals.profiles_coordinate_in_dataset = int(temp_y/10)
Globals.doseplan_write_image_var_y = temp_y
Globals.doseplan_write_image.coords(line_doseplan,Globals.doseplan_write_image_var_y, 0,\
Globals.doseplan_write_image_var_y, Globals.doseplan_write_image_height)
Globals.film_dose_write_image.coords(line_film_dosemap,Globals.doseplan_write_image_var_y, 0,\
Globals.doseplan_write_image_var_y, Globals.doseplan_write_image_height)
Globals.film_write_image.coords(line_film, Globals.doseplan_write_image_var_y, 0,\
Globals.doseplan_write_image_var_y, Globals.doseplan_write_image_height)
draw('v', dataset_film, Globals.profiles_doseplan_dataset_ROI)
Globals.form.bind("<Left>", left_button_pressed)
Globals.form.bind("<Right>", right_button_pressed)
if Globals.profiles_first_time_in_drawProfiles:
Globals.profiles_first_time_in_drawProfiles = False
draw('v', dataset_film, Globals.profiles_doseplan_dataset_ROI)
elif(Globals.profiles_choice_of_profile_line_type.get() == 'v' and Globals.profiles_dataset_doseplan.PixelSpacing == [3, 3]):
dataset_film = np.zeros(\
(Globals.profiles_film_dataset_ROI_red_channel_dose.shape[0], Globals.profiles_doseplan_dataset_ROI.shape[1]))
for i in range(dataset_film.shape[1]-1):
dataset_film[:,i] = Globals.profiles_film_dataset_ROI_red_channel_dose[:,int((i*15)+7)]
try:
dataset_film[:,dataset_film.shape[1]-1] = Globals.profiles_film_dataset_ROI_red_channel_dose[:,int((dataset_film.shape[1]-1)*15+7)]
except:
dataset_film[:,dataset_film.shape[1]-1] = \
Globals.profiles_film_dataset_ROI_red_channel_dose[:,Globals.profiles_film_dataset_ROI_red_channel_dose.shape[1]-1]
line_doseplan = Globals.doseplan_write_image.create_line(Globals.doseplan_write_image_var_y, 0,\
Globals.doseplan_write_image_var_y, Globals.doseplan_write_image_height, fill='red')
line_film_dosemap = Globals.film_dose_write_image.create_line(Globals.doseplan_write_image_var_y,0,\
Globals.doseplan_write_image_var_y, Globals.doseplan_write_image_height, fill='red')
line_film = Globals.film_write_image.create_line(Globals.doseplan_write_image_var_y,0,\
Globals.doseplan_write_image_var_y,Globals.doseplan_write_image_height, fill='red')
Globals.profiles_lines.append(line_doseplan)
Globals.profiles_lines.append(line_film_dosemap)
Globals.profiles_lines.append(line_film)
def left_button_pressed(event):
temp_y = Globals.doseplan_write_image_var_y - 15
if(temp_y < 0):
#Outside the frame
return
#inside the frame
Globals.doseplan_write_image_var_y = temp_y
Globals.profiles_coordinate_in_dataset = int(temp_y/15)
Globals.doseplan_write_image.coords(line_doseplan,Globals.doseplan_write_image_var_y, 0,\
Globals.doseplan_write_image_var_y, Globals.doseplan_write_image_height)
Globals.film_dose_write_image.coords(line_film_dosemap, Globals.doseplan_write_image_var_y, 0,\
Globals.doseplan_write_image_var_y, Globals.doseplan_write_image_height)
Globals.film_write_image.coords(line_film, Globals.doseplan_write_image_var_y, 0,\
Globals.doseplan_write_image_var_y, Globals.doseplan_write_image_height)
draw('v', dataset_film, Globals.profiles_doseplan_dataset_ROI)
def right_button_pressed(event):
temp_y = Globals.doseplan_write_image_var_y + 15
if(temp_y >= Globals.doseplan_write_image_width):
#Outside the frame
return
#Inside the frame
Globals.profiles_coordinate_in_dataset = int(temp_y/15)
Globals.doseplan_write_image_var_y = temp_y
Globals.doseplan_write_image.coords(line_doseplan,Globals.doseplan_write_image_var_y, 0,\
Globals.doseplan_write_image_var_y, Globals.doseplan_write_image_height)
Globals.film_dose_write_image.coords(line_film_dosemap,Globals.doseplan_write_image_var_y, 0,\
Globals.doseplan_write_image_var_y, Globals.doseplan_write_image_height)
Globals.film_write_image.coords(line_film, Globals.doseplan_write_image_var_y, 0,\
Globals.doseplan_write_image_var_y, Globals.doseplan_write_image_height)
draw('v', dataset_film, Globals.profiles_doseplan_dataset_ROI)
Globals.form.bind("<Left>", left_button_pressed)
Globals.form.bind("<Right>", right_button_pressed)
if Globals.profiles_first_time_in_drawProfiles:
Globals.profiles_first_time_in_drawProfiles = False
draw('v', dataset_film, Globals.profiles_doseplan_dataset_ROI)
elif(Globals.profiles_choice_of_profile_line_type.get() == 'd' and Globals.profiles_dataset_doseplan.PixelSpacing == [1, 1]):
start_point = [0, 0]
line_doseplan = line_film_dosemap = line_film = None
def mousePushed(event):
nonlocal start_point, line_doseplan, line_film_dosemap, line_film
start_point = [event.y, event.x]
if not len(Globals.profiles_lines)==0:
Globals.doseplan_write_image.delete(Globals.profiles_lines[0])
Globals.film_dose_write_image.delete(Globals.profiles_lines[1])
Globals.film_write_image.delete(Globals.profiles_lines[2])
Globals.profiles_lines = []
line_doseplan = Globals.doseplan_write_image.create_line(start_point[1], start_point[0],start_point[1],start_point[0], fill='red')
line_film_dosemap = Globals.film_dose_write_image.create_line(start_point[1], start_point[0],start_point[1],start_point[0], fill='red')
line_film = Globals.film_write_image.create_line(start_point[1], start_point[0],start_point[1],start_point[0], fill='red')
Globals.profiles_lines.append(line_doseplan)
Globals.profiles_lines.append(line_film_dosemap)
Globals.profiles_lines.append(line_film)
def mouseMoving(event):
Globals.doseplan_write_image.coords(line_doseplan, start_point[1], start_point[0], event.x, event.y)
Globals.film_dose_write_image.coords(line_film_dosemap, start_point[1], start_point[0], event.x, event.y)
Globals.film_write_image.coords(line_film, start_point[1], start_point[0], event.x, event.y)
Globals.film_dose_write_image.bind("<B1-Motion>", mouseMoving)
def mouseReleased(event):
Globals.end_point = [event.y, event.x]
Globals.doseplan_write_image.coords(line_doseplan, start_point[1], start_point[0], event.x, event.y)
Globals.film_dose_write_image.coords(line_film_dosemap, start_point[1], start_point[0], event.x, event.y)
Globals.film_write_image.coords(line_film, start_point[1], start_point[0], event.x, event.y)
Globals.profiles_line_coords_film = getCoordsInRandomLine(start_point[1], start_point[0], Globals.end_point[1], Globals.end_point[0])
Globals.profiles_line_coords_doseplan = getCoordsInRandomLine(int(start_point[1]/5), int(start_point[0]/5), \
int(Globals.end_point[1]/5), int(Globals.end_point[0]/5))
Globals.profiles_dataset_film_variable_draw = np.zeros(len(Globals.profiles_line_coords_film))
Globals.profiles_dataset_doesplan_variable_draw=np.zeros(len(Globals.profiles_line_coords_doseplan))
for i in range(len(Globals.profiles_dataset_film_variable_draw)):
coord = Globals.profiles_line_coords_film[i]
try:
Globals.profiles_dataset_film_variable_draw[i] = Globals.profiles_film_dataset_ROI_red_channel_dose[coord[0]-1, coord[1]-1]
except:
return
for i in range(len(Globals.profiles_dataset_doesplan_variable_draw)):
coord = Globals.profiles_line_coords_doseplan[i]
try:
Globals.profiles_dataset_doesplan_variable_draw[i] = Globals.profiles_doseplan_dataset_ROI[coord[0]-1, coord[1]-1]
except:
return
draw('d', Globals.profiles_dataset_film_variable_draw, Globals.profiles_dataset_doesplan_variable_draw)
Globals.film_dose_write_image.bind("<ButtonRelease-1>", mouseReleased)
Globals.film_dose_write_image.bind("<Button-1>", mousePushed)
elif(Globals.profiles_choice_of_profile_line_type.get() == 'd' and Globals.profiles_dataset_doseplan.PixelSpacing == [2, 2]):
start_point = [0, 0]
line_doseplan = line_film_dosemap = line_film = None
def mousePushed(event):
nonlocal start_point, line_doseplan, line_film_dosemap, line_film
start_point = [event.y, event.x]
if not len(Globals.profiles_lines)==0:
Globals.doseplan_write_image.delete(Globals.profiles_lines[0])
Globals.film_dose_write_image.delete(Globals.profiles_lines[1])
Globals.film_write_image.delete(Globals.profiles_lines[2])
Globals.profiles_lines = []
line_doseplan = Globals.doseplan_write_image.create_line(start_point[1], start_point[0],start_point[1],start_point[0], fill='red')
line_film_dosemap = Globals.film_dose_write_image.create_line(start_point[1], start_point[0],start_point[1],start_point[0], fill='red')
line_film = Globals.film_write_image.create_line(start_point[1], start_point[0],start_point[1],start_point[0], fill='red')
Globals.profiles_lines.append(line_doseplan)
Globals.profiles_lines.append(line_film_dosemap)
Globals.profiles_lines.append(line_film)
def mouseMoving(event):
Globals.doseplan_write_image.coords(line_doseplan, start_point[1], start_point[0], event.x, event.y)
Globals.film_dose_write_image.coords(line_film_dosemap, start_point[1], start_point[0], event.x, event.y)
Globals.film_write_image.coords(line_film, start_point[1], start_point[0], event.x, event.y)
Globals.film_dose_write_image.bind("<B1-Motion>", mouseMoving)
def mouseReleased(event):
Globals.end_point = [event.y, event.x]
Globals.doseplan_write_image.coords(line_doseplan, start_point[1], start_point[0], event.x, event.y)
Globals.film_dose_write_image.coords(line_film_dosemap, start_point[1], start_point[0], event.x, event.y)
Globals.film_write_image.coords(line_film, start_point[1], start_point[0], event.x, event.y)
Globals.profiles_line_coords_film = getCoordsInRandomLine(start_point[1], start_point[0], Globals.end_point[1], Globals.end_point[0])
Globals.profiles_line_coords_doseplan = getCoordsInRandomLine(int(start_point[1]/10), int(start_point[0]/10), \
int(Globals.end_point[1]/10), int(Globals.end_point[0]/10))
Globals.profiles_dataset_film_variable_draw = np.zeros(len(Globals.profiles_line_coords_film))
Globals.profiles_dataset_doesplan_variable_draw=np.zeros(len(Globals.profiles_line_coords_doseplan))
for i in range(len(Globals.profiles_dataset_film_variable_draw)):
coord = Globals.profiles_line_coords_film[i]
try:
Globals.profiles_dataset_film_variable_draw[i] = \
Globals.profiles_film_dataset_ROI_red_channel_dose[coord[0]-1, coord[1]-1]
except:
return
for i in range(len(Globals.profiles_dataset_doesplan_variable_draw)):
try:
Globals.profiles_dataset_doesplan_variable_draw[i] = \
Globals.profiles_doseplan_dataset_ROI[int(Globals.profiles_line_coords_doseplan[i][1])-1, int(Globals.profiles_line_coords_doseplan[i][0])-1]
except:
return
draw('d', Globals.profiles_dataset_film_variable_draw, Globals.profiles_dataset_doesplan_variable_draw)
Globals.film_dose_write_image.bind("<ButtonRelease-1>", mouseReleased)
Globals.film_dose_write_image.bind("<Button-1>", mousePushed)
elif(Globals.profiles_choice_of_profile_line_type.get() == 'd' and Globals.profiles_dataset_doseplan.PixelSpacing == [3, 3]):
start_point = [0,0]
def mousePushed(event):
start_point = [event.y, event.x]
if not len(Globals.profiles_lines)==0:
Globals.doseplan_write_image.delete(Globals.profiles_lines[0])
Globals.film_dose_write_image.delete(Globals.profiles_lines[1])
Globals.film_write_image.delete(Globals.profiles_lines[2])
Globals.profiles_lines = []
line_doseplan = Globals.doseplan_write_image.create_line(start_point[1], start_point[0],start_point[1],start_point[0], fill='red')
line_film_dosemap = Globals.film_dose_write_image.create_line(start_point[1], start_point[0],start_point[1],start_point[0], fill='red')
line_film = Globals.film_write_image.create_line(start_point[1], start_point[0],start_point[1],start_point[0], fill='red')
Globals.profiles_lines.append(line_doseplan)
Globals.profiles_lines.append(line_film_dosemap)
Globals.profiles_lines.append(line_film)
def mouseMoving(event):
Globals.doseplan_write_image.coords(line_doseplan, start_point[1], start_point[0], event.x, event.y)
Globals.film_dose_write_image.coords(line_film_dosemap, start_point[1], start_point[0], event.x, event.y)
Globals.film_write_image.coords(line_film, start_point[1], start_point[0], event.x, event.y)
Globals.film_dose_write_image.bind("<B1-Motion>", mouseMoving)
def mouseReleased(event):
Globals.end_point = [event.y, event.x]
Globals.doseplan_write_image.coords(line_doseplan, start_point[1], start_point[0], event.x, event.y)
Globals.film_dose_write_image.coords(line_film_dosemap, start_point[1], start_point[0], event.x, event.y)
Globals.film_write_image.coords(line_film, start_point[1], start_point[0], event.x, event.y)
Globals.profiles_line_coords_film = getCoordsInRandomLine(start_point[1], start_point[0], Globals.end_point[1], Globals.end_point[0])
Globals.profiles_line_coords_doseplan = getCoordsInRandomLine(int(start_point[1]/15), int(start_point[0]/15), \
int(Globals.end_point[1]/15), int(Globals.end_point[0]/15))
Globals.profiles_dataset_film_variable_draw = np.zeros(len(Globals.profiles_line_coords_film))
Globals.profiles_dataset_doesplan_variable_draw=np.zeros(len(Globals.profiles_line_coords_doseplan))
for i in range(len(Globals.profiles_dataset_film_variable_draw)):
coord = Globals.profiles_line_coords_film[i]
try:
Globals.profiles_dataset_film_variable_draw[i] = Globals.profiles_film_dataset_ROI_red_channel_dose[coord[0]-1, coord[1]-1]
except:
return
for i in range(len(Globals.profiles_dataset_doesplan_variable_draw)):
try:
Globals.profiles_dataset_doesplan_variable_draw[i] = Globals.profiles_doseplan_dataset_ROI[int(Globals.profiles_line_coords_doseplan[i][0])-1, \
int(Globals.profiles_line_coords_doseplan[i][1])-1]
except:
return
draw('d', Globals.profiles_dataset_film_variable_draw, Globals.profiles_dataset_doesplan_variable_draw)
Globals.film_dose_write_image.bind("<ButtonRelease-1>", mouseReleased)
Globals.film_dose_write_image.bind("<Button-1>", mousePushed)
else:
messagebox.showerror("Error", "Fatal error. Something went wrong, try again \n(Code: drawProfiles)")
return
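#trace_profileLineType has the (var, indx, mode) signature Tk expects from a variable-trace
#callback; it simply delegates to test_drawProfiles(), which clears any existing profile lines,
#unbinds the arrow keys and redraws the profiles from scratch.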
def trace_profileLineType(var, indx, mode):
test_drawProfiles()
def test_drawProfiles():
if Globals.profiles_dataset_doseplan is None:
return
else:
Globals.doseplan_write_image.delete(Globals.profiles_lines[0])
Globals.film_dose_write_image.delete(Globals.profiles_lines[1])
Globals.film_write_image.delete(Globals.profiles_lines[2])
Globals.form.unbind("<Up>")
Globals.form.unbind("<Down>")
Globals.form.unbind("<Left>")
Globals.form.unbind("<Rigth>")
Globals.profiles_first_time_in_drawProfiles = True
drawProfiles(False)
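#The four adjustROI* functions below share the same pattern: shift the film ROI window one pixel
#in the given direction, re-slice profiles_film_dataset_ROI_red_channel_dose from the full
#red-channel dose map, re-sample the film values along the drawn line when a free-hand ('d')
#profile is active, and finally redraw the profiles.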
def adjustROILeft(line_orient):
if not line_orient == 'd':
Globals.doseplan_write_image.delete(Globals.profiles_lines[0])
Globals.film_dose_write_image.delete(Globals.profiles_lines[1])
Globals.film_write_image.delete(Globals.profiles_lines[2])
if(Globals.profiles_film_variable_ROI_coords[2]-1 < 0):
messagebox.showwarning("Warning", "Reached end of film \n(Code: adjustROILeft)")
return
Globals.profiles_film_variable_ROI_coords = \
[Globals.profiles_film_variable_ROI_coords[0], Globals.profiles_film_variable_ROI_coords[1],\
Globals.profiles_film_variable_ROI_coords[2]-1, Globals.profiles_film_variable_ROI_coords[3]-1]
Globals.profiles_film_dataset_ROI_red_channel_dose = \
Globals.profiles_film_dataset_red_channel_dose\
[Globals.profiles_film_variable_ROI_coords[0]:Globals.profiles_film_variable_ROI_coords[1],\
Globals.profiles_film_variable_ROI_coords[2]:Globals.profiles_film_variable_ROI_coords[3]]
Globals.profiles_first_time_in_drawProfiles = True
if line_orient == 'd':
for i in range(len(Globals.profiles_dataset_film_variable_draw)):
coord = Globals.profiles_line_coords_film[i]
try:
Globals.profiles_dataset_film_variable_draw[i] = Globals.profiles_film_dataset_ROI_red_channel_dose[coord[0]-1, coord[1]-1]
except:
return
for i in range(len(Globals.profiles_dataset_doesplan_variable_draw)):
try:
Globals.profiles_dataset_doesplan_variable_draw[i] = Globals.profiles_doseplan_dataset_ROI[int(Globals.profiles_line_coords_doseplan[i][0])-1, int(Globals.profiles_line_coords_doseplan[i][1])-1]
except:
return
drawProfiles(True)
else:
drawProfiles(False)
def adjustROIRight(line_orient):
if not line_orient == 'd':
Globals.doseplan_write_image.delete(Globals.profiles_lines[0])
Globals.film_dose_write_image.delete(Globals.profiles_lines[1])
Globals.film_write_image.delete(Globals.profiles_lines[2])
if(Globals.profiles_film_variable_ROI_coords[3]+1 > Globals.profiles_film_dataset_red_channel_dose.shape[1]):
messagebox.showwarning("Warning", "Reached end of film \n(Code: adjustROIRight)")
return
Globals.profiles_film_variable_ROI_coords = \
[Globals.profiles_film_variable_ROI_coords[0], Globals.profiles_film_variable_ROI_coords[1],\
Globals.profiles_film_variable_ROI_coords[2]+1, Globals.profiles_film_variable_ROI_coords[3]+1]
Globals.profiles_film_dataset_ROI_red_channel_dose = \
Globals.profiles_film_dataset_red_channel_dose\
[Globals.profiles_film_variable_ROI_coords[0]:Globals.profiles_film_variable_ROI_coords[1],\
Globals.profiles_film_variable_ROI_coords[2]:Globals.profiles_film_variable_ROI_coords[3]]
Globals.profiles_first_time_in_drawProfiles = True
if line_orient == 'd':
for i in range(len(Globals.profiles_dataset_film_variable_draw)):
coord = Globals.profiles_line_coords_film[i]
try:
Globals.profiles_dataset_film_variable_draw[i] = Globals.profiles_film_dataset_ROI_red_channel_dose[coord[0]-1, coord[1]-1]
except:
return
for i in range(len(Globals.profiles_dataset_doesplan_variable_draw)):
try:
Globals.profiles_dataset_doesplan_variable_draw[i] = Globals.profiles_doseplan_dataset_ROI[int(Globals.profiles_line_coords_doseplan[i][0])-1, int(Globals.profiles_line_coords_doseplan[i][1])-1]
except:
return
drawProfiles(True)
else:
drawProfiles(False)
def adjustROIUp(line_orient):
if not line_orient == 'd':
Globals.doseplan_write_image.delete(Globals.profiles_lines[0])
Globals.film_dose_write_image.delete(Globals.profiles_lines[1])
Globals.film_write_image.delete(Globals.profiles_lines[2])
if(Globals.profiles_film_variable_ROI_coords[0]-1 < 0):
messagebox.showwarning("Warning", "Reached end of film \n(Code: adjustROIUp)")
return
Globals.profiles_film_variable_ROI_coords = \
[Globals.profiles_film_variable_ROI_coords[0]-1, Globals.profiles_film_variable_ROI_coords[1]-1,\
Globals.profiles_film_variable_ROI_coords[2], Globals.profiles_film_variable_ROI_coords[3]]
Globals.profiles_film_dataset_ROI_red_channel_dose = \
Globals.profiles_film_dataset_red_channel_dose\
[Globals.profiles_film_variable_ROI_coords[0]:Globals.profiles_film_variable_ROI_coords[1],\
Globals.profiles_film_variable_ROI_coords[2]:Globals.profiles_film_variable_ROI_coords[3]]
Globals.profiles_first_time_in_drawProfiles = True
if line_orient == 'd':
for i in range(len(Globals.profiles_dataset_film_variable_draw)):
coord = Globals.profiles_line_coords_film[i]
try:
Globals.profiles_dataset_film_variable_draw[i] = Globals.profiles_film_dataset_ROI_red_channel_dose[coord[0]-1, coord[1]-1]
except:
return
for i in range(len(Globals.profiles_dataset_doesplan_variable_draw)):
try:
Globals.profiles_dataset_doesplan_variable_draw[i] = Globals.profiles_doseplan_dataset_ROI[int(Globals.profiles_line_coords_doseplan[i][0])-1, int(Globals.profiles_line_coords_doseplan[i][1])-1]
except:
return
drawProfiles(True)
else:
drawProfiles(False)
def adjustROIDown(line_orient):
if not line_orient == 'd':
Globals.doseplan_write_image.delete(Globals.profiles_lines[0])
Globals.film_dose_write_image.delete(Globals.profiles_lines[1])
Globals.film_write_image.delete(Globals.profiles_lines[2])
if(Globals.profiles_film_variable_ROI_coords[1]+1 > Globals.profiles_film_dataset_red_channel_dose.shape[0]):
messagebox.showwarning("Warning", "Reached end of film \n(Code: adjustROIDown)")
return
Globals.profiles_film_variable_ROI_coords = \
[Globals.profiles_film_variable_ROI_coords[0]+1, Globals.profiles_film_variable_ROI_coords[1]+1,\
Globals.profiles_film_variable_ROI_coords[2], Globals.profiles_film_variable_ROI_coords[3]]
Globals.profiles_film_dataset_ROI_red_channel_dose = \
Globals.profiles_film_dataset_red_channel_dose\
[Globals.profiles_film_variable_ROI_coords[0]:Globals.profiles_film_variable_ROI_coords[1],\
Globals.profiles_film_variable_ROI_coords[2]:Globals.profiles_film_variable_ROI_coords[3]]
Globals.profiles_first_time_in_drawProfiles = True
if line_orient == 'd':
for i in range(len(Globals.profiles_dataset_film_variable_draw)):
coord = Globals.profiles_line_coords_film[i]
try:
Globals.profiles_dataset_film_variable_draw[i] = Globals.profiles_film_dataset_ROI_red_channel_dose[coord[0]-1, coord[1]-1]
except:
return
for i in range(len(Globals.profiles_dataset_doesplan_variable_draw)):
try:
Globals.profiles_dataset_doesplan_variable_draw[i] = Globals.profiles_doseplan_dataset_ROI[int(Globals.profiles_line_coords_doseplan[i][0])-1, int(Globals.profiles_line_coords_doseplan[i][1])-1]
except:
return
drawProfiles(True)
else:
drawProfiles(False)
def returnToOriginalROICoordinates(line_orient):
if not line_orient == 'd':
Globals.doseplan_write_image.delete(Globals.profiles_lines[0])
Globals.film_dose_write_image.delete(Globals.profiles_lines[1])
Globals.film_write_image.delete(Globals.profiles_lines[2])
Globals.profiles_film_variable_ROI_coords = \
[Globals.profiles_ROI_coords[0][1], Globals.profiles_ROI_coords[2][1],\
Globals.profiles_ROI_coords[0][0], Globals.profiles_ROI_coords[1][0]]
Globals.profiles_film_dataset_ROI_red_channel_dose = \
Globals.profiles_film_dataset_red_channel_dose\
[Globals.profiles_film_variable_ROI_coords[0]:Globals.profiles_film_variable_ROI_coords[1],\
Globals.profiles_film_variable_ROI_coords[2]:Globals.profiles_film_variable_ROI_coords[3]]
Globals.profiles_first_time_in_drawProfiles = True
if line_orient == 'd':
for i in range(len(Globals.profiles_dataset_film_variable_draw)):
coord = Globals.profiles_line_coords_film[i]
try:
Globals.profiles_dataset_film_variable_draw[i] = Globals.profiles_film_dataset_ROI_red_channel_dose[coord[0]-1, coord[1]-1]
except:
return
for i in range(len(Globals.profiles_dataset_doesplan_variable_draw)):
Globals.profiles_dataset_doesplan_variable_draw[i] = Globals.profiles_doseplan_dataset_ROI[int(Globals.profiles_line_coords_doseplan[i][0])-1, int(Globals.profiles_line_coords_doseplan[i][1])-1]
drawProfiles(True)
else:
drawProfiles(False)
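#pixel_to_dose inverts the film calibration fit P(D) = a + b/(D - c): given a red-channel pixel
#value P it returns the corresponding dose D = c + b/(P - a), where a, b and c are the fitted
#calibration coefficients produced elsewhere in the program.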
def pixel_to_dose(P,a,b,c):
ret = c + b/(P-a)
return ret
def processDoseplan_usingReferencePoint(only_one):
################ RT Plan ######################
#Find each coordinate in mm to isocenter relative to first element in doseplan
iso_1 = abs(Globals.profiles_dataset_doseplan.ImagePositionPatient[0] - Globals.profiles_dataset_rtplan.BeamSequence[0].ControlPointSequence[0].IsocenterPosition[0])
iso_2 = abs(Globals.profiles_dataset_doseplan.ImagePositionPatient[1] - Globals.profiles_dataset_rtplan.BeamSequence[0].ControlPointSequence[0].IsocenterPosition[1])
iso_3 = abs(Globals.profiles_dataset_doseplan.ImagePositionPatient[2] - Globals.profiles_dataset_rtplan.BeamSequence[0].ControlPointSequence[0].IsocenterPosition[2])
#Given as [x,y,z] in patient coordinates
Globals.profiles_isocenter_mm = [iso_1, iso_2, iso_3]
#Reads input displacement from phantom on reference point in film
#lateral = Globals.profiles_input_lateral_displacement.get("1.0",'end-1c')
#vertical = Globals.profiles_input_vertical_displacement.get("1.0", 'end-1c')
#longit = Globals.profiles_input_longitudinal_displacement.get("1.0", 'end-1c')
#if(lateral==" "):lateral=0
#if(vertical==" "):vertical=0
#if(longit==" "):longit=0
try:
Globals.profiles_vertical = int(Globals.profiles_vertical)
except:
messagebox.showerror("Error", "Could not read the vertical displacements\n (Code: displacements to integer)")
return
try:
Globals.profiles_lateral = int(Globals.profiles_lateral)
except:
messagebox.showerror("Error", "Could not read the lateral displacements\n (Code: displacements to integer)")
return
try:
Globals.profiles_longitudinal = int(Globals.profiles_longitudinal)
except:
messagebox.showerror("Error", "Could not read the longitudinal displacements\n (Code: displacements to integer)")
return
lateral = Globals.profiles_lateral
longit = Globals.profiles_longitudinal
vertical = Globals.profiles_vertical
isocenter_px = np.zeros(3)
distance_in_doseplan_ROI_reference_point_px = []
if(Globals.profiles_dataset_doseplan.PixelSpacing==[1, 1]):
#make isocenter coordinates into pixel values
isocenter_px[0] = np.round(iso_1)
isocenter_px[1] = np.round(iso_2)
isocenter_px[2] = np.round(iso_3)
#find the pixel distance from reference point to ROI corners
distance_in_doseplan_ROI_reference_point_px.append([np.round(Globals.profiles_distance_reference_point_ROI[0][0]),\
np.round(Globals.profiles_distance_reference_point_ROI[0][1])])
distance_in_doseplan_ROI_reference_point_px.append([np.round(Globals.profiles_distance_reference_point_ROI[1][0]),\
np.round(Globals.profiles_distance_reference_point_ROI[1][1])])
distance_in_doseplan_ROI_reference_point_px.append([np.round(Globals.profiles_distance_reference_point_ROI[2][0]),\
np.round(Globals.profiles_distance_reference_point_ROI[2][1])])
distance_in_doseplan_ROI_reference_point_px.append([np.round(Globals.profiles_distance_reference_point_ROI[3][0]),\
np.round(Globals.profiles_distance_reference_point_ROI[3][1])])
#Input to px
lateral_px = np.round(lateral)
vertical_px = np.round(vertical)
longit_px = np.round(longit)
#displacement to px
doseplan_lateral_displacement_px = np.round(Globals.profiles_doseplan_lateral_displacement)
doseplan_vertical_displacement_px = np.round(Globals.profiles_doseplan_vertical_displacement)
doseplan_longitudinal_displacement_px = np.round(Globals.profiles_doseplan_longitudianl_displacement)
elif(Globals.profiles_dataset_doseplan.PixelSpacing==[2, 2]):
#make isocenter coordinates into pixel values
isocenter_px[0] = np.round(iso_1/2)
isocenter_px[1] = np.round(iso_2/2)
isocenter_px[2] = np.round(iso_3/2)
#find the pixel distance from reference point to ROI corners
distance_in_doseplan_ROI_reference_point_px.append([np.round((Globals.profiles_distance_reference_point_ROI[0][0])/2),\
np.round((Globals.profiles_distance_reference_point_ROI[0][1])/2)])
distance_in_doseplan_ROI_reference_point_px.append([np.round((Globals.profiles_distance_reference_point_ROI[1][0])/2),\
np.round((Globals.profiles_distance_reference_point_ROI[1][1])/2)])
distance_in_doseplan_ROI_reference_point_px.append([np.round((Globals.profiles_distance_reference_point_ROI[2][0])/2),\
np.round((Globals.profiles_distance_reference_point_ROI[2][1])/2)])
distance_in_doseplan_ROI_reference_point_px.append([np.round((Globals.profiles_distance_reference_point_ROI[3][0])/2),\
np.round((Globals.profiles_distance_reference_point_ROI[3][1])/2)])
#Input to px
lateral_px = np.round(lateral/2)
vertical_px = np.round(vertical/2)
longit_px = np.round(longit/2)
#displacement to px
doseplan_lateral_displacement_px = np.round((Globals.profiles_doseplan_lateral_displacement)/2)
doseplan_vertical_displacement_px = np.round((Globals.profiles_doseplan_vertical_displacement)/2)
doseplan_longitudinal_displacement_px = np.round((Globals.profiles_doseplan_longitudianl_displacement)/2)
else:
#make isocenter coordinates into pixel values
isocenter_px[0] = np.round(iso_1/3)
isocenter_px[1] = np.round(iso_2/3)
isocenter_px[2] = np.round(iso_3/3)
#find the pixel distance from reference point to ROI corners
distance_in_doseplan_ROI_reference_point_px.append([np.round((Globals.profiles_distance_reference_point_ROI[0][0])/3),\
np.round((Globals.profiles_distance_reference_point_ROI[0][1])/3)])
distance_in_doseplan_ROI_reference_point_px.append([np.round((Globals.profiles_distance_reference_point_ROI[1][0])/3),\
np.round((Globals.profiles_distance_reference_point_ROI[1][1])/3)])
distance_in_doseplan_ROI_reference_point_px.append([np.round((Globals.profiles_distance_reference_point_ROI[2][0])/3),\
np.round((Globals.profiles_distance_reference_point_ROI[2][1])/3)])
distance_in_doseplan_ROI_reference_point_px.append([np.round((Globals.profiles_distance_reference_point_ROI[3][0])/3),\
np.round((Globals.profiles_distance_reference_point_ROI[3][1])/3)])
#Input to px
lateral_px = np.round(lateral/3)
vertical_px = np.round(vertical/3)
longit_px = np.round(longit/3)
#displacement to px
doseplan_lateral_displacement_px = np.round((Globals.profiles_doseplan_lateral_displacement)/3)
doseplan_vertical_displacement_px = np.round((Globals.profiles_doseplan_vertical_displacement)/3)
doseplan_longitudinal_displacement_px = np.round((Globals.profiles_doseplan_longitudianl_displacement)/3)
temp_ref_point_doseplan = np.zeros(3)
#Finding reference point in doseplan
if(Globals.profiles_doseplan_patient_position=='HFS'):
temp_ref_point_doseplan[0] = int(isocenter_px[0]+ doseplan_lateral_displacement_px - lateral_px)
temp_ref_point_doseplan[1] = int(isocenter_px[1]- doseplan_vertical_displacement_px + vertical_px)
temp_ref_point_doseplan[2] = int(isocenter_px[2]+ doseplan_longitudinal_displacement_px - longit_px)
elif(Globals.profiles_doseplan_patient_position=='HFP'):
temp_ref_point_doseplan[0] = isocenter_px[0]- doseplan_lateral_displacement_px+ lateral_px
temp_ref_point_doseplan[1] = isocenter_px[1]+ doseplan_vertical_displacement_px - vertical_px
temp_ref_point_doseplan[2] = isocenter_px[2]+ doseplan_longitudinal_displacement_px - longit_px
elif(Globals.profiles_doseplan_patient_position=='HFDR'):
temp_ref_point_doseplan[0] = isocenter_px[0]- doseplan_vertical_displacement_px + vertical_px
temp_ref_point_doseplan[1] = isocenter_px[1]+ doseplan_lateral_displacement_px - lateral_px
temp_ref_point_doseplan[2] = isocenter_px[2]+ doseplan_longitudinal_displacement_px - longit_px
elif(Globals.profiles_doseplan_patient_position=='HFDL'):
temp_ref_point_doseplan[0] = isocenter_px[0]+ doseplan_vertical_displacement_px - vertical_px
temp_ref_point_doseplan[1] = isocenter_px[1]- doseplan_lateral_displacement_px + lateral_px
temp_ref_point_doseplan[2] = isocenter_px[2]+ doseplan_longitudinal_displacement_px - longit_px
elif(Globals.profiles_doseplan_patient_position=='FFS'):
temp_ref_point_doseplan[0] = isocenter_px[0]- doseplan_lateral_displacement_px + lateral_px
temp_ref_point_doseplan[1] = isocenter_px[1]+ doseplan_vertical_displacement_px - vertical_px
temp_ref_point_doseplan[2] = isocenter_px[2]- doseplan_longitudinal_displacement_px + longit_px
elif(Globals.profiles_doseplan_patient_position=='FFP'):
temp_ref_point_doseplan[0] = isocenter_px[0]+ doseplan_lateral_displacement_px- lateral_px
temp_ref_point_doseplan[1] = isocenter_px[1]- doseplan_vertical_displacement_px + vertical_px
temp_ref_point_doseplan[2] = isocenter_px[2]- doseplan_longitudinal_displacement_px + longit_px
elif(Globals.profiles_doseplan_patient_position=='FFDR'):
temp_ref_point_doseplan[0] = isocenter_px[0]- doseplan_vertical_displacement_px + vertical_px
temp_ref_point_doseplan[1] = isocenter_px[1]- doseplan_lateral_displacement_px + lateral_px
temp_ref_point_doseplan[2] = isocenter_px[2]- doseplan_longitudinal_displacement_px + longit_px
else:
temp_ref_point_doseplan[0] = isocenter_px[0] + doseplan_vertical_displacement_px - vertical_px
temp_ref_point_doseplan[1] = isocenter_px[1] + doseplan_lateral_displacement_px - lateral_px
temp_ref_point_doseplan[2] = isocenter_px[2]- doseplan_longitudinal_displacement_px + longit_px
Globals.profiles_reference_point_in_doseplan = temp_ref_point_doseplan
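#temp_ref_point_doseplan now holds the reference point as (x, y, z) pixel indices relative to the
#first element of the dose grid; the orientation handling below permutes it into
#(frame, row, column) order so it can index dataset_swapped directly.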
reference_point = np.zeros(3)
######################## Doseplan ##################################
#In each orientation branch below, dataset_swapped is built so the dose matrix is ordered the same
#way as the film data (slice, rows, columns), and reference_point is permuted accordingly.
if(Globals.profiles_dataset_doseplan.ImageOrientationPatient==[1, 0, 0, 0, 1, 0]):
reference_point[0] = temp_ref_point_doseplan[2]
reference_point[1] = temp_ref_point_doseplan[1]
reference_point[2] = temp_ref_point_doseplan[0]
if(Globals.profiles_film_orientation.get()=='Coronal'):
#number of frames -> rows
#rows -> number of frames
#columns -> columns
dataset_swapped = np.swapaxes(Globals.profiles_dataset_doseplan.pixel_array, 0,1)
#temp_iso = isocenter_px[0]
#isocenter_px[0] = isocenter_px[1]
#isocenter_px[1] = temp_iso
temp_ref = reference_point[0]
reference_point[0] = reference_point[1]
reference_point[1] = temp_ref
elif(Globals.profiles_film_orientation.get()=='Sagittal'):
#column -> number of frames
#number of frames -> rows
#rows -> columns
dataset_swapped = np.swapaxes(Globals.profiles_dataset_doseplan.pixel_array, 0,2)
#temp_iso = isocenter_px[0]
#isocenter_px[0] = isocenter_px[2]
#isocenter_px[2] = temp_iso
temp_ref = reference_point[0]
reference_point[0] = reference_point[2]
reference_point[2] = temp_ref
#dataset_swapped = np.swapaxes(dataset_swapped, 0,1)
#temp_iso = isocenter_px[0]
#isocenter_px[0] = isocenter_px[1]
#isocenter_px[1] = temp_iso
#temp_ref = reference_point[0]
#reference_point[0] = reference_point[1]
#reference_point[1] = temp_ref
elif(Globals.profiles_film_orientation.get()=='Axial'):
dataset_swapped = Globals.profiles_dataset_doseplan.pixel_array
else:
messagebox.showerror("Error", "Something has gone wrong here.")
clearAll()
return
elif(Globals.profiles_dataset_doseplan.ImageOrientationPatient==[1, 0, 0, 0, 0, 1]):
reference_point[0] = temp_ref_point_doseplan[1]
reference_point[1] = temp_ref_point_doseplan[2]
reference_point[2] = temp_ref_point_doseplan[0]
if(Globals.profiles_film_orientation.get()=='Coronal'):
dataset_swapped = Globals.profiles_dataset_doseplan.pixel_array
elif(Globals.profiles_film_orientation.get()=='Sagittal'):
dataset_swapped = np.swapaxes(Globals.profiles_dataset_doseplan.pixel_array, 0,2)
temp_ref = reference_point[0]
reference_point[0] = reference_point[2]
reference_point[2] = temp_ref
dataset_swapped = np.swapaxes(dataset_swapped, 1,2)
temp_ref = reference_point[1]
reference_point[1] = reference_point[2]
reference_point[2] = temp_ref
elif(Globals.profiles_film_orientation.get()=='Axial'):
dataset_swapped = np.swapaxes(Globals.profiles_dataset_doseplan.pixel_array, 0,1)
temp_ref = reference_point[0]
reference_point[0] = reference_point[1]
reference_point[1] = temp_ref
else:
messagebox.showerror("Error", "Something has gone wrong.")
clearAll()
return
elif(Globals.profiles_dataset_doseplan.ImageOrientationPatient==[0, 1, 0, 1, 0, 0]):
reference_point[0] = temp_ref_point_doseplan[2]
reference_point[1] = temp_ref_point_doseplan[0]
reference_point[2] = temp_ref_point_doseplan[1]
if(Globals.profiles_film_orientation.get()=='Coronal'):
dataset_swapped = np.swapaxes(Globals.profiles_dataset_doseplan.pixel_array, 0,2)
temp_ref = reference_point[0]
reference_point[0] = reference_point[2]
reference_point[2] = temp_ref
dataset_swapped = np.swapaxes(dataset_swapped, 1,2)
temp_ref = reference_point[1]
reference_point[1] = reference_point[2]
reference_point[2] = temp_ref
elif(Globals.profiles_film_orientation.get()=='Sagittal'):
dataset_swapped = np.swapaxes(Globals.profiles_dataset_doseplan.pixel_array, 0,1)
temp_ref = reference_point[0]
reference_point[0] = reference_point[1]
reference_point[1] = temp_ref
dataset_swapped = np.swapaxes(dataset_swapped, 1,2)
temp_ref = reference_point[1]
reference_point[1] = reference_point[2]
reference_point[2] = temp_ref
elif(Globals.profiles_film_orientation.get()=='Axial'):
dataset_swapped = np.swapaxes(Globals.profiles_dataset_doseplan.pixel_array, 1,2)
temp_ref = reference_point[1]
reference_point[1] = reference_point[2]
reference_point[2] = temp_ref
else:
messagebox.showerror("Error", "Something has gone wrong.")
clearAll()
return
elif(Globals.profiles_dataset_doseplan.ImageOrientationPatient==[0, 1, 0, 0, 0, 1]):
reference_point[0] = temp_ref_point_doseplan[0]
reference_point[1] = temp_ref_point_doseplan[2]
reference_point[2] = temp_ref_point_doseplan[1]
if(Globals.profiles_film_orientation.get()=='Coronal'):
dataset_swapped = np.swapaxes(Globals.profiles_dataset_doseplan.pixel_array, 0,2)
temp_ref = reference_point[0]
reference_point[0] = reference_point[2]
reference_point[2] = temp_ref
elif(Globals.profiles_film_orientation.get()=='Sagittal'):
dataset_swapped = np.swapaxes(Globals.profiles_dataset_doseplan.pixel_array, 1,2)
temp_ref = reference_point[1]
reference_point[1] = reference_point[2]
reference_point[2] = temp_ref
elif(Globals.profiles_film_orientation.get()=='Axial'):
dataset_swapped = np.swapaxes(Globals.profiles_dataset_doseplan.pixel_array, 0,1)
temp_ref = reference_point[0]
reference_point[0] = reference_point[1]
reference_point[1] = temp_ref
dataset_swapped = np.swapaxes(dataset_swapped, 1,2)
temp_ref = reference_point[1]
reference_point[1] = reference_point[2]
reference_point[2] = temp_ref
else:
messagebox.showerror("Error", "Something has gone wrong.")
clearAll()
return
elif(Globals.profiles_dataset_doseplan.ImageOrientationPatient==[0, 0, 1, 1, 0, 0]):
reference_point[0] = temp_ref_point_doseplan[1]
reference_point[1] = temp_ref_point_doseplan[0]
reference_point[2] = temp_ref_point_doseplan[2]
if(Globals.profiles_film_orientation.get()=='Coronal'):
dataset_swapped = np.swapaxes(Globals.profiles_dataset_doseplan.pixel_array, 1,2)
temp_ref = reference_point[1]
reference_point[1] = reference_point[2]
reference_point[2] = temp_ref
elif(Globals.profiles_film_orientation.get()=='Sagittal'):
dataset_swapped = np.swapaxes(Globals.profiles_dataset_doseplan.pixel_array, 0,1)
temp_ref = reference_point[0]
reference_point[0] = reference_point[1]
reference_point[1] = temp_ref
elif(Globals.profiles_film_orientation.get()=='Axial'):
dataset_swapped = np.swapaxes(Globals.profiles_dataset_doseplan.pixel_array, 0,1)
temp_ref = reference_point[0]
reference_point[0] = reference_point[1]
reference_point[1] = temp_ref
dataset_swapped = np.swapaxes(dataset_swapped, 0,2)
temp_ref = reference_point[0]
reference_point[0] = reference_point[2]
reference_point[2] = temp_ref
else:
messagebox.showerror("Error", "Something has gone wrong.")
clearAll()
return
elif(Globals.profiles_dataset_doseplan.ImageOrientationPatient==[0, 0, 1, 0, 1, 0]):
reference_point[0] = temp_ref_point_doseplan[0]
reference_point[1] = temp_ref_point_doseplan[1]
reference_point[2] = temp_ref_point_doseplan[2]
if(Globals.profiles_film_orientation.get()=='Coronal'):
dataset_swapped = np.swapaxes(Globals.profiles_dataset_doseplan.pixel_array, 0,2)
temp_ref = reference_point[0]
reference_point[0] = reference_point[2]
reference_point[2] = temp_ref
dataset_swapped = np.swapaxes(dataset_swapped, 0,1)
temp_ref = reference_point[0]
reference_point[0] = reference_point[1]
reference_point[1] = temp_ref
elif(Globals.profiles_film_orientation.get()=='Sagittal'):
dataset_swapped = Globals.profiles_dataset_doseplan.pixel_array
elif(Globals.profiles_film_orientation.get()=='Axial'):
dataset_swapped = np.swapaxes(Globals.profiles_dataset_doseplan.pixel_array, 0,2)
temp_ref = reference_point[0]
reference_point[0] = reference_point[2]
reference_point[2] = temp_ref
else:
messagebox.showerror("Error", "Something has gone wrong.")
clearAll()
return
else:
messagebox.showerror("Error", "Something has gone wrong.")
clearAll()
return
if(reference_point[0]<0 or reference_point[0]>dataset_swapped.shape[0]):
messagebox.showerror("Error", "Reference point is outside of dosematrix\n\
(Code: first dimension, number of frames in dosematrix)")
return
if(reference_point[1]<0 or reference_point[1]>dataset_swapped.shape[1]):
messagebox.showerror("Error", "Reference point is outside of dosematrix\n\
(Code: second dimension, rows in dosematrix)")
return
if(reference_point[2]<0 or reference_point[2]>dataset_swapped.shape[2]):
messagebox.showerror("Error", "Reference point is outside of dosematrix\n\
(Code: third dimension, columns in dosematrix)")
return
dose_slice = dataset_swapped[int(reference_point[0]),:,:]
#calculate the coordinates of the Region of Interest in doseplan (marked on the film)
#and checks that it actually exists in the dosematrix
doseplan_ROI_coords = []
top_left_test_side = False; top_left_test_down = False
top_right_test_side = False; top_right_test_down = False
bottom_left_test_side = False; bottom_left_test_down = False
bottom_right_test_side = False; bottom_right_test_down = False
top_left_side_corr = 0; top_left_down_corr = 0
top_right_side_corr = 0; top_right_down_corr = 0
bottom_left_side_corr = 0; bottom_left_down_corr = 0
bottom_right_side_corr = 0; bottom_right_down_corr = 0
top_left_to_side = reference_point[2] - distance_in_doseplan_ROI_reference_point_px[0][0]
top_left_down = reference_point[1] - distance_in_doseplan_ROI_reference_point_px[0][1]
if(top_left_to_side < 0):
top_left_test_side = True
top_left_side_corr = abs(top_left_to_side)
top_left_to_side = 0
if(top_left_to_side > dose_slice.shape[1]):
messagebox.showerror("Fatal Error", "Fatal error: marked ROI is out of range in doseplan. Try again")
clearAll()
return
if(top_left_down < 0):
top_left_test_down = True
top_left_down_corr = abs(top_left_down)
top_left_down = 0
if(top_left_down > dose_slice.shape[0]):
messagebox.showerror("Fatal Error", "Fatal error: marked ROI is out of range in doseplan. Try again")
clearAll()
return
top_right_to_side = reference_point[2] - distance_in_doseplan_ROI_reference_point_px[1][0]
top_right_down = reference_point[1] - distance_in_doseplan_ROI_reference_point_px[1][1]
if(top_right_to_side < 0):
messagebox.showerror("Fatal Error", "Fatal error: marked ROI is out of range in doseplan. Try again")
clearAll()
return
if(top_right_to_side > dose_slice.shape[1]):
top_right_test_side = True
top_right_side_corr = top_right_to_side - dose_slice.shape[1]
top_right_to_side = dose_slice.shape[1]
if(top_right_down < 0):
top_right_test_down = True
top_right_down_corr = abs(top_right_down)
top_right_down = 0
if(top_right_down > dose_slice.shape[0]):
messagebox.showerror("Fatal Error", "Fatal error: marked ROI is out of range in doseplan. Try again")
clearAll()
return
bottom_left_to_side = reference_point[2] - distance_in_doseplan_ROI_reference_point_px[2][0]
bottom_left_down = reference_point[1] - distance_in_doseplan_ROI_reference_point_px[2][1]
if(bottom_left_to_side < 0):
bottom_left_test_side = True
bottom_left_side_corr = abs(bottom_left_to_side)
bottom_left_to_side = 0
if(bottom_left_to_side > dose_slice.shape[1]):
messagebox.showerror("Fatal Error", "Fatal error: marked ROI is out of range in doseplan. Try again")
clearAll()
return
if(bottom_left_down < 0):
messagebox.showerror("Fatal Error", "Fatal error: marked ROI is out of range in doseplan. Try again")
clearAll()
return
if(bottom_left_down > dose_slice.shape[0]):
bottom_left_down_corr = bottom_left_down - dose_slice.shape[0]
bottom_left_down = dose_slice.shape[0]
bottom_left_test_down = True
bottom_right_to_side = reference_point[2] - distance_in_doseplan_ROI_reference_point_px[3][0]
bottom_right_down = reference_point[1] - distance_in_doseplan_ROI_reference_point_px[3][1]
if(bottom_right_to_side < 0):
messagebox.showerror("Fatal Error", "Fatal error: marked ROI is out of range in doseplan. Try again")
clearAll()
return
if(bottom_right_to_side > dose_slice.shape[1]):
bottom_right_side_corr = bottom_right_to_side - dose_slice.shape[1]
bottom_right_to_side = dose_slice.shape[1]
bottom_right_test_side = True
if(bottom_right_down < 0):
messagebox.showerror("Fatal Error", "Fatal error: marked ROI is out of range in doseplan. Try again")
clearAll()
return
if(bottom_right_down > dose_slice.shape[0]):
bottom_right_down_corr = bottom_right_down - dose_slice.shape[0]
bottom_right_down = dose_slice.shape[0]
bottom_right_test_down = True
if(top_right_test_side or top_right_test_down or top_left_test_side or top_left_test_down \
or bottom_right_test_side or bottom_right_test_down or bottom_left_test_side or bottom_left_test_down):
ROI_info = "Left side: " + str(max(top_left_side_corr, bottom_left_side_corr)) + " pixels.\n"\
+ "Right side: " + str(max(top_right_side_corr, bottom_right_side_corr)) + " pixels.\n "\
+ "Top side: " + str(max(top_left_down_corr, top_right_down_corr)) + " pixels.\n"\
+ "Bottom side: " + str(max(bottom_left_down_corr, bottom_right_down_corr)) + " pixels."
messagebox.showinfo("ROI info", "The ROI marked on the film did not fit with the size of the doseplan and had to \
be cut.\n" + ROI_info )
doseplan_ROI_coords.append([top_left_to_side, top_left_down])
doseplan_ROI_coords.append([top_right_to_side, top_right_down])
doseplan_ROI_coords.append([bottom_left_to_side, bottom_left_down])
doseplan_ROI_coords.append([bottom_right_to_side, bottom_right_down])
if only_one:
Globals.profiles_doseplan_dataset_ROI = \
dose_slice[int(top_left_down):int(bottom_left_down), int(top_left_to_side):int(top_right_to_side)]*Globals.profiles_dataset_doseplan.DoseGridScaling
img=Globals.profiles_doseplan_dataset_ROI
if(Globals.profiles_dataset_doseplan.PixelSpacing==[1, 1]):
img = cv2.resize(img, dsize=(img.shape[1]*5,img.shape[0]*5))
elif(Globals.profiles_dataset_doseplan.PixelSpacing==[2, 2]):
img = cv2.resize(img, dsize=(img.shape[1]*10,img.shape[0]*10))
else:
img = cv2.resize(img, dsize=(img.shape[1]*15,img.shape[0]*15))
mx=np.max(img)
Globals.max_dose_doseplan = mx
img = img/mx
PIL_img_doseplan_ROI = Image.fromarray(np.uint8(cm.viridis(img)*255))
wid = PIL_img_doseplan_ROI.width
heig = PIL_img_doseplan_ROI.height
doseplan_canvas = tk.Canvas(Globals.profiles_film_panedwindow)
doseplan_canvas.grid(row=2, column=0, sticky=N+S+W+E)
Globals.profiles_film_panedwindow.add(doseplan_canvas, \
height=max(heig, Globals.profiles_doseplan_text_image.height()), \
width=wid + Globals.profiles_doseplan_text_image.width())
doseplan_canvas.config(bg='#ffffff', relief=FLAT, highlightthickness=0, \
height=max(heig, Globals.profiles_doseplan_text_image.height()), \
width=wid + Globals.profiles_doseplan_text_image.width())
Globals.doseplan_write_image = tk.Canvas(doseplan_canvas)
Globals.doseplan_write_image.grid(row=0,column=1,sticky=N+S+W+E)
Globals.doseplan_write_image.config(bg='#ffffff', relief=FLAT, highlightthickness=0, width=wid, height=heig)
doseplan_text_image_canvas = tk.Canvas(doseplan_canvas)
doseplan_text_image_canvas.grid(row=0,column=0,sticky=N+S+W+E)
doseplan_text_image_canvas.config(bg='#ffffff', relief=FLAT, highlightthickness=0, \
width=Globals.profiles_doseplan_text_image.width(), height=Globals.profiles_doseplan_text_image.height())
scaled_image_visual = PIL_img_doseplan_ROI
scaled_image_visual = ImageTk.PhotoImage(image=scaled_image_visual)
Globals.doseplan_write_image_width = scaled_image_visual.width()
Globals.doseplan_write_image_height = scaled_image_visual.height()
Globals.doseplan_write_image.create_image(0,0,image=scaled_image_visual, anchor="nw")
Globals.doseplan_write_image.image = scaled_image_visual
doseplan_text_image_canvas.create_image(0,0,image=Globals.profiles_doseplan_text_image, anchor="nw")
doseplan_text_image_canvas.image=Globals.profiles_doseplan_text_image
drawProfiles(False)
else:
img=dose_slice[int(top_left_down):int(bottom_left_down), int(top_left_to_side):int(top_right_to_side)]
Globals.profiles_doseplan_dataset_ROI_several.append(img)
Globals.profiles_number_of_doseplans+=1
if(Globals.profiles_dataset_doseplan.PixelSpacing==[1, 1]):
Globals.profiles_several_img.append(img)#cv2.resize(img, dsize=(img.shape[1]*5,img.shape[0]*5)))
elif(Globals.profiles_dataset_doseplan.PixelSpacing==[2, 2]):
Globals.profiles_several_img.append(img)#cv2.resize(img, dsize=(img.shape[1]*10,img.shape[0]*10)))
else:
Globals.profiles_several_img.append(img)#cv2.resize(img, dsize=(img.shape[1]*15,img.shape[0]*15)))
def processDoseplan_usingIsocenter(only_one):
################ RT Plan ######################
#Find each coordinate in mm to isocenter relative to first element in doseplan
iso_1 = abs(Globals.profiles_dataset_doseplan.ImagePositionPatient[0] - Globals.profiles_dataset_rtplan.BeamSequence[0].ControlPointSequence[0].IsocenterPosition[0])
iso_2 = abs(Globals.profiles_dataset_doseplan.ImagePositionPatient[1] - Globals.profiles_dataset_rtplan.BeamSequence[0].ControlPointSequence[0].IsocenterPosition[1])
iso_3 = abs(Globals.profiles_dataset_doseplan.ImagePositionPatient[2] - Globals.profiles_dataset_rtplan.BeamSequence[0].ControlPointSequence[0].IsocenterPosition[2])
#Given as [x,y,z] in patient coordinates
Globals.profiles_isocenter_mm = [iso_1, iso_2, iso_3]
#Isocenter in pixel relative to the first element in the doseplan
isocenter_px = np.zeros(3)
distance_in_doseplan_ROI_reference_point_px = []
if(Globals.profiles_dataset_doseplan.PixelSpacing==[1, 1]):
isocenter_px[0] = np.round(iso_1)#np.round(Globals.profiles_isocenter_mm[0])
isocenter_px[1] = np.round(iso_2)#np.round(Globals.profiles_isocenter_mm[1])
isocenter_px[2] = np.round(iso_3)#np.round(Globals.profiles_isocenter_mm[2])
#Change distance in film to pixel in doseplan
distance_in_doseplan_ROI_reference_point_px.append([np.round(Globals.profiles_distance_isocenter_ROI[0][0]),\
np.round(Globals.profiles_distance_isocenter_ROI[0][1])])
distance_in_doseplan_ROI_reference_point_px.append([np.round(Globals.profiles_distance_isocenter_ROI[1][0]),\
np.round(Globals.profiles_distance_isocenter_ROI[1][1])])
distance_in_doseplan_ROI_reference_point_px.append([np.round(Globals.profiles_distance_isocenter_ROI[2][0]),\
np.round(Globals.profiles_distance_isocenter_ROI[2][1])])
distance_in_doseplan_ROI_reference_point_px.append([np.round(Globals.profiles_distance_isocenter_ROI[3][0]),\
np.round(Globals.profiles_distance_isocenter_ROI[3][1])])
elif(Globals.profiles_dataset_doseplan.PixelSpacing==[2, 2]):
isocenter_px[0] = np.round(iso_1/2)#np.round(Globals.profiles_isocenter_mm[0]/2)
isocenter_px[1] = np.round(iso_2/2)#np.round(Globals.profiles_isocenter_mm[1]/2)
isocenter_px[2] = np.round(iso_3/2)#np.round(Globals.profiles_isocenter_mm[2]/2)
#Change distance in film to pixel in doseplan
distance_in_doseplan_ROI_reference_point_px.append([np.round((Globals.profiles_distance_isocenter_ROI[0][0])/2),\
np.round((Globals.profiles_distance_isocenter_ROI[0][1])/2)])
distance_in_doseplan_ROI_reference_point_px.append([np.round((Globals.profiles_distance_isocenter_ROI[1][0])/2),\
np.round((Globals.profiles_distance_isocenter_ROI[1][1])/2)])
"""
One-dimensional & multiple variables
=================================================================
In this example, the Spectral Representation Method is used to generate stochastic processes from a prescribed Power
Spectrum and associated Cross Spectral Density. This example illustrates how to use the SRM class for a one dimensional
and 'm' variable case and compare the statistics of the generated stochastic processes with the expected values.
"""
#%% md
#
# Import the necessary libraries. Here we import standard libraries such as numpy and matplotlib, but also need to
# import the :class:`.SpectralRepresentation` class from the :class:`stochastic_processes` module of UQpy.
#%%
from UQpy.stochastic_process import SpectralRepresentation
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('seaborn')
#%% md
#
# The input parameters necessary for the generation of the stochastic processes are given below:
#%%
n_sim = 10000 # Num of samples
n = 1 # Num of dimensions
m = 3 # Num of variables
T = 10 # Time(1 / T = dw)
nt = 256 # Num.of Discretized Time
F = 1 / T * nt / 2 # Frequency.(Hz)
nf = 128 # Num of Discretized Freq.
# # Generation of Input Data(Stationary)
dt = T / nt
t = np.linspace(0, T - dt, nt)
df = F / nf
f = np.linspace(0, F - df, nf)
#%% md
#
# Make sure that the input parameters are in order to prevent aliasing
#%%
t_u = 2*np.pi/2/F
if dt>t_u:
print('Error')
#%% md
#
# Defining the Power Spectral Density Function (S) and the Cross Spectral Density (g)
#%%
S_11 = 38.3 / (1 + 6.19 * f) ** (5 / 3)
S_22 = 43.4 / (1 + 6.98 * f) ** (5 / 3)
S_33 = 135 / (1 + 21.8 * f) ** (5 / 3)
g_12 = np.exp(-0.1757 * f)
g_13 = np.exp(-3.478 * f)
g_23 = np.exp(-3.392 * f)
S_list = np.array([S_11, S_22, S_33])
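#%% md
#
# The auto-spectra and coherence functions above can be assembled into the full m-by-m
# cross-spectral density matrix S_jk(f) = g_jk(f) * sqrt(S_jj(f) * S_kk(f)). The short sketch
# below is not part of the original example: the names g_list, S_sqrt and S_jk are introduced
# here, and the coherence of each variable with itself is taken to be one.

#%%

g_list = np.array([[np.ones_like(f), g_12, g_13],
                   [g_12, np.ones_like(f), g_23],
                   [g_13, g_23, np.ones_like(f)]])   # (m, m, nf) coherence matrix
S_sqrt = np.sqrt(S_list)                             # (m, nf)
# Cross-spectral density, shape (m, m, nf)
S_jk = g_list * np.einsum('if,jf->ijf', S_sqrt, S_sqrt)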
import numpy as np
import scipy.io as sio
import matplotlib.pyplot as plt
from PIL import Image
from algorithms.relief import Relief
from algorithms.relieff import Relieff
from algorithms.reliefmss import ReliefMSS
from algorithms.reliefseq import ReliefSeq
from algorithms.turf import TuRF
from algorithms.vlsrelief import VLSRelief
from algorithms.iterative_relief import IterativeRelief
from algorithms.irelief import IRelief
from algorithms.boostedsurf2 import BoostedSURF
from algorithms.ecrelieff import ECRelieff
from algorithms.multisurf2 import MultiSURF
from algorithms.multisurfstar2 import MultiSURFStar
from algorithms.surf import SURF
from algorithms.surfstar import SURFStar
from algorithms.swrfstar import SWRFStar
# number of best features to mark.
N_TO_SELECT = 500
# Load the CatDog dataset and create a target vector where: 0 - cat, 1 - dog
data = sio.loadmat('./datasets/selected/catdog/data.mat')['data']
target = np.hstack((np.repeat(0, 80), np.repeat(1, 80)))  # 0 - cat (80 images), 1 - dog (count assumed equal)
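# Whichever of the imported algorithms is used, the end goal is a per-feature weight (or rank)
# vector from which the N_TO_SELECT best features are marked. The exact attribute names of the
# local algorithms.* classes are not shown here, so 'weights' below is a placeholder for that
# vector rather than a documented attribute:
#   selected_idx = np.argsort(weights)[::-1][:N_TO_SELECT]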
from typing import Tuple, List, Dict, Any
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder, Imputer, FunctionTransformer
from sklearn.pipeline import Pipeline
from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split, GridSearchCV, cross_validate, KFold
from sklearn.metrics import mean_squared_error, make_scorer
import joblib
import mlflow
pd.options.display.max_columns = None
CURRENT_EXPERIMENT_NAME = 'feature engineering'
def filter_by(df: pd.DataFrame, **kwargs) -> pd.DataFrame:
df_out = df
for key, value in kwargs.items():
if type(value) is list:
df_out = df_out[df_out[key].isin(value)]
else:
df_out = df_out[df_out[key] == value]
return df_out
def missing_rate(df: pd.DataFrame) -> pd.Series:
return df.isnull().sum() / len(df)
def reduce_mem_usage(df: pd.DataFrame, verbose: bool = True):
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
start_mem = df.memory_usage().sum() / (1024 ** 2)
for col in df.columns:
col_type = df[col].dtypes
if col_type in numerics:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
end_mem = df.memory_usage().sum() / 1024**2
if verbose:
print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(
end_mem, 100 * (start_mem - end_mem) / start_mem)
)
return df
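# Typical use is to downcast a frame right after it is read, e.g.
# train = reduce_mem_usage(pd.read_csv('train.csv', parse_dates=['timestamp']))
# ('train.csv' is an illustrative path, not one defined in this module).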
def rmse(y_true, y_pred) -> float:
return np.sqrt(mean_squared_error(y_true, y_pred))
rmse_score = make_scorer(rmse, greater_is_better=False)
def add_key_prefix(d: Dict, prefix = 'best_') -> Dict:
return {prefix + key: value for key, value in d.items()}
def df_from_cv_results(d: Dict):
df = pd.DataFrame(d)
score_columns = ['mean_test_score', 'mean_train_score']
param_columns = [c for c in df.columns if c.startswith('param_')]
return pd.concat([
-df.loc[:, score_columns],
df.loc[:, param_columns],
], axis=1).sort_values(by='mean_test_score')
def sample(*args, frac: float = 0.01) -> np.ndarray:
n_rows = args[0].shape[0]
random_index = np.random.choice(n_rows, int(n_rows * frac), replace=False)
gen = (
a[random_index] for a in args
)
if len(args) == 1:
return next(gen)
else:
return gen
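# Example: draw the same random 1% of rows from several aligned arrays at once,
# e.g. x_small, y_small = sample(x_array, y_array, frac=0.01)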
class BaseTransformer(BaseEstimator, TransformerMixin):
def fit(self, x: pd.DataFrame, y = None):
return self
def transform(self, x: pd.DataFrame) -> pd.DataFrame:
return x
class ColumnTransformer(BaseTransformer):
def __init__(self, defs: Dict[str, BaseTransformer]):
self.defs = defs
def fit(self, x: pd.DataFrame, y: np.ndarray = None):
for col, transformer in self.defs.items():
transformer.fit(x[col], y)
return self
def transform(self, x: pd.DataFrame) -> pd.DataFrame:
xp = x.copy()
for col, transformer in self.defs.items():
xp[col] = transformer.transform(x[col])
return xp
def fit_transform(self, x: pd.DataFrame, y: np.ndarray = None) -> pd.DataFrame:
xp = x.copy()
for col, transformer in self.defs.items():
if hasattr(transformer, 'fit_transform'):
xp[col] = transformer.fit_transform(x[col], y)
else:
xp[col] = transformer.fit(x[col], y).transform(x[col])
return xp
class WrappedLabelEncoder(BaseTransformer):
def __init__(self):
self.le = LabelEncoder()
def fit(self, x, y = None):
self.le.fit(x)
return self
def transform(self, x):
return self.le.transform(x)
class WeatherImputer(BaseTransformer):
def transform(self, w: pd.DataFrame) -> pd.DataFrame:
# add missing datetime
dt_min, dt_max = w['timestamp'].min(), w['timestamp'].max()
empty_df = pd.DataFrame({'timestamp': pd.date_range(start=dt_min, end=dt_max, freq='H')})
w_out = pd.concat([
ws.merge(
empty_df, on='timestamp', how='outer'
).sort_values(
by='timestamp'
).assign(
site_id=site_id
) for site_id, ws in w.groupby('site_id')
], ignore_index=True)
# large missing rate columns; fill by -999
w_out['cloud_coverage'] = w_out['cloud_coverage'].fillna(-999).astype(np.int16)
# small missing rate columns; fill by same value forward and backward
w_out = pd.concat([
ws.fillna(method='ffill').fillna(method='bfill') for _, ws in w_out.groupby('site_id')
], ignore_index=True)
# fill nan by mean over all sites
w_mean = w_out.groupby('timestamp').mean().drop(columns=['site_id']).reset_index()
w_mean = w_out.loc[:, ['site_id', 'timestamp']].merge(w_mean, on='timestamp', how='left')
w_out = w_out.where(~w_out.isnull(), w_mean)
# float -> uint
w_out['site_id'] = w_out['site_id'].astype(np.uint8)
return w_out
class WeatherEngineerer(BaseTransformer):
@staticmethod
def shift_by(wdf: pd.DataFrame, n: int) -> pd.DataFrame:
method = 'bfill' if n > 0 else 'ffill'
return pd.concat([
ws.iloc[:, [2, 4, 8]].shift(n).fillna(method=method) for _, ws in wdf.groupby('site_id')
], axis=0)
def weather_weighted_average(self, w: pd.DataFrame, hours: int = 5) -> pd.DataFrame:
ahours = abs(hours)
sign = int(hours / ahours)
w_weighted_average = sum(
[self.shift_by(w, (i+1)*sign) * (ahours-i) for i in range(ahours)]
) / (np.arange(ahours) + 1).sum()
w_weighted_average.columns = ['{0}_wa{1}'.format(c, hours) for c in w_weighted_average.columns]
return pd.concat([w, w_weighted_average], axis=1)
@staticmethod
def dwdt(df: pd.DataFrame, base_col: str) -> pd.DataFrame:
df_out = df.copy()
df_out[base_col + '_dt_wa1'] = df[base_col] - df[base_col + '_wa1']
df_out[base_col + '_dt_wa-1'] = df[base_col] - df[base_col + '_wa-1']
df_out[base_col + '_dt_wa5'] = df[base_col] - df[base_col + '_wa5']
df_out[base_col + '_dt_wa-5'] = df[base_col] - df[base_col + '_wa-5']
return df_out
@staticmethod
def wet(df: pd.DataFrame, suffix: str) -> pd.DataFrame:
df_out = df.copy()
df_out['wet' + suffix] = df['air_temperature' + suffix] - df['dew_temperature' + suffix]
return df_out
def transform(self, w_in: pd.DataFrame) -> pd.DataFrame:
w = w_in.pipe(self.weather_weighted_average, hours=1) \
.pipe(self.weather_weighted_average, hours=-1) \
.pipe(self.weather_weighted_average) \
.pipe(self.weather_weighted_average, hours=-5)
w = w.pipe(self.dwdt, base_col='air_temperature') \
.pipe(self.dwdt, base_col='dew_temperature') \
.pipe(self.dwdt, base_col='wind_speed') \
.pipe(self.wet, suffix='') \
.pipe(self.wet, suffix='_wa1') \
.pipe(self.wet, suffix='_wa-1') \
.pipe(self.wet, suffix='_wa5') \
.pipe(self.wet, suffix='_wa-5')
return w
class WindDirectionEncoder(BaseTransformer):
@staticmethod
def _from_degree(degree: int) -> int:
val = int((degree / 22.5) + 0.5)
arr = [i for i in range(0,16)]
return arr[(val % 16)]
def transform(self, x: pd.Series) -> pd.Series:
return x.apply(self._from_degree)
class WindSpeedEncoder(BaseTransformer):
def transform(self, x: pd.Series) -> pd.Series:
return pd.cut(
x,
bins=[0, 0.3, 1.6, 3.4, 5.5, 8, 10.8, 13.9, 17.2, 20.8, 24.5, 28.5, 33, 1000],
right=False, labels=False,
)
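# The bin edges above are the Beaufort wind-force boundaries in m/s, so wind_speed is encoded
# as a Beaufort number (0-12), with the open-ended last bin catching any extreme values.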
weather_pipeline = Pipeline(steps=[
('impute_missing_value', WeatherImputer()),
('feature_engineering', WeatherEngineerer()),
('label_encode', ColumnTransformer({
'wind_direction': WindDirectionEncoder(),
'wind_speed': WindSpeedEncoder(),
'wind_speed_wa1': WindSpeedEncoder(),
'wind_speed_wa-1': WindSpeedEncoder(),
'wind_speed_wa5': WindSpeedEncoder(),
'wind_speed_wa-5': WindSpeedEncoder(),
}))
])
class BuildingMetadataEngineerer(BaseTransformer):
def transform(self, bm_in: pd.DataFrame) -> pd.DataFrame:
bm = bm_in.copy()
bm['log_square_feet'] = np.log(bm['square_feet'])
bm['square_feet_per_floor'] = bm['square_feet'] / bm['floor_count']
bm['log_square_feet_per_floor'] = bm['log_square_feet'] / bm['floor_count']
bm['building_age'] = 2019 - bm['year_built']
bm['square_feet_per_age'] = bm['square_feet'] / bm['building_age']
bm['log_square_feet_per_age'] = bm['log_square_feet'] / bm['building_age']
return bm
class BuildingMetadataImputer(BaseTransformer):
def transform(self, bm: pd.DataFrame) -> pd.DataFrame:
return bm.fillna(-999)
building_metadata_pipeline = Pipeline(steps=[
('label_encode', ColumnTransformer({
'primary_use': WrappedLabelEncoder(),
})),
('feature_engineering', BuildingMetadataEngineerer()),
('impute_missing_value', BuildingMetadataImputer()),
])
class BuildingMetaJoiner(BaseTransformer):
def __init__(self, bm: pd.DataFrame = None):
self.bm = bm
def transform(self, x: pd.DataFrame) -> pd.DataFrame:
if self.bm is None:
return x
else:
return x.merge(
self.bm,
on='building_id',
how='left',
)
class WeatherJoiner(BaseTransformer):
def __init__(self, w: pd.DataFrame = None):
self.w = w
def transform(self, x: pd.DataFrame) -> pd.DataFrame:
if self.w is None:
return x
else:
return x.merge(
self.w,
on=['site_id', 'timestamp'],
how='left',
)
class DatetimeFeatureEngineerer(BaseTransformer):
def __init__(self, col: str = 'timestamp'):
self.col = col
def transform(self, x: pd.DataFrame) -> pd.DataFrame:
xp = x.copy()
ts = x[self.col]
xp['month'] = ts.dt.month.astype(np.int8)
xp['week'] = ts.dt.week.astype(np.int8)
xp['day_of_week'] = ts.dt.weekday.astype(np.int8)
xp['time_period'] = pd.cut(
ts.dt.hour,
bins=[0, 3, 6, 9, 12, 15, 18, 21, 25],
right=False, labels=False,
)
holidays = [
'2016-01-01', '2016-01-18', '2016-02-15', '2016-05-30', '2016-07-04',
'2016-09-05', '2016-10-10', '2016-11-11', '2016-11-24', '2016-12-26',
'2017-01-01', '2017-01-16', '2017-02-20', '2017-05-29', '2017-07-04',
'2017-09-04', '2017-10-09', '2017-11-10', '2017-11-23', '2017-12-25',
'2018-01-01', '2018-01-15', '2018-02-19', '2018-05-28', '2018-07-04',
'2018-09-03', '2018-10-08', '2018-11-12', '2018-11-22', '2018-12-25',
'2019-01-01'
]
xp['is_holiday'] = (ts.dt.date.astype('str').isin(holidays)).astype(np.int8)
return xp
class TargetEncoder(BaseTransformer):
def __init__(self, cv: int = 5, smoothing: int = 1):
self.agg = None
self.cv = cv
self.smoothing = smoothing
def transform(self, x: pd.Series):
if self.agg is None:
raise ValueError('you should fit() before transform()')
encoded = pd.merge(x, self.agg, left_on=x.name, right_index=True, how='left')
encoded = encoded.fillna(encoded.mean())
xp = encoded['y']
xp.name = x.name
return xp
def fit_transform(self, x: pd.Series, y: np.ndarray = None) -> pd.Series:
df = pd.DataFrame({'x': x, 'y': y})
self.agg = df.groupby('x').mean()
fold = KFold(n_splits=self.cv, shuffle=True)
xp = x.copy()
for idx_train, idx_test in fold.split(x):
df_train = df.loc[idx_train, :]
df_test = df.loc[idx_test, :]
agg_train = df_train.groupby('x').mean()
encoded = pd.merge(df_test, agg_train, left_on='x', right_index=True, how='left', suffixes=('', '_mean'))['y_mean']
encoded = encoded.fillna(encoded.mean())
xp[encoded.index] = encoded
return xp
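# fit_transform above produces out-of-fold target encodings: each row is encoded with the
# category mean computed on the other K-1 folds, which limits target leakage, while the
# full-data means stored in self.agg are what transform() applies to unseen data.
# Minimal usage sketch (the column and target names are placeholders):
#   encoded = TargetEncoder(cv=5).fit_transform(df['primary_use'], y)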
class ColumnDropper(BaseTransformer):
def __init__(self, cols: List[str]):
self.cols = cols
def transform(self, x: pd.DataFrame, y = None) -> pd.DataFrame:
return x.drop(columns=self.cols)
class ArrayTransformer(BaseTransformer):
def transform(self, x: pd.DataFrame, y = None) -> np.ndarray:
return x.values
def pipeline_factory() -> Pipeline:
return Pipeline(steps=[
# join
('join_building_meta', BuildingMetaJoiner(
building_metadata_pipeline.fit_transform(
building_metadata
)
)),
('join_weather', WeatherJoiner(
weather_pipeline.fit_transform(
pd.concat([weather_train, weather_test], axis=0, ignore_index=True)
)
)),
# feature engineering
('feature_engineering_from_datetime', DatetimeFeatureEngineerer()),
('target_encode', ColumnTransformer({
'primary_use': TargetEncoder(),
'meter': TargetEncoder(),
'cloud_coverage': TargetEncoder(),
'time_period': TargetEncoder(),
'wind_direction': TargetEncoder(),
'wind_speed': TargetEncoder(),
'wind_speed_wa1': TargetEncoder(),
'wind_speed_wa-1': TargetEncoder(),
'wind_speed_wa5': TargetEncoder(),
'wind_speed_wa-5': TargetEncoder(),
})),
# drop columns
('drop_columns', ColumnDropper([
'building_id', 'timestamp', 'site_id', 'precip_depth_1_hr',
])),
# pd.DataFrame -> np.ndarray
('df_to_array', ArrayTransformer()),
# regressor
('regressor', RandomForestRegressor()),
])
def cv(pipeline: Pipeline, df: pd.DataFrame, n_jobs: int = -1, **params) -> Tuple[float, float]:
x = df.drop(columns='meter_reading')
y = np.log1p(df['meter_reading'].values)
default_params = dict(
n_estimators=10,
max_depth=None,
max_features='auto',
min_samples_leaf=1,
)
merged_params = {**default_params, **params}
pipeline_params = {**merged_params, 'n_jobs': n_jobs}
pipeline_params = add_key_prefix(pipeline_params, 'regressor__')
pipeline.set_params(**pipeline_params)
mlflow.set_experiment(CURRENT_EXPERIMENT_NAME)
with mlflow.start_run():
mlflow.log_params(merged_params)
scores = cross_validate(
pipeline, x, y,
cv=3,
scoring=rmse_score,
return_train_score=True,
verbose=2,
)
rmse_val = - np.mean(scores['test_score'])
rmse_train = - np.mean(scores['train_score'])
mlflow.log_metrics(dict(
rmse_val=rmse_val,
rmse_train=rmse_train,
))
return rmse_val, rmse_train
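# Hypothetical call ('train' is a placeholder for the merged training frame, and the
# hyperparameter values are examples, not tuned settings):
#   rmse_val, rmse_train = cv(pipeline_factory(), train, n_estimators=40, max_depth=15)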
def oneshot(pipeline: Pipeline, df: pd.DataFrame, **params):
x = df.drop(columns='meter_reading')
y = np.log1p(df['meter_reading'].values)
# Circulant acoustic
import numpy as np
from scipy.linalg import toeplitz
def circ_1_level_acoustic(Toep, L, M, N, on_off):
import numpy as np
from scipy.linalg import toeplitz
# Create 1-level circulant approximation to Toeplitz operator
circ_L_opToep = np.zeros((L, M, N), dtype=np.complex128)
A = Toep
# Now construct circulant approximation
c1 = np.zeros((L, M, N), dtype=np.complex128)
for i in range(1, L):
c1[i, :, :] = (L - i)/L * A[i, :, :] + i/L * A[(L-1)-i+1, :, :]
# from IPython import embed; embed()
# Fix up for 1st element
c1[0, :, :] = A[0, :, :]
c1_fft = np.fft.fft(c1.T).T
circ_L_opToep = c1_fft
if (on_off in 'on'):
# Construct 1-level preconditioner
circ = np.zeros((L, M*N, M*N), dtype=np.complex128)
for i_loop in range(0, L):
temp = np.zeros((M*N, M*N), dtype=np.complex128)
chan = np.zeros((N, M, M), dtype=np.complex128)
# First block
for i in range(0, N):
chan[i, :, :] = toeplitz(c1_fft[i_loop, 0:M, i], c1_fft[i_loop, 0:M, i])
result = chan[toeplitz(np.arange(0, N))].transpose(0, 2, 1, 3).reshape(M*N, M*N).copy()
temp[0:M*N, 0:M*N] = result
circ[i_loop, :, :] = temp
else:
circ = 0
return circ, circ_L_opToep
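# The weights (L - i)/L and i/L above build the optimal (Chan-type) circulant approximation of
# the level-1 Toeplitz blocks; circ_L_opToep holds its FFT along the first axis, and, when
# on_off == 'on', circ additionally stores the dense M*N x M*N preconditioner blocks assembled
# per Fourier mode.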
def circ_2_level_acoustic(circ_L_opToep, L, M, N):
import numpy as np
from scipy.linalg import toeplitz
circ_M_opToep = np.zeros((L, M, N), dtype=np.complex128)
circ2 = np.zeros((L, M, N, N), dtype=np.complex128)
for i_loop in range(0, L):
# FIX ME: Don't need to create new A-F arrays, get rid of them
A = circ_L_opToep[i_loop, :, :]
c1 = np.zeros((M, N), dtype=np.complex128)
for i in range(1, M):
c1[i, :] = (M - i)/M * A[i, :] + i/M * A[(M-1)-i+1, :]
c1[0, :] = A[0, :]
c1_fft = np.fft.fft(c1, axis=0)
circ_M_opToep[i_loop, :, :] = c1_fft
for j_loop in range(0, M):
temp = np.zeros((N, N), dtype=np.complex128)
# First block
temp[0:N, 0:N] = toeplitz(c1_fft[j_loop, 0:N], c1_fft[j_loop, 0:N])
circ2[i_loop, j_loop, :, :] = temp
return circ2, circ_L_opToep
# Matrix-vector product with 2-level circulant preconditioner
def mvp_circ2_acoustic(JInVec, circ2_inv, L, M, N, idx):
import numpy as np
V_R = JInVec.reshape(L, M, N, order='F')
V_R[np.invert(idx)] = 0.0
Vrhs = V_R.reshape(L*M*N, 1, order='F')
temp = Vrhs.reshape(L,M*N, order='F')
temp = np.fft.fft(temp, axis=0).T # transpose is application of permutation matrix
for i in range(0, L):
TEMP = temp[:, i].reshape(M,N, order='F')
TEMP = np.fft.fft(TEMP, axis=0).T
for j in range(0, M):
TEMP[:,j] = np.matmul(circ2_inv[i, j, :, :], TEMP[:, j])
TEMP = np.fft.ifft(TEMP.T, axis=0)
temp[:, i] = TEMP.reshape(1,M*N, order='F')
temp = np.fft.ifft(temp.T, axis=0) # transpose is application of permutation matrix transpose
TEMP = temp.reshape(L*M*N,1, order='F')
TEMP_RO = TEMP.reshape(L, M, N, order='F')
    TEMP_RO[np.invert(idx)] = 0.0
import os
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
import mikeio
from mikeio import Dataset, Dfsu, Dfs2, Dfs0
from mikeio.eum import EUMType, ItemInfo, EUMUnit
@pytest.fixture
def ds1():
nt = 10
ne = 7
d1 = np.zeros([nt, ne]) + 0.1
d2 = np.zeros([nt, ne]) + 0.2
data = [d1, d2]
time = pd.date_range(start=datetime(2000, 1, 1), freq="S", periods=nt)
items = [ItemInfo("Foo"), ItemInfo("Bar")]
return Dataset(data, time, items)
@pytest.fixture
def ds2():
nt = 10
ne = 7
d1 = np.zeros([nt, ne]) + 1.0
d2 = np.zeros([nt, ne]) + 2.0
data = [d1, d2]
time = pd.date_range(start=datetime(2000, 1, 1), freq="S", periods=nt)
items = [ItemInfo("Foo"), ItemInfo("Bar")]
return Dataset(data, time, items)
def test_create_wrong_data_type_error():
data = ["item 1", "item 2"]
nt = 2
time = pd.date_range(start=datetime(2000, 1, 1), freq="S", periods=nt)
with pytest.raises(TypeError, match="numpy"):
Dataset(data=data, time=time)
def test_get_names():
data = []
nt = 100
d = np.zeros([nt, 100, 30]) + 1.0
data.append(d)
time = pd.date_range(start=datetime(2000, 1, 1), freq="S", periods=nt)
items = [ItemInfo("Foo")]
ds = Dataset(data, time, items)
assert ds.items[0].name == "Foo"
assert ds.items[0].type == EUMType.Undefined
assert repr(ds.items[0].unit) == "undefined"
def test_select_subset_isel():
nt = 100
d1 = np.zeros([nt, 100, 30]) + 1.5
d2 = np.zeros([nt, 100, 30]) + 2.0
d1[0, 10, :] = 2.0
d2[0, 10, :] = 3.0
data = [d1, d2]
time = pd.date_range(start=datetime(2000, 1, 1), freq="S", periods=nt)
items = [ItemInfo("Foo"), ItemInfo("Bar")]
ds = Dataset(data, time, items)
selds = ds.isel(10, axis=1)
assert len(selds.items) == 2
assert len(selds.data) == 2
assert selds["Foo"].shape == (100, 30)
assert selds["Foo"][0, 0] == 2.0
assert selds["Bar"][0, 0] == 3.0
def test_select_subset_isel_axis_out_of_range_error(ds2):
assert len(ds2.shape) == 2
dss = ds2.isel(idx=0)
# After subsetting there is only one dimension
assert len(dss.shape) == 1
with pytest.raises(ValueError):
dss.isel(idx=0, axis="spatial")
def test_select_temporal_subset_by_idx():
nt = 100
d1 = np.zeros([nt, 100, 30]) + 1.5
d2 = np.zeros([nt, 100, 30]) + 2.0
d1[0, 10, :] = 2.0
d2[0, 10, :] = 3.0
data = [d1, d2]
time = pd.date_range(start=datetime(2000, 1, 1), freq="S", periods=nt)
items = [ItemInfo("Foo"), ItemInfo("Bar")]
ds = Dataset(data, time, items)
selds = ds.isel([0, 1, 2], axis=0)
assert len(selds) == 2
assert selds["Foo"].shape == (3, 100, 30)
def test_temporal_subset_fancy():
nt = (24 * 31) + 1
d1 = np.zeros([nt, 100, 30]) + 1.5
d2 = np.zeros([nt, 100, 30]) + 2.0
data = [d1, d2]
time = pd.date_range("2000-1-1", freq="H", periods=nt)
items = [ItemInfo("Foo"), ItemInfo("Bar")]
ds = Dataset(data, time, items)
assert ds.time[0].hour == 0
assert ds.time[-1].hour == 0
selds = ds["2000-01-01 00:00":"2000-01-02 00:00"]
assert len(selds) == 2
assert selds["Foo"].shape == (25, 100, 30)
selds = ds[:"2000-01-02 00:00"]
assert selds["Foo"].shape == (25, 100, 30)
selds = ds["2000-01-31 00:00":]
assert selds["Foo"].shape == (25, 100, 30)
selds = ds["2000-01-30":]
assert selds["Foo"].shape == (49, 100, 30)
def test_subset_with_datetime_is_not_supported():
nt = (24 * 31) + 1
d1 = np.zeros([nt, 100, 30]) + 1.5
d2 = np.zeros([nt, 100, 30]) + 2.0
data = [d1, d2]
time = pd.date_range("2000-1-2", freq="H", periods=nt)
items = [ItemInfo("Foo"), ItemInfo("Bar")]
ds = Dataset(data, time, items)
with pytest.raises(ValueError):
ds[datetime(2000, 1, 1)]
def test_select_item_by_name():
nt = 100
d1 = np.zeros([nt, 100, 30]) + 1.5
d2 = np.zeros([nt, 100, 30]) + 2.0
d1[0, 10, :] = 2.0
d2[0, 10, :] = 3.0
data = [d1, d2]
time = pd.date_range("2000-1-2", freq="H", periods=nt)
items = [ItemInfo("Foo"), ItemInfo("Bar")]
ds = Dataset(data, time, items)
foo_data = ds["Foo"]
assert foo_data[0, 10, 0] == 2.0
def test_select_multiple_items_by_name():
nt = 100
d1 = np.zeros([nt, 100, 30]) + 1.5
d2 = np.zeros([nt, 100, 30]) + 2.0
d3 = np.zeros([nt, 100, 30]) + 3.0
data = [d1, d2, d3]
time = pd.date_range("2000-1-2", freq="H", periods=nt)
# items = [ItemInfo("Foo"), ItemInfo("Bar"), ItemInfo("Baz")]
items = [ItemInfo(x) for x in ["Foo", "Bar", "Baz"]]
ds = Dataset(data, time, items)
assert len(ds) == 3 # Length of a dataset is the number of items
newds = ds[["Baz", "Foo"]]
assert newds.items[0].name == "Baz"
assert newds.items[1].name == "Foo"
assert newds["Foo"][0, 10, 0] == 1.5
assert len(newds) == 2
def test_select_multiple_items_by_index():
nt = 100
d1 = np.zeros([nt, 100, 30]) + 1.5
d2 = np.zeros([nt, 100, 30]) + 2.0
d3 = np.zeros([nt, 100, 30]) + 3.0
data = [d1, d2, d3]
time = pd.date_range("2000-1-2", freq="H", periods=nt)
items = [ItemInfo(x) for x in ["Foo", "Bar", "Baz"]]
ds = Dataset(data, time, items)
assert len(ds) == 3 # Length of a dataset is the number of items
newds = ds[[2, 0]]
assert newds.items[0].name == "Baz"
assert newds.items[1].name == "Foo"
assert newds["Foo"][0, 10, 0] == 1.5
assert len(newds) == 2
def test_select_item_by_iteminfo():
nt = 100
d1 = np.zeros([nt, 100, 30]) + 1.5
d2 = np.zeros([nt, 100, 30]) + 2.0
d1[0, 10, :] = 2.0
d2[0, 10, :] = 3.0
data = [d1, d2]
time = pd.date_range("2000-1-2", freq="H", periods=nt)
items = [ItemInfo("Foo"), ItemInfo("Bar")]
ds = Dataset(data, time, items)
foo_item = items[0]
foo_data = ds[foo_item]
assert foo_data[0, 10, 0] == 2.0
def test_select_subset_isel_multiple_idxs():
nt = 100
d1 = np.zeros([nt, 100, 30]) + 1.5
d2 = np.zeros([nt, 100, 30]) + 2.0
data = [d1, d2]
time = pd.date_range("2000-1-2", freq="H", periods=nt)
items = [ItemInfo("Foo"), ItemInfo("Bar")]
ds = Dataset(data, time, items)
selds = ds.isel([10, 15], axis=1)
assert len(selds.items) == 2
assert len(selds.data) == 2
assert selds["Foo"].shape == (100, 2, 30)
def test_decribe(ds1):
df = ds1.describe()
assert df.columns[0] == "Foo"
assert df.loc["mean"][1] == pytest.approx(0.2)
assert df.loc["max"][0] == pytest.approx(0.1)
def test_create_undefined():
nt = 100
d1 = np.zeros([nt])
d2 = np.zeros([nt])
data = [d1, d2]
time = pd.date_range("2000-1-2", freq="H", periods=nt)
# items = 2
ds = Dataset(data, time)
assert len(ds.items) == 2
assert len(ds.data) == 2
assert ds.items[0].name == "Item 1"
assert ds.items[0].type == EUMType.Undefined
def test_create_named_undefined():
nt = 100
d1 = np.zeros([nt])
d2 = np.zeros([nt])
data = [d1, d2]
time = pd.date_range("2000-1-2", freq="H", periods=nt)
ds = Dataset(data=data, time=time, items=["Foo", "Bar"])
assert len(ds.items) == 2
assert len(ds.data) == 2
assert ds.items[1].name == "Bar"
assert ds.items[1].type == EUMType.Undefined
def test_to_dataframe_single_timestep():
nt = 1
d1 = np.zeros([nt])
d2 = np.zeros([nt])
data = [d1, d2]
time = pd.date_range("2000-1-2", freq="H", periods=nt)
items = [ItemInfo("Foo"), ItemInfo("Bar")]
ds = Dataset(data, time, items)
df = ds.to_dataframe()
assert list(df.columns) == ["Foo", "Bar"]
assert isinstance(df.index, pd.DatetimeIndex)
def test_to_dataframe():
nt = 100
d1 = np.zeros([nt])
d2 = np.zeros([nt])
data = [d1, d2]
time = pd.date_range("2000-1-2", freq="H", periods=nt)
items = [ItemInfo("Foo"), ItemInfo("Bar")]
ds = Dataset(data, time, items)
df = ds.to_dataframe()
assert list(df.columns) == ["Foo", "Bar"]
assert isinstance(df.index, pd.DatetimeIndex)
def test_multidimensional_to_dataframe_no_supported():
nt = 100
d1 = np.zeros([nt, 2])
time = pd.date_range("2000-1-2", freq="H", periods=nt)
items = [ItemInfo("Foo")]
ds = Dataset([d1], time, items)
with pytest.raises(ValueError):
ds.to_dataframe()
def test_get_data():
data = []
nt = 100
d = np.zeros([nt, 100, 30]) + 1.0
data.append(d)
time = pd.date_range("2000-1-2", freq="H", periods=nt)
items = [ItemInfo("Foo")]
ds = Dataset(data, time, items)
assert ds.data[0].shape == (100, 100, 30)
def test_interp_time():
nt = 4
d = np.zeros([nt, 10, 3])
d[1] = 2.0
d[3] = 4.0
data = [d]
time = pd.date_range("2000-1-1", freq="D", periods=nt)
items = [ItemInfo("Foo")]
ds = Dataset(data, time, items)
assert ds.data[0].shape == (nt, 10, 3)
dsi = ds.interp_time(dt=3600)
assert ds.time[0] == dsi.time[0]
assert dsi.data[0].shape == (73, 10, 3)
def test_interp_time_to_other_dataset():
# Arrange
## Dataset 1
nt = 4
data = [np.zeros([nt, 10, 3])]
time = pd.date_range("2000-1-1", freq="D", periods=nt)
items = [ItemInfo("Foo")]
ds1 = Dataset(data, time, items)
assert ds1.data[0].shape == (nt, 10, 3)
## Dataset 2
nt = 12
data = [np.ones([nt, 10, 3])]
time = pd.date_range("2000-1-1", freq="H", periods=nt)
items = [ItemInfo("Foo")]
ds2 = Dataset(data, time, items)
# Act
## interp
dsi = ds1.interp_time(dt=ds2.time)
# Assert
assert dsi.time[0] == ds2.time[0]
assert dsi.time[-1] == ds2.time[-1]
assert len(dsi.time) == len(ds2.time)
assert dsi.data[0].shape[0] == ds2.data[0].shape[0]
# Accept dataset as argument
dsi2 = ds1.interp_time(ds2)
assert dsi2.time[0] == ds2.time[0]
def test_extrapolate():
# Arrange
## Dataset 1
nt = 2
data = [np.zeros([nt, 10, 3])]
time = pd.date_range("2000-1-1", freq="D", periods=nt)
items = [ItemInfo("Foo")]
ds1 = Dataset(data, time, items)
assert ds1.data[0].shape == (nt, 10, 3)
## Dataset 2 partly overlapping with Dataset 1
nt = 3
data = [np.ones([nt, 10, 3])]
time = pd.date_range("2000-1-2", freq="H", periods=nt)
items = [ItemInfo("Foo")]
ds2 = Dataset(data, time, items)
# Act
## interp
dsi = ds1.interp_time(dt=ds2.time, fill_value=1.0)
# Assert
assert dsi.time[0] == ds2.time[0]
assert dsi.time[-1] == ds2.time[-1]
assert len(dsi.time) == len(ds2.time)
assert dsi.data[0][0] == pytest.approx(0.0)
assert dsi.data[0][1] == pytest.approx(1.0) # filled
assert dsi.data[0][2] == pytest.approx(1.0) # filled
def test_extrapolate_not_allowed():
## Dataset 1
nt = 2
data = [np.zeros([nt, 10, 3])]
time = pd.date_range("2000-1-1", freq="D", periods=nt)
items = [ItemInfo("Foo")]
ds1 = Dataset(data, time, items)
assert ds1.data[0].shape == (nt, 10, 3)
## Dataset 2 partly overlapping with Dataset 1
nt = 3
data = [np.ones([nt, 10, 3])]
time = pd.date_range("2000-1-2", freq="H", periods=nt)
items = [ItemInfo("Foo")]
ds2 = Dataset(data, time, items)
with pytest.raises(ValueError):
dsi = ds1.interp_time(dt=ds2.time, fill_value=1.0, extrapolate=False)
def test_get_data_2():
nt = 100
data = []
d = np.zeros([nt, 100, 30]) + 1.0
data.append(d)
time = pd.date_range("2000-1-2", freq="H", periods=nt)
items = [ItemInfo("Foo")]
ds = Dataset(data, time, items)
assert data[0].shape == (100, 100, 30)
def test_get_data_name():
nt = 100
data = []
d = np.zeros([nt, 100, 30]) + 1.0
data.append(d)
time = pd.date_range("2000-1-2", freq="H", periods=nt)
items = [ItemInfo("Foo")]
ds = Dataset(data, time, items)
assert ds["Foo"].shape == (100, 100, 30)
def test_set_data_name():
nt = 100
time = pd.date_range("2000-1-2", freq="H", periods=nt)
items = [ItemInfo("Foo")]
ds = Dataset([np.zeros((nt, 10))], time, items)
assert ds["Foo"][0, 0] == 0.0
ds["Foo"] = np.zeros((nt, 10)) + 1.0
assert ds["Foo"][0, 0] == 1.0
ds[0] = np.zeros((nt, 10)) + 2.0 # Set using position
assert ds["Foo"][0, 0] == 2.0 # Read using name
with pytest.raises(ValueError):
ds[[0, 1]] = (
            np.zeros((nt, 10))
        )
#!/usr/bin/env python
"""
@package ion_functions.qc_functions
@file ion_functions/qc_functions.py
@author <NAME>
@brief Module containing QC functions ported from matlab samples in DPS documents
"""
from ion_functions.qc.qc_extensions import stuckvalues, spikevalues, gradientvalues, ntp_to_month
import time
import numpy as np
import numexpr as ne
from scipy.interpolate import LinearNDInterpolator
from ion_functions import utils
from ion_functions.utils import fill_value
# try to load the OOI logging module, using default Python logging module if
# unavailable
try:
from ooi.logging import log
except ImportError:
import logging
log = logging.getLogger('ion-functions')
def is_fill(arr):
return np.atleast_1d(arr)[-1] == -9999. # Not the normal fill value, it's hardcoded to the QC params
def is_none(arr):
return arr is None or (np.atleast_1d(arr)[-1] == None)
def dataqc_globalrangetest_minmax(dat, dat_min, dat_max, strict_validation=False):
'''
Python wrapper for dataqc_globalrangetest
Combines the min/max arguments into list for dataqc_globalrangetest
'''
if is_none(dat_min) or is_none(dat_max) or is_fill(dat_min) or is_fill(dat_max):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
return dataqc_globalrangetest(dat, [np.atleast_1d(dat_min)[-1], np.atleast_1d(dat_max)[-1]], strict_validation=strict_validation)
def dataqc_globalrangetest(dat, datlim, strict_validation=False):
"""
Description:
Data quality control algorithm testing if measurements fall into a
user-defined valid range. Returns 1 for presumably good data and 0 for
data presumed bad.
Implemented by:
2010-07-28: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
2013-05-30: <NAME>. Performance improvements by adding
strict_validation flag.
Usage:
qcflag = dataqc_globalrangetest(dat, datlim, strict_validation)
where
qcflag = Boolean, 0 if value is outside range, else = 1.
dat = Input dataset, any scalar or vector. Must be numeric and real.
datlim = Two-element vector with the minimum and maximum values
considered to be valid.
strict_validation = Flag (default is False) to assert testing of input
types (e.g. isreal, isnumeric)
References:
OOI (2012). Data Product Specification for Global Range Test. Document
Control Number 1341-10004. https://alfresco.oceanobservatories.org
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10004_Data_Product_SPEC_GLBLRNG_OOI.pdf)
"""
dat = np.atleast_1d(dat)
datlim = np.atleast_1d(datlim)
if strict_validation:
if not utils.isnumeric(dat).all():
raise ValueError('\'dat\' must be numeric')
if not utils.isreal(dat).all():
raise ValueError('\'dat\' must be real')
if not utils.isnumeric(datlim).all():
raise ValueError('\'datlim\' must be numeric')
if not utils.isreal(datlim).all():
raise ValueError('\'datlim\' must be real')
if len(datlim) < 2: # Must have at least 2 elements
raise ValueError('\'datlim\' must have at least 2 elements')
    return ((datlim.min() <= dat) & (dat <= datlim.max())).astype('int8')
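# Illustrative example (sketch, not from the DPS): values outside the
# user-defined range are flagged 0, values inside are flagged 1.
# >>> dataqc_globalrangetest(np.array([3.1, 4.2, 99.0]), [0.0, 40.0])
# array([1, 1, 0], dtype=int8)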
def dataqc_localrangetest_wrapper(dat, datlim, datlimz, dims, pval_callback):
if is_none(datlim) or np.all(np.atleast_1d(datlim).flatten() == -9999):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
if is_none(datlimz) or np.all(np.atleast_1d(datlim).flatten() == -9999):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
if is_none(dims):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
if is_none(pval_callback):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
z = []
for dim in dims:
if dim == 'month':
# Convert time vector to vector of months
v = pval_callback('time')
v = np.asanyarray(v, dtype=np.float)
v = ntp_to_month(v)
z.append(v)
else:
# Fetch the dimension from the callback method
v = pval_callback(dim)
z.append(v)
if len(dims)>1:
z = np.column_stack(z)
else:
z = z[0]
datlimz = datlimz[:,0]
return dataqc_localrangetest(dat, z, datlim, datlimz)
def dataqc_localrangetest(dat, z, datlim, datlimz, strict_validation=False):
"""
Description:
Data quality control algorithm testing if measurements fall into a
user-defined valid range. This range is not constant but varies with
measurement location. Returns 1 for presumably good data and 0 for data
presumed bad.
Implemented by:
2012-07-17: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
Usage:
qcflag = dataqc_localrangetest(dat, z, datlim, datlimz)
where
qcflag = Boolean, 0 if value is outside range, else = 1.
dat = input data set, a numeric real scalar or column vector.
z = location of measurement dat. must have same # of rows as dat and
same # of columns as datlimz
datlim = two column array with the minimum (column 1) and maximum
(column 2) values considered valid.
datlimz = array with the locations where datlim is given. must have
same # of rows as datlim and same # of columns as z.
References:
OOI (2012). Data Product Specification for Local Range Test. Document
Control Number 1341-10005. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10005_Data_Product_SPEC_LOCLRNG_OOI.pdf)
"""
if strict_validation:
# check if dat and datlim are matrices
if not utils.isvector(dat):
raise ValueError('\'dat\' must be a matrix')
if not utils.ismatrix(datlim):
raise ValueError('\'datlim\' must be a matrix')
# check if all inputs are numeric and real
for k, arg in {'dat': dat, 'z': z, 'datlim': datlim,
'datlimz': datlimz}.iteritems():
if not utils.isnumeric(arg).all():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isreal(arg).all():
raise ValueError('\'{0}\' must be real'.format(k))
if len(datlim.shape) == 3 and datlim.shape[0] == 1:
datlim = datlim.reshape(datlim.shape[1:])
if len(datlimz.shape) == 3 and datlimz.shape[0] == 1:
datlimz = datlimz.reshape(datlimz.shape[1:])
# test size and shape of the input arrays datlimz and datlim, setting test
# variables.
array_size = datlimz.shape
if len(array_size) == 1:
numlim = array_size[0]
ndim = 1
else:
numlim = array_size[0]
ndim = array_size[1]
array_size = datlim.shape
tmp1 = array_size[0]
tmp2 = array_size[1]
if tmp1 != numlim:
raise ValueError('\'datlim\' and \'datlimz\' must '
'have the same number of rows.')
if tmp2 != 2:
raise ValueError('\'datlim\' must be structured as 2-D array '
'with exactly 2 columns and 1 through N rows.')
# test the size and shape of the z input array
array_size = z.shape
if len(array_size) == 1:
num = array_size[0]
tmp2 = 1
else:
num = array_size[0]
tmp2 = array_size[1]
if tmp2 != ndim:
raise ValueError('\'z\' must have the same number of columns '
'as \'datlimz\'.')
if num != dat.size:
        raise ValueError('Length of \'dat\' must match number of '
'rows in \'z\'')
# test datlim, values in column 2 must be greater than those in column 1
if not all(datlim[:, 1] > datlim[:, 0]):
raise ValueError('Second column values of \'datlim\' should be '
'greater than first column values.')
# calculate the upper and lower limits for the data set
if ndim == 1:
# determine the lower limits using linear interpolation
lim1 = np.interp(z, datlimz, datlim[:, 0], left=np.nan, right=np.nan)
# determine the upper limits using linear interpolation
lim2 = np.interp(z, datlimz, datlim[:, 1], left=np.nan, right=np.nan)
else:
# Compute Delaunay Triangulation and use linear interpolation to
# determine the N-dimensional lower limits
F = LinearNDInterpolator(datlimz, datlim[:, 0].reshape(numlim, 1))
lim1 = F(z).reshape(dat.size)
# Compute Delaunay Triangulation and use linear interpolation to
# determine the N-dimensional upper limits
F = LinearNDInterpolator(datlimz, datlim[:, 1].reshape(numlim, 1))
lim2 = F(z).reshape(dat.size)
# replace NaNs from above interpolations
ff = (np.isnan(lim1)) | (np.isnan(lim2))
lim1[ff] = np.max(datlim[:, 1])
lim2[ff] = np.min(datlim[:, 0])
# compute the qcflags
qcflag = (dat >= lim1) & (dat <= lim2)
return qcflag.astype('int8')
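# Illustrative example (sketch): 1-D location-dependent limits, interpolated
# linearly between the datlimz nodes.
# >>> dat = np.array([5.0, 25.0, 15.0])
# >>> z = np.array([0.0, 50.0, 100.0])
# >>> datlim = np.array([[0.0, 10.0], [0.0, 20.0], [0.0, 30.0]])
# >>> datlimz = np.array([0.0, 50.0, 100.0])
# >>> dataqc_localrangetest(dat, z, datlim, datlimz)
# array([1, 0, 1], dtype=int8)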
def dataqc_spiketest_wrapper(dat, acc, N, L, strict_validation=False):
if is_none(acc) or is_fill(acc) or is_none(N) or is_fill(N) or is_none(L) or is_fill(L):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
return dataqc_spiketest(dat, np.atleast_1d(acc)[-1], np.atleast_1d(N)[-1], np.atleast_1d(L)[-1], strict_validation=strict_validation)
def dataqc_spiketest(dat, acc, N=5, L=5, strict_validation=False):
"""
Description:
Data quality control algorithm testing a time series for spikes.
Returns 1 for presumably good data and 0 for data presumed bad.
        The time series is divided into windows of length L (an odd integer
number). Then, window by window, each value is compared to its (L-1)
neighboring values: a range R of these (L-1) values is computed (max.
minus min.), and replaced with the measurement accuracy ACC if ACC>R. A
value is presumed to be good, i.e. no spike, if it deviates from the
mean of the (L-1) peers by less than a multiple of the range,
N*max(R,ACC).
        For values more than (L-1)/2 points from the start or end of the
        series, the peers are taken symmetrically before and after the test
        value. Within (L-1)/2 points of the start or end, the peers are the
        first/last L values (excluding the test value itself).
The purpose of ACC is to restrict spike detection to deviations
exceeding a minimum threshold value (N*ACC) even if the data have
little variability. Use ACC=0 to disable this behavior.
Implemented by:
2012-07-28: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
2013-05-30: <NAME>. Performance optimizations.
Usage:
qcflag = dataqc_spiketest(dat, acc, N, L)
where
qcflag = Boolean, 0 if value is outside range, else = 1.
dat = input data set, a numeric, real vector.
acc = Accuracy of any input measurement.
N = (optional, defaults to 5) Range multiplier, cf. above
        L = (optional, defaults to 5) Window length, cf. above
References:
OOI (2012). Data Product Specification for Spike Test. Document
Control Number 1341-10006. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10006_Data_Product_SPEC_SPKETST_OOI.pdf)
"""
dat = np.atleast_1d(dat)
if strict_validation:
if not utils.isnumeric(dat).all():
raise ValueError('\'dat\' must be numeric')
if not utils.isreal(dat).all():
raise ValueError('\'dat\' must be real')
if not utils.isvector(dat):
raise ValueError('\'dat\' must be a vector')
for k, arg in {'acc': acc, 'N': N, 'L': L}.iteritems():
if not utils.isnumeric(arg).all():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isreal(arg).all():
raise ValueError('\'{0}\' must be real'.format(k))
dat = np.asanyarray(dat, dtype=np.float)
out = spikevalues(dat, L, N, acc)
return out
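# Illustrative example (sketch; the exact flags come from the compiled
# spikevalues routine): a lone excursion far outside N*max(R, acc) of its
# window mean is flagged 0, everything else 1.
# >>> dataqc_spiketest(np.array([1.0, 1.0, 1.0, 50.0, 1.0, 1.0, 1.0]), acc=0.1)
# array([1, 1, 1, 0, 1, 1, 1], dtype=int8)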
def dataqc_polytrendtest_wrapper(dat, t, ord_n, nstd, strict_validation=False):
    if is_none(ord_n) or is_fill(ord_n) or is_none(nstd) or is_fill(nstd):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
return dataqc_polytrendtest(dat, t, np.atleast_1d(ord_n)[-1], np.atleast_1d(nstd)[-1], strict_validation=strict_validation)
def dataqc_polytrendtest(dat, t, ord_n=1, nstd=3, strict_validation=False):
"""
Description:
Data quality control algorithm testing if measurements contain a
significant portion of a polynomial. Returns 1 if this is not the case,
else 0.
The purpose of this test is to check if a significant fraction of the
variability in a time series can be explained by a drift, possibly
interpreted as a sensor drift. This drift is assumed to be a polynomial
of order ORD. Use ORD=1 to consider a linear drift
The time series dat is passed to MatLab's POLYFIT routine to obtain a
polynomial fit PP to dat, and the difference dat-PP is compared to the
original dat. If the standard deviation of (dat-PP) is less than that
of dat by a factor of NSTD, the time series is assumed to contain a
significant trend (output will be 0), else not (output will be 1).
Implemented by:
2012-10-29: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
2013-05-30: <NAME>. Performance optimizations.
Usage:
qcflag = dataqc_polytrendtest(dat, t, ord_n, nstd, strict_validation)
where
qcflag = Boolean, 0 a trend is detected, 1 elsewhere.
dat = Input dataset, a numeric real vector.
t = time record associated with dat
ord_n (optional, defaults to 1) = Polynomial order.
nstd (optional, defaults to 3) = Factor by how much the standard
deviation must be reduced before qcflag switches from 1 to 0
strict_validation (optional, defaults to False) = Flag asserting
testing of inputs.
References:
OOI (2012). Data Product Specification for Trend Test. Document
Control Number 1341-10007. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10007_Data_Product_SPEC_TRNDTST_OOI.pdf)
"""
dat = np.atleast_1d(dat)
t = np.atleast_1d(t)
if strict_validation:
for k, arg in {'dat': dat, 't': t, 'ord_n': ord_n, 'nstd': nstd}.iteritems():
if not utils.isnumeric(arg).all():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isreal(arg).all():
raise ValueError('\'{0}\' must be real'.format(k))
for k, arg in {'dat': dat, 't': t}.iteritems():
if not utils.isvector(arg):
raise ValueError('\'{0}\' must be a vector'.format(k))
for k, arg in {'ord_n': ord_n, 'nstd': nstd}.iteritems():
if not utils.isscalar(arg):
raise ValueError('\'{0}\' must be a scalar'.format(k))
ord_n = int(round(abs(ord_n)))
nstd = int(abs(nstd))
ll = len(dat)
# Not needed because time is incorporated as 't'
# t = range(ll)
pp = np.polyfit(t, dat, ord_n)
datpp = np.polyval(pp, t)
# test for a trend
if np.atleast_1d((np.std(dat - datpp) * nstd) < np.std(dat)).all():
trndtst = 0
else:
trndtst = 1
# insure output size equals input, even though test yields a single value.
qcflag = np.ones(dat.shape).astype('int8') * trndtst
return qcflag
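# Illustrative example (sketch): a purely linear record is explained entirely
# by the order-1 fit, so the residual standard deviation collapses and every
# flag is set to 0 (trend detected).
# >>> t = np.arange(10.0)
# >>> dataqc_polytrendtest(0.5 * t + 2.0, t, ord_n=1, nstd=3)
# array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=int8)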
def dataqc_stuckvaluetest_wrapper(x, reso, num, strict_validation=False):
if is_none(reso) or is_fill(reso) or is_none(num) or is_fill(num):
out = np.empty(x.shape, np.int8)
out.fill(-99)
return out
return dataqc_stuckvaluetest(x, np.atleast_1d(reso)[-1], np.atleast_1d(num)[-1], strict_validation=strict_validation)
def dataqc_stuckvaluetest(x, reso, num=10, strict_validation=False):
"""
Description:
Data quality control algorithm testing a time series for "stuck
values", i.e. repeated occurences of one value. Returns 1 for
presumably good data and 0 for data presumed bad.
Implemented by:
2012-10-29: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
Usage:
qcflag = =dataqc_stuckvaluetest(x, RESO, NUM);
where
qcflag = Boolean output: 0 where stuck values are found, 1 elsewhere.
x = Input time series (vector, numeric).
reso = Resolution; repeat values less than reso apart will be
considered "stuck values".
num = Minimum number of successive values within reso of each other
that will trigger the "stuck value". num is optional and defaults
to 10 if omitted or empty.
References:
OOI (2012). Data Product Specification for Stuck Value Test. Document
Control Number 1341-10008. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10008_Data_Product_SPEC_STUCKVL_OOI.pdf)
"""
dat = np.atleast_1d(x)
if strict_validation:
if not utils.isnumeric(dat).all():
raise ValueError('\'x\' must be numeric')
if not utils.isvector(dat):
raise ValueError('\'x\' must be a vector')
if not utils.isreal(dat).all():
raise ValueError('\'x\' must be real')
for k, arg in {'reso': reso, 'num': num}.iteritems():
if not utils.isnumeric(arg).all():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isscalar(arg):
raise ValueError('\'{0}\' must be a scalar'.format(k))
if not utils.isreal(arg).all():
raise ValueError('\'{0}\' must be real'.format(k))
num = np.abs(num)
dat = np.asanyarray(dat, dtype=np.float)
ll = len(x)
if ll < num:
# Warn - 'num' is greater than len(x), returning zeros
out = np.zeros(dat.size, dtype='int8')
else:
out = stuckvalues(dat, reso, num)
return out
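# Illustrative example (sketch; the exact flags come from the compiled
# stuckvalues routine): num or more consecutive readings within reso of each
# other are flagged as stuck (0).
# >>> x = np.array([4.83] * 12 + [4.90, 5.02])
# >>> dataqc_stuckvaluetest(x, reso=0.001, num=10)
# expected: the twelve repeated values flagged 0, the last two flagged 1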
def dataqc_gradienttest_wrapper(dat, x, ddatdx, mindx, startdat, toldat, strict_validation=False):
if is_none(ddatdx) or is_fill(ddatdx) or is_none(mindx) or is_fill(mindx) or is_none(startdat) or is_fill(startdat) or is_none(toldat) or is_fill(toldat):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
outqc = dataqc_gradienttest(dat, x, [-np.atleast_1d(ddatdx)[-1], np.atleast_1d(ddatdx)[-1]], np.atleast_1d(mindx)[-1], np.atleast_1d(startdat)[-1], np.atleast_1d(toldat)[-1], strict_validation=strict_validation)
return outqc
def dataqc_gradienttest(dat, x, ddatdx, mindx, startdat, toldat, strict_validation=False):
"""
Description
Data quality control algorithm testing if changes between successive
data points fall within a certain range.
Input data dat are given as a function of coordinate x. The algorithm
will flag dat values as bad if the change deltaDAT/deltaX between
successive dat values exceeds thresholds given in ddatdx. Once the
threshold is exceeded, following dat are considered bad until a dat
value returns to within toldat of the last known good value.
It is possible to remove data points that are too close together in x
coordinates (use mindx).
By default, the first value of dat is considered good. To change this,
use startdat and toldat to set as the first good data point the first
one that comes within toldat of startdat.
Implemented by:
2012-07-17: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
Usage:
outdat, outx, outqc = dataqc_gradienttest(dat, x, ddatdx, mindx,
startdat, toldat);
where
outdat = same as dat except that NaNs and values not meeting mindx are
removed.
outx = same as x except that NaNs and values not meeting mindx are
removed.
outqc = output quality control flags for outdat. 0 means bad data, 1
means good data.
dat = input dataset, a numeric real vector.
x = coordinate (e.g. time, distance) along which dat is given. Must be
of the same size as dat and strictly increasing.
ddatdx = two-element vector defining the valid range of ddat/dx
from one point to the next.
mindx = scalar. minimum dx for which this test will be applied (data
that are less than mindx apart will be deleted). defaults to zero
if NaN/empty.
startdat = start value (scalar) of dat that is presumed good. defaults
to first non-NaN value of dat if NaN/empty.
toldat = tolerance value (scalar) for dat; threshold to within which
dat must return to be counted as good, after exceeding a ddatdx
threshold detected bad data.
References:
OOI (2012). Data Product Specification for Gradient Test. Document
Control Number 1341-100010.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-10010_Data_Product_SPEC_GRDTEST_OOI.pdf)
"""
if strict_validation:
if not utils.isvector(dat) or not utils.isvector(x):
raise ValueError('\'dat\' and \'x\' must be vectors')
if len(dat) != len(x):
            raise ValueError('\'dat\' and \'x\' must be of equal length')
if not all(np.diff(x) > 0):
            raise ValueError('\'x\' must be monotonically increasing')
dat = np.asanyarray(dat, dtype=np.float).flatten()
x = np.asanyarray(x, dtype=np.float).flatten()
if np.isnan(mindx):
mindx = 0
mindx = mindx or 0
if np.isnan(startdat):
startdat = 0
startdat = startdat or 0
    # No strict validation here; these are scalars and must be validated
# before going into the C-layer
if not utils.isscalar(mindx):
raise ValueError("'mindx' must be scalar, NaN, or empty.")
if not utils.isscalar(startdat):
raise ValueError("'startdat' must be scalar, NaN, or empty.")
# Confirm that there are still data points left, else abort:
if np.abs(x[0] - x[-1]) < mindx:
out = np.zeros(x.shape)
out.fill(1)
log.warn('Too few values to inspect')
return out
grad_min = ddatdx[0]
grad_max = ddatdx[1]
out = gradientvalues(dat, x, grad_min, grad_max, mindx, startdat, toldat)
return out
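# Illustrative example (sketch; flags come from the compiled gradientvalues
# routine): once |d(dat)/dx| exceeds ddatdx the following points stay bad
# until dat returns to within toldat of the last good value.
# >>> dat = np.array([3.0, 5.0, 98.0, 99.0, 4.0])
# >>> x = np.arange(5.0)
# >>> dataqc_gradienttest(dat, x, [-2.5, 2.5], 0.0, 3.0, 5.0)
# expected flags: array([1, 1, 0, 0, 1])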
def dataqc_solarelevation(lon, lat, dt):
"""
Description
Computes instantaneous no-sky solar radiation and altitude from date
and time stamp and position data. It is put together from expressions
taken from Appendix E in the 1978 edition of Almanac for Computers,
Nautical Almanac Office, U.S. Naval Observatory. They are reduced
accuracy expressions valid for the years 1800-2100. Solar declination
computed from these expressions is accurate to at least 1'. The solar
constant (1368.0 W/m^2) represents a mean of satellite measurements
made over the last sunspot cycle (1979-1995) taken from Coffey et al
(1995), Earth System Monitor, 6, 6-10.
This code is a python implementation of soradna1.m available in Air-Sea
Toolbox.
Implemented by:
1997-03-08: Version 1.0 (author unknown) of soradna1.m.
1998-08-28: Version 1.1 (author unknown) of soradna1.m.
1999-08-05: Version 2.0 (author unknown) of soradna1.m.
2013-04-07: <NAME>. Initial python implementation. Note,
this function is derived from old, unmaintained code. More robust
implementations exist (e.g. PyEphem and PySolar) that will probably
calculate these values more accurately.
Usage:
z, sorad = dataqc_solarelevation(lon, lat, dt)
where
z = solar altitude [degrees]
sorad = no atmosphere solar radiation [W m^-2]
lon = longitude (east is positive) [decimal degress]
lat = latitude [decimal degrees]
dt = date and time stamp in UTC [seconds since 1970-01-01]
Examples
dt = 1329177600 # 2012-02-14 00:00:00
z, sorad = dataqc_solarelevation(120, 30, dt)
z = 15.1566, sorad = 366.8129
OOI (2012). Data Product Specification for Solar Elevation. Document
Control Number 1341-100011.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-10011_Data_Product_SPEC_SOLRELV_OOI.pdf)
"""
# Test lengths and types of inputs. Latitude and longitude must be the same
    # size and can either be a scalar or a vector. The date and time stamp
# can also be either a scalar or a vector. If all three inputs are vectors,
# they must be of the same length.
if len(lon) != len(lat):
raise ValueError('\'lon\' and \'lat\' must be the same size')
if utils.isvector(lon) and utils.isvector(lat) and utils.isvector(dt):
# test their lengths
if not len(lon) == len(lat) == len(dt):
raise ValueError('If all inputs are vectors, these must all '
'be of the same length')
# set constants (using values from as_consts.m)
# ------ short-wave flux calculations
# the solar constant [W m^-2] represents a mean of satellite measurements
# made over the last sunspot cycle (1979-1995), taken from Coffey et al.
# (1995), Earth System Monitor, 6, 6-10.
solar_const = 1368.0
# Create a time tuple in UTC from the Epoch time input, and then create
# scalars or numpy arrays of time elements for subsequent calculations.
ldt = len(dt)
yy = np.zeros(ldt, dtype=np.int)
    mn = np.zeros(ldt, dtype=np.int)
import pandas as pd
import geopandas as gp
import numpy as np
from shapely.geometry import Point, LineString, MultiLineString
def to2D(geometry):
"""Flatten a 3D line to 2D.
Parameters
----------
geometry : LineString
Input 3D geometry
Returns
-------
LineString
Output 2D geometry
"""
    return LineString(np.column_stack(geometry.xy))
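# Illustrative example (sketch): dropping the Z coordinate from a 3D line.
# >>> to2D(LineString([(0, 0, 10.5), (1, 1, 12.0)])).wkt
# 'LINESTRING (0 0, 1 1)'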
def icp(a, b,
max_time=1
):
import cv2
import numpy
# import copy
# import pylab
import time
import sys
import sklearn.neighbors
import scipy.optimize
def res(p, src, dst):
T = numpy.matrix([[numpy.cos(p[2]), -numpy.sin(p[2]), p[0]],
[numpy.sin(p[2]), numpy.cos(p[2]), p[1]],
[0, 0, 1]])
n = numpy.size(src, 0)
xt = numpy.ones([n, 3])
xt[:, :-1] = src
xt = (xt * T.T).A
d = numpy.zeros(numpy.shape(src))
d[:, 0] = xt[:, 0] - dst[:, 0]
d[:, 1] = xt[:, 1] - dst[:, 1]
r = numpy.sum(numpy.square(d[:, 0]) + numpy.square(d[:, 1]))
return r
def jac(p, src, dst):
T = numpy.matrix([[numpy.cos(p[2]), -numpy.sin(p[2]), p[0]],
[numpy.sin(p[2]), numpy.cos(p[2]), p[1]],
[0, 0, 1]])
n = numpy.size(src, 0)
xt = numpy.ones([n, 3])
xt[:, :-1] = src
xt = (xt * T.T).A
d = numpy.zeros(numpy.shape(src))
d[:, 0] = xt[:, 0] - dst[:, 0]
d[:, 1] = xt[:, 1] - dst[:, 1]
dUdth_R = numpy.matrix([[-numpy.sin(p[2]), -numpy.cos(p[2])],
[numpy.cos(p[2]), -numpy.sin(p[2])]])
dUdth = (src * dUdth_R.T).A
g = numpy.array([numpy.sum(2 * d[:, 0]),
numpy.sum(2 * d[:, 1]),
numpy.sum(2 * (d[:, 0] * dUdth[:, 0] + d[:, 1] * dUdth[:, 1]))])
return g
def hess(p, src, dst):
n = numpy.size(src, 0)
T = numpy.matrix([[numpy.cos(p[2]), -numpy.sin(p[2]), p[0]],
                          [numpy.sin(p[2]), numpy.cos(p[2]), p[1]],
from __future__ import division, absolute_import, print_function
import pytest
from numpy import array
from numpy.compat import long
from numpy.testing import assert_, assert_raises
from . import util
class TestReturnComplex(util.F2PyTest):
def check_function(self, t):
tname = t.__doc__.split()[0]
if tname in ['t0', 't8', 's0', 's8']:
err = 1e-5
else:
err = 0.0
assert_(abs(t(234j) - 234.0j) <= err)
assert_(abs(t(234.6) - 234.6) <= err)
assert_(abs(t(long(234)) - 234.0) <= err)
assert_(abs(t(234.6 + 3j) - (234.6 + 3j)) <= err)
#assert_( abs(t('234')-234.)<=err)
#assert_( abs(t('234.6')-234.6)<=err)
assert_(abs(t(-234) + 234.) <= err)
assert_(abs(t([234]) - 234.) <= err)
assert_(abs(t((234,)) - 234.) <= err)
assert_(abs(t(array(234)) - 234.) <= err)
assert_(abs(t(array(23 + 4j, 'F')) - (23 + 4j)) <= err)
assert_(abs(t(array([234])) - 234.) <= err)
assert_(abs(t(array([[234]])) - 234.) <= err)
assert_(abs(t(array([234], 'b')) + 22.) <= err)
assert_(abs(t(array([234], 'h')) - 234.) <= err)
assert_(abs(t(array([234], 'i')) - 234.) <= err)
assert_(abs(t(array([234], 'l')) - 234.) <= err)
assert_(abs(t(array([234], 'q')) - 234.) <= err)
assert_(abs(t(array([234], 'f')) - 234.) <= err)
assert_(abs(t(array([234], 'd')) - 234.) <= err)
assert_(abs(t(array([234 + 3j], 'F')) - (234 + 3j)) <= err)
assert_(abs(t(array([234], 'D')) - 234.) <= err)
#assert_raises(TypeError, t, array([234], 'a1'))
        assert_raises(TypeError, t, 'abc')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 22 15:35:13 2018
@author: ben
"""
from osgeo import gdal, gdalconst, osr
import numpy as np
import h5py
from scipy.interpolate import RectBivariateSpline
from scipy.stats import scoreatpercentile
import pointCollection as pc
from . import WV_date
#import os
class data(object):
def __init__(self, fields=None, t_axis=2):
self.x=None
self.y=None
self.projection=None
self.filename=None
self.extent=None
self.interpolator={}
self.nan_interpolator={}
self.time=None
self.size=None
self.shape=None
self.t_axis=t_axis
if fields is None:
self.fields=list()
else:
self.fields=fields
for field in self.fields:
setattr(self, field, None)
def __copy__(self, fields=None):
if fields is None:
fields=self.fields
temp=pc.grid.data()
for field in ['x','y','projection','filename','extent','time', 't'] + fields:
if hasattr(self, field):
setattr(temp, field, getattr(self, field))
temp.fields=fields.copy()
temp.__update_size_and_shape__()
return temp
def __repr__(self):
out=f"{self.__class__} with shape {self.shape},"+"\n"
out += "with fields:"+"\n"
out += f"{self.fields}"
return out
def copy(self, fields=None):
return self.__copy__()
def __getitem__(self, *args, **kwargs):
"""
wrapper for the copy_subset() method
"""
return self.copy_subset(*args, **kwargs)
def __update_extent__(self):
self.extent=[np.min(self.x), np.max(self.x), np.min(self.y), np.max(self.y)]
def __update_size_and_shape__(self):
for field in ['z']+self.fields:
try:
self.size=getattr(self, field).size
self.shape=getattr(self, field).shape
except Exception:
pass
def from_dict(self, thedict):
for field in thedict:
setattr(self, field, thedict[field])
if field not in self.fields and field not in ['x','y','time', 't']:
self.fields.append(field)
self.__update_extent__()
self.__update_size_and_shape__()
return self
def assign(self, newdata):
for field in newdata.keys():
setattr(self, field, newdata[field])
if field not in self.fields:
self.fields.append(field)
return self
def from_geotif(self, file, date_format=None, **kwargs):
"""
Read a raster from a geotif
"""
self.filename=file
if date_format is not None:
self.get_date(date_format)
ds=gdal.Open(file, gdalconst.GA_ReadOnly)
self.from_gdal(ds, **kwargs)
return self
def from_gdal(self, ds, field='z', bands=None, bounds=None, extent=None, skip=1, min_res=None):
"""
make a pointCollection.grid.data from a gdal dataset
Parameters
----------
ds : gdal dataset
Can be a dataset from gdal.Open, or a memory dataset
field : str, optional
Fieldname for the read data. The default is 'z'.
bands : list, optional
Bands to read. The default is None.
bounds : list-like, optional
boundaries to read, [[xmin, xmax], [ymin, ymax]]. If not specified,
read the whole file. The default is None.
extent : list-like, optional
Extent of the file to read, [xmin, xmax, ymin, ymax].
The default is None.
skip : Integer, optional
Specifies that every skip'th value should be read. The default is 1.
min_res : TYPE, optional
Attempt to read with a skip value chosen to match min_res.
The default is None.
Raises
------
AttributeError
If too many bands are requested, throws an error
Returns
-------
TYPE
pc.grid.data object containing the map data.
"""
GT=ds.GetGeoTransform()
if min_res is not None:
skip=np.max([1, np.ceil(min_res/np.abs(GT[1]))]).astype(int)
proj=ds.GetProjection()
if bands is None:
n_bands=ds.RasterCount
bands=np.arange(n_bands, dtype=int)+1
if not isinstance(bands, (list, tuple, np.ndarray)):
bands=[bands]
# get geolocation info, allocate outputs
band=ds.GetRasterBand(1)
nodataValue=band.GetNoDataValue()
# ii and jj are the pixel center coordinates. 0,0 in GDAL is the upper-left
# corner of the first pixel.
ii=np.arange(0, band.XSize)+0.5
jj=np.arange(0, band.YSize)+0.5
x=GT[0]+GT[1]*ii
y=GT[3]+GT[5]*jj
if extent is not None:
bounds=[[extent[0], extent[1]], [extent[2], extent[3]]]
if bounds is not None:
cols = np.where(( x>=bounds[0][0] ) & ( x<= bounds[0][1] ))[0]
rows = np.where(( y>=bounds[1][0] ) & ( y<= bounds[1][1] ))[0]
else:
rows=np.arange(band.YSize, dtype=int)
cols=np.arange(band.XSize, dtype=int)
z=list()
for band_num in bands:
if band_num > ds.RasterCount:
raise AttributeError()
band=ds.GetRasterBand(int(band_num))
try:
z.append(band.ReadAsArray(int(cols[0]), int(rows[0]), int(cols[-1]-cols[0]+1), int(rows[-1]-rows[0]+1))[::-1,:])
except IndexError as e:
raise e
if skip > 1:
z[-1]=z[-1][::skip, ::skip]
if len(bands)==1:
z=z[0]
else:
z=np.stack(z, axis=2)
ds=None
if skip >1:
cols=cols[::skip]
rows=rows[::skip]
if nodataValue is not None and np.isfinite(nodataValue):
bad = z==np.array(nodataValue).astype(z.dtype)
z = np.float64(z)
z[bad] = np.NaN
else:
z = np.float64(z)
x=x[cols]
y=y[rows]
self.x=x
self.y=y[::-1]
self.assign({field: z})
self.projection=proj
self.__update_extent__()
self.__update_size_and_shape__()
return self
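    # Illustrative usage sketch ('dem.tif' and the bounds are hypothetical):
    # read band 1 of a GeoTIFF, clipped to an x/y window, into a grid object.
    # >>> dem = data().from_geotif('dem.tif', bounds=[[4.0e5, 4.2e5], [1.1e6, 1.15e6]])
    # >>> dem.shape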
def from_h5(self, h5_file, field_mapping=None, group='/', fields=None, bounds=None, bands=None, skip=1, t_axis=None):
"""
Read a raster from an hdf5 file
"""
if t_axis is not None:
self.t_axis=t_axis
if field_mapping is None:
field_mapping={}
self.filename=h5_file
dims=['x','y','t','time']
if group[0] != '/':
group='/'+group
t=None
with h5py.File(h5_file,'r') as h5f:
x=np.array(h5f[group+'/x'])
y=np.array(h5f[group+'/y'])
if 't' in h5f[group]:
t=np.array(h5f[group]['t'])
elif 'time' in h5f[group]:
t=np.array(h5f[group]['time'])
if t is not None and bands is not None:
t=t[bands]
# if no field mapping provided, add everything in the group
if len(field_mapping.keys())==0:
for key in h5f[group].keys():
if key in dims:
continue
if fields is not None and key not in fields:
continue
if key not in field_mapping:
if hasattr(h5f[group][key],'shape'):
field_mapping.update({key:key})
if bounds is not None:
cols = np.where(( x>=bounds[0][0] ) & ( x<= bounds[0][1] ))[0]
rows = np.where(( y>=bounds[1][0] ) & ( y<= bounds[1][1] ))[0]
else:
rows=np.arange(y.size, dtype=int)
cols=np.arange(x.size, dtype=int)
if len(rows) > 0 and len(cols) > 0:
for self_field in field_mapping:
f_field_name=group+'/'+field_mapping[self_field]
if f_field_name not in h5f:
continue
f_field=h5f[f_field_name]
if len(h5f[f_field_name].shape) == 2:
setattr(self, self_field,\
np.array(h5f[f_field_name][rows[0]:rows[-1]+1, cols[0]:cols[-1]+1]))
else:
if bands is None:
if len(f_field.shape) > 2:
if self.t_axis==2:
setattr(self, self_field,\
np.array(f_field[rows[0]:rows[-1]+1, cols[0]:cols[-1]+1,:]))
elif self.t_axis==0:
setattr(self, self_field,\
np.array(f_field[:,rows[0]:rows[-1]+1, cols[0]:cols[-1]+1]))
else:
setattr(self, self_field,\
np.array(f_field[rows[0]:rows[-1]+1, cols[0]:cols[-1]+1]))
else:
if self.t_axis==2:
setattr(self, self_field,\
np.array(f_field[rows[0]:rows[-1]+1, cols[0]:cols[-1]+1,bands]))
elif self.t_axis==0:
setattr(self, self_field,\
np.array(f_field[bands, rows[0]:rows[-1]+1, cols[0]:cols[-1]+1]))
if self_field not in self.fields:
self.fields.append(self_field)
self.x=x[cols]
self.y=y[rows]
if t is not None:
self.t=t
self.__update_extent__()
self.__update_size_and_shape__()
return self
def to_h5(self, out_file, fields=None, group='/', replace=False, nocompression=False):
"""
write a grid data object to an hdf5 file
"""
# check whether overwriting existing files
# append to existing files as default
mode = 'w' if replace else 'a'
if fields is None:
fields=self.fields
if group[0] != '/':
group='/'+group
with h5py.File(out_file,mode) as h5f:
try:
h5f.create_group(group)
except Exception:
pass
for field in ['x','y','time', 't'] + fields:
# if field exists, overwrite it
if field in h5f[group]:
if hasattr(self, field):
h5f[group+'/'+field][...] = getattr(self, field)
else:
#Otherwise, try to create the dataset
try:
if nocompression or field in ['x','y','time']:
h5f.create_dataset(group+'/'+field, data=getattr(self, field))
else:
h5f.create_dataset(group+'/'+field, data=getattr(self, field), chunks=True, compression="gzip")
except Exception:
pass
def to_geotif(self, out_file, **kwargs):
"""
write a grid object to a geotif
Parameters
----------
out_file : str
file name to write
**kwargs :
keywords to be passed to the to_gdal() method
Returns:
None
"""
out_ds=self.to_gdal(out_file=out_file, driver='GTiff',**kwargs)
return out_ds
def to_gdal(self, driver='MEM', out_file='', field='z', srs_proj4=None, srs_wkt=None, srs_epsg=None, dtype=gdal.GDT_Float32, options=["compress=LZW"]):
"""
Write a grid object to a gdal memory object
"""
z=np.atleast_3d(getattr(self, field))
ny,nx,nband = z.shape
dx=np.abs(np.diff(self.x[0:2]))[0]
dy=np.abs(np.diff(self.y[0:2]))[0]
# no supported creation options with in memory rasters
if driver=='MEM':
options=[]
# set up the dataset with creation options
out_ds=gdal.GetDriverByName(driver).Create(out_file, nx, ny, nband, dtype, options=options)
# top left x, w-e pixel resolution, rotation
# top left y, rotation, n-s pixel resolution
out_ds.SetGeoTransform((self.x.min()-dx/2, dx, 0, self.y.max()+dy/2, 0., -dy))
# set the spatial projection reference information
sr=osr.SpatialReference()
if srs_proj4 is not None:
sr.ImportFromProj4(srs_proj4)
elif srs_wkt is not None:
sr.ImportFromWkt(srs_wkt)
elif srs_epsg is not None:
sr.ImportFromEPSG(srs_epsg)
else:
raise ValueError("must specify at least one of srs_proj4, srs_wkt, srs_epsg")
# export the spatial projection reference information to file
out_ds.SetProjection(sr.ExportToWkt())
# for each output band
for band in range(nband):
# change orientation to upper left corner
out_ds.GetRasterBand(band+1).WriteArray(z[::-1,:,band])
# set fill value for band
try:
fill_value = getattr(self,'fill_value')
out_ds.GetRasterBand(band+1).SetNoDataValue(fill_value)
except:
pass
if driver not in ('MEM',):
out_ds.FlushCache()
out_ds = None
return out_ds
def as_points(self, field='z', keep_all=False):
"""
Return a pointCollection.data object containing the points in the grid
"""
x,y=np.meshgrid(self.x, self.y)
if keep_all:
result = pc.data(filename=self.filename).\
from_dict({'x':x.ravel(),'y':y.ravel(),'z':getattr(self, field).ravel()})
else:
good=np.isfinite(getattr(self, field)).ravel()
result = pc.data(filename=self.filename).\
from_dict({'x':x.ravel()[good],'y':y.ravel()[good],'z':getattr(self, field).ravel()[good]})
if self.time is not None:
result.assign({'time':self.time+np.zeros_like(getattr(result, field))})
return result
def add_alpha_band(self, alpha=None, field='z', nodata_vals=None):
if alpha is None:
if nodata_vals is not None:
alpha=np.ones_like(getattr(self, field)[:,:,0])
if hasattr(nodata_vals, 'len') and len(nodata_vals)==3:
for ii in range(3):
alpha[~np.isfinite(getattr(self, field)[:,:,ii]) | (getattr(self, field)[:,:,ii]==nodata_vals[ii])]=0
elif nodata_vals is not None:
alpha[np.all(~np.isfinite(getattr(self, field)) | (getattr(self, field)==nodata_vals), axis=2)]=0
else:
alpha=np.any(~np.isfinite(getattr(self, field)), axis=2)
if len(getattr(self, field).shape)==3 and getattr(self, field).shape[2]==4:
getattr(self, field)[:,:,-1]=alpha
else:
if len(alpha.shape)<3:
alpha.shape=(alpha.shape[0], alpha.shape[1], 1)
setattr(self, field, np.concatenate([getattr(self, field), alpha], axis=2))
return self
def get_date(self, date_format=None):
"""
Get the date from the filename of a Worldview file
"""
if date_format is None or date_format == 'year':
self.time = WV_date.WV_year(self.filename)
elif date_format == 'matlab':
self.time = WV_date.WV_MatlabDate(self.filename)
return self
def normalize(self, field='z', z0=[0., 255.], z1=[0., 1.], truncate=True, dtype=np.float64):
"""
Normalize the z range.
"""
getattr(self, field)[:] = (getattr(self, field).astype(np.float64)-z0[0])/(z0[1]-z0[0])*(z1[1]-z1[0])+z1[0]
if truncate:
getattr(self, field)[getattr(self, field) < z1[0]] = z1[0]
getattr(self, field)[getattr(self, field) > z1[1]] = z1[1]
setattr(self, field, getattr(self, field).astype(dtype))
return self
def calc_gradient(self, field='z'):
"""
calculate the gradient of a field
Parameters
----------
field : TYPE, optional
DESCRIPTION. The default is 'z'.
Returns
-------
None.
"""
gy, gx=np.gradient(getattr(self, field), self.y, self.x)
self.assign({field+'_x':gx, field+'_y':gy})
def toRGB(self, cmap, field='z', caxis=None, alpha=None):
"""
Convert a field to RGB
"""
if caxis is None:
caxis=[getattr(self, field).min(), getattr(self, field).max()]
self.normalize(z0=caxis)
setattr(self, field, cmap(getattr(self, field)))
if alpha is not None:
self.add_alpha_band(alpha)
return self
def index(self, row_ind, col_ind, fields=None, band_ind=None):
"""
slice a grid by row or column
"""
if fields is None:
fields=self.fields
self.x=self.x[col_ind]
self.y=self.y[row_ind]
for field in fields:
if len(getattr(self, field).shape) == 2:
setattr(self, field, getattr(self, field)[row_ind,:][:, col_ind])
else:
if self.t_axis==2:
if band_ind is None:
setattr(self, field, getattr(self, field)[row_ind,:, :][:, col_ind,:])
else:
setattr(self, field, getattr(self, field)[row_ind,:, :][:, col_ind,band_ind])
self.t=self.t[band_ind]
elif self.t_axis==0:
if band_ind is None:
setattr(self, field, getattr(self, field)[:, row_ind,:][:, :, col_ind])
else:
setattr(self, field, getattr(self, field)[:, row_ind, :][band_ind, :, col_ind])
self.t=self.t[band_ind]
self.__update_extent__()
self.__update_size_and_shape__()
return self
def copy_subset(self, rc_ind, band_ind=None, fields=None):
if fields is None:
fields=self.fields
return self.copy(fields=fields).index(rc_ind[0], rc_ind[1], band_ind=band_ind)
def crop(self, XR, YR, fields=None):
"""
Return a subset of a grid by x and y range
"""
col_ind = np.flatnonzero((self.x >= XR[0]) & (self.x <= XR[1]))
row_ind = np.flatnonzero((self.y >= YR[0]) & (self.y <= YR[1]))
try:
self.index(row_ind, col_ind, fields)
return self
except Exception as e:
print("grid: self extent is: ", self.extent)
print("XR is %s", XR)
print("YR is %s", YR)
print("Error is" )
print(e)
def show(self, field='z', band=None, ax=None, xy_scale=1, gradient=False, stretch_pct=None, **kwargs):
import matplotlib.pyplot as plt
kwargs['extent']=np.array(self.extent)*xy_scale
kwargs['origin']='lower'
if band is None:
zz=getattr(self, field)
else:
zz=getattr(self, field)[:,:,band]
if gradient:
zz=np.gradient(zz.squeeze(), self.x[1]-self.x[0], self.y[1]-self.y[0])[0]
            if stretch_pct is None:
stretch_pct=[5, 95]
if 'cmap' not in kwargs:
kwargs['cmap']='gray'
if stretch_pct is not None:
LH=scoreatpercentile(zz.ravel()[np.isfinite(zz.ravel())], stretch_pct)
kwargs['vmin']=LH[0]
kwargs['vmax']=LH[1]
if ax is None:
h_im = plt.imshow(zz, **kwargs)
else:
h_im = ax.imshow(zz, **kwargs)
return h_im
def interp(self, x, y, gridded=False, band=0, field='z'):
"""
interpolate a 2-D grid to a set of x and y points
"""
if field not in self.interpolator:
if len(getattr(self, field).shape) > 2:
z0 = getattr(self, field)[:,:,band]
else:
z0 = getattr(self, field).copy()
NaN_mask = np.isfinite(z0)==0
z0[NaN_mask] = 0
if self.y[1]> self.y[0]:
self.interpolator[field] = RectBivariateSpline(self.y, self.x, z0, kx=1, ky=1)
if np.any(NaN_mask.ravel()):
self.nan_interpolator[field] = RectBivariateSpline(self.y, self.x, NaN_mask.astype(float), kx=1, ky=1)
else:
self.interpolator[field] = RectBivariateSpline(self.y[::-1], self.x, z0[::-1,:], kx=1, ky=1)
if np.any(NaN_mask.ravel()):
self.nan_interpolator[field] = RectBivariateSpline(self.y[::-1], self.x, NaN_mask[::-1,:].astype(float), kx=1, ky=1)
if gridded:
result=np.zeros((len(y), len(x)))+np.NaN
good_x = np.flatnonzero((x >= np.min(self.x)) & (x <= np.max(self.x)))
good_y = np.flatnonzero((y >= np.min(self.y)) & (y <= np.max(self.y)))
if (len(good_y)>0) and (len(good_x)>0):
good_x = slice(good_x[0], good_x[-1]+1)
good_y = slice(good_y[0], good_y[-1]+1)
result[good_y, good_x] = self.interpolator[field](y[good_y], x[good_x])
if field in self.nan_interpolator:
to_NaN=np.ones_like(result, dtype=bool)
to_NaN[good_y, good_x] = self.nan_interpolator[field](y[good_y], x[good_x])
result[to_NaN] = np.NaN
else:
result = np.zeros_like(x)+np.NaN
good = (x >= np.min(self.x)) & (x <= np.max(self.x)) & \
(y >= np.min(self.y)) & (y <= np.max(self.y))
result[good]=self.interpolator[field].ev(y[good], x[good])
if field in self.nan_interpolator:
to_NaN = good
# nan_interpolator returns nonzero for NaN points in self.z
to_NaN[good] = self.nan_interpolator[field].ev(y[good], x[good]) != 0
result[to_NaN] = np.NaN
return result
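    # Illustrative usage sketch: bilinear interpolation of the 'z' field at
    # scattered points (xp and yp are hypothetical 1-D coordinate arrays);
    # points outside the grid, or touching NaN cells, come back as NaN.
    # >>> zp = dem.interp(xp, yp)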
def bounds(self, pad=0):
"""
Return the x and y bounds of a grid
"""
        return [[np.min(self.x)-pad, np.max(self.x)+pad], [np.min(self.y)-pad, np.max(self.y)+pad]]
#!/usr/bin/env python
import sys
import matplotlib.pyplot as plt #graphics library
import numpy as np #numerical library (FFT etc)
import Nio # NCAR python io library
# set up the physical domain of the experiment to be run
# zs = topography (Fzs = FFT(zs))
# dx = grid cell size (m)
# U = wind speed (m/s)
# Hw = water vapor scale height
# Ndsq = Brunt-Vaisala frequency squared (s^-2)
# also sets up parameters for the linear model (e.g. tauf,c, cw)
# tauf = average hydrometeor fall time (seconds) ~500s? for rain w/Hw=5000, 1500s? for snow w/Hw=3000
# tauc = average hydrometeor formation time (seconds) ~500s for rain, longer for snow, faster for warmer
def setup_experiment(wind=2, experiment=1, verbose=False):
U = [5.0,10.0,15.0,25.0][wind] # wind speed
Ndsq = 3.6e-5; # dry BV freq sq. #original
# Ndsq = 0.002**2 #4e-6 # dry BV freq sq.
# Ndsq = 0.00011 #1e-4 # dry BV freq sq.
# experiment D1 D2 D3
# h height of the hill 1800 1400 1040 [meters]
# sigma half-width 60 40 3.1 [grid cells]
# z0 base of the hill 1700 2000 2200 [meters]
# G number of grids 420 250 52 [grid cells]
    Nx = int([420, 250, 52][experiment]*2)  # length of domain (grid cells)
hm = [1800.0,1400.0,1040.0][experiment] # mnt height (m)
xm = Nx/2.0 # mountain location in domain (grid cell)
am = [60.0,40.0,3.1][experiment] # mountain half-width (grid cells)
dx = 2000.0 # grid spacing (m)
Lx = Nx*dx # length of domain (m)
x = np.linspace(0,Lx,Nx) # distance array (m)
zo = [1700.0,2000.0,2200.0][experiment] # mountain base height (m) NOT REALLY USED CORRECTLY YET
p0 = 101325 * (1 - 2.25577e-5*zo)**5.25588 # compute base pressure
T2m = 268.0 # 270.56 #needs to be selected by experiment?
base_mr=[0.00325,0.0032,0.003][experiment] # mixing ratio at the base of the mountain
# base_mr = 0.003255
# base_mr = 0.0025687
# hw = 4000.0 # scale of water vapor (see formula in SB04,appendix)
# hw = hw - zo
# hw = 3000.0
# zo=0.0
# ----------------------------------------------------------------
# Make the mountain (theoretical)
#
# zs = hm*exp(-(x-xm).^2/am^2); Gaussian
zs = hm/(1.0+((x/dx-xm)/am)**2.) # eqn from Trude
    zs = zs - zs[Nx//4] # sets the zero point to be 1/4 of the way in because we have doubled the size of the domain
zs[zs<0]=0 # set anything below 0 to 0
zs += zo
# -----------------------------------------------------------------
# put zs in Fourier space
Fzs = np.fft.fftshift(np.fft.fft(zs))/Nx
# linear model paramters (see calculations in SB'04):
# -------------------------------------------------------------------------------
t0 = 273.16
# p0 = 100000
# p0 = 82000.0 (now calculated above from z0)
L = 2.5e6
ratio = 18.015/28.964
R = 287.0
Rv = 461.0
cp = 1004.0
g = 9.81
es = 611.21*np.exp(17.502*(T2m-t0)/(T2m-32.19));
qs0 = ratio *es/(p0-es);
cap_gamma = -(g * (1.+(L*qs0)/(R*T2m)) / (cp + (L**2*qs0*ratio) / (R*T2m**2)));
env_gamma = Ndsq*T2m/g + cap_gamma #Ndsq calculated from potential temperature profile cap_gamma converts to real temp?
hw = np.abs((Rv*T2m**2)/(L*env_gamma))
# if env_gamma pulled from model, enforce reasonable values with Ndsq=min(Ndsq,0.012)
# cw below calculated from Trude's profile, Ndsq=0.00011, T2m=271K, p0=820mb, dth/dz=0.004K/m dt/dz=-0.0054
# could calculate Ndsq as = (env_gamma-cap_gamma)*g/T2m
Ndsq = (-0.0054 - cap_gamma) * g / T2m
# -------------------------------------------------------------------------------
# cw = 1.9 # sensitivity (commonly set to 1.1, see paper SB04) = cap_gamma / env_gamma
cw = cap_gamma / env_gamma
# using base_mr from profile, but maybe it should be qs0 above?
cwqv = cw * base_mr # sensitivity times q_vs (set to: 0.01 kg/kg, see paper SB04)
z0 = 0 # at the height (AGL?) where you want the precip
vterm = 2.0 # vertical terminal velocity for e.g. snow = 2m/s rain=10m/s
tauf= hw / vterm # BS'11: =zg/Vt ( =hw/Vt for moisture level: around 500s (->750s) is normally good)
tauc= 2000.0 # cloud->hydrometero conversion time. probably around 500s for rain,
# shorter for warmer condition, longer for snow?
if verbose:
print(" Ndsq=",Ndsq)
print(" Environmental lapse rate=0.004K/m")
print(" \"Dry\" lapse rate=0.0017K/m")
print(" Base MR=",base_mr)
print(" Scale height=",hw)
print(" tauc=",tauc)
print(" tauf=",tauf)
print(" cwqv=",cwqv)
# ---------------------------------------------------------------------------------
params = {"cw":cw,"cwqv":cwqv,"z0":z0,"tauf":tauf,"tauc":tauc,"hw":hw,"Ndsq":Ndsq}
return (x,zs,Fzs,U,dx,params)
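# Illustrative usage sketch (not part of the original script, and it assumes the
# imports above are available): run the widest hill (experiment 0) at the
# 10 m/s wind setting and plot the resulting terrain profile.
def _demo_setup_experiment():
    x, zs, Fzs, U, dx, params = setup_experiment(wind=1, experiment=0, verbose=True)
    plt.plot(x / 1000.0, zs)
    plt.xlabel("Distance (km)")
    plt.ylabel("Terrain height (m)")
    plt.savefig("terrain_profile.png")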
def get_params(T2m,U,Ndsq,zs,env_gamma,verbose=False):
"""docstring for get_params"""
Nx = len(zs) # length of domain (grid cells)
# hm = [1800.0,1400.0,1040.0][experiment] # mnt height (m)
# xm = Nx/2.0 # mountain location in domain (grid cell)
# am = [60.0,40.0,3.1][experiment] # mountain half-width (grid cells)
# dx = 2000.0 # grid spacing (m)
# Lx = Nx*dx # length of domain (m)
# x = np.linspace(0,Lx,Nx) # distance array (m)
# zo = [1700.0,2000.0,2200.0][experiment] # mountain base height (m) NOT REALLY USED CORRECTLY YET
zo = 0.0
p0 = 101325 * (1 - 2.25577e-5*zo)**5.25588 # compute base pressure
# p0 = 101325.0
# T2m = 268.0# 270.56 #needs to be selected for each experiment...?
# hw = 4000.0 # scale of water vapor (see formula in SB04,appendix)
# base_mr=[0.00325,0.0032,0.003][experiment] # mixing ratio at the base of the mountain
# base_mr = 0.003255
# base_mr = 0.0025687
# -----------------------------------------------------------------
# put zs in Fourier space
Fzs = np.fft.fftshift(np.fft.fft(zs))/Nx
# linear model paramters (see calculations in SB'04):
# -------------------------------------------------------------------------------
t0 = 273.16
# p0 = 100000
# p0 = 82000.0 (now calculated above from z0)
L = 2.5e6
ratio = 18.015/28.964
R = 287.0
Rv = 461.0
cp = 1004.0
g = 9.81
    es = 611.21*np.exp(17.502*(T2m-t0)/(T2m-32.19))
import io
import contextlib
import warnings
import numpy as np
import scipy as sp
from copy import deepcopy
from sklearn.base import clone
from sklearn.utils.validation import check_is_fitted
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.metaestimators import if_delegate_has_method
from joblib import Parallel, delayed
from hyperopt import fmin, tpe
from .utils import ParameterSampler, _check_param, _check_boosting
from .utils import _set_categorical_indexes, _get_categorical_support
from .utils import _feature_importances, _shap_importances
class _BoostSearch(BaseEstimator):
"""Base class for BoostSearch meta-estimator.
Warning: This class should not be used directly. Use derived classes
instead.
"""
def __init__(self):
pass
def _validate_param_grid(self, fit_params):
"""Private method to validate fitting parameters."""
if not isinstance(self.param_grid, dict):
raise ValueError("Pass param_grid in dict format.")
self._param_grid = self.param_grid.copy()
for p_k, p_v in self._param_grid.items():
self._param_grid[p_k] = _check_param(p_v)
if 'eval_set' not in fit_params:
            raise ValueError(
                "When tuning parameters, at least "
                "an evaluation set is required.")
self._eval_score = np.argmax if self.greater_is_better else np.argmin
self._score_sign = -1 if self.greater_is_better else 1
rs = ParameterSampler(
n_iter=self.n_iter,
param_distributions=self._param_grid,
random_state=self.sampling_seed
)
self._param_combi, self._tuning_type = rs.sample()
self._trial_id = 1
if self.verbose > 0:
            n_trials = self.n_iter if self._tuning_type == 'hyperopt' \
else len(self._param_combi)
print("\n{} trials detected for {}\n".format(
n_trials, tuple(self.param_grid.keys())))
def _fit(self, X, y, fit_params, params=None):
"""Private method to fit a single boosting model and extract results."""
model = self._build_model(params)
if isinstance(model, _BoostSelector):
model.fit(X=X, y=y, **fit_params)
else:
with contextlib.redirect_stdout(io.StringIO()):
model.fit(X=X, y=y, **fit_params)
results = {'params': params, 'status': 'ok'}
if isinstance(model, _BoostSelector):
results['booster'] = model.estimator_
results['model'] = model
else:
results['booster'] = model
results['model'] = None
if 'eval_set' not in fit_params:
return results
if self.boost_type_ == 'XGB':
# w/ eval_set and w/ early_stopping_rounds
if hasattr(results['booster'], 'best_score'):
results['iterations'] = results['booster'].best_iteration
# w/ eval_set and w/o early_stopping_rounds
else:
valid_id = list(results['booster'].evals_result_.keys())[-1]
eval_metric = list(results['booster'].evals_result_[valid_id])[-1]
results['iterations'] = \
len(results['booster'].evals_result_[valid_id][eval_metric])
else:
# w/ eval_set and w/ early_stopping_rounds
if results['booster'].best_iteration_ is not None:
results['iterations'] = results['booster'].best_iteration_
# w/ eval_set and w/o early_stopping_rounds
else:
valid_id = list(results['booster'].evals_result_.keys())[-1]
eval_metric = list(results['booster'].evals_result_[valid_id])[-1]
results['iterations'] = \
len(results['booster'].evals_result_[valid_id][eval_metric])
if self.boost_type_ == 'XGB':
# w/ eval_set and w/ early_stopping_rounds
if hasattr(results['booster'], 'best_score'):
results['loss'] = results['booster'].best_score
# w/ eval_set and w/o early_stopping_rounds
else:
valid_id = list(results['booster'].evals_result_.keys())[-1]
eval_metric = list(results['booster'].evals_result_[valid_id])[-1]
results['loss'] = \
results['booster'].evals_result_[valid_id][eval_metric][-1]
else:
valid_id = list(results['booster'].best_score_.keys())[-1]
eval_metric = list(results['booster'].best_score_[valid_id])[-1]
results['loss'] = results['booster'].best_score_[valid_id][eval_metric]
if params is not None:
if self.verbose > 0:
msg = "trial: {} ### iterations: {} ### eval_score: {}".format(
str(self._trial_id).zfill(4),
str(results['iterations']).zfill(5),
round(results['loss'], 5)
)
print(msg)
self._trial_id += 1
results['loss'] *= self._score_sign
return results
def fit(self, X, y, trials=None, **fit_params):
"""Fit the provided boosting algorithm while searching the best subset
of features (according to the selected strategy) and choosing the best
parameters configuration (if provided).
It takes the same arguments available in the estimator fit.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The training input samples.
y : array-like of shape (n_samples,)
Target values.
trials : hyperopt.Trials() object, default=None
A hyperopt trials object, used to store intermediate results for all
optimization runs. Effective (and required) only when hyperopt
parameter searching is computed.
**fit_params : Additional fitting arguments.
Returns
-------
self : object
"""
self.boost_type_ = _check_boosting(self.estimator)
if self.param_grid is None:
results = self._fit(X, y, fit_params)
for v in vars(results['model']):
if v.endswith("_") and not v.startswith("__"):
setattr(self, str(v), getattr(results['model'], str(v)))
else:
self._validate_param_grid(fit_params)
if self._tuning_type == 'hyperopt':
if trials is None:
                    raise ValueError(
                        "trials must not be None when using hyperopt."
)
search = fmin(
fn=lambda p: self._fit(
params=p, X=X, y=y, fit_params=fit_params
),
space=self._param_combi, algo=tpe.suggest,
max_evals=self.n_iter, trials=trials,
rstate=np.random.RandomState(self.sampling_seed),
show_progressbar=False, verbose=0
)
all_results = trials.results
else:
all_results = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose * int(bool(self.n_jobs))
)(delayed(self._fit)(X, y, fit_params, params)
for params in self._param_combi)
# extract results from parallel loops
self.trials_, self.iterations_, self.scores_, models = [], [], [], []
for job_res in all_results:
self.trials_.append(job_res['params'])
self.iterations_.append(job_res['iterations'])
self.scores_.append(self._score_sign * job_res['loss'])
if isinstance(job_res['model'], _BoostSelector):
models.append(job_res['model'])
else:
models.append(job_res['booster'])
# get the best
id_best = self._eval_score(self.scores_)
self.best_params_ = self.trials_[id_best]
self.best_iter_ = self.iterations_[id_best]
self.best_score_ = self.scores_[id_best]
self.estimator_ = models[id_best]
for v in vars(models[id_best]):
if v.endswith("_") and not v.startswith("__"):
setattr(self, str(v), getattr(models[id_best], str(v)))
return self
def predict(self, X, **predict_params):
"""Predict X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Samples.
**predict_params : Additional predict arguments.
Returns
-------
pred : ndarray of shape (n_samples,)
The predicted values.
"""
check_is_fitted(self)
if hasattr(self, 'transform'):
X = self.transform(X)
return self.estimator_.predict(X, **predict_params)
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X, **predict_params):
"""Predict X probabilities.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Samples.
**predict_params : Additional predict arguments.
Returns
-------
pred : ndarray of shape (n_samples, n_classes)
The predicted values.
"""
check_is_fitted(self)
if hasattr(self, 'transform'):
X = self.transform(X)
return self.estimator_.predict_proba(X, **predict_params)
def score(self, X, y, sample_weight=None):
"""Return the score on the given test data and labels.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test samples.
y : array-like of shape (n_samples,)
True values for X.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
Accuracy for classification, R2 for regression.
"""
check_is_fitted(self)
if hasattr(self, 'transform'):
X = self.transform(X)
return self.estimator_.score(X, y, sample_weight=sample_weight)
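# Illustrative sketch, not part of the library: fit() stores one score per
# parameter combination in self.scores_ and then picks the winning trial with
# argmax or argmin depending on greater_is_better. The scores below are
# made-up numbers used only to show that selection step in isolation.
def _demo_best_trial_selection(greater_is_better=True):
    scores = [0.81, 0.84, 0.79]
    eval_score = np.argmax if greater_is_better else np.argmin
    best_id = eval_score(scores)
    return best_id, scores[best_id]   # (1, 0.84) when greater_is_better=True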
class _BoostSelector(BaseEstimator, TransformerMixin):
"""Base class for feature selection meta-estimator.
Warning: This class should not be used directly. Use derived classes
instead.
"""
def __init__(self):
pass
def transform(self, X):
"""Reduces the input X to the features selected by Boruta.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Samples.
Returns
-------
X : array-like of shape (n_samples, n_features_)
The input samples with only the selected features by Boruta.
"""
check_is_fitted(self)
shapes = np.shape(X)
if len(shapes) != 2:
raise ValueError("X must be 2D.")
if shapes[1] != self.support_.shape[0]:
raise ValueError(
"Expected {} features, received {}.".format(
self.support_.shape[0], shapes[1]))
if isinstance(X, np.ndarray):
return X[:, self.support_]
elif hasattr(X, 'loc'):
return X.loc[:, self.support_]
else:
raise ValueError("Data type not understood.")
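# Illustrative sketch, not part of the library: transform() boils down to a
# boolean column mask (self.support_) applied to X. The array and mask below
# are hypothetical stand-ins for fitted values.
def _demo_support_mask():
    X_demo = np.arange(12).reshape(3, 4)
    support_demo = np.array([True, False, True, False])
    return X_demo[:, support_demo]   # keeps columns 0 and 2 only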
class _Boruta(_BoostSelector):
"""Base class for BoostBoruta meta-estimator.
Warning: This class should not be used directly. Use derived classes
instead.
Notes
-----
The code for the Boruta algorithm is inspired and improved from:
https://github.com/scikit-learn-contrib/boruta_py
"""
def __init__(self,
estimator, *,
perc=100,
alpha=0.05,
max_iter=100,
early_stopping_boruta_rounds=None,
importance_type='feature_importances',
train_importance=True,
verbose=0):
self.estimator = estimator
self.perc = perc
self.alpha = alpha
self.max_iter = max_iter
self.early_stopping_boruta_rounds = early_stopping_boruta_rounds
self.importance_type = importance_type
self.train_importance = train_importance
self.verbose = verbose
def _create_X(self, X, feat_id_real):
"""Private method to add shadow features to the original ones. """
if isinstance(X, np.ndarray):
X_real = X[:, feat_id_real].copy()
X_sha = X_real.copy()
X_sha = np.apply_along_axis(self._random_state.permutation, 0, X_sha)
X = np.hstack((X_real, X_sha))
elif hasattr(X, 'iloc'):
X_real = X.iloc[:, feat_id_real].copy()
X_sha = X_real.copy()
X_sha = X_sha.apply(self._random_state.permutation)
X_sha = X_sha.astype(X_real.dtypes)
X = X_real.join(X_sha, rsuffix='_SHA')
else:
raise ValueError("Data type not understood.")
return X
def _check_fit_params(self, fit_params, feat_id_real=None):
"""Private method to validate and check fit_params."""
_fit_params = deepcopy(fit_params)
estimator = clone(self.estimator)
# add here possible estimator checks in each iteration
_fit_params = _set_categorical_indexes(
self.support_, self._cat_support, _fit_params, duplicate=True)
if feat_id_real is None: # final model fit
if 'eval_set' in _fit_params:
_fit_params['eval_set'] = list(map(lambda x: (
self.transform(x[0]), x[1]
), _fit_params['eval_set']))
else:
if 'eval_set' in _fit_params: # iterative model fit
_fit_params['eval_set'] = list(map(lambda x: (
self._create_X(x[0], feat_id_real), x[1]
), _fit_params['eval_set']))
if 'feature_name' in _fit_params: # LGB
_fit_params['feature_name'] = 'auto'
if 'feature_weights' in _fit_params: # XGB import warnings
warnings.warn(
"feature_weights is not supported when selecting features. "
"It's automatically set to None.")
_fit_params['feature_weights'] = None
return _fit_params, estimator
    def _do_tests(self, dec_reg, hit_reg, iter_id):
        """Private method to apply Bonferroni corrections to the feature
        selections."""
active_features = np.where(dec_reg >= 0)[0]
hits = hit_reg[active_features]
# get uncorrected p values based on hit_reg
to_accept_ps = sp.stats.binom.sf(hits - 1, iter_id, .5).flatten()
to_reject_ps = sp.stats.binom.cdf(hits, iter_id, .5).flatten()
# Bonferroni correction with the total n_features in each iteration
to_accept = to_accept_ps <= self.alpha / float(len(dec_reg))
to_reject = to_reject_ps <= self.alpha / float(len(dec_reg))
# find features which are 0 and have been rejected or accepted
to_accept = np.where((dec_reg[active_features] == 0) * to_accept)[0]
to_reject = np.where((dec_reg[active_features] == 0) * to_reject)[0]
# updating dec_reg
dec_reg[active_features[to_accept]] = 1
dec_reg[active_features[to_reject]] = -1
return dec_reg
def fit(self, X, y, **fit_params):
"""Fit the Boruta algorithm to automatically tune
the number of selected features."""
self.boost_type_ = _check_boosting(self.estimator)
if self.max_iter < 1:
raise ValueError('max_iter should be an integer >0.')
if self.perc <= 0 or self.perc > 100:
raise ValueError('The percentile should be between 0 and 100.')
if self.alpha <= 0 or self.alpha > 1:
raise ValueError('alpha should be between 0 and 1.')
if self.early_stopping_boruta_rounds is None:
es_boruta_rounds = self.max_iter
else:
if self.early_stopping_boruta_rounds < 1:
raise ValueError(
'early_stopping_boruta_rounds should be an integer >0.')
es_boruta_rounds = self.early_stopping_boruta_rounds
importances = ['feature_importances', 'shap_importances']
if self.importance_type not in importances:
            raise ValueError(
                "importance_type must be one of {}. Got '{}'".format(
                    importances, self.importance_type))
if self.importance_type == 'shap_importances':
if not self.train_importance and not 'eval_set' in fit_params:
                raise ValueError(
                    "When train_importance is set to False, using "
                    "shap_importances, pass at least an eval_set.")
eval_importance = not self.train_importance and 'eval_set' in fit_params
shapes = np.shape(X)
if len(shapes) != 2:
raise ValueError("X must be 2D.")
n_features = shapes[1]
# create mask for user-defined categorical features
self._cat_support = _get_categorical_support(n_features, fit_params)
# holds the decision about each feature:
# default (0); accepted (1); rejected (-1)
        dec_reg = np.zeros(n_features, dtype=int)
        dec_history = np.zeros((self.max_iter, n_features), dtype=int)
# counts how many times a given feature was more important than
# the best of the shadow features
        hit_reg = np.zeros(n_features, dtype=int)
# record the history of the iterations
        imp_history = np.zeros(n_features, dtype=float)
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import matplotlib, warnings
import numpy as np
import CoolProp
from CoolProp.CoolProp import PropsSI
from CoolProp.Plots.Common import BasePlot, PropertyDict, SIunits
def SimpleCycle(Ref,Te,Tc,DTsh,DTsc,eta_a,Ts_Ph='Ph',**kwargs):
"""
This function plots a simple four-component cycle, on the current axis, or that given by the optional parameter *axis*
Required parameters:
* Ref : A string for the refrigerant
* Te : Evap Temperature in K
* Tc : Condensing Temperature in K
* DTsh : Evaporator outlet superheat in K
* DTsc : Condenser outlet subcooling in K
* eta_a : Adiabatic efficiency of compressor (no units) in range [0,1]
Optional parameters:
* Ts_Ph : 'Ts' for a Temperature-Entropy plot, 'Ph' for a Pressure-Enthalpy
* axis : An axis to use instead of the active axis
* skipPlot : If True, won't actually plot anything, just print COP
"""
warnings.warn("This function has been deprecated. Please consider converting it to an object inheriting from \"BaseCycle\".",DeprecationWarning)
for i in kwargs:
warnings.warn("This function has been deprecated, your input \"{0}: {1}\" will be ignored".format(i,kwargs[i]),DeprecationWarning)
from CoolProp.Plots import SimpleCompressionCycle
cycle = SimpleCompressionCycle(fluid_ref=Ref, graph_type=Ts_Ph)
cycle.simple_solve_dt(Te, Tc, DTsh, DTsc, eta_a, SI=True)
print(cycle.COP_cooling(),cycle.COP_heating())
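# Illustrative call, not part of the original module: the refrigerant name,
# temperatures, superheat/subcooling and efficiency below are arbitrary example
# values, and the call requires a working CoolProp installation.
def _demo_simple_cycle():
    SimpleCycle('R134a', 273.15, 313.15, 5.0, 5.0, 0.7, Ts_Ph='Ph')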
def TwoStage(Ref,Q,Te,Tc,DTsh,DTsc,eta_oi,f_p,Tsat_ic,DTsh_ic,Ts_Ph='Ph',prints=False,skipPlot=False,axis=None,**kwargs):
"""
This function plots a two-stage cycle, on the current axis, or that given by the optional parameter *axis*
Required parameters:
* Ref : Refrigerant [string]
* Q : Cooling capacity [W]
* Te : Evap Temperature [K]
* Tc : Condensing Temperature [K]
* DTsh : Evaporator outlet superheat [K]
* DTsc : Condenser outlet subcooling [K]
* eta_oi : Adiabatic efficiency of compressor (no units) in range [0,1]
* f_p : fraction of compressor power lost as ambient heat transfer in range [0,1]
* Tsat_ic : Saturation temperature corresponding to intermediate pressure [K]
* DTsh_ic : Superheating at outlet of intermediate stage [K]
Optional parameters:
* Ts_Ph : 'Ts' for a Temperature-Entropy plot, 'Ph' for a Pressure-Enthalpy
* prints : True to print out some values
* axis : An axis to use instead of the active axis
* skipPlot : If True, won't actually plot anything, just print COP
"""
    warnings.warn("This function has been deprecated. Please consider converting it to an object inheriting from \"BaseCycle\".",DeprecationWarning)
T=np.zeros((8))
h=np.zeros_like(T)
p=np.zeros_like(T)
s=np.zeros_like(T)
rho=np.zeros_like(T)
T[0]=np.NAN
s[0]=np.NAN
T[1]=Te+DTsh
pe=PropsSI('P','T',Te,'Q',1.0,Ref)
pc=PropsSI('P','T',Tc,'Q',1.0,Ref)
pic=PropsSI('P','T',Tsat_ic,'Q',1.0,Ref)
Tbubble_c=PropsSI('T','P',pc,'Q',0,Ref)
Tbubble_e=PropsSI('T','P',pe,'Q',0,Ref)
h[1]=PropsSI('H','T',T[1],'P',pe,Ref)
s[1]=PropsSI('S','T',T[1],'P',pe,Ref)
rho[1]=PropsSI('D','T',T[1],'P',pe,Ref)
T[5]=Tbubble_c-DTsc
h[5]=PropsSI('H','T',T[5],'P',pc,Ref)
s[5]=PropsSI('S','T',T[5],'P',pc,Ref)
rho[5]=PropsSI('D','T',T[5],'P',pc,Ref)
mdot=Q/(h[1]-h[5])
rho1=PropsSI('D','T',T[1],'P',pe,Ref)
h2s=PropsSI('H','S',s[1],'P',pic,Ref)
Wdot1=mdot*(h2s-h[1])/eta_oi
h[2]=h[1]+(1-f_p)*Wdot1/mdot
T[2]=PropsSI('T','H',h[2],'P',pic,Ref)
s[2]=PropsSI('S','T',T[2],'P',pic,Ref)
rho[2]=PropsSI('D','T',T[2],'P',pic,Ref)
T[3]=288
p[3]=pic
h[3]=PropsSI('H','T',T[3],'P',pic,Ref)
s[3]=PropsSI('S','T',T[3],'P',pic,Ref)
rho[3]=PropsSI('D','T',T[3],'P',pic,Ref)
rho3=PropsSI('D','T',T[3],'P',pic,Ref)
    h4s=PropsSI('H','S',s[3],'P',pc,Ref)
Wdot2=mdot*(h4s-h[3])/eta_oi
h[4]=h[3]+(1-f_p)*Wdot2/mdot
T[4]=PropsSI('T','H',h[4],'P',pc,Ref)
s[4]=PropsSI('S','T',T[4],'P',pc,Ref)
rho[4]=PropsSI('D','T',T[4],'P',pc,Ref)
sbubble_e=PropsSI('S','T',Tbubble_e,'Q',0,Ref)
sbubble_c=PropsSI('S','T',Tbubble_c,'Q',0,Ref)
sdew_e=PropsSI('S','T',Te,'Q',1,Ref)
sdew_c=PropsSI('S','T',Tc,'Q',1,Ref)
hsatL=PropsSI('H','T',Tbubble_e,'Q',0,Ref)
hsatV=PropsSI('H','T',Te,'Q',1,Ref)
ssatL=PropsSI('S','T',Tbubble_e,'Q',0,Ref)
ssatV=PropsSI('S','T',Te,'Q',1,Ref)
vsatL=1/PropsSI('D','T',Tbubble_e,'Q',0,Ref)
vsatV=1/PropsSI('D','T',Te,'Q',1,Ref)
x=(h[5]-hsatL)/(hsatV-hsatL)
s[6]=x*ssatV+(1-x)*ssatL
T[6]=x*Te+(1-x)*Tbubble_e
rho[6]=1.0/(x*vsatV+(1-x)*vsatL)
h[6]=h[5]
h[7]=h[1]
s[7]=s[1]
T[7]=T[1]
p=[np.nan,pe,pic,pic,pc,pc,pe,pe]
COP=Q/(Wdot1+Wdot2)
RE=h[1]-h[6]
if prints==True:
print('x5:',x)
print('COP:', COP)
print('COPH', (Q+Wdot1+Wdot2)/(Wdot1+Wdot2))
print(T[2]-273.15,T[4]-273.15,p[2]/p[1],p[4]/p[3])
print(mdot,mdot*(h[4]-h[5]),pic)
print('Vdot1',mdot/rho1,'Vdisp',mdot/rho1/(3500/60.)*1e6/0.7)
print('Vdot2',mdot/rho3,'Vdisp',mdot/rho3/(3500/60.)*1e6/0.7)
print(mdot*(h[4]-h[5]),Tc-273.15)
for i in range(1,len(T)-1):
print('%d & %g & %g & %g & %g & %g \\\\' %(i,T[i]-273.15,p[i],h[i],s[i],rho[i]))
else:
print(Tsat_ic,COP)
if skipPlot==False:
if axis==None:
ax=matplotlib.pyplot.gca()
else:
ax=axis
if Ts_Ph in ['ph','Ph']:
ax.plot(h,p)
elif Ts_Ph in ['Ts','ts']:
s_copy=s.copy()
T_copy=T.copy()
for i in range(1,len(s)-1):
ax.plot(s[i],T[i],'bo',mfc='b',mec='b')
dT=[0,-5,5,-20,5,5,5]
ds=[0,0.05,0,0,0,0,0]
ax.text(s[i]+ds[i],T[i]+dT[i],str(i))
s=list(s)
T=list(T)
s.insert(7,sdew_e)
T.insert(7,Te)
s.insert(5,sbubble_c)
T.insert(5,Tbubble_c)
s.insert(5,sdew_c)
T.insert(5,Tc)
ax.plot(s,T)
s=s_copy
T=T_copy
else:
raise TypeError('Type of Ts_Ph invalid')
return COP
def EconomizedCycle(Ref,Qin,Te,Tc,DTsh,DTsc,eta_oi,f_p,Ti,Ts_Ph='Ts',skipPlot=False,axis=None,**kwargs):
"""
This function plots an economized cycle, on the current axis, or that given by the optional parameter *axis*
Required parameters:
* Ref : Refrigerant [string]
* Qin : Cooling capacity [W]
* Te : Evap Temperature [K]
* Tc : Condensing Temperature [K]
* DTsh : Evaporator outlet superheat [K]
* DTsc : Condenser outlet subcooling [K]
* eta_oi : Adiabatic efficiency of compressor (no units) in range [0,1]
* f_p : fraction of compressor power lost as ambient heat transfer in range [0,1]
* Ti : Saturation temperature corresponding to intermediate pressure [K]
Optional parameters:
* Ts_Ph : 'Ts' for a Temperature-Entropy plot, 'Ph' for a Pressure-Enthalpy
* axis : An axis to use instead of the active axis
* skipPlot : If True, won't actually plot anything, just print COP
"""
warnings.warn("This function has been deprecated. Please consider converting it to an object inheriting from \"BaseCycle\".",DeprecationWarning)
from scipy.optimize import newton
m=1
T=np.zeros((11))
h=np.zeros_like(T)
p=np.zeros_like(T)
s=np.zeros_like(T)
rho=np.zeros_like(T)
T[0]=np.NAN
s[0]=np.NAN
T[1]=Te+DTsh
pe=PropsSI('P','T',Te,'Q',1.0,Ref)
pc=PropsSI('P','T',Tc,'Q',1.0,Ref)
pi=PropsSI('P','T',Ti,'Q',1.0,Ref)
p[1]=pe
h[1]=PropsSI('H','T',T[1],'P',pe,Ref)
s[1]=PropsSI('S','T',T[1],'P',pe,Ref)
rho[1]=PropsSI('D','T',T[1],'P',pe,Ref)
h2s=PropsSI('H','S',s[1],'P',pi,Ref)
wdot1=(h2s-h[1])/eta_oi
h[2]=h[1]+(1-f_p[0])*wdot1
p[2]=pi
#T[2]=T_hp(Ref,h[2],pi,T2s)
T[2]=PropsSI('T','H',h[2],'P',pi,Ref)
s[2]=PropsSI('S','T',T[2],'P',pi,Ref)
rho[2]=PropsSI('D','T',T[2],'P',pi,Ref)
T[5]=Tc-DTsc
h[5]=PropsSI('H','T',T[5],'P',pc,Ref)
s[5]=PropsSI('S','T',T[5],'P',pc,Ref)
rho[5]=PropsSI('D','T',T[5],'P',pc,Ref)
p[5]=pc
p[6]=pi
h[6]=h[5]
p[7]=pi
p[8]=pi
p[6]=pi
T[7]=Ti
h[7]=PropsSI('H','T',Ti,'Q',1,Ref)
s[7]=PropsSI('S','T',Ti,'Q',1,Ref)
rho[7]=PropsSI('D','T',Ti,'Q',1,Ref)
T[8]=Ti
h[8]=PropsSI('H','T',Ti,'Q',0,Ref)
s[8]=PropsSI('S','T',Ti,'Q',0,Ref)
rho[8]=PropsSI('D','T',Ti,'Q',0,Ref)
x6=(h[6]-h[8])/(h[7]-h[8]) #Vapor Quality
s[6]=s[7]*x6+s[8]*(1-x6)
rho[6]=1.0/(x6/rho[7]+(1-x6)/rho[8])
T[6]=Ti
#Injection mass flow rate
x=m*(h[6]-h[8])/(h[7]-h[6])
p[3]=pi
h[3]=(m*h[2]+x*h[7])/(m+x)
#T[3]=T_hp(Ref,h[3],pi,T[2])
T[3]=PropsSI('T','H',h[3],'P',pi,Ref)
s[3]=PropsSI('S','T',T[3],'P',pi,Ref)
rho[3]=PropsSI('D','T',T[3],'P',pi,Ref)
T4s=newton(lambda T: PropsSI('S','T',T,'P',pc,Ref)-s[3],T[2]+30)
h4s=PropsSI('H','T',T4s,'P',pc,Ref)
p[4]=pc
wdot2=(h4s-h[3])/eta_oi
h[4]=h[3]+(1-f_p[1])*wdot2
#T[4]=T_hp(Ref,h[4],pc,T4s)
T[4]=PropsSI('T','H',h[4],'P',pc,Ref)
s[4]=PropsSI('S','T',T[4],'P',pc,Ref)
rho[4]=PropsSI('D','T',T[4],'P',pc,Ref)
p[9]=pe
h[9]=h[8]
T[9]=Te
hsatL_e=PropsSI('H','T',Te,'Q',0,Ref)
hsatV_e=PropsSI('H','T',Te,'Q',1,Ref)
ssatL_e=PropsSI('S','T',Te,'Q',0,Ref)
ssatV_e=PropsSI('S','T',Te,'Q',1,Ref)
vsatL_e=1/PropsSI('D','T',Te,'Q',0,Ref)
vsatV_e=1/PropsSI('D','T',Te,'Q',1,Ref)
x9=(h[9]-hsatL_e)/(hsatV_e-hsatL_e) #Vapor Quality
s[9]=ssatV_e*x9+ssatL_e*(1-x9)
rho[9]=1.0/(x9*vsatV_e+(1-x9)*vsatL_e)
s[10]=s[1]
T[10]=T[1]
h[10]=h[1]
p[10]=p[1]
Tbubble_e=Te
Tbubble_c=Tc
sbubble_e=PropsSI('S','T',Tbubble_e,'Q',0,Ref)
sbubble_c=PropsSI('S','T',Tbubble_c,'Q',0,Ref)
sdew_e=PropsSI('S','T',Te,'Q',1,Ref)
sdew_c=PropsSI('S','T',Tc,'Q',1,Ref)
Wdot1=m*wdot1
Wdot2=(m+x)*wdot2
if skipPlot==False:
if axis==None:
ax=matplotlib.pyplot.gca()
else:
ax=axis
if Ts_Ph in ['ph','Ph']:
ax.plot(h,p)
ax.set_yscale('log')
elif Ts_Ph in ['Ts','ts']:
ax.plot(np.r_[s[7],s[3]],np.r_[T[7],T[3]],'b')
s_copy=s.copy()
T_copy=T.copy()
dT=[0,-5,5,-12,5,12,-12,0,0,0]
ds=[0,0.05,0.05,0,0.05,0,0.0,0.05,-0.05,-0.05]
for i in range(1,len(s)-1):
ax.plot(s[i],T[i],'bo',mfc='b',mec='b')
ax.text(s[i]+ds[i],T[i]+dT[i],str(i),ha='center',va='center')
s=list(s)
T=list(T)
s.insert(10,sdew_e)
T.insert(10,Te)
s.insert(5,sbubble_c)
T.insert(5,Tbubble_c)
s.insert(5,sdew_c)
T.insert(5,Tc)
ax.plot(s,T,'b')
s=s_copy
T=T_copy
else:
raise TypeError('Type of Ts_Ph invalid')
COP=m*(h[1]-h[9])/(m*(h[2]-h[1])+(m+x)*(h[4]-h[3]))
for i in range(1,len(T)-1):
print('%d & %g & %g & %g & %g & %g \\\\' %(i,T[i]-273.15,p[i],h[i],s[i],rho[i]))
print(x,m*(h[1]-h[9]),(m*(h[2]-h[1])+(m+x)*(h[4]-h[3])),COP)
mdot=Qin/(h[1]-h[9])
mdot_inj=x*mdot
print('x9',x9,)
print('Qcond',(mdot+mdot_inj)*(h[4]-h[5]),'T4',T[4]-273.15)
print(mdot,mdot+mdot_inj)
f=3500/60.
eta_v=0.7
print('Vdisp1: ',mdot/(rho[1]*f*eta_v)*1e6,'cm^3')
print('Vdisp2: ',(mdot+mdot_inj)/(rho[1]*f*eta_v)*1e6,'cm^3')
return COP
#class SimpleCycle(object):
# """A class that calculates a simple thermodynamic cycle"""
# def __init__(self, *args, **kwargs):
# object.__init__(self, *args, **kwargs)
# (states, steps, fluid):
# Parameters
# ----------
# x_type : int, str
# Either a letter or an integer that specifies the property type for the x-axis
# y_type : int, str
# Either a letter or an integer that specifies the property type for the y-axis
# states : list
# A collection of state points that follows a fixed scheme defined
# in the implementing subclass.
# fluid_ref : str, CoolProp.AbstractState
# The fluid property provider, either a subclass of CoolProp.AbstractState
# or a string that can be used to generate a CoolProp.AbstractState instance
# via :func:`Common.process_fluid_state`.
# steps : int
# The number of steps used for going from one state to another
#
# for more properties, see :class:`CoolProp.Plots.Common.Base2DObject`.
# # See http://stackoverflow.com/questions/1061283/lt-instead-of-cmp
# class ComparableMixin:
# """A mixin class that implements all comparing mathods except for __lt__"""
# def __eq__(self, other):
# return not self<other and not other<self
# def __ne__(self, other):
# return self<other or other<self
# def __gt__(self, other):
# return other<self
# def __ge__(self, other):
# return not self<other
# def __le__(self, other):
# return not other<self
class StatePoint(PropertyDict):
"""A simple fixed dimension dict represented by an object with attributes"""
# Significant digits in SI units
ROUND_DECIMALS = {
CoolProp.iDmass : 5,
CoolProp.iHmass : 5,
CoolProp.iP : 2,
CoolProp.iSmass : 5,
CoolProp.iT : 5,
CoolProp.iUmass : 5,
CoolProp.iQ : 5
}
def __iter__(self):
"""Make sure we always iterate in the same order"""
keys = [CoolProp.iDmass,CoolProp.iHmass,CoolProp.iP,CoolProp.iSmass,CoolProp.iT]
for key in sorted(keys):
yield key
def __str__(self):
return str(self.__dict__)
def __prop_compare(self,other,typ):
# TODO
if self[typ] is None and other[typ] is None: return 0
elif self[typ] is None and other[typ] is not None: return -1
elif self[typ] is not None and other[typ] is None: return 1
else:
A = np.round(self[typ] , self.ROUND_DECIMALS[typ])
B = np.round(other[typ], self.ROUND_DECIMALS[typ])
if A>B: return 1
elif A<B: return -1
elif A==B: return 0
else: raise ValueError("Comparison failed.")
def __eq__(self, other):
for i in self:
if not self.__prop_compare(other,i) == 0:
return False
return True
def __hash__(self):
return hash(repr(self))
class StateContainer(object):
"""A collection of values for the main properties, built to mixin with :class:`CoolProp.Plots.Common.PropertyDict`
Examples
--------
This container has overloaded accessor methods. Just pick your own flavour
or mix the styles as you like:
>>> from __future__ import print_function
>>> import CoolProp
>>> from CoolProp.Plots.SimpleCycles import StateContainer
>>> T0 = 300.000; p0 = 200000.000; h0 = 112745.749; s0 = 393.035
>>> cycle_states = StateContainer()
>>> cycle_states[0,'H'] = h0
>>> cycle_states[0]['S'] = s0
>>> cycle_states[0][CoolProp.iP] = p0
>>> cycle_states[0,CoolProp.iT] = T0
>>> cycle_states[1,"T"] = 300.064
>>> print(cycle_states)
Stored State Points:
state T (K) p (Pa) d (kg/m3) h (J/kg) s (J/kg/K)
0 300.000 200000.000 - 112745.749 393.035
1 300.064 - - - -
"""
def __init__(self,unit_system=SIunits()):
self._points = {}
self._units = unit_system
@property
def points(self): return self._points
@points.setter
def points(self, value): self._points = value
@property
def units(self): return self._units
@units.setter
def units(self, value): self._units = value
def get_point(self, index, SI=True):
if SI:
state = self[index]
else:
state = self[index]
for i in state:
state[i] = self.units[i].from_SI(state[i])
return state
def set_point(self, index, value, SI=True):
if SI:
self._points[index] = value
else:
for i in value:
self._points[index][i] = self.units[i].to_SI(value[i])
def _list_like(self, value):
"""Try to detect a list-like structure excluding strings"""
return (not hasattr(value, "strip") and
(hasattr(value, "__getitem__") or
hasattr(value, "__iter__")))
# return is_sequence(value) # use from pandas.core.common import is_sequence
def __len__(self):
"""Some cheating to get the correct behaviour"""
return len(self._points)
    def __iter__(self):
        """Make sure we iterate in the right order"""
for key in sorted(self._points):
yield key
def __getitem__(self, index):
"""Another tweak that changes the default access path"""
if self._list_like(index):
len_var = len(index)
if len_var==0:
raise IndexError("Received empty index.")
elif len_var==1:
return self._points[index[0]]
elif len_var==2:
return self._points[index[0]][index[1]]
else:
raise IndexError("Received too long index.")
return self._points[index]
def __setitem__(self, index, value):
"""Another tweak that changes the default access path"""
if self._list_like(index):
len_var = len(index)
if len_var==0:
raise IndexError("Received empty index.")
elif len_var==1:
self._points[index[0]] = value
elif len_var==2:
# safeguard against empty entries
if index[0] not in self._points:
self._points[index[0]] = StatePoint()
self._points[index[0]][index[1]] = value
else:
raise IndexError("Received too long index.")
else:
self._points[index] = value
def __str__(self):
out = "Stored State Points:\n"
keys = True
for i in self._points:
if keys:
row = [u"{0:>5s}".format("state")]
for j in self._points[i]:
label = u"{0:s} ({1:s})".format(self.units[j].symbol,self.units[j].unit)
row.append(u"{0:>11s}".format(label))
out = out + u" ".join(row) + "\n"
keys = False
row = [u"{0:>5s}".format(str(i))]
for j in self._points[i]:
try:
row.append(u"{0:11.3f}".format(self.units[j].from_SI(self._points[i][j])))
except:
row.append(u"{0:>11s}".format("-"))
out = out + u" ".join(row) + "\n"
return out
def append(self,new):
i = 0 + self.__len__()
for j in new:
self[i,j] = new[j]
return self
def extend(self,new):
i = 0 + self.__len__()
for j in new:
for k in new[j]:
self[i,k] = new[j][k]
i = i +1
return self
@property
def D(self): return np.array([self._points[k].D for k in self])
@property
def H(self): return np.array([self._points[k].H for k in self])
@property
def P(self): return np.array([self._points[k].P for k in self])
@property
def S(self): return np.array([self._points[k].S for k in self])
@property
def T(self): return np.array([self._points[k].T for k in self])
@property
def U(self): return np.array([self._points[k].U for k in self])
@property
def Q(self): return np.array([self._points[k].Q for k in self])
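# Illustrative usage sketch (values are arbitrary): states can be filled one
# property at a time through the overloaded item access, or appended as whole
# dictionaries keyed by CoolProp property indices.
def _demo_state_container():
    demo = StateContainer()
    demo[0, 'T'] = 300.0
    demo[0, 'P'] = 101325.0
    demo.append({CoolProp.iT: 320.0, CoolProp.iP: 200000.0})
    return len(demo)   # 2 stored states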
class BaseCycle(BasePlot):
"""A simple thermodynamic cycle, should not be used on its own."""
# Define the iteration keys
PROPERTIES = {
CoolProp.iDmass : 'density',
CoolProp.iHmass : 'specific enthalpy',
CoolProp.iP : 'pressure',
CoolProp.iSmass : 'specific entropy',
CoolProp.iT : 'temperature'
}
STATECOUNT=0
"""A list of accepted numbers of states"""
STATECHANGE=None
"""A list of lists of tuples that defines how the state transitions
behave for the corresponding entry in BaseCycle.STATECOUNT"""
def __init__(self, fluid_ref, graph_type, unit_system='EUR', **kwargs):
"""Initialises a simple cycle calculator
Parameters
----------
fluid_ref : str, CoolProp.AbstractState
The fluid property provider, either a subclass of CoolProp.AbstractState
or a string that can be used to generate a CoolProp.AbstractState instance
via :func:`Common.process_fluid_state`.
graph_type : string
The graph type to be plotted, like \"PH\" or \"TS\"
unit_system : string, ['EUR','KSI','SI']
Select the units used for the plotting. 'EUR' is bar, kJ, C; 'KSI' is kPa, kJ, K; 'SI' is Pa, J, K
for more properties, see :class:`CoolProp.Plots.Common.BasePlot`.
"""
self._cycle_states = StateContainer()
self._steps = 2
BasePlot.__init__(self, fluid_ref, graph_type, unit_system, **kwargs)
@property
def cycle_states(self): return self._cycle_states
@cycle_states.setter
def cycle_states(self, value):
if len(value) != self.STATECOUNT:
raise ValueError("Your number of states ({0:d}) is not in the list of allowed state counts: {1:s}.".format(len(value),str(self.STATECOUNT)))
self._cycle_states = value
@property
def steps(self): return self._steps
@steps.setter
def steps(self, value): self._steps = int(max([value,2]))
@BasePlot.system.setter
def system(self, value):
if value in self.UNIT_SYSTEMS:
self._system = self.UNIT_SYSTEMS[value]
elif isinstance(value, PropertyDict):
self._system = value
else:
raise ValueError("Invalid unit_system input \"{0:s}\", expected a string from {1:s}".format(str(value),str(self.UNIT_SYSTEMS.keys())))
self._cycle_states.units = self._system
def valid_states(self):
"""Check the formats of BaseCycle.STATECOUNT and BaseCycle.STATECHANGE"""
if len(self.STATECHANGE) != self.STATECOUNT:
            raise ValueError("Invalid number of states and/or state change operations")
return True
def fill_states(self,objs=None):
"""Try to populate all fields in the state objects"""
if objs is None:
objs = self._cycle_states
local = True
else:
local = False
for i in objs:
full = True
for j in objs[i]:
if objs[i][j] is None:
full = False
if full: continue
if (objs[i][CoolProp.iDmass] is not None and
objs[i][CoolProp.iT] is not None):
self._state.update(CoolProp.DmassT_INPUTS, objs[i][CoolProp.iDmass], objs[i][CoolProp.iT])
elif (objs[i][CoolProp.iP] is not None and
objs[i][CoolProp.iHmass] is not None):
self._state.update(CoolProp.HmassP_INPUTS, objs[i][CoolProp.iHmass], objs[i][CoolProp.iP])
elif (objs[i][CoolProp.iP] is not None and
objs[i][CoolProp.iSmass] is not None):
self._state.update(CoolProp.PSmass_INPUTS, objs[i][CoolProp.iP], objs[i][CoolProp.iSmass])
else:
warnings.warn("Please fill the state[{0:s}] manually.".format(str(i)))
continue
for j in objs[i]:
if objs[i][j] is None:
objs[i][j] = self._state.keyed_output(j)
if local: self._cycle_states = objs
return objs
def state_change(self,in1,in2,start,ty1='lin',ty2='lin'):
"""Calculates a state change defined by the properties in1 and in2
Uses self.states[start] and self.states[start+1] (or self.states[0]) to define
the process and interpolates between the values.
Parameters
----------
in1 : int
The index of the first defined property.
in2 : int
The index of the second defined property.
start : int
The index of the start state.
ty1 : str
The key that defines the type of state change for in1, lin or log.
ty2 : str
The key that defines the type of state change for in2, lin or log.
Returns
-------
scalar or array_like
a list of the length of self.steps+1 that describes the process. It includes start and end state.
"""
self.fill_states()
end = start + 1
if end >= len(self.cycle_states): end -= len(self.cycle_states)
start = self.cycle_states[start]
end = self.cycle_states[end]
#
val = []
inv = [in1,in2]
typ = [ty1,ty2]
for i,v in enumerate(inv):
if typ[i] == 'lin':
val.append(np.linspace(start[v], end[v], self.steps))
elif typ[i] == 'log':
                val.append(np.logspace(np.log10(start[v]), np.log10(end[v]), self.steps))
from __future__ import division
import numpy as np
import tensorflow as tf
import matplotlib
matplotlib.use('Agg')
def generator_conditional(z, conditioning): # Convolution Generator
with tf.variable_scope("generator", reuse=tf.AUTO_REUSE):
z_combine = tf.concat([z, conditioning], -1)
conv1_g = tf.layers.conv1d(inputs=z_combine, filters=256, kernel_size=5, padding='same')
# conv1_g_bn = tf.layers.batch_normalization(conv1_g, training=training)
conv1_g = tf.nn.leaky_relu(conv1_g)
conv2_g = tf.layers.conv1d(inputs=conv1_g, filters=128, kernel_size=3, padding='same')
conv2_g = tf.nn.leaky_relu(conv2_g)
conv3_g = tf.layers.conv1d(inputs=conv2_g, filters=64, kernel_size=3, padding='same')
conv3_g = tf.nn.leaky_relu(conv3_g)
conv4_g = tf.layers.conv1d(inputs=conv3_g, filters=2, kernel_size=3, padding='same')
return conv4_g
def discriminator_condintional(x, conditioning):
with tf.variable_scope("discriminator", reuse=tf.AUTO_REUSE):
z_combine = tf.concat([x, conditioning], -1)
conv1 = tf.layers.conv1d(inputs=z_combine, filters=256, kernel_size=5, padding='same')
conv1 = tf.nn.relu(conv1)
conv1 = tf.reduce_mean(conv1, axis=0, keep_dims=True)
conv2 = tf.layers.conv1d(inputs=conv1, filters=128, kernel_size=3, padding='same')
conv2 = tf.nn.relu(conv2)
conv3 = tf.layers.conv1d(inputs=conv2, filters=64, kernel_size=3, padding='same')
conv3 = tf.nn.relu(conv3)
conv4 = tf.layers.conv1d(inputs=conv3, filters=16, kernel_size=3, padding='same')
FC = tf.nn.relu(tf.layers.dense(conv4, 100, activation=None))
D_logit = tf.layers.dense(FC, 1, activation=None)
D_prob = tf.nn.sigmoid(D_logit)
return D_prob, D_logit
def encoding(x):
with tf.variable_scope("encoding", reuse=tf.AUTO_REUSE):
conv1 = tf.layers.conv1d(inputs=x, filters=256, kernel_size=5, padding='same')
conv1 = tf.nn.relu(conv1)
conv2 = tf.layers.conv1d(inputs=conv1, filters=128, kernel_size=3, padding='same')
conv2 = tf.nn.relu(conv2)
conv3 = tf.layers.conv1d(inputs=conv2, filters=64, kernel_size=3, padding='same')
conv3 = tf.nn.relu(conv3)
conv4 = tf.layers.conv1d(inputs=conv3, filters=2, kernel_size=3, padding='same')
layer_4_normalized = tf.scalar_mul(tf.sqrt(tf.cast(block_length, tf.float32)),
tf.nn.l2_normalize(conv4, dim=1)) # normalize the encoding.
return layer_4_normalized
def decoding(x):
with tf.variable_scope("decoding", reuse=tf.AUTO_REUSE):
conv1 = tf.layers.conv1d(inputs=x, filters=256, kernel_size=5, padding='same')
conv1 = tf.nn.relu(conv1)
conv2_ori = tf.layers.conv1d(inputs=conv1, filters=128, kernel_size=5, padding='same')
conv2 = tf.nn.relu(conv2_ori)
conv2 = tf.layers.conv1d(inputs=conv2, filters=128, kernel_size=5, padding='same')
conv2 = tf.nn.relu(conv2)
conv2 = tf.layers.conv1d(inputs=conv2, filters=128, kernel_size=5, padding='same')
conv2 += conv2_ori
conv2 = tf.nn.relu(conv2)
conv3_ori = tf.layers.conv1d(inputs=conv2, filters=64, kernel_size=5, padding='same')
conv3 = tf.nn.relu(conv3_ori)
conv3 = tf.layers.conv1d(inputs=conv3, filters=64, kernel_size=5, padding='same')
conv3 = tf.nn.relu(conv3)
conv3 = tf.layers.conv1d(inputs=conv3, filters=64, kernel_size=3, padding='same')
conv3 += conv3_ori
conv3 = tf.nn.relu(conv3)
conv4 = tf.layers.conv1d(inputs=conv3, filters=32, kernel_size=3, padding='same')
conv4 = tf.nn.relu(conv4)
Decoding_logit = tf.layers.conv1d(inputs=conv4, filters=1, kernel_size=3, padding='same')
Decoding_prob = tf.nn.sigmoid(Decoding_logit)
return Decoding_logit, Decoding_prob
def sample_Z(sample_size):
''' Sampling the generation noise Z from normal distribution'''
    return np.random.normal(size=sample_size)
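# Illustrative usage, not part of the original file: draw additive Gaussian
# noise for a batch of two-channel (I/Q) symbols. The batch size, block length
# and noise standard deviation below are arbitrary placeholder values.
def _demo_sample_noise():
    noise_std = 0.1
    noise = noise_std * sample_Z((64, 16, 2))
    return noise.shape   # (64, 16, 2)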
import gpflow
import tensorflow as tf
import numpy as np
import unittest
from testing.gpflow_testcase import GPflowTestCase
class DumbModel(gpflow.model.Model):
def __init__(self):
gpflow.model.Model.__init__(self)
self.a = gpflow.param.Param(3.)
def build_likelihood(self):
return -tf.square(self.a)
class NoArgsModel(DumbModel):
@gpflow.model.AutoFlow()
def function(self):
return self.a
class TestNoArgs(GPflowTestCase):
def setUp(self):
with self.test_session():
self.m = NoArgsModel()
self.m.compile()
def test_return(self):
with self.test_session():
self.assertTrue(np.allclose(self.m.function(), 3.))
def test_kill(self):
# make sure autoflow dicts are removed when _needs_recompile is set.
with self.test_session():
keys = [k for k in self.m.__dict__.keys() if k[-11:] == '_AF_storage']
self.assertTrue(len(keys) == 0, msg="no AF storage should be present to start.")
self.m.function()
keys = [k for k in self.m.__dict__.keys() if k[-11:] == '_AF_storage']
self.assertTrue(len(keys) == 1, msg="AF storage should be present after function call.")
self.m._needs_recompile = True
keys = [k for k in self.m.__dict__.keys() if k[-11:] == '_AF_storage']
self.assertTrue(len(keys) == 0, msg="no AF storage should be present after recompile switch set.")
class AddModel(DumbModel):
@gpflow.model.AutoFlow((tf.float64,), (tf.float64,))
def add(self, x, y):
return tf.add(x, y)
class TestShareArgs(GPflowTestCase):
"""
This is designed to replicate bug #85, where having two models caused
autoflow functions to fail because the tf_args were shared over the
instances.
"""
def setUp(self):
with self.test_session():
self.m1 = AddModel()
self.m1.compile()
self.m2 = AddModel()
self.m2.compile()
rng = np.random.RandomState(0)
self.x = rng.randn(10, 20)
self.y = rng.randn(10, 20)
def test_share_args(self):
with self.test_session():
self.m1.add(self.x, self.y)
self.m2.add(self.x, self.y)
self.m1.add(self.x, self.y)
class TestAutoFlowSessionGraphArguments(GPflowTestCase):
"""AutoFlow tests for external session and graph."""
def setUp(self):
self.models = [AddModel() for _ in range(5)]
self.session = tf.Session()
self.graph = tf.Graph()
self.x = np.array([1., 1., 1.])
self.y = np.array([1., 2., 3.])
def test_wrong_arguments(self):
"""Wrong arguments for AutoFlow wrapped function."""
m1 = self.models[0]
self.assertRaises(ValueError, m1.add, [1.], [1.],
unknown1='argument1')
self.assertRaises(ValueError, m1.add, [1.], [1.],
graph=tf.Graph(), unknown1='argument1')
self.assertRaises(ValueError, m1.add, [1.], [1.],
session=self.session, unknown2='argument2')
self.assertRaises(ValueError, m1.add, [1.], [1.],
graph=tf.Graph(), session=tf.Session())
def test_storage_properties(self):
"""External graph and session passed to AutoFlow."""
m1, m2, m3, m4, m5 = self.models
storage_name = '_add_AF_storage'
m1.add(self.x, self.y)
m2.add(self.x, self.y, session=self.session)
m3.add(self.x, self.y, graph=self.graph)
with self.graph.as_default():
m4.add(self.x, self.y)
with self.test_session() as sess_default:
m5.add(self.x, self.y)
sessions = [getattr(m, storage_name)['session'] for m in self.models]
sess1, sess2, sess3, sess4, sess5 = sessions
sessions_set = set(map(str, sessions))
self.assertEqual(len(sessions_set), 5)
self.assertEqual(sess1.graph, sess2.graph)
self.assertEqual(sess3.graph, sess4.graph)
self.assertEqual(sess5.graph, sess_default.graph)
self.assertEqual(sess5, sess_default)
m2.add(self.x, self.y)
sess2_second_run = getattr(m2, storage_name)['session']
self.assertTrue(isinstance(sess2_second_run, tf.Session))
self.assertEqual(sess2, sess2_second_run)
m4.add(self.x, self.y, graph=tf.get_default_graph())
sess4_second_run = getattr(m4, storage_name)['session']
self.assertTrue(isinstance(sess4_second_run, tf.Session))
self.assertNotEqual(sess4, sess4_second_run)
with self.test_session():
m5.add(self.x, self.y, graph=sess_default.graph)
sess5_second_run = getattr(m5, storage_name)['session']
self.assertTrue(isinstance(sess5_second_run, tf.Session))
self.assertEqual(sess5, sess5_second_run)
self.assertEqual(sess5_second_run, sess_default)
m5.add(self.x, self.y, graph=sess_default.graph)
sess5_third_run = getattr(m5, storage_name)['session']
self.assertTrue(isinstance(sess5_third_run, tf.Session))
self.assertNotEqual(sess5, sess5_third_run)
self.assertNotEqual(sess5_third_run, sess_default)
sess5_third_run.close()
_ = [sess.close() for sess in sessions]
_ = [getattr(m, storage_name)['session'].close() for m in self.models]
def test_autoflow_results(self):
"""AutoFlow computation results for external session and graph."""
expected = self.x + self.y
m1, m2, m3, m4, m5 = self.models
def assert_add(model, **kwargs):
result = model.add(self.x, self.y, **kwargs)
self.assertTrue(np.all(result == expected))
assert_add(m1)
assert_add(m2, session=self.session)
assert_add(m3, graph=self.graph)
with self.graph.as_default():
assert_add(m4)
with self.test_session():
assert_add(m5)
class TestAdd(GPflowTestCase):
def setUp(self):
with self.test_session():
self.m = AddModel()
self.m.compile()
rng = np.random.RandomState(0)
self.x = rng.randn(10, 20)
self.y = rng.randn(10, 20)
def test_add(self):
with self.test_session():
self.assertTrue(np.allclose(self.x + self.y, self.m.add(self.x, self.y)))
class IncrementModel(DumbModel):
def __init__(self):
DumbModel.__init__(self)
self.a = gpflow.param.DataHolder(np.array([3.]))
@gpflow.model.AutoFlow((tf.float64,))
def inc(self, x):
return x + self.a
class TestDataHolder(GPflowTestCase):
def setUp(self):
with self.test_session():
self.m = IncrementModel()
rng = np.random.RandomState(0)
self.x = rng.randn(10, 20)
def test_add(self):
with self.test_session():
self.assertTrue(np.allclose(self.x + 3, self.m.inc(self.x)))
class TestGPmodel(GPflowTestCase):
def setUp(self):
rng = np.random.RandomState(0)
X, Y = rng.randn(2, 10, 1)
self.m = gpflow.svgp.SVGP(X, Y, kern=gpflow.kernels.Matern32(1),
likelihood=gpflow.likelihoods.StudentT(),
Z=X[::2].copy())
self.Xtest = np.random.randn(100, 1)
self.Ytest = np.random.randn(100, 1)
def test_predict_f(self):
with self.test_session():
mu, var = self.m.predict_f(self.Xtest)
def test_predict_y(self):
with self.test_session():
mu, var = self.m.predict_y(self.Xtest)
def test_predict_density(self):
with self.test_session():
self.m.predict_density(self.Xtest, self.Ytest)
def test_multiple_AFs(self):
with self.test_session():
self.m.compute_log_likelihood()
self.m.compute_log_prior()
self.m.compute_log_likelihood()
class TestResetGraph(GPflowTestCase):
def setUp(self):
tf.reset_default_graph()
k = gpflow.kernels.Matern32(1)
X, Y = np.random.randn(2, 10, 1)
        self.Xnew = np.random.randn(5, 1)
"""
Signal
======
The signal module contains all kinds of signal processing related functions.
.. inheritance-diagram:: acoustics.signal
Filtering
*********
.. autoclass:: Filterbank
.. autofunction:: bandpass_filter
.. autofunction:: octave_filter
.. autofunction:: bandpass
.. autofunction:: lowpass
.. autofunction:: highpass
.. autofunction:: octavepass
.. autofunction:: convolve
Windowing
*********
.. autofunction:: window_scaling_factor
.. autofunction:: apply_window
Spectra
*******
Different types of spectra exist.
.. autofunction:: amplitude_spectrum
.. autofunction:: auto_spectrum
.. autofunction:: power_spectrum
.. autofunction:: density_spectrum
.. autofunction:: angle_spectrum
.. autofunction:: phase_spectrum
Frequency bands
***************
.. autoclass:: Band
.. autoclass:: Frequencies
.. autoclass:: EqualBand
.. autoclass:: OctaveBand
.. autofunction:: integrate_bands
.. autofunction:: octaves
.. autofunction:: third_octaves
Hilbert transform
*****************
.. autofunction:: amplitude_envelope
.. autofunction:: instantaneous_phase
.. autofunction:: instantaneous_frequency
Conversion
**********
.. autofunction:: decibel_to_neper
.. autofunction:: neper_to_decibel
Other
*****
.. autofunction:: isolate
.. autofunction:: zero_crossings
.. autofunction:: rms
.. autofunction:: ms
.. autofunction:: normalize
.. autofunction:: ir2fr
.. autofunction:: wvd
"""
from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
from scipy.sparse import spdiags
from scipy.signal import butter, lfilter, freqz, filtfilt, sosfilt
import acoustics.octave
#from acoustics.octave import REFERENCE
import acoustics.bands
from scipy.signal import hilbert
from acoustics.standards.iso_tr_25417_2007 import REFERENCE_PRESSURE
from acoustics.standards.iec_61672_1_2013 import (NOMINAL_OCTAVE_CENTER_FREQUENCIES,
NOMINAL_THIRD_OCTAVE_CENTER_FREQUENCIES)
try:
from pyfftw.interfaces.numpy_fft import rfft
except ImportError:
from numpy.fft import rfft
def bandpass_filter(lowcut, highcut, fs, order=8, output='sos'):
"""Band-pass filter.
:param lowcut: Lower cut-off frequency
:param highcut: Upper cut-off frequency
:param fs: Sample frequency
:param order: Filter order
:param output: Output type. {'ba', 'zpk', 'sos'}. Default is 'sos'. See also :func:`scipy.signal.butter`.
:returns: Returned value depends on `output`.
A Butterworth filter is used.
.. seealso:: :func:`scipy.signal.butter`.
"""
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
    output = butter(order//2, [low, high], btype='band', output=output)
return output
def bandpass(signal, lowcut, highcut, fs, order=8, zero_phase=False):
"""Filter signal with band-pass filter.
:param signal: Signal
:param lowcut: Lower cut-off frequency
:param highcut: Upper cut-off frequency
:param fs: Sample frequency
:param order: Filter order
:param zero_phase: Prevent phase error by filtering in both directions (filtfilt)
A Butterworth filter is used. Filtering is done with second-order sections.
.. seealso:: :func:`bandpass_filter` for the filter that is used.
"""
sos = bandpass_filter(lowcut, highcut, fs, order, output='sos')
if zero_phase:
return _sosfiltfilt(sos, signal)
else:
return sosfilt(sos, signal)
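# Illustrative usage, not part of the module: band-pass a two-tone test signal
# so that the 1 kHz component is kept and the 50 Hz component is attenuated.
# All numbers are arbitrary example values.
def _demo_bandpass():
    fs = 8000.0
    t = np.arange(0, 1.0, 1.0 / fs)
    x = np.sin(2.0 * np.pi * 50.0 * t) + np.sin(2.0 * np.pi * 1000.0 * t)
    return bandpass(x, 300.0, 3000.0, fs, order=8)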
def bandstop(signal, lowcut, highcut, fs, order=8, zero_phase=False):
"""Filter signal with band-stop filter.
:param signal: Signal
:param lowcut: Lower cut-off frequency
:param highcut: Upper cut-off frequency
:param fs: Sample frequency
:param order: Filter order
:param zero_phase: Prevent phase error by filtering in both directions (filtfilt)
"""
return lowpass(signal, lowcut, fs, order=(order//2), zero_phase=zero_phase) + highpass(signal, highcut, fs, order=(order//2), zero_phase=zero_phase)
def lowpass(signal, cutoff, fs, order=4, zero_phase=False):
"""Filter signal with low-pass filter.
:param signal: Signal
:param fs: Sample frequency
:param cutoff: Cut-off frequency
:param order: Filter order
:param zero_phase: Prevent phase error by filtering in both directions (filtfilt)
A Butterworth filter is used. Filtering is done with second-order sections.
.. seealso:: :func:`scipy.signal.butter`.
"""
sos = butter(order, cutoff/(fs/2.0), btype='low', output='sos')
if zero_phase:
return _sosfiltfilt(sos, signal)
else:
return sosfilt(sos, signal)
def highpass(signal, cutoff, fs, order=4, zero_phase=False):
    """Filter signal with high-pass filter.
:param signal: Signal
:param fs: Sample frequency
:param cutoff: Cut-off frequency
:param order: Filter order
:param zero_phase: Prevent phase error by filtering in both directions (filtfilt)
A Butterworth filter is used. Filtering is done with second-order sections.
.. seealso:: :func:`scipy.signal.butter`.
"""
sos = butter(order, cutoff/(fs/2.0), btype='high', output='sos')
if zero_phase:
return _sosfiltfilt(sos, signal)
else:
return sosfilt(sos, signal)
def octave_filter(center, fs, fraction, order=8, output='sos'):
"""Fractional-octave band-pass filter.
:param center: Centerfrequency of fractional-octave band.
:param fs: Sample frequency
:param fraction: Fraction of fractional-octave band.
:param order: Filter order
:param output: Output type. {'ba', 'zpk', 'sos'}. Default is 'sos'. See also :func:`scipy.signal.butter`.
A Butterworth filter is used.
.. seealso:: :func:`bandpass_filter`
"""
ob = OctaveBand(center=center, fraction=fraction)
return bandpass_filter(ob.lower[0], ob.upper[0], fs, order, output=output)
def octavepass(signal, center, fs, fraction, order=8, zero_phase=True):
"""Filter signal with fractional-octave bandpass filter.
:param signal: Signal
:param center: Centerfrequency of fractional-octave band.
:param fs: Sample frequency
:param fraction: Fraction of fractional-octave band.
:param order: Filter order
:param zero_phase: Prevent phase error by filtering in both directions (filtfilt)
A Butterworth filter is used. Filtering is done with second-order sections.
.. seealso:: :func:`octave_filter`
"""
sos = octave_filter(center, fs, fraction, order)
if zero_phase:
return _sosfiltfilt(sos, signal)
else:
return sosfilt(sos, signal)
def convolve(signal, ltv, mode='full'):
"""
Perform convolution of signal with linear time-variant system ``ltv``.
:param signal: Vector representing input signal :math:`u`.
:param ltv: 2D array where each column represents an impulse response
:param mode: 'full', 'valid', or 'same'. See :func:`np.convolve` for an explanation of the options.
The convolution of two sequences is given by
.. math:: \mathbf{y} = \mathbf{t} \\star \mathbf{u}
This can be written as a matrix-vector multiplication
.. math:: \mathbf{y} = \mathbf{T} \\cdot \mathbf{u}
where :math:`T` is a Toeplitz matrix in which each column represents an impulse response.
In the case of a linear time-invariant (LTI) system, each column represents a time-shifted copy of the first column.
In the time-variant case (LTV), every column can contain a unique impulse response, both in values as in size.
This function assumes all impulse responses are of the same size.
The input matrix ``ltv`` thus represents the non-shifted version of the Toeplitz matrix.
.. seealso:: :func:`np.convolve`, :func:`scipy.signal.convolve` and :func:`scipy.signal.fftconvolve` for convolution with LTI system.
"""
assert(len(signal) == ltv.shape[1])
n = ltv.shape[0] + len(signal) - 1 # Length of output vector
un = np.concatenate((signal, np.zeros(ltv.shape[0] - 1))) # Resize input vector
offsets = np.arange(0, -ltv.shape[0], -1) # Offsets for impulse responses
Cs = spdiags(ltv, offsets, n, n) # Sparse representation of IR's.
out = Cs.dot(un) # Calculate dot product.
if mode=='full':
return out
elif mode=='same':
        start = ltv.shape[0]//2 - 1 + ltv.shape[0]%2
        stop = len(signal) + ltv.shape[0]//2 - 1 + ltv.shape[0]%2
return out[start:stop]
elif mode=='valid':
length = len(signal) - ltv.shape[0]
start = ltv.shape[0] - 1
stop = len(signal)
return out[start:stop]
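# Quick sanity check, not part of the module: for a time-invariant system every
# column of ``ltv`` holds the same impulse response, and the result should then
# match numpy's ordinary convolution.
def _demo_convolve_lti():
    ir = np.array([1.0, 0.5, 0.25])
    sig = np.arange(8, dtype=float)
    ltv = np.tile(ir[:, None], (1, len(sig)))
    return np.allclose(convolve(sig, ltv, mode='full'), np.convolve(sig, ir))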
def ir2fr(ir, fs, N=None):
"""
Convert impulse response into frequency response. Returns single-sided RMS spectrum.
:param ir: Impulser response
:param fs: Sample frequency
:param N: Blocks
Calculates the positive frequencies using :func:`np.fft.rfft`.
Corrections are then applied to obtain the single-sided spectrum.
.. note:: Single-sided spectrum. Therefore, the amount of bins returned is either N/2 or N/2+1.
"""
#ir = ir - np.mean(ir) # Remove DC component.
N = N if N else ir.shape[-1]
fr = rfft(ir, n=N) / N
f = np.fft.rfftfreq(N, 1.0/fs) #/ 2.0
fr *= 2.0
fr[..., 0] /= 2.0 # DC component should not be doubled.
if not N%2: # if not uneven
fr[..., -1] /= 2.0 # And neither should fs/2 be.
#f = np.arange(0, N/2+1)*(fs/N)
return f, fr
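# Illustrative sketch (not part of the original module): with the scaling used
# above, a unit-amplitude cosine produces a component of magnitude ~1.0 at its
# frequency. The sample frequency and tone frequency below are assumed values.
def _example_ir2fr():
    fs = 1000.0
    n = 1000
    t = np.arange(n) / fs
    x = np.cos(2.0 * np.pi * 100.0 * t)
    f, fr = ir2fr(x, fs)
    return f[np.argmax(np.abs(fr))], np.abs(fr).max()  # ~ (100.0, 1.0)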
def decibel_to_neper(decibel):
"""
Convert decibel to neper.
:param decibel: Value in decibel (dB).
:returns: Value in neper (Np).
The conversion is done according to
    .. math :: \\mathrm{Np} = \\frac{\\ln{10}}{20} \\cdot \\mathrm{dB}
"""
return np.log(10.0) / 20.0 * decibel
def neper_to_decibel(neper):
"""
Convert neper to decibel.
:param neper: Value in neper (Np).
:returns: Value in decibel (dB).
The conversion is done according to
    .. math :: \\mathrm{dB} = \\frac{20}{\\ln{10}} \\cdot \\mathrm{Np}
"""
return 20.0 / np.log(10.0) * neper
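# Quick numerical sanity check (illustrative, not part of the original module):
# 1 Np corresponds to about 8.686 dB, and the two conversions are inverses.
def _example_neper_decibel_roundtrip():
    db_per_neper = neper_to_decibel(1.0)                 # ~8.6859
    roundtrip = decibel_to_neper(neper_to_decibel(3.0))  # ~3.0
    return db_per_neper, roundtrip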
class Frequencies(object):
"""
Object describing frequency bands.
"""
def __init__(self, center, lower, upper, bandwidth=None):
self.center = np.asarray(center)
"""
Center frequencies.
"""
self.lower = np.asarray(lower)
"""
Lower frequencies.
"""
self.upper = np.asarray(upper)
"""
Upper frequencies.
"""
self.bandwidth = np.asarray(bandwidth) if bandwidth is not None else np.asarray(self.upper) - np.asarray(self.lower)
"""
Bandwidth.
"""
def __iter__(self):
for i in range(len(self.center)):
yield self[i]
def __len__(self):
return len(self.center)
def __str__(self):
return str(self.center)
def __repr__(self):
return "Frequencies({})".format(str(self.center))
def angular(self):
"""Angular center frequency in radians per second.
"""
return 2.0 * np.pi * self.center
class EqualBand(Frequencies):
"""
Equal bandwidth spectrum. Generally used for narrowband data.
"""
def __init__(self, center=None, fstart=None, fstop=None, nbands=None, bandwidth=None):
"""
:param center: Vector of center frequencies.
:param fstart: First center frequency.
:param fstop: Last center frequency.
:param nbands: Amount of frequency bands.
:param bandwidth: Bandwidth of bands.
"""
if center is not None:
try:
nbands = len(center)
except TypeError:
center = [center]
nbands = 1
u = np.unique(np.diff(center).round(decimals=3))
n = len(u)
if n == 1:
bandwidth = u
elif n > 1:
raise ValueError("Given center frequencies are not equally spaced.")
else:
pass
fstart = center[0] #- bandwidth/2.0
fstop = center[-1] #+ bandwidth/2.0
elif fstart is not None and fstop is not None and nbands:
bandwidth = (fstop - fstart) / (nbands-1)
elif fstart is not None and fstop is not None and bandwidth:
nbands = round((fstop - fstart) / bandwidth) + 1
elif fstart is not None and bandwidth and nbands:
fstop = fstart + nbands * bandwidth
elif fstop is not None and bandwidth and nbands:
fstart = fstop - (nbands-1) * bandwidth
else:
raise ValueError("Insufficient parameters. Cannot determine fstart, fstop, bandwidth.")
center = fstart + np.arange(0, nbands) * bandwidth # + bandwidth/2.0
upper = fstart + np.arange(0, nbands) * bandwidth + bandwidth/2.0
lower = fstart + np.arange(0, nbands) * bandwidth - bandwidth/2.0
super(EqualBand, self).__init__(center, lower, upper, bandwidth)
def __getitem__(self, key):
return type(self)(center=self.center[key], bandwidth=self.bandwidth)
def __repr__(self):
return "EqualBand({})".format(str(self.center))
class OctaveBand(Frequencies):
"""Fractional-octave band spectrum.
"""
def __init__(self, center=None, fstart=None, fstop=None, nbands=None, fraction=1, reference=acoustics.octave.REFERENCE):
if center is not None:
try:
nbands = len(center)
except TypeError:
center = [center]
center = np.asarray(center)
indices = acoustics.octave.index_of_frequency(center, fraction=fraction, ref=reference)
elif fstart is not None and fstop is not None:
nstart = acoustics.octave.index_of_frequency(fstart, fraction=fraction, ref=reference)
nstop = acoustics.octave.index_of_frequency(fstop, fraction=fraction, ref=reference)
indices = np.arange(nstart, nstop+1)
elif fstart is not None and nbands is not None:
nstart = acoustics.octave.index_of_frequency(fstart, fraction=fraction, ref=reference)
indices = np.arange(nstart, nstart+nbands)
elif fstop is not None and nbands is not None:
nstop = acoustics.octave.index_of_frequency(fstop, fraction=fraction, ref=reference)
indices = np.arange(nstop-nbands, nstop)
else:
raise ValueError("Insufficient parameters. Cannot determine fstart and/or fstop.")
center = acoustics.octave.exact_center_frequency(None, fraction=fraction, n=indices, ref=reference)
lower = acoustics.octave.lower_frequency(center, fraction=fraction)
upper = acoustics.octave.upper_frequency(center, fraction=fraction)
bandwidth = upper - lower
nominal = acoustics.octave.nominal_center_frequency(None, fraction, indices)
super(OctaveBand, self).__init__(center, lower, upper, bandwidth)
self.fraction = fraction
"""Fraction of fractional-octave filter.
"""
self.reference = reference
"""Reference center frequency.
"""
self.nominal = nominal
"""Nominal center frequencies.
"""
def __getitem__(self, key):
return type(self)(center=self.center[key], fraction=self.fraction, reference=self.reference)
def __repr__(self):
return "OctaveBand({})".format(str(self.center))
def ms(x):
"""Mean value of signal `x` squared.
:param x: Dynamic quantity.
:returns: Mean squared of `x`.
"""
return (np.abs(x)**2.0).mean()
def rms(x):
"""Root mean squared of signal `x`.
:param x: Dynamic quantity.
    .. math:: x_{rms} = \\lim_{T \\to \\infty} \\sqrt{\\frac{1}{T} \\int_0^T |x(t)|^2 \\mathrm{d} t }
:seealso: :func:`ms`.
"""
return np.sqrt(ms(x))
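# Illustrative check (assumed signal, not part of the original module): a
# unit-amplitude sine sampled over a whole number of periods has RMS ~1/sqrt(2).
def _example_rms():
    t = np.arange(0, 1.0, 1.0 / 8000.0)
    x = np.sin(2.0 * np.pi * 100.0 * t)
    return rms(x)  # ~0.7071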
def normalize(y, x=None):
"""normalize power in y to a (standard normal) white noise signal.
Optionally normalize to power in signal `x`.
#The mean power of a Gaussian with :math:`\\mu=0` and :math:`\\sigma=1` is 1.
"""
#return y * np.sqrt( (np.abs(x)**2.0).mean() / (np.abs(y)**2.0).mean() )
if x is not None:
x = ms(x)
else:
x = 1.0
return y * np.sqrt( x / ms(y) )
#return y * np.sqrt( 1.0 / (np.abs(y)**2.0).mean() )
## Broken? Caused correlation in auralizations....weird!
def window_scaling_factor(window, axis=-1):
"""
Calculate window scaling factor.
:param window: Window.
When analysing broadband (filtered noise) signals it is common to normalize
the windowed signal so that it has the same power as the un-windowed one.
.. math:: S = \\sqrt{\\frac{\\sum_{i=0}^N w_i^2}{N}}
"""
return np.sqrt((window*window).mean(axis=axis))
def apply_window(x, window):
"""
Apply window to signal.
:param x: Instantaneous signal :math:`x(t)`.
:param window: Vector representing window.
:returns: Signal with window applied to it.
.. math:: x_s(t) = x(t) / S
where :math:`S` is the window scaling factor.
.. seealso:: :func:`window_scaling_factor`.
"""
s = window_scaling_factor(window) # Determine window scaling factor.
n = len(window)
    windows = len(x) // n # Amount of whole windows that fit in the signal.
x = x[0:windows*n] # Truncate final part of signal that does not fit.
#x = x.reshape(-1, len(window)) # Reshape so we can apply window.
y = np.tile(window, windows)
return x * y / s
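# Illustrative sketch (assumed broadband noise, not part of the original module):
# with the scaling factor applied, windowing roughly preserves the mean-square
# value of a broadband signal, which is the normalization described above.
def _example_apply_window():
    x = np.random.randn(8192)
    w = np.hanning(256)
    y = apply_window(x, w)
    return ms(x), ms(y)  # roughly equal for broadband noise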
def amplitude_spectrum(x, fs, N=None):
"""
Amplitude spectrum of instantaneous signal :math:`x(t)`.
:param x: Instantaneous signal :math:`x(t)`.
:param fs: Sample frequency :math:`f_s`.
:param N: Amount of FFT bins.
    The amplitude spectrum gives the amplitudes of the sinusoids the signal is built
    up from, and the RMS (root-mean-square) amplitudes can easily be found by dividing
    these amplitudes by :math:`\\sqrt{2}`.
The amplitude spectrum is double-sided.
"""
N = N if N else x.shape[-1]
fr = np.fft.fft(x, n=N) / N
f = np.fft.fftfreq(N, 1.0/fs)
return np.fft.fftshift(f), np.fft.fftshift(fr, axes=[-1])
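# Illustrative sketch (assumed tone, not part of the original module): the
# double-sided amplitude spectrum of a unit-amplitude sine shows two peaks of
# magnitude ~0.5 at plus and minus the tone frequency.
def _example_amplitude_spectrum():
    fs = 8000.0
    t = np.arange(0, 1.0, 1.0 / fs)
    x = np.sin(2.0 * np.pi * 1000.0 * t)
    f, a = amplitude_spectrum(x, fs)
    return np.abs(a).max()  # ~0.5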
def auto_spectrum(x, fs, N=None):
"""
Auto-spectrum of instantaneous signal :math:`x(t)`.
:param x: Instantaneous signal :math:`x(t)`.
:param fs: Sample frequency :math:`f_s`.
:param N: Amount of FFT bins.
The auto-spectrum contains the squared amplitudes of the signal. Squared amplitudes
are used when presenting data as it is a measure of the power/energy in the signal.
.. math:: S_{xx} (f_n) = \\overline{X (f_n)} \\cdot X (f_n)
The auto-spectrum is double-sided.
"""
f, a = amplitude_spectrum(x, fs, N=N)
return f, (a*a.conj()).real
def power_spectrum(x, fs, N=None):
"""
Power spectrum of instantaneous signal :math:`x(t)`.
:param x: Instantaneous signal :math:`x(t)`.
:param fs: Sample frequency :math:`f_s`.
:param N: Amount of FFT bins.
The power spectrum, or single-sided autospectrum, contains the squared RMS amplitudes of the signal.
A power spectrum is a spectrum with squared RMS values. The power spectrum is
calculated from the autospectrum of the signal.
.. warning:: Does not include scaling to reference value!
.. seealso:: :func:`auto_spectrum`
"""
N = N if N else x.shape[-1]
f, a = auto_spectrum(x, fs, N=N)
a = a[..., N//2:]
f = f[..., N//2:]
a *= 2.0
a[..., 0] /= 2.0 # DC component should not be doubled.
if not N%2: # if not uneven
a[..., -1] /= 2.0 # And neither should fs/2 be.
return f, a
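# Illustrative sketch (assumed tone, not part of the original module): a
# unit-amplitude sine has an RMS amplitude of 1/sqrt(2), so its single-sided
# power spectrum peaks at ~0.5 at the tone frequency.
def _example_power_spectrum():
    fs = 8000.0
    t = np.arange(0, 1.0, 1.0 / fs)
    x = np.sin(2.0 * np.pi * 1000.0 * t)
    f, a = power_spectrum(x, fs)
    return f[np.argmax(a)], a.max()  # ~ (1000.0, 0.5)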
def angle_spectrum(x, fs, N=None):
"""
Phase angle spectrum of instantaneous signal :math:`x(t)`.
:param x: Instantaneous signal :math:`x(t)`.
:param fs: Sample frequency :math:`f_s`.
:param N: Amount of FFT bins.
This function returns a single-sided wrapped phase angle spectrum.
.. seealso:: :func:`phase_spectrum` for unwrapped phase spectrum.
"""
N = N if N else x.shape[-1]
f, a = amplitude_spectrum(x, fs, N)
a = np.angle(a)
a = a[..., N//2:]
f = f[..., N//2:]
return f, a
def phase_spectrum(x, fs, N=None):
"""
Phase spectrum of instantaneous signal :math:`x(t)`.
:param x: Instantaneous signal :math:`x(t)`.
:param fs: Sample frequency :math:`f_s`.
:param N: Amount of FFT bins.
This function returns a single-sided unwrapped phase spectrum.
.. seealso:: :func:`angle_spectrum` for wrapped phase angle.
"""
    f, a = angle_spectrum(x, fs, N=N)
return f, np.unwrap(a)
#def power_and_phase_spectrum(x, fs, N=None):
#"""
#Power spectrum and phase of instantaneous signal :math:`x(t)`.
#:param x: Instantaneous signal :math:`x(t)`.
#:param fs: Sample frequency :math:`f_s`.
#:param N: Amount of FFT bins.
#Often one is interested in both the power spectrum and the phase. This function returns the power and a single-sided phase spectrum.
#For an explanation of the power spectrum, see :func:`power_spectrum`.
#"""
#returns f, power, phase
def density_spectrum(x, fs, N=None):
"""
Density spectrum of instantaneous signal :math:`x(t)`.
:param x: Instantaneous signal :math:`x(t)`.
:param fs: Sample frequency :math:`f_s`.
:param N: Amount of FFT bins.
A density spectrum considers the amplitudes per unit frequency.
Density spectra are used to compare spectra with different frequency resolution as the
magnitudes are not influenced by the resolution because it is per Hertz. The amplitude
spectra on the other hand depend on the chosen frequency resolution.
"""
N = N if N else x.shape[-1]
fr = np.fft.fft(x, n=N) / fs
f = np.fft.fftfreq(N, 1.0/fs)
return np.fft.fftshift(f), np.fft.fftshift(fr)
#def auto_density_spectrum(x, fs, N=None):
#"""
#Auto density spectrum of instantaneous signal :math:`x(t)`.
#"""
#f, d = density_spectrum(x, fs, N=N)
#return f, (d*d.conj()).real
#def power_density_spectrum(x, fs, N=None):
#"""
#Power density spectrum.
#"""
#N = N if N else x.shape[-1]
#f, a = auto_density_spectrum(x, fs, N=N)
#a = a[N//2:]
#f = f[N//2:]
#a *= 2.0
#a[..., 0] /= 2.0 # DC component should not be doubled.
#if not N%2: # if not uneven
#a[..., -1] /= 2.0 # And neither should fs/2 be.
#return f, a
def integrate_bands(data, a, b):
"""
Reduce frequency resolution of power spectrum. Merges frequency bands by integration.
:param data: Vector with narrowband powers.
:param a: Instance of :class:`Frequencies`.
:param b: Instance of :class:`Frequencies`.
.. note:: Needs rewriting so that the summation goes over axis=1.
"""
try:
if b.fraction%a.fraction:
raise NotImplementedError("Non-integer ratio of fractional-octaves are not supported.")
except AttributeError:
pass
lower, _ = np.meshgrid(b.lower, a.center)
    upper, _ = np.meshgrid(b.upper, a.center)
'''
Author: <NAME>, <NAME>
Acknowledgment: Derived some functions from <NAME>'s work
Description: This script is used to train the dual neural network for tool substitution with material properties.
'''
import os, sys
import numpy as np
import cPickle as pickle
import csv
import keras
from keras.models import Sequential, Model
from keras.layers import Dense, Input, Lambda, Dropout, merge, MaxPooling1D, Flatten, Conv1D
import keras.backend as K
from keras import optimizers
from keras import regularizers
from keras.utils import plot_model
from keras.utils.np_utils import to_categorical
from sklearn.preprocessing import Normalizer, StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.utils import shuffle
from sklearn.externals import joblib
from sklearn.metrics import accuracy_score
import random
def sigmoid(z):
return 1/(1+np.exp(-z))
def features_scio(csv_file):
# Take csv file and retrieve scio_processed_data corresponding to input
features = {}
obj_materials = {}
wavelengthCount = 331
with open(csv_file) as f:
reader = csv.reader(f)
for idx, row in enumerate(reader):
if idx == 10:
wavelengths = [float(r.strip().split('_')[-1].split()[0]) + 740.0 for r in row[10:wavelengthCount+10]]
try:
int(row[0]) # To skip first few rows until first integer encountered
if '.ply' not in row[3]:
obj_name = row[3] + '.ply'
else:
obj_name = row[3]
features_list = [float(elt) for elt in row[10:wavelengthCount+10]]
features_list = firstDeriv(features_list, wavelengths)
features[obj_name] = features_list
material_name = row[4]
obj_materials[obj_name] = row[4]
except:
pass
return features, obj_materials
def loadScioDataset(pklFile, csvFile, materialNames=[], objectNames=[]):
saveFilename = pklFile + '.pkl'
if os.path.isfile(saveFilename):
with open(saveFilename, 'rb') as f:
X, y_materials, y_objects, wavelengths = pickle.load(f)
else:
X = []
y_materials = []
y_objects = []
filename = csvFile + '.csv'
print(filename)
wavelengthCount = 331
with open(filename, 'rb') as f:
reader = csv.reader(f)
for i, row in enumerate(reader):
if i < 10 or i == 11:
continue
if i == 10:
# Header row
wavelengths = [float(r.strip().split('_')[-1].split()[0]) + 740.0 for r in row[10:wavelengthCount+10]]
continue
obj = row[3].strip()
material = row[4].strip()
if material not in materialNames:
continue
index = materialNames.index(material)
if objectNames is not None and obj not in objectNames[index]:
continue
values = [float(v) for v in row[10:wavelengthCount+10]]
X.append(values)
y_materials.append(index)
y_objects.append(obj)
with open(saveFilename, 'wb') as f:
pickle.dump([X, y_materials, y_objects, wavelengths], f, protocol=pickle.HIGHEST_PROTOCOL)
return X, y_materials, y_objects, wavelengths
def firstDeriv(x, wavelengths):
# First derivative of measurements with respect to wavelength
x = np.copy(x)
for i, xx in enumerate(x):
dx = np.zeros(xx.shape, np.float)
dx[0:-1] = np.diff(xx)/np.diff(wavelengths)
dx[-1] = (xx[-1] - xx[-2])/(wavelengths[-1] - wavelengths[-2])
x[i] = dx
return x
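# Illustrative sketch with made-up numbers (not from the original dataset):
# firstDeriv expects a 2-D array of spectra; for the row [0, 1, 4] over evenly
# spaced wavelengths it returns forward differences [1, 3] with the last value
# filled by a backward difference, i.e. [[1, 3, 3]].
def _example_first_deriv():
    spectra = np.array([[0.0, 1.0, 4.0]])
    wavelengths = [0.0, 1.0, 2.0]
    return firstDeriv(spectra, wavelengths)  # array([[1., 3., 3.]])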
## Function to create data pairs
def data_pairs_creation(data, data_pairs, n_classes):
pairs = []
labels = []
n = [len(data_pairs[d]) for d in range(len(n_classes))]
for i in range(int(n[1])):
for j in range(i+1,int(n[1])):
z1, z2 = data_pairs[1][i], data_pairs[1][j]
pairs.append([data[z1],data[z2]])
labels.append(1)
if j >= int(n[0]):
continue
else:
z3, z4 = data_pairs[1][i], data_pairs[0][j]
pairs.append([data[z3],data[z4]])
labels.append(0)
    return np.array(pairs), np.array(labels)
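# Illustrative sketch with made-up features (not from the original data):
# indices 0-1 belong to class 0 and indices 2-3 to class 1, which yields one
# "same-class" pair (label 1) and one "different-class" pair (label 0).
def _example_data_pairs():
    data = np.array([[0.0], [0.1], [1.0], [1.1]])
    data_pairs = {0: [0, 1], 1: [2, 3]}  # class label -> indices into data
    pairs, labels = data_pairs_creation(data, data_pairs, n_classes=[0, 1])
    return pairs.shape, labels  # (2, 2, 1), array([1, 0])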
from collections import OrderedDict
import numpy as np
from baloo import DataFrame, Index, MultiIndex, Series
from .test_frame import assert_dataframe_equal
# TODO: fix |S4!!
class TestJoins(object):
def test_merge_sorted_unique_single_on_inner(self, df1, df2):
actual = df1.merge(df2, on='a')
expected = DataFrame(OrderedDict((('index', np.array([3, 5])),
('b_x', np.array([2, 4], dtype=np.float32)),
('d', np.array(['abc', 'def'], dtype=np.dtype('|S4'))),
('b_y', Series(np.arange(3, 5, dtype=np.float32))),
('c', np.arange(4, 6)))),
Index(np.array([1, 3]), np.dtype(np.int64), 'a'))
assert_dataframe_equal(actual, expected)
def test_merge_sorted_unique_multi_on_inner(self, df1, df2):
actual = df1.merge(df2, on=['a', 'b'], is_on_sorted=True)
expected = DataFrame(OrderedDict((('index', np.array([5])),
('d', np.array(['def'], dtype=np.dtype('|S4'))),
('c', np.array([5])))),
MultiIndex([np.array([3]), np.array([4], dtype=np.float32)], ['a', 'b']))
assert_dataframe_equal(actual, expected)
def test_merge_sorted_unique_single_on_left(self, df1, df2):
actual = df1.merge(df2, on='a', how='left')
expected = DataFrame(OrderedDict((('index', np.arange(2, 7)),
('b_x', np.arange(1, 6, dtype=np.float32)),
('d', np.array(['None', 'abc', 'None', 'def', 'None'], dtype=np.bytes_)),
('b_y', Series(np.array([-999., 3., -999., 4., -999.], dtype=np.float32))),
('c', np.array([-999, 4, -999, 5, -999])))),
Index(np.arange(5), np.dtype(np.int64), 'a'))
assert_dataframe_equal(actual, expected)
def test_merge_sorted_unique_single_on_right(self, df1, df2):
actual = df1.merge(df2, on='a', how='right')
expected = DataFrame(OrderedDict((('index', np.array([3, 5, -999])),
('b_x', np.array([2, 4, -999.], dtype=np.float32)),
('d', np.array(['abc', 'def', 'efgh'], dtype=np.dtype('|S4'))),
('b_y', Series(np.array([3., 4., 5.], dtype=np.float32))),
('c', np.array([4, 5, 6])))),
Index(np.array([1, 3, 5]), np.dtype(np.int64), 'a'))
assert_dataframe_equal(actual, expected)
def test_merge_sorted_unique_single_on_outer(self, df1, df2):
actual = df1.merge(df2, on='a', how='outer')
expected = DataFrame(OrderedDict((('index', np.array([2, 3, 4, 5, 6, -999])),
('b_x', np.array([1, 2, 3, 4, 5, -999.], dtype=np.float32)),
('d', np.array(['None', 'abc', 'None', 'def', 'None', 'efgh'], dtype=np.dtype('|S4'))),
('b_y', Series(np.array([-999., 3., -999., 4., -999., 5.], dtype=np.float32))),
('c', np.array([-999, 4, -999, 5, -999, 6])))),
Index(np.arange(0, 6), np.dtype(np.int64), 'a'))
assert_dataframe_equal(actual, expected)
# seems unnecessary to run for all cases since join just delegates to merge
def test_join(self):
df1 = DataFrame(OrderedDict((('a', np.arange(5)), ('b', np.arange(1, 6, dtype=np.float64)))),
Index(np.arange(0, 5)))
df2 = DataFrame(OrderedDict((('b', np.arange(3, 6, dtype=np.float32)), ('c', np.arange(4, 7)))),
Index(np.array(np.array([1, 3, 5]))))
actual = df1.join(df2, lsuffix='_x')
expected = DataFrame(OrderedDict((('a', np.arange(5)),
                                          ('b_x', np.arange(1, 6, dtype=np.float64)),
"""
Copyright 2018-2019 CS Systèmes d'Information
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import time
from _functools import reduce as freduce
import logging
from math import sqrt
import sys
from unittest.case import TestCase, skipIf
from ikats.core.library.exception import IkatsException, IkatsNotFoundError, IkatsConflictError
from ikats.core.library.spark import ScManager
from ikats.core.resource.api import IkatsApi
from ikats.core.resource.client.temporal_data_mgr import DTYPE
from ikats.core.resource.interface import ResourceLocator
from ikats.algo.correlation import loop
from ikats.algo.correlation.data import is_triangular_matrix, get_triangular_matrix
import numpy as np
# Context to use for every test
CONTEXT = "FlightIdentifier"
# Variable to use for every test
VARIABLE = "metric"
# PRECISION: Accepted absolute tolerance for floating point check
# - important: the PRECISION depends on the configured loop.ConfigCorrelationLoop.the_digits_number
# => see default value used by loop.correlation_ts_list_loop
PRECISION = 1e-3
class TestCorrelationLoop(TestCase):
"""
Unittest class of the implementation correlation.loop
"""
log = logging.getLogger("TestCorrelationLoop")
@staticmethod
def __save_dataset(dataset_definition,
variable_identifier=VARIABLE,
context_identifier=CONTEXT,
var_type=DTYPE.string,
ctx_type=DTYPE.number):
"""
Saves the unittest dataset and returns the result
:param dataset_definition: details about the dataset
:type dataset_definition: dict
:return: the result: list of TSUIDS
:rtype: list
"""
tsuids = []
for funcid, [meta_dict, ts] in dataset_definition.items():
tsuid = IkatsApi.ts.create(fid=funcid, data=ts)['tsuid']
tsuids.append(tsuid)
IkatsApi.md.create(tsuid=tsuid, name='ikats_start_date', value=str(ts[0][0]),
data_type=DTYPE.date, force_update=True)
IkatsApi.md.create(tsuid=tsuid, name='ikats_end_date', value=str(ts[-1][0]),
data_type=DTYPE.date, force_update=True)
IkatsApi.md.create(tsuid=tsuid, name="qual_nb_points", value=str(len(ts)),
data_type=DTYPE.number, force_update=True)
period = (ts[-1][0] - ts[0][0]) / len(ts)
IkatsApi.md.create(tsuid=tsuid, name='qual_ref_period', value=str(period),
data_type=DTYPE.number, force_update=True)
if variable_identifier in meta_dict:
IkatsApi.md.create(tsuid=tsuid, name=variable_identifier, value=meta_dict[variable_identifier],
data_type=var_type, force_update=True)
if context_identifier in meta_dict:
IkatsApi.md.create(tsuid=tsuid, name=context_identifier, value=meta_dict[context_identifier],
data_type=ctx_type, force_update=True)
if len(tsuids) % 20 == 0:
TestCorrelationLoop.log.info("%s TS created so far", len(tsuids))
TestCorrelationLoop.log.info("%s TS created", len(tsuids))
return tsuids
@staticmethod
def __remove_dataset(tsuids):
"""
Remove the timeseries defined
:param tsuids: list of tsuids
:type tsuids: list of str
:raise exception: error deleting some timeseries
"""
failed = []
for tsuid in tsuids:
try:
IkatsApi.ts.delete(tsuid)
except (TypeError, IkatsNotFoundError, IkatsConflictError, SystemError):
failed.append(tsuid)
if len(failed) > 0:
raise Exception("Failed to clean the timeseries: TSUIDS={}".format(failed))
@classmethod
def setUpClass(cls):
"""
Sets up the unittest:
* Prepares the log
* Prepares the required input data: ts and metadata
:param cls: The TestCorrelationLoop class
:type cls: class
"""
cls.log.setLevel(logging.INFO)
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setLevel(logging.INFO)
cls.log.addHandler(stream_handler)
ScManager.log.setLevel(logging.INFO)
ScManager.log.addHandler(stream_handler)
@staticmethod
def __my_pearson(x_val, y_val):
"""
Local pearson computation used to check the coefficient value computed by algorithm.
:param x_val: first set of data to use for computation
:type x_val: np.array
:param y_val: second set of data to use for computation
:type y_val: np.array
:return: the pearson correlation coefficient
:rtype: float
"""
x_mean = (sum(x_val) + 0.0) / len(x_val)
y_mean = (sum(y_val) + 0.0) / len(y_val)
x_val_2 = x_val - x_mean
y_val_2 = y_val - y_mean
x_var = sum(x_val_2 ** 2) / len(x_val)
y_var = sum(y_val_2 ** 2) / len(y_val)
corr = sum(x_val_2 * y_val_2) / sqrt(x_var * y_var) / len(x_val)
return corr
def __pearson_from_dataset(self, dataset, context, var_one, var_two):
"""
Computes the pearson correlation coeff for the test, with the function TestCorrelationLoop.__my_pearson
:param dataset: the unit test dataset must be a dictionary of
- keys: funcIDS
- values: [ <metadata dict>, <Timeseries numpy array> ]
:type dataset: dict
:param context: the context value
:type context: int or str
:param var_one: the name of the variable one
:type var_one: str
:param var_two: the name of the variable two
:type var_two: str
:return: the pearson correlation coeff computed by TestCorrelationLoop.__my_pearson
:rtype: float
:raise IkatsException: Test preparation error when piece of data is missing
"""
ts_selection = [value[1] for value in dataset.values() if
(value[0].get(CONTEXT, None) == context) and
(value[0].get(VARIABLE, None) in [var_one, var_two])]
if len(ts_selection) != 2:
msg = "Test preparation error: expects 2 TS defined for cts={} vars=[{},{}]"
raise IkatsException(msg.format(context, var_one, var_two))
# Read values of timeseries, ignoring the timestamps
x = ts_selection[0][:, 1]
y = ts_selection[1][:, 1]
the_min = min(x.size, y.size)
x_val = x[:the_min]
y_val = y[:the_min]
return self.__my_pearson(x_val, y_val)
@staticmethod
def __retrieve_funcids(tsuids):
"""
Internal purpose: retrieves funcIds from a list of tsuids
:param tsuids: TSUIDS list to get functional identifier from
:type tsuids: list
:return: list of funcids
:rtype: list of str
:raise exception: error occurred
"""
try:
return [IkatsApi.ts.fid(tsuid=tsuid) for tsuid in tsuids]
except Exception:
raise IkatsException("Failed to convert tsuids={} to funcids".format(tsuids))
def __get_matrices(self, computed_result, ref_dim):
"""
Factorized code: read the 4 matrices from their correlation loop result
- Mean matrix
- Variance matrix
- OID matrix: links to process-data
- Extracted matrix: links replaced by json content from process-data
:param computed_result: result from correlation loop as returned
by CorrelationDataset::get_json_friendly_dict()
:type computed_result: dict
:param ref_dim: expected size of matrices
:type ref_dim: int
:return: the matrices prepared for the tests are:
- Mean matrix
- Variance matrix
- OID matrix
- Extracted matrix
:rtype: list, list, list, list
"""
# Step 1: gets the Mean matrix
self.assertIsNotNone(computed_result['matrices'][0], "Mean Correlation object exists")
mean_obj = computed_result['matrices'][0]
self.assertEqual(mean_obj['desc']['label'], "Mean Correlation", "Description of mean correlation")
self.assertIsNotNone(mean_obj['data'], "Mean Correlation data exists")
mean_matrix = mean_obj['data']
self.assertTrue(is_triangular_matrix(mean_matrix, ref_dim), "Mean matrix is triangular")
# Step 2: gets the Variance matrix
self.assertIsNotNone(computed_result['matrices'][1], "Variance Correlation object exists")
variance_obj = computed_result['matrices'][1]
self.assertEqual(variance_obj['desc']['label'], "Variance", "Description of the variance correlation")
self.assertIsNotNone(variance_obj['data'], "Variance data exists")
variance_matrix = variance_obj['data']
self.assertTrue(is_triangular_matrix(variance_matrix, ref_dim), "Variance matrix is triangular")
# Step 3: gets the OID matrix (object identifiers pointing to the process_data table)
oid_matrix = computed_result['matrices'][2]['data']
self.assertTrue(is_triangular_matrix(oid_matrix, ref_dim),
"OID links: matrix is triangular, dim={}".format(ref_dim))
# Step 4: gets the extracted matrix from OID matrix: loading process_data from their OID.
extracted_matrix = get_triangular_matrix(dim=ref_dim,
default_value_diag=None,
default_value_other=None)
local_ntdm = ResourceLocator().ntdm
for r_idx, row in enumerate(oid_matrix):
# if item is None
# => extracted is not applied and lambda returns None
extracted_matrix[r_idx] = [identifier and local_ntdm.download_data(identifier).json for identifier in row]
return mean_matrix, variance_matrix, oid_matrix, extracted_matrix
def __test_corrs_by_context(self, json_corr_by_context, ref_contexts, ref_var_one, ref_var_two):
"""
Tests the json content that was produced by correlation.data.CorrelationsByContext
- the presence and lengths of the following lists:
- contexts,
- correlation,
- tsuid pairs,
- the context values are matching ref_contexts,
- the variable names are matching sorted(ref_var_one, ref_var_two).
And finally returns the parsed contexts, correlations and tsuid pairs.
:param json_corr_by_context: should be a well-formed dict.
:type json_corr_by_context: dict
:param ref_contexts: expected context values that should be contained by json_corr_by_context["x_value"]
:type ref_contexts: list
:param ref_var_one: expected first variable name
:type ref_var_one: str
:param ref_var_two: expected second variable name
:type ref_var_two: str
:return: contexts, correlations and tsuid pairs
:rtype: list, list, list
"""
self.assertTrue(isinstance(json_corr_by_context, dict))
tested_keys = ["variables", 'x_value', "y_values", "ts_lists"]
self.assertTrue([True, True, True, True] ==
[key in json_corr_by_context for key in tested_keys])
self.assertTrue(json_corr_by_context["variables"] == sorted([ref_var_one, ref_var_two]))
y_values = json_corr_by_context['y_values']
self.assertTrue(isinstance(y_values, list) and len(y_values) == 1)
contexts = json_corr_by_context["x_value"]
corrs = y_values[0]
tsuid_pairs = json_corr_by_context["ts_lists"]
# Testing that all the lists are well defined and sized
for label, tested_vect in zip(["ctx", "corrs", "tsuid_pairs"],
[contexts['data'], corrs['data'], tsuid_pairs]):
self.assertTrue(isinstance(tested_vect, list) and len(tested_vect) == len(ref_contexts),
"Testing {}".format(label))
self.assertEqual(contexts['data'], ref_contexts)
# Testing that defined pairs are well-formed
for idx_pair, pair in enumerate(tsuid_pairs):
if pair is not None:
self.assertTrue(
isinstance(pair, list)
and len(pair) == 2
and isinstance(pair[0], str)
and isinstance(pair[1], str))
else:
# Pair is None means that piece of data is not available
# => corresponding correlation is None
self.assertIsNone(corrs['data'][idx_pair])
return contexts, corrs, tsuid_pairs
def test_my_pearson(self):
"""
Test the pearson calculation used for tests
Provided by
http://stackoverflow.com/questions/3949226/calculating-pearson-correlation-and-significance-in-python
"""
corr = self.__my_pearson(x_val=np.array([1.0, 2.0, 3.0]), y_val=np.array([1.0, 5.0, 7.0]))
self.assertTrue(abs(corr - 0.981980506062) <= PRECISION)
def test_init_config_from_meta_ref0(self):
"""
Tests that initialized config from metadata is ok with the cases of ignored TS
- ignored TS (variable+context): there is no other TS sharing the same context
- ignored TS: TS without context (ie without metadata providing the context)
- ignored TS: TS without variable (ie without metadata order_by providing the variable)
- ignored variable: when all associated TS are ignored
Note: this is the unique white box unittest on loop._initialize_config_from_meta
"""
# Dataset with incomplete meta
# ref0_TS7 cannot be correlated to any other TS for context=5
# => context 5 is without any correlations
# => var WS2 will be ignored
#
# ref0_TS11 is without context (flight)
#
# ref0_TS10 is without variable (metric)
# Remaining variables on consistent data:
loaded_meta = {
'ref0_TS1': {CONTEXT: 1, VARIABLE: "HEADING", "qual_nb_points": 80},
'ref0_TS2': {CONTEXT: 1, VARIABLE: "GS"},
'ref0_TS3': {CONTEXT: 1, VARIABLE: "WS1"},
'ref0_TS4': {CONTEXT: 2, VARIABLE: "HEADING", "funcId": "ignored"},
'ref0_TS5': {CONTEXT: 2, VARIABLE: "GS"},
'ref0_TS6': {CONTEXT: 2, VARIABLE: "WS1"},
'ref0_TS7': {CONTEXT: 5, VARIABLE: "WS2"},
'ref0_TS8': {CONTEXT: 4, VARIABLE: "WS1"},
'ref0_TS9': {CONTEXT: 4, VARIABLE: "WS3"},
'ref0_TS10': {CONTEXT: 4},
'ref0_TS11': {VARIABLE: "WS1"}
}
corr_loop_config, contexts, variables = \
loop._initialize_config_from_meta(ts_metadata_dict=loaded_meta,
context_meta=CONTEXT,
variable_meta=VARIABLE)
self.assertListEqual([1, 2, 4], contexts, msg="Test applicable contexts.")
self.assertListEqual(["GS", "HEADING", "WS1", "WS3"], variables, msg="Test evaluated variables.")
ref_corr_loop_config = [(0, [(0, 'ref0_TS2'), (1, 'ref0_TS1'), (2, 'ref0_TS3')]),
(1, [(0, 'ref0_TS5'), (1, 'ref0_TS4'), (2, 'ref0_TS6')]),
(2, [(2, 'ref0_TS8'), (3, 'ref0_TS9')])]
for ref, computed in zip(ref_corr_loop_config, sorted(corr_loop_config, key=lambda x: x[0])):
self.assertEqual(ref[0], computed[0], "Test computed config: contexts")
self.assertListEqual(ref[1], computed[1], "Test computed config: (var,tsuid) pairs")
def test_nominal_ref1(self):
"""
Tests matrices without null elements: all variables defined in each context
"""
# Prepare REF1
# --------------
ts_ramp_up = np.array([[1101889318000 + x * 1000, 10.0 * x] for x in range(10)], dtype=np.dtype('O'))
ts_ramp_up_longer = np.array([[1101889318000 + x * 1000, 10.0 * x - 5.0]
for x in range(15)], dtype=np.dtype('O'))
ts_up_and_down = np.array([[1101889318000 + x * 1050, 10.0 * (5 - abs(5 - x))]
for x in range(10)], dtype=np.dtype('O'))
ts_down_and_up = np.array([[1101889318500 + x * 1000, 10.0 * (abs(5 - x))]
for x in range(10)], dtype=np.dtype('O'))
dataset = {
'REF1_TS1': [{CONTEXT: '1', VARIABLE: "WS1"}, ts_ramp_up],
'REF1_TS2': [{CONTEXT: '1', VARIABLE: "WS2"}, ts_up_and_down],
'REF1_TS3': [{CONTEXT: '1', VARIABLE: "HEADING"}, ts_up_and_down],
'REF1_TS4': [{CONTEXT: '2', VARIABLE: "WS1"}, ts_ramp_up_longer],
'REF1_TS5': [{CONTEXT: '2', VARIABLE: "WS2"}, ts_down_and_up],
'REF1_TS6': [{CONTEXT: '2', VARIABLE: "HEADING"}, ts_ramp_up]
}
ts_selection_ref1 = None
try:
ts_selection_ref1 = TestCorrelationLoop.__save_dataset(dataset)
computed_result = loop.correlation_ts_list_loop(ts_list=ts_selection_ref1,
corr_method=loop.PEARSON,
context_meta=CONTEXT)
self.assertListEqual(['HEADING', 'WS1', 'WS2'], computed_result['variables'], "Test sorted variables")
mean_matrix, variance_matrix, oid_matrix, extracted_matrix = self.__get_matrices(computed_result, ref_dim=3)
# Testing the linked correlation results
# ----------------------------------------
# Ts internal content: CorrelationsByContext for variables indexes [0,0]
# variables indexes [0,0] <=> variables ['HEADING', 'HEADING']
# ----------------------------------------------------------------------
obj_heading_heading_by_flights = extracted_matrix[0][0]
contexts, corrs, tsuid_pairs = \
self.__test_corrs_by_context(json_corr_by_context=obj_heading_heading_by_flights,
ref_contexts=['1', '2'],
ref_var_one="HEADING",
ref_var_two="HEADING")
# Pearson Correlation == 1 because Corr(x,x) is 1.0 if Var(x) is not zero
self.assertEqual(1.0, corrs['data'][0], "Pearson Corr(HEADING,HEADING) for flight 1")
self.assertEqual(1.0, corrs['data'][1], "Pearson Corr(HEADING,HEADING) for flight 2")
# Tests internal content: CorrelationsByContext for variables indexes [0,1]
# variables indexes [0,1] <=> variables ['HEADING', 'WS1']
# -------------------------------------------------------------------------
obj_heading_ws1_by_flights = extracted_matrix[0][1]
# - Computes the expected correlation for context=1, according the tested dataset
ref_heading_ws1_fl_1 = self.__pearson_from_dataset(dataset=dataset,
context='1',
var_one="HEADING",
var_two="WS1")
# - Computes the expected correlation for context=2, according the tested dataset
ref_heading_ws1_fl_2 = self.__pearson_from_dataset(dataset=dataset,
context='2',
var_one="HEADING",
var_two="WS1")
contexts, heading_ws1_corrs, tsuid_pairs = \
self.__test_corrs_by_context(json_corr_by_context=obj_heading_ws1_by_flights,
ref_contexts=['1', '2'],
ref_var_one="HEADING",
ref_var_two="WS1")
# Checking that tsuid are consistent with expected funcID
# flight 1 + HEADING => funcID is REF1_TS3
# flight 1 + WS1 => funcID is REF1_TS1
# flight 2 + HEADING => funcID is REF1_TS6
# flight 2 + WS1 => funcID is REF1_TS4
for ref_funcid, tsuid in zip(["REF1_TS3", "REF1_TS1", "REF1_TS6", "REF1_TS4"],
[tsuid_pairs[0][0], tsuid_pairs[0][1],
tsuid_pairs[1][0], tsuid_pairs[1][1]]):
# Just checks that the actual funcId is the same as the expected one in dataset definition
#
actual_funcid = IkatsApi.md.read(tsuid)[tsuid]["funcId"]
self.assertEqual(ref_funcid, actual_funcid,
"Testing tsuid={}: equality between funcId={} and ref={}".format(tsuid,
actual_funcid,
ref_funcid))
finally:
self.__remove_dataset(ts_selection_ref1)
self.assertTrue(abs(ref_heading_ws1_fl_1 - heading_ws1_corrs['data'][0]) <= PRECISION,
"Pearson Corr(HEADING,WS1) for flight 1")
self.assertTrue(abs(ref_heading_ws1_fl_2 - heading_ws1_corrs['data'][1]) <= PRECISION,
"Pearson Corr(HEADING,WS1) for flight 2")
# Testing the mean correlation results
# ------------------------------------
# Diagonal means: always 1.0 when defined
for i in range(3):
self.assertTrue(mean_matrix[i][0] == 1.0)
# The other means: just one case tested: the HEADING + WS1 correlation pair
values = heading_ws1_corrs['data']
self.assertTrue(abs(mean_matrix[0][1] - freduce(lambda x, y: x + y,
values, 0.0) / len(values)) <= PRECISION)
# Testing the variance correlation results
# ----------------------------------------
# Diagonal variance: always 0.0 as the correlations are 1.0, 1.0, ...
# => There is no case of constant TS producing NaN correlations
for i in range(3):
self.assertTrue(abs(variance_matrix[i][0]) <= PRECISION)
# The other variances: just one case tested: Var(Corr(HEADING, WS1))
mean = mean_matrix[0][1]
values = heading_ws1_corrs['data']
# recomputing the variance ...
self.assertTrue(abs(variance_matrix[0][1] - freduce(lambda x, y: x + (y - mean) ** 2,
values, 0.0) / len(values)) <= PRECISION)
def test_incomplete_ref2(self):
"""
Tests initializations with incomplete data:
- matrices with null elements:
null elements means that there is no pair of variables (v1, v2) in any of the found contexts.
- ignored TS (variable + context): there is no other TS sharing the same context
- ignored TS: TS without context (ie without metadata providing the context)
- ignored TS: TS without variable (ie without metadata order_by providing the variable)
- ignored variable: when all associated TS are ignored
"""
# Prepare ref2
# ------------
        ts_ramp_up = np.array([[1101889318000 + x * 1000, 8.0 * x] for x in range(10)], dtype=np.dtype('O'))
from __future__ import division, print_function, unicode_literals
import os
from os import listdir
import json
import sys
import random
import cv2
import pickle
import numpy as np
seed = 13
random.seed(seed)
np.random.seed(seed)
import os
from torch.autograd import Variable
from torch.utils.data import Dataset, Subset
import torchvision.datasets as dsets
from torchvision import transforms
import torch
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.gridspec as gridspec
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import torch.nn as nn
import importlib as imp
import contrastive
#import netr as net
import net
use_cuda = True
class Dataseq(Dataset):
"""Titanic dataset."""
def __init__(self, X, y, idx_nolabel): #, idx_wlabel, batch_size
"""
Args:
data: pandas dataframe
"""
self.X = X
self.y = y
#self.idx_wlabel = idx_wlabel
self.idx_nolabel = idx_nolabel
#self.batch_size = batch_size
def __len__(self):
return len(self.idx_nolabel)
def __gettrio__(self, idx):
# sample = {col: self.data.loc[idx, col].values for dtype in self.input_dict.keys() for col in self.input_dict[dtype]}
#idx0 = self.use_idx[idx]
#[idx1, idx2] = np.random.choice(np.append(self.use_idx[0:idx], self.use_idx[idx+1:]), 2, replace=False)
#[idx0, idx1, idx2] = np.random.choice(self.idx_nolabel, 3, replace=False)
sample0 = self.X[self.idx_nolabel[idx]].float()/256.0
#sample1 = self.X[idx1].float()/256.0
#sample2 = self.X[idx2].float()/256.0
return sample0 #, sample1, sample2
def __getitem__(self, idx):
sample = self.X[self.idx_nolabel[idx]].float()/256.0
return sample, idx, self.y[self.idx_nolabel[idx]]
#enc = do_train()
def do_train():
epoch_len = 8
n_batches = 1600
lr = 1.0e-4
mb_size = 128
latent_dim = 32
ARloss = contrastive.AlwaysRight()
train = dsets.MNIST(
root='../data/',
train=True,
# transform = transforms.Compose([transforms.RandomRotation(10), transforms.ToTensor()]),
transform=transforms.Compose([transforms.ToTensor()]),
download=True
)
train_data = Dataseq(train.train_data, train.train_labels, np.arange(train.train_labels.size(0)))
train_iter = torch.utils.data.DataLoader(train_data, batch_size=mb_size, shuffle=True)
enc = net.Encoder(dim=latent_dim)
if use_cuda:
enc = enc.cuda()
if use_cuda:
mu = torch.zeros(mb_size, 3, latent_dim).cuda()
logvar = torch.zeros(mb_size, 3, latent_dim).cuda()
else:
mu = torch.zeros(mb_size, 3, latent_dim)
logvar = torch.zeros(mb_size, 3, latent_dim)
solver = optim.RMSprop([p for p in enc.parameters()], lr=lr)
for it in range(n_batches):
X, idx, y = next(iter(train_iter))
if len(set(idx)) != mb_size:
print(len(set(idx)))
#print(y[0:5])
if use_cuda:
X = Variable(X).cuda()
else:
X = Variable(X)
#mu[:, 0], logvar[:, 0] = enc(T)
#mu[:, 0] = enc.reparameterize(mu[:, 0], logvar[:, 0])
#mu[:, 1], logvar[:, 1] = torch.cat((mu[3:, 0], mu[0:3, 0]), dim=0), torch.cat((logvar[3:, 0], logvar[0:3, 0]), dim=0)
#mu[:, 2], logvar[:, 2] = torch.cat((mu[5:, 0], mu[0:5, 0]), dim=0), torch.cat((logvar[5:, 0], logvar[0:5, 0]), dim=0)
mu0, logvar0 = enc(X)
mu0a = enc.reparameterize(mu0, logvar0)
mu0b = enc.reparameterize(mu0, logvar0)
mu1 = torch.cat((mu0a[3:], mu0a[0:3]), dim=0)
mu2 = torch.cat((mu0b[5:], mu0b[0:5]), dim=0)
mu = torch.cat((mu0a.unsqueeze(1), mu1.unsqueeze(1), mu2.unsqueeze(1)), 1)
if use_cuda:
target = torch.zeros(mb_size, 3).cuda()
else:
target = torch.zeros(mb_size, 3)
loss = ARloss(mu, target)
loss += 1.0 / 4.0 * torch.mean(torch.pow(mu, 2))
loss += 1.0 / 4.0 * torch.mean(torch.exp(logvar) - logvar)
mu = torch.cat((mu0a.unsqueeze(1), mu0b.unsqueeze(1), mu2.unsqueeze(1)), 1)
target[:, 2] = 1
loss += 0.5*ARloss(mu, target)
loss.backward()
solver.step()
enc.zero_grad()
if (it + 1) % epoch_len == 0:
print(it+1, loss.data.cpu().numpy(), torch.mean(torch.pow(mu0, 2)).data.cpu().numpy())
return enc
def make_preds(enc):
mb_size = 128
test = dsets.MNIST(
root='../data/',
train=False,
transform=transforms.Compose([transforms.ToTensor()])
)
test_data = Dataseq(test.train_data, test.train_labels, np.arange(test.train_labels.size(0)))
test_iter = torch.utils.data.DataLoader(test_data, batch_size=mb_size, shuffle=False)
mu = torch.zeros(len(test.train_labels), enc.dim)
y_test = torch.zeros(len(test.train_labels))
s = 0
for X, idx, y in test_iter:
e = s + X.size(0)
y_test[s:e] = y
if use_cuda:
X = Variable(X).cuda()
else:
X = Variable(X)
mu[s:e], _ = enc(X)
s = e
mu = mu.data.cpu().numpy()
pca = PCA(n_components=3, svd_solver='arpack', copy=True, whiten=True)
pca.fit(mu[:, :])
pca_vecs = pca.transform(mu[:, :])
plt.close('all')
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(xs=pca_vecs[np.where(y_test==1)][:, 0], ys=pca_vecs[np.where(y_test==1)][:, 1], zs=pca_vecs[np.where(y_test==1)][:, 2], zdir='z', s=5, c='k', depthshade=True, label='1')
    ax.scatter(xs=pca_vecs[np.where(y_test==7)][:, 0], ys=pca_vecs[np.where(y_test==7)][:, 1], zs=pca_vecs[np.where(y_test==7)][:, 2], zdir='z', s=5, depthshade=True, label='7')
import numpy as np
import tensorflow as tf
from server import Server
from client import Client
def get_datasets(num_clients, model_type, dataset_type):
# Load training and eval data
if model_type == 'perceptron':
mnist = tf.contrib.learn.datasets.load_dataset("mnist")
X_train = np.concatenate((mnist.train.images, mnist.validation.images))
y_train = np.concatenate((
np.asarray(mnist.train.labels, dtype=np.int32),
            np.asarray(mnist.validation.labels, dtype=np.int32)))
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import csv
# For comparison to other methods
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import roc_curve
from sklearn.neighbors import KNeighborsRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR
from sklearn.tree import DecisionTreeRegressor
#my ELM code
from ELM import ELMNetwork
#create new dictionary for later
test_maes_dictionary = dict()
#set plot style
plt.style.use('ggplot')
sns.set_context("talk")
##SPLIT ARRAY##
def split(list, size):
return list[:size], list[size:]
##PREPARE DATA##
def prepareData():
#read the file
reader = csv.reader(open("pulsar_stars.csv", "r"), delimiter=",")
#discard first line
next(reader)
# convert to floats from strings
x = list(reader)
for i in range(len(x)):
for j in range(len(x[i])):
x[i][j] = float(x[i][j])
#confirmation of 17898 lines
print("Length:",len(x))
#split off 20% for test
X_test, x = split(x,3580)
#split remaining into validation and training groups
X_val, X_train = split(x, 3580)
#confirmation of sizes
print("Test:", len(X_test), "Validation:", len(X_val), "Training:", len(X_train), "\n")
#create y values for train
y_train = []
for x in X_train:
y_train.append(x[-1])
x.pop(-1)
#create y values for validation
y_val = []
for x in X_val:
y_val.append(x[-1])
x.pop(-1)
#create y values for testing
y_test = []
for x in X_test:
y_test.append(x[-1])
x.pop(-1)
#convert all to numpy arrays for ease of use later
X_train = np.array(X_train)
    y_train = np.array(y_train)
ENABLE_MULTIPROCESSING = True
from dsl import cpp_trace_param_automata
def generate_public_submission():
import numpy as np
import pandas as pd
import os
import json
from pathlib import Path
import matplotlib.pyplot as plt
from matplotlib import colors
import numpy as np
from xgboost import XGBClassifier
import pdb
# data_path = Path('.')
data_path = Path('.')
if not (data_path / 'test').exists():
data_path = Path('../input/abstraction-and-reasoning-challenge')
training_path = data_path / 'training'
evaluation_path = data_path / 'evaluation'
test_path = data_path / 'test'
def plot_result(test_input, test_prediction,
input_shape):
"""
Plots the first train and test pairs of a specified task,
using same color scheme as the ARC app
"""
cmap = colors.ListedColormap(
['#000000', '#0074D9', '#FF4136', '#2ECC40', '#FFDC00',
'#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25'])
norm = colors.Normalize(vmin=0, vmax=9)
fig, axs = plt.subplots(1, 2, figsize=(15, 15))
test_input = test_input.reshape(input_shape[0], input_shape[1])
axs[0].imshow(test_input, cmap=cmap, norm=norm)
axs[0].axis('off')
axs[0].set_title('Actual Target')
test_prediction = test_prediction.reshape(input_shape[0], input_shape[1])
axs[1].imshow(test_prediction, cmap=cmap, norm=norm)
axs[1].axis('off')
axs[1].set_title('Model Prediction')
plt.tight_layout()
plt.show()
def plot_test(test_prediction, task_name):
"""
Plots the first train and test pairs of a specified task,
using same color scheme as the ARC app
"""
cmap = colors.ListedColormap(
['#000000', '#0074D9', '#FF4136', '#2ECC40', '#FFDC00',
'#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25'])
norm = colors.Normalize(vmin=0, vmax=9)
fig, axs = plt.subplots(1, 1, figsize=(15, 15))
axs.imshow(test_prediction, cmap=cmap, norm=norm)
axs.axis('off')
axs.set_title(f'Test Prediction {task_name}')
plt.tight_layout()
plt.show()
# https://www.kaggle.com/inversion/abstraction-and-reasoning-starter-notebook
def flattener(pred):
str_pred = str([row for row in pred])
str_pred = str_pred.replace(', ', '')
str_pred = str_pred.replace('[[', '|')
str_pred = str_pred.replace('][', '|')
str_pred = str_pred.replace(']]', '|')
return str_pred
sample_sub1 = pd.read_csv(data_path / 'sample_submission.csv')
sample_sub1 = sample_sub1.set_index('output_id')
sample_sub1.head()
def get_moore_neighbours(color, cur_row, cur_col, nrows, ncols):
if cur_row <= 0:
top = -1
else:
top = color[cur_row - 1][cur_col]
if cur_row >= nrows - 1:
bottom = -1
else:
bottom = color[cur_row + 1][cur_col]
if cur_col <= 0:
left = -1
else:
left = color[cur_row][cur_col - 1]
if cur_col >= ncols - 1:
right = -1
else:
right = color[cur_row][cur_col + 1]
return top, bottom, left, right
def get_tl_tr(color, cur_row, cur_col, nrows, ncols):
if cur_row == 0:
top_left = -1
top_right = -1
else:
if cur_col == 0:
top_left = -1
else:
top_left = color[cur_row - 1][cur_col - 1]
if cur_col == ncols - 1:
top_right = -1
else:
top_right = color[cur_row - 1][cur_col + 1]
return top_left, top_right
def make_features(input_color, nfeat):
nrows, ncols = input_color.shape
feat = np.zeros((nrows * ncols, nfeat))
cur_idx = 0
for i in range(nrows):
for j in range(ncols):
feat[cur_idx, 0] = i
feat[cur_idx, 1] = j
feat[cur_idx, 2] = input_color[i][j]
feat[cur_idx, 3:7] = get_moore_neighbours(input_color, i, j, nrows, ncols)
feat[cur_idx, 7:9] = get_tl_tr(input_color, i, j, nrows, ncols)
feat[cur_idx, 9] = len(np.unique(input_color[i, :]))
feat[cur_idx, 10] = len(np.unique(input_color[:, j]))
feat[cur_idx, 11] = (i + j)
feat[cur_idx, 12] = len(np.unique(input_color[i - local_neighb:i + local_neighb,
j - local_neighb:j + local_neighb]))
cur_idx += 1
return feat
def features(task, mode='train'):
num_train_pairs = len(task[mode])
feat, target = [], []
global local_neighb
for task_num in range(num_train_pairs):
input_color = np.array(task[mode][task_num]['input'])
target_color = task[mode][task_num]['output']
nrows, ncols = len(task[mode][task_num]['input']), len(task[mode][task_num]['input'][0])
target_rows, target_cols = len(task[mode][task_num]['output']), len(task[mode][task_num]['output'][0])
if (target_rows != nrows) or (target_cols != ncols):
print('Number of input rows:', nrows, 'cols:', ncols)
print('Number of target rows:', target_rows, 'cols:', target_cols)
not_valid = 1
return None, None, 1
imsize = nrows * ncols
# offset = imsize*task_num*3 #since we are using three types of aug
feat.extend(make_features(input_color, nfeat))
target.extend(np.array(target_color).reshape(-1, ))
return np.array(feat), np.array(target), 0
# mode = 'eval'
mode = 'test'
if mode == 'eval':
task_path = evaluation_path
elif mode == 'train':
task_path = training_path
elif mode == 'test':
task_path = test_path
all_task_ids = sorted(os.listdir(task_path))
nfeat = 13
local_neighb = 5
valid_scores = {}
model_accuracies = {'ens': []}
pred_taskids = []
for task_id in all_task_ids:
task_file = str(task_path / task_id)
with open(task_file, 'r') as f:
task = json.load(f)
feat, target, not_valid = features(task)
if not_valid:
print('ignoring task', task_file)
print()
not_valid = 0
continue
xgb = XGBClassifier(n_estimators=10, n_jobs=-1)
xgb.fit(feat, target, verbose=-1)
# training on input pairs is done.
# test predictions begins here
num_test_pairs = len(task['test'])
for task_num in range(num_test_pairs):
cur_idx = 0
input_color = np.array(task['test'][task_num]['input'])
nrows, ncols = len(task['test'][task_num]['input']), len(
task['test'][task_num]['input'][0])
feat = make_features(input_color, nfeat)
print('Made predictions for ', task_id[:-5])
preds = xgb.predict(feat).reshape(nrows, ncols)
if (mode == 'train') or (mode == 'eval'):
ens_acc = (np.array(task['test'][task_num]['output']) == preds).sum() / (nrows * ncols)
model_accuracies['ens'].append(ens_acc)
pred_taskids.append(f'{task_id[:-5]}_{task_num}')
# print('ensemble accuracy',(np.array(task['test'][task_num]['output'])==preds).sum()/(nrows*ncols))
# print()
preds = preds.astype(int).tolist()
# plot_test(preds, task_id)
sample_sub1.loc[f'{task_id[:-5]}_{task_num}',
'output'] = flattener(preds)
if (mode == 'train') or (mode == 'eval'):
df = pd.DataFrame(model_accuracies, index=pred_taskids)
print(df.head(10))
print(df.describe())
for c in df.columns:
print(f'for {c} no. of complete tasks is', (df.loc[:, c] == 1).sum())
df.to_csv('ens_acc.csv')
sample_sub1.head()
training_path = data_path / 'training'
evaluation_path = data_path / 'evaluation'
test_path = data_path / 'test'
training_tasks = sorted(os.listdir(training_path))
eval_tasks = sorted(os.listdir(evaluation_path))
T = training_tasks
Trains = []
for i in range(400):
task_file = str(training_path / T[i])
task = json.load(open(task_file, 'r'))
Trains.append(task)
E = eval_tasks
Evals = []
for i in range(400):
task_file = str(evaluation_path / E[i])
task = json.load(open(task_file, 'r'))
Evals.append(task)
cmap = colors.ListedColormap(
['#000000', '#0074D9', '#FF4136', '#2ECC40', '#FFDC00',
'#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25'])
norm = colors.Normalize(vmin=0, vmax=9)
# 0:black, 1:blue, 2:red, 3:greed, 4:yellow,
# 5:gray, 6:magenta, 7:orange, 8:sky, 9:brown
plt.figure(figsize=(5, 2), dpi=200)
plt.imshow([list(range(10))], cmap=cmap, norm=norm)
plt.xticks(list(range(10)))
plt.yticks([])
# plt.show()
def plot_task(task):
n = len(task["train"]) + len(task["test"])
fig, axs = plt.subplots(2, n, figsize=(4 * n, 8), dpi=50)
plt.subplots_adjust(wspace=0, hspace=0)
fig_num = 0
for i, t in enumerate(task["train"]):
t_in, t_out = np.array(t["input"]), np.array(t["output"])
axs[0][fig_num].imshow(t_in, cmap=cmap, norm=norm)
axs[0][fig_num].set_title(f'Train-{i} in')
axs[0][fig_num].set_yticks(list(range(t_in.shape[0])))
axs[0][fig_num].set_xticks(list(range(t_in.shape[1])))
axs[1][fig_num].imshow(t_out, cmap=cmap, norm=norm)
axs[1][fig_num].set_title(f'Train-{i} out')
axs[1][fig_num].set_yticks(list(range(t_out.shape[0])))
axs[1][fig_num].set_xticks(list(range(t_out.shape[1])))
fig_num += 1
for i, t in enumerate(task["test"]):
t_in, t_out = np.array(t["input"]), np.array(t["output"])
axs[0][fig_num].imshow(t_in, cmap=cmap, norm=norm)
axs[0][fig_num].set_title(f'Test-{i} in')
axs[0][fig_num].set_yticks(list(range(t_in.shape[0])))
axs[0][fig_num].set_xticks(list(range(t_in.shape[1])))
axs[1][fig_num].imshow(t_out, cmap=cmap, norm=norm)
axs[1][fig_num].set_title(f'Test-{i} out')
axs[1][fig_num].set_yticks(list(range(t_out.shape[0])))
axs[1][fig_num].set_xticks(list(range(t_out.shape[1])))
fig_num += 1
plt.tight_layout()
plt.show()
def plot_picture(x):
plt.imshow(np.array(x), cmap=cmap, norm=norm)
plt.show()
def Defensive_Copy(A):
n = len(A)
k = len(A[0])
L = np.zeros((n, k), dtype=int)
for i in range(n):
for j in range(k):
L[i, j] = 0 + A[i][j]
return L.tolist()
def Create(task, task_id=0):
n = len(task['train'])
Input = [Defensive_Copy(task['train'][i]['input']) for i in range(n)]
Output = [Defensive_Copy(task['train'][i]['output']) for i in range(n)]
Input.append(Defensive_Copy(task['test'][task_id]['input']))
return Input, Output
def Recolor(task):
Input = task[0]
Output = task[1]
Test_Picture = Input[-1]
Input = Input[:-1]
N = len(Input)
for x, y in zip(Input, Output):
if len(x) != len(y) or len(x[0]) != len(y[0]):
return -1
Best_Dict = -1
Best_Q1 = -1
Best_Q2 = -1
Best_v = -1
# v ranges from 0 to 3. This gives an extra flexibility of measuring distance from any of the 4 corners
Pairs = []
for t in range(15):
for Q1 in range(1, 8):
for Q2 in range(1, 8):
if Q1 + Q2 == t:
Pairs.append((Q1, Q2))
for Q1, Q2 in Pairs:
for v in range(4):
if Best_Dict != -1:
continue
possible = True
Dict = {}
for x, y in zip(Input, Output):
n = len(x)
k = len(x[0])
for i in range(n):
for j in range(k):
if v == 0 or v == 2:
p1 = i % Q1
else:
p1 = (n - 1 - i) % Q1
if v == 0 or v == 3:
p2 = j % Q2
else:
p2 = (k - 1 - j) % Q2
color1 = x[i][j]
color2 = y[i][j]
if color1 != color2:
rule = (p1, p2, color1)
if rule not in Dict:
Dict[rule] = color2
elif Dict[rule] != color2:
possible = False
if possible:
# Let's see if we actually solve the problem
for x, y in zip(Input, Output):
n = len(x)
k = len(x[0])
for i in range(n):
for j in range(k):
if v == 0 or v == 2:
p1 = i % Q1
else:
p1 = (n - 1 - i) % Q1
if v == 0 or v == 3:
p2 = j % Q2
else:
p2 = (k - 1 - j) % Q2
color1 = x[i][j]
rule = (p1, p2, color1)
if rule in Dict:
color2 = 0 + Dict[rule]
else:
color2 = 0 + y[i][j]
if color2 != y[i][j]:
possible = False
if possible:
Best_Dict = Dict
Best_Q1 = Q1
Best_Q2 = Q2
Best_v = v
if Best_Dict == -1:
return -1 # meaning that we didn't find a rule that works for the traning cases
# Otherwise there is a rule: so let's use it:
n = len(Test_Picture)
k = len(Test_Picture[0])
answer = np.zeros((n, k), dtype=int)
for i in range(n):
for j in range(k):
if Best_v == 0 or Best_v == 2:
p1 = i % Best_Q1
else:
p1 = (n - 1 - i) % Best_Q1
if Best_v == 0 or Best_v == 3:
p2 = j % Best_Q2
else:
p2 = (k - 1 - j) % Best_Q2
color1 = Test_Picture[i][j]
rule = (p1, p2, color1)
if (p1, p2, color1) in Best_Dict:
answer[i][j] = 0 + Best_Dict[rule]
else:
answer[i][j] = 0 + color1
return answer.tolist()
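# Illustrative sketch with a made-up toy task (not from the ARC dataset): every
# blue cell (1) is recolored to red (2), a rule Recolor can discover because it
# is consistent across all periodic positions in the training pair.
def _example_recolor():
    train_inputs = [[[1, 1], [1, 1]]]
    train_outputs = [[[2, 2], [2, 2]]]
    test_input = [[1, 1], [1, 1]]
    answer = Recolor([train_inputs + [test_input], train_outputs])
    return answer  # expected: [[2, 2], [2, 2]]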
sample_sub2 = pd.read_csv(data_path / 'sample_submission.csv')
sample_sub2.head()
def flattener(pred):
str_pred = str([row for row in pred])
str_pred = str_pred.replace(', ', '')
str_pred = str_pred.replace('[[', '|')
str_pred = str_pred.replace('][', '|')
str_pred = str_pred.replace(']]', '|')
return str_pred
example_grid = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
# display(example_grid)
print(flattener(example_grid))
Solved = []
Problems = sample_sub2['output_id'].values
Proposed_Answers = []
test_paths_my = {task.stem: json.load(task.open()) for task in test_path.iterdir()}
test_task_ids = np.sort(list(test_paths_my.keys()))
print(Problems, len(Problems))
task_number_my = dict(zip(test_task_ids, np.arange(100)))
for i in range(len(Problems)):
output_id = Problems[i]
task_id = output_id.split('_')[0]
pair_id = int(output_id.split('_')[1])
f = str(test_path / str(task_id + '.json'))
with open(f, 'r') as read_file:
task = json.load(read_file)
n = len(task['train'])
Input = [Defensive_Copy(task['train'][j]['input']) for j in range(n)]
Output = [Defensive_Copy(task['train'][j]['output']) for j in range(n)]
Input.append(Defensive_Copy(task['test'][pair_id]['input']))
solution = Recolor([Input, Output])
pred = ''
if solution != -1:
Solved.append(i)
pred1 = flattener(solution)
pred = pred + pred1 + ' '
if pred == '':
pred = flattener(example_grid)
Proposed_Answers.append(pred)
sample_sub2['output'] = Proposed_Answers
sample_sub1 = sample_sub1.reset_index()
sample_sub1 = sample_sub1.sort_values(by="output_id")
sample_sub2 = sample_sub2.sort_values(by="output_id")
out1 = sample_sub1["output"].astype(str).values
out2 = sample_sub2["output"].astype(str).values
merge_output = []
for o1, o2 in zip(out1, out2):
o = o1.strip().split(" ")[:1] + o2.strip().split(" ")[:2]
o = " ".join(o[:3])
merge_output.append(o)
sample_sub1["output"] = merge_output
sample_sub1["output"] = sample_sub1["output"].astype(str)
# test_paths_my = { task.stem: json.load(task.open()) for task in test_path.iterdir() }
# test_task_ids = np.sort(list(test_paths_my.keys()))
# task_number_my = dict(zip(test_task_ids, np.arange(100)))
submission = sample_sub1.copy()
submission.to_csv("public_submission.csv", index=False)
#generate_public_submission()
import numpy as np
from tqdm.notebook import tqdm
from PIL import Image, ImageDraw
import time
from collections import defaultdict
import os
import json
import random
import copy
import networkx as nx
from pathlib import Path
import matplotlib.colors as colors
import matplotlib.pyplot as plt
from itertools import product
import pandas as pd
import multiprocessing
import subprocess
# from moviepy.editor import ImageSequenceClip
# from moviepy.editor import clips_array, CompositeVideoClip
# from moviepy.video.io.html_tools import html_embed, HTML2
# def display_vid(vid, verbose=False, **html_kw):
# """
# Display a moviepy video clip, useful for removing loadbars
# """
# rd_kwargs = {
# 'fps': 10, 'verbose': verbose
# }
# if not verbose:
# rd_kwargs['logger'] = None
# return HTML2(html_embed(vid, filetype=None, maxduration=60,
# center=True, rd_kwargs=rd_kwargs, **html_kw))
data_path = Path('../input/abstraction-and-reasoning-challenge/')
# data_path = Path('.') # Artyom: it's better use symlinks locally
cmap_lookup = [
'#000000', '#0074D9', '#FF4136', '#2ECC40', '#FFDC00',
'#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25'
]
cmap_lookup = [np.array([int(x[1:3], 16), int(x[3:5], 16), int(x[5:], 16)]) for x in cmap_lookup]
def cmap(x):
"""
Translate a task matrix to a color coded version
arguments
x : a h x w task matrix
returns
a h x w x 3 matrix with colors instead of numbers
"""
y = np.zeros((*x.shape, 3))
y[x < 0, :] = np.array([112, 128, 144])
y[x > 9, :] = np.array([255, 248, 220])
for i, c in enumerate(cmap_lookup):
y[x == i, :] = c
return y
def draw_one(x, k=20):
"""
Create a PIL image from a task matrix, the task will be
drawn using the default color coding with grid lines
arguments
x : a task matrix
    k = 20 : an upscaling factor
returns
a PIL image
"""
img = Image.fromarray(cmap(x).astype(np.uint8)).resize((x.shape[1] * k, x.shape[0] * k), Image.NEAREST)
draw = ImageDraw.Draw(img)
for i in range(x.shape[0]):
draw.line((0, i * k, img.width, i * k), fill=(80, 80, 80), width=1)
for j in range(x.shape[1]):
draw.line((j * k, 0, j * k, img.height), fill=(80, 80, 80), width=1)
return img
def vcat_imgs(imgs, border=10):
"""
    Concatenate images horizontally (left to right), despite the function name
arguments:
imgs : an array of PIL images
border = 10 : the size of space between images
returns:
a PIL image
"""
h = max(img.height for img in imgs)
w = sum(img.width for img in imgs)
res_img = Image.new('RGB', (w + border * (len(imgs) - 1), h), color=(255, 255, 255))
offset = 0
for img in imgs:
res_img.paste(img, (offset, 0))
offset += img.width + border
return res_img
def plot_task(task):
n = len(task["train"]) + len(task["test"])
fig, axs = plt.subplots(2, n, figsize=(n * 4, 8))
plt.subplots_adjust(wspace=0, hspace=0)
fig_num = 0
def go(ax, title, x):
ax.imshow(draw_one(x), interpolation='nearest')
ax.set_title(title)
ax.set_yticks([])
ax.set_xticks([])
for i, t in enumerate(task["train"]):
go(axs[0][fig_num], f'Train-{i} in', t["input"])
go(axs[1][fig_num], f'Train-{i} out', t["output"])
fig_num += 1
for i, t in enumerate(task["test"]):
go(axs[0][fig_num], f'Test-{i} in', t["input"])
try:
go(axs[1][fig_num], f'Test-{i} out', t["output"])
        except KeyError:  # hidden test pairs have no 'output'
go(axs[1][fig_num], f'Test-{i} out', np.zeros_like(t["input"]))
fig_num += 1
plt.tight_layout()
plt.show()
def real_trace_param_automata(input, params, n_iter, n_hidden):
"""
Execute an automata and return all the intermediate states
arguments:
step_fn : transition rule function, should take two arguments `input` and `hidden_i`,
should return an output grid an a new hidden hidden grid
n_iter : num of iteration to perform
n_hidden: number of hidden grids, if set to 0 `hidden_i` will be set to None
laodbar = True: weather display loadbars
returns:
an array of tuples if output and hidden grids
"""
# hidden = np.zeros((n_hidden, *input.shape)) if n_hidden > 0 else None
#
# global_rules, ca_rules = params
#
# trace = [(input, hidden)]
#
# for rule in global_rules:
#
# output, hidden = apply_rule(input, hidden, rule)
# trace.append((output, hidden))
# input = output
#
# its = range(n_iter)
#
# for i_it in its:
# output, hidden = compute_parametrized_automata(input, hidden, ca_rules)
# trace.append((output, hidden))
#
# if (input.shape == output.shape) and (output == input).all():
# break
# input = output
hidden = np.zeros((n_hidden, *input.shape)) if n_hidden > 0 else None
global_rules, ca_rules, split_rule, merge_rule = params
grids = apply_split_rule(input, hidden, split_rule)
#print(grids[0][0])
for rule in global_rules:
for i, (inp, hid) in enumerate(grids):
if rule['macro_type'] == 'global_rule':
if rule['apply_to'] == 'all' or \
(rule['apply_to'] == 'index' and i == rule['apply_to_index']%len(grids) or
(rule['apply_to'] == 'last' and i == len(grids) - 1)):
grids[i] = apply_rule(inp, hid, rule)
elif rule['macro_type'] == 'global_interaction_rule':
grids = apply_interaction_rule(grids, rule)
#print(grids[0][0])
#1/0
for i, (input, hidden) in enumerate(grids):
for _ in range(n_iter):
output, hidden = compute_parametrized_automata(input, hidden, ca_rules)
if np.array_equal(input, output):
break
input = output
grids[i] = (output, hidden)
output = apply_merge_rule(grids, merge_rule, split_rule)
return output
def apply_interaction_rule(grids, rule):
if rule['type'] == 'align_pattern':
# index_from = rule['index_from'] % len(grids)
# index_to = rule['index_to'] % len(grids)
# allow_rotation = rule['allow_rotation']
if len(grids) > 5:
return grids
for index_from in range(len(grids)):
for index_to in range(index_from+1, len(grids)):
input_i = grids[index_from][0]
input_j = grids[index_to][0]
# print(np.max(input_i>0, axis=1))
# print(np.max(input_i>0, axis=1).shape)
# print(np.arange(input_i.shape[0]).shape)
#1/0
i_nonzero_rows = np.arange(input_i.shape[0])[np.max(input_i>0, axis=1)]
i_nonzero_columns = np.arange(input_i.shape[1])[np.max(input_i>0, axis=0)]
j_nonzero_rows = np.arange(input_j.shape[0])[np.max(input_j>0, axis=1)]
j_nonzero_columns = np.arange(input_j.shape[1])[np.max(input_j>0, axis=0)]
if i_nonzero_rows.shape[0] == 0 or i_nonzero_columns.shape[0] == 0 or \
j_nonzero_rows.shape[0] == 0 or j_nonzero_columns.shape[0] == 0:
continue
i_minrow = np.min(i_nonzero_rows)
i_mincol = np.min(i_nonzero_columns)
i_maxrow = np.max(i_nonzero_rows) + 1
i_maxcol = np.max(i_nonzero_columns) + 1
j_minrow = np.min(j_nonzero_rows)
j_mincol = np.min(j_nonzero_columns)
j_maxrow = np.max(j_nonzero_rows) + 1
j_maxcol = np.max(j_nonzero_columns) + 1
figure_to_align = input_i[i_minrow:i_maxrow, i_mincol:i_maxcol]
figure_target = input_j[j_minrow:j_maxrow, j_mincol:j_maxcol]
best_fit = 0
best_i_fit, best_j_fit = -1, -1
#print(figure_to_align)
#print(figure_target)
if figure_to_align.shape[0] < figure_target.shape[0] or figure_to_align.shape[1] < figure_target.shape[1]:
continue
#1/0
else:
for i_start in range((figure_to_align.shape[0] - figure_target.shape[0])+1):
for j_start in range((figure_to_align.shape[1] - figure_target.shape[1])+1):
fig_1 = figure_to_align[i_start:(i_start + figure_target.shape[0]), j_start:(j_start + figure_target.shape[1])]
if np.logical_and(np.logical_and(figure_target > 0, figure_target!=rule['allow_color']), figure_target != fig_1).any():
continue
fit = np.sum(figure_target==fig_1)
if fit > best_fit:
best_i_fit, best_j_fit = i_start, j_start
best_fit = fit
if best_fit == 0:
continue
imin = j_minrow-best_i_fit
imax = j_minrow-best_i_fit + figure_to_align.shape[0]
jmin = j_mincol - best_j_fit
jmax = j_mincol - best_j_fit + figure_to_align.shape[1]
begin_i = max(imin, 0)
begin_j = max(jmin, 0)
end_i = min(imax, input_j.shape[0])
end_j = min(jmax, input_j.shape[1])
i_fig_begin = (begin_i-imin)
i_fig_end = figure_to_align.shape[0]-(imax-end_i)
j_fig_begin = (begin_j-jmin)
j_fig_end = figure_to_align.shape[1]-(jmax-end_j)
if rule['fill_with_color'] == 0:
input_j[begin_i:end_i, begin_j:end_j] = figure_to_align[i_fig_begin:i_fig_end, j_fig_begin:j_fig_end]
else:
for i, j in product(range(end_i-begin_i + 1), range(end_j-begin_j + 1)):
if input_j[begin_i + i, begin_j + j] == 0:
input_j[begin_i + i, begin_j + j] = rule['fill_with_color'] * (figure_to_align[i_fig_begin + i, j_fig_begin + j])
return grids
def trace_param_automata(input, params, n_iter, n_hidden):
# expected = real_trace_param_automata(input, params, n_iter, n_hidden)
#
# testcase = {'input': input, 'params': params}
# print(str(testcase).replace('\'', '"').replace('array(', '').replace(')', ''))
output = cpp_trace_param_automata(input, params, n_iter)
# if not np.array_equal(expected, output):
# print('cpp result is wrong')
# print('input:')
# print(input)
# print('expected:')
# print(expected)
# print('got:')
# print(output)
#
# diff = [[str(g) if e != g else '-' for e, g in zip(exp_row, got_row)]
# for exp_row, got_row in zip(expected, output)]
# diff_lines = [' '.join(line) for line in diff]
# diff_str = '[[' + ']\n ['.join(diff_lines)
#
# print('diff:')
# print(diff_str)
# print('rules')
# print(params)
#
# assert False
return [[output]]
# def vis_automata_trace(states, loadbar=False, prefix_image=None):
# """
# Create a video from an array of automata states
#
# arguments:
# states : array of automata steps, returned by `trace_automata()`
# loadbar = True: weather display loadbars
# prefix_image = None: image to add to the beginning of each frame
# returns
# a moviepy ImageSequenceClip
# """
# frames = []
# if loadbar:
# states = tqdm(states, desc='Frame')
# for i, (canvas, hidden) in enumerate(states):
#
# frame = []
# if prefix_image is not None:
# frame.append(prefix_image)
# frame.append(draw_one(canvas))
# frames.append(vcat_imgs(frame))
#
# return ImageSequenceClip(list(map(np.array, frames)), fps=10)
# def vis_automata_paramed_task(tasks, parameters, n_iter, n_hidden, vis_only_ix=None):
# """
# Visualize the automata steps during the task solution
# arguments:
# tasks : the task to be solved by the automata
# step_fn : automata transition function as passed to `trace_automata()`
# n_iter : number of iterations to perform
# n_hidden : number of hidden girds
# """
#
# n_vis = 0
#
# def go(task, n_vis, test=False):
#
# if vis_only_ix is not None and vis_only_ix != n_vis:
# return
# trace = trace_param_automata(task['input'], parameters, n_iter, n_hidden)
# if not test:
# vid = vis_automata_trace(trace, prefix_image=draw_one(task['output']))
# else:
# vid = vis_automata_trace(trace, prefix_image=draw_one(np.zeros_like(task['input'])))
#
# # display(display_vid(vid))
#
# for task in (tasks['train']):
# n_vis += 1
# go(task, n_vis)
#
# for task in (tasks['test']):
# n_vis += 1
# go(task, n_vis, True)
training_path = data_path / 'training'
evaluation_path = data_path / 'evaluation'
test_path = data_path / 'test'
training_tasks = sorted(os.listdir(training_path))
evaluation_tasks = sorted(os.listdir(evaluation_path))
test_tasks = sorted(os.listdir(test_path))
def load_data(p, phase=None):
"""
Load task data
"""
if phase in {'training', 'test', 'evaluation'}:
p = data_path / phase / p
task = json.loads(Path(p).read_text())
dict_vals_to_np = lambda x: {k: np.array(v) for k, v in x.items()}
assert set(task) == {'test', 'train'}
res = dict(test=[], train=[])
for t in task['train']:
assert set(t) == {'input', 'output'}
res['train'].append(dict_vals_to_np(t))
for t in task['test']:
if phase == 'test':
assert set(t) == {'input'}
else:
assert set(t) == {'input', 'output'}
res['test'].append(dict_vals_to_np(t))
return res
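# For example (assuming the competition data is available under data_path):
#   demo_task = load_data(training_tasks[0], phase='training')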
nbh = lambda x, i, j: {
(ip, jp) : x[i+ip, j+jp]
for ip, jp in product([1, -1, 0], repeat=2)
if 0 <= i+ip < x.shape[0] and 0 <= j+jp < x.shape[1] and (not (ip==0 and jp==0))
}
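# For example, with x = np.array([[1, 2], [3, 4]]):
#   nbh(x, 0, 0) -> {(1, 1): 4, (1, 0): 3, (0, 1): 2}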
def get_random_split_rule(all_colors, best_candidates={}, temp=0, config={}, r_type=None):
rule = {}
rule['type'] = random.choice(['nothing', 'color_figures', 'figures', 'macro_multiply'])
if rule['type'] in ['color_figures', 'figures']:
rule['sort'] = random.choice(['biggest', 'smallest'])
if rule['type'] == 'macro_multiply':
rule['k1'] = np.random.randint(config['mink1'], config['maxk1']+1)
rule['k2'] = np.random.randint(config['mink2'], config['maxk2']+1)
return rule
def get_random_merge_rule(all_colors, best_candidates={}, temp=0, config={}, r_type=None):
rule = {}
rule['type'] = random.choice(['cellwise_or', 'output_first', 'output_last'])
return rule
def apply_split_rule(input, hidden, split_rule):
if split_rule['type'] == 'nothing':
return [(input, hidden)]
if split_rule['type'] == 'macro_multiply':
ks = split_rule['k1'] * split_rule['k2']
grids = [(np.copy(input), np.copy(hidden)) for _ in range(ks)]
return grids
#split_rule['type'] = 'figures'
dif_c_edge = split_rule['type'] == 'figures'
communities = get_connectivity_info(input, ignore_black=True, edge_for_difcolors=dif_c_edge)
if len(communities) > 0:
if split_rule['sort'] == 'biggest':
communities = communities[::-1]
grids = [(np.zeros_like(input), np.zeros_like(hidden)) for _ in range(len(communities))]
for i in range(len(communities)):
for point in communities[i]:
grids[i][0][point] = input[point]
else:
grids = [(input, hidden)]
return grids
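# Illustrative behaviour (not executed here, since get_connectivity_info is
# defined further down): a 'color_figures' split copies each same-colour
# connected figure onto its own otherwise-empty grid, while 'macro_multiply'
# simply clones the whole grid k1 * k2 times.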
def apply_merge_rule(grids, merge_rule, split_rule):
if split_rule['type'] == 'macro_multiply':
shape_base = grids[0][0].shape
shapes = [arr[0].shape for arr in grids]
if not np.array([shape_base == sh for sh in shapes]).all():
return np.zeros((1, 1), dtype=np.int)
ks_1 = split_rule['k1']
ks_2 = split_rule['k2']
output = np.zeros((shape_base[0] * ks_1, shape_base[1] * ks_2), dtype=np.int8)
for k1 in range(ks_1):
for k2 in range(ks_2):
output[(k1*shape_base[0]):((k1+1) * shape_base[0]), (k2*shape_base[1]):((k2+1) * shape_base[1])] = grids[k1*ks_2 + k2][0]
return output
if merge_rule['type'] == 'cellwise_or':
output = np.zeros_like(grids[0][0])
for i in np.arange(len(grids))[::-1]:
if grids[i][0].shape == output.shape:
output[grids[i][0]>0] = grids[i][0][grids[i][0]>0]
return output
elif merge_rule['type'] == 'output_first':
output = grids[0][0]
elif merge_rule['type'] == 'output_last':
output = grids[-1][0]
return output
def get_random_ca_rule(all_colors, best_candidates={}, temp=0, config={}, r_type=None):
types_possible = \
[
'copy_color_by_direction',
'direct_check',
'indirect_check',
'nbh_check',
'corner_check',
'color_distribution',
]
ca_rules = []
best_candidates_items = list(best_candidates.items())
if len(best_candidates_items) > 0:
for best_score, best_candidates_score in best_candidates_items:
for best_c in best_candidates_score:
gl, ca, _, _ = best_c
ca_rules += [c['type'] for c in ca]
type_counts = dict(zip(types_possible, np.zeros(len(types_possible))))
rules, counts = np.unique(ca_rules, return_counts=True)
for i in range(rules.shape[0]):
type_counts[rules[i]] += counts[i]
counts = np.array(list(type_counts.values()))
if np.sum(counts) > 0:
counts /= np.sum(counts)
else:
counts = np.ones(counts.shape[0]) / counts.shape[0]
uniform = np.ones(counts.shape[0]) / counts.shape[0]
probs = temp * counts + (1 - temp) * uniform
else:
probs = np.ones(len(types_possible)) / len(types_possible)
colors = all_colors[1:]
type_probs = np.ones(len(types_possible)) / len(types_possible)
if r_type is None:
random_type = types_possible[np.random.choice(len(types_possible), p=probs)]
else:
random_type = r_type
def get_random_out_color():
possible_colors = config['possible_colors_out']
return np.random.choice(possible_colors)
def get_random_ignore_colors():
if config['possible_ignore_colors'].shape[0] > 0:
possible_colors = config['possible_ignore_colors']
return possible_colors[np.random.randint(2, size=possible_colors.shape[0]) == 1]
else:
return []
def get_random_all_colors():
return all_colors[np.random.randint(2, size=all_colors.shape[0]) == 1]
def get_random_colors():
return get_random_all_colors()
def get_random_all_color():
return np.random.choice(all_colors)
def get_random_color():
return get_random_all_color()
rule = {}
rule['type'] = random_type
rule['macro_type'] = 'ca_rule'
rule['ignore_colors'] = list(config['ignore_colors'])
if np.random.rand() < 0.5 and config['possible_ignore_colors'].shape[0]:
rule['ignore_colors'] += [random.choice(config['possible_ignore_colors'])]
if random_type == 'copy_color_by_direction':
rule['direction'] = random.choice(['everywhere'])
rule['copy_color'] = [get_random_out_color()]
rule['look_back_color'] = rule['copy_color'][0]
elif random_type == 'corner_check':
if np.random.rand() < 0.5:
rule['nbh_check_colors'] = [get_random_all_color()]
else:
rule['nbh_check_colors'] = list(np.unique([get_random_all_color(), get_random_all_color()]))
rule['nbh_check_out'] = get_random_out_color()
rule['ignore_colors'] = list(np.unique(rule['ignore_colors'] + [rule['nbh_check_out']]))
elif random_type == 'direct_check':
rule['nbh_check_sum'] = np.random.randint(4)
if np.random.rand() < 0.5:
rule['nbh_check_colors'] = [get_random_all_color()]
else:
rule['nbh_check_colors'] = list(np.unique([get_random_all_color(), get_random_all_color()]))
rule['nbh_check_out'] = get_random_out_color()
rule['ignore_colors'] = list(np.unique(rule['ignore_colors'] + [rule['nbh_check_out']]))
elif random_type == 'indirect_check':
rule['nbh_check_sum'] = np.random.randint(4)
if np.random.rand() < 0.5:
rule['nbh_check_colors'] = [get_random_all_color()]
else:
rule['nbh_check_colors'] = list(np.unique([get_random_all_color(), get_random_all_color()]))
rule['nbh_check_out'] = get_random_out_color()
rule['ignore_colors'] = list(np.unique(rule['ignore_colors'] + [rule['nbh_check_out']]))
elif random_type == 'nbh_check':
rule['nbh_check_sum'] = np.random.randint(8)
if np.random.rand() < 0.5:
rule['nbh_check_colors'] = [get_random_all_color()]
else:
rule['nbh_check_colors'] = list(np.unique([get_random_all_color(), get_random_all_color()]))
rule['nbh_check_out'] = get_random_out_color()
rule['ignore_colors'] = list(np.unique(rule['ignore_colors'] + [rule['nbh_check_out']]))
elif random_type == 'color_distribution':
rule['direction'] = random.choice(
['top', 'bottom', 'left', 'right', 'top_left', 'bottom_left', 'top_right', 'bottom_right'])
rule['check_in_empty'] = np.random.randint(2)
rule['color_out'] = get_random_out_color()
if rule['check_in_empty'] == 0:
rule['color_in'] = rule['color_out']
else:
rule['color_in'] = get_random_all_color()
rule['ignore_colors'] = list(np.unique(rule['ignore_colors'] + [rule['color_out']]))
return rule
def get_random_global_rule(all_colors, best_candidates={}, temp=0, config={}, r_type=None):
types_possible = \
[
'distribute_colors',
'unity',
'color_for_inners',
'map_color',
'draw_lines',
'draw_line_to',
'gravity',
'make_holes',
'distribute_from_border',
'align_pattern',
'rotate',
'flip'
]
if config['allow_make_smaller']:
types_possible += \
[
'crop_empty',
'crop_figure',
'split_by_H',
'split_by_W',
'reduce'
]
# if config['allow_make_bigger']:
# types_possible += \
# [
# 'macro_multiply_by',
# 'micro_multiply_by',
# 'macro_multiply_k',
# ]
gl_rules = []
best_candidates_items = list(best_candidates.items())
if len(best_candidates_items) > 0:
for best_score, best_candidates_score in best_candidates_items:
for best_c in best_candidates_score:
gl, ca, _, _ = best_c
gl_rules += [c['type'] for c in gl]
type_counts = dict(zip(types_possible, np.zeros(len(types_possible))))
rules, counts = np.unique(gl_rules, return_counts=True)
for i in range(rules.shape[0]):
type_counts[rules[i]] += counts[i]
counts = np.array(list(type_counts.values()))
if np.sum(counts) > 0:
counts /= np.sum(counts)
else:
counts = np.ones(counts.shape[0]) / counts.shape[0]
uniform = np.ones(counts.shape[0]) / counts.shape[0]
probs = temp * counts + (1 - temp) * uniform
else:
probs = np.ones(len(types_possible)) / len(types_possible)
colors = all_colors[1:]
type_probs = np.ones(len(types_possible)) / len(types_possible)
if r_type is None:
random_type = types_possible[np.random.choice(len(types_possible), p=probs)]
else:
random_type = r_type
def get_random_all_colors():
return all_colors[np.random.randint(2, size=all_colors.shape[0]) == 1]
def get_random_colors():
return all_colors[np.random.randint(2, size=all_colors.shape[0]) == 1]
def get_random_all_color():
return np.random.choice(all_colors)
def get_random_color():
return get_random_all_color()
def get_random_out_color():
possible_colors = config['possible_colors_out']
return np.random.choice(possible_colors)
rule = {}
rule['type'] = random_type
rule['macro_type'] = 'global_rule'
rule['apply_to'] = random.choice(['all', 'index'])
if np.random.rand()<0.2:
rule['apply_to'] = 'last'
if rule['apply_to'] == 'index':
rule['apply_to_index'] = np.random.choice(10)
if random_type == 'macro_multiply_k':
rule['k'] = (np.random.randint(1, 4), np.random.randint(1, 4))
elif random_type == 'flip':
rule['how'] = random.choice(['ver', 'hor'])
elif random_type == 'rotate':
rule['rotations_count'] = np.random.randint(1, 4)
elif random_type == 'micro_multiply_by':
rule['how_many'] = random.choice([2, 3, 4, 5, 'size'])
elif random_type == 'macro_multiply_by':
rule['how_many'] = random.choice(['both', 'hor', 'ver'])
rule['rotates'] = [np.random.randint(1) for _ in range(4)]
rule['flips'] = [random.choice(['hor', 'ver', 'horver', 'no']) for _ in range(4)]
elif random_type == 'distribute_from_border':
rule['colors'] = list(np.unique([get_random_out_color(), get_random_all_color()]))
elif random_type == 'draw_lines':
rule['direction'] = random.choice(['everywhere', 'horizontal', 'vertical', 'horver', 'diagonal'])
# 'top', 'bottom', 'left', 'right',
# 'top_left', 'bottom_left', 'top_right', 'bottom_right'])
rule['not_stop_by_color'] = 0 # get_random_all_color()
rule['start_by_color'] = get_random_all_color()
rule['with_color'] = get_random_out_color()
elif random_type == 'reduce':
rule['skip_color'] = get_random_all_color()
elif random_type == 'draw_line_to':
#rule['direction_type'] = random.choice(['border'])
rule['direction_color'] = get_random_all_color()
rule['not_stop_by_color'] = 0
if np.random.rand() < 0.5:
rule['not_stop_by_color_and_skip'] = get_random_all_color()
else:
rule['not_stop_by_color_and_skip'] = 0
rule['start_by_color'] = get_random_all_color()
rule['with_color'] = get_random_out_color()
elif random_type == 'distribute_colors':
rule['colors'] = list(np.unique([get_random_out_color(), get_random_all_color()]))
rule['horizontally'] = np.random.randint(2)
rule['vertically'] = np.random.randint(2)
rule['intersect'] = get_random_out_color()
elif random_type == 'color_for_inners':
rule['color_out'] = get_random_out_color()
elif random_type == 'crop_figure':
rule['mode'] = random.choice(['smallest', 'biggest'])
rule['dif_c_edge'] = random.choice([True, False])
elif random_type == 'unity':
rule['mode'] = random.choice(['diagonal', 'horizontal', 'vertical', 'horver'])
# rule['inner'] = np.random.choice(2)
rule['ignore_colors'] = [0]
if np.random.rand() < 0.5:
rule['ignore_colors'] += [get_random_all_color()]
rule['with_color'] = random.choice([get_random_out_color(), 0])
elif random_type == 'map_color':
rule['color_in'] = get_random_all_color()
rule['color_out'] = get_random_out_color()
elif random_type == 'gravity':
rule['gravity_type'] = random.choice(['figures', 'cells'])
rule['steps_limit'] = np.random.choice(2)
rule['look_at_what_to_move'] = np.random.choice(2)
if rule['look_at_what_to_move'] == 1:
rule['color_what'] = get_random_out_color()
rule['direction_type'] = random.choice(['border', 'color'])
if rule['direction_type'] == 'border':
rule['direction_border'] = random.choice(['top', 'bottom', 'left', 'right'])
else:
rule['direction_color'] = get_random_color()
elif random_type == 'split_by_H' or random_type == 'split_by_W':
rule['merge_rule'] = random.choice(['and', 'equal', 'or', 'xor'])
elif random_type == 'align_pattern':
rule['macro_type'] = 'global_interaction_rule'
# rule['allow_rotation'] = False
rule['allow_color'] = get_random_all_color()
rule['fill_with_color'] = 0 #random.choice([0, get_random_all_color()])
return rule
def get_task_metadata(task):
colors = []
shapes_input = [[], []]
shapes_output = [[], []]
for part in ['train']:
for uni_task in task[part]:
inp = uni_task['input']
colors += list(np.unique(inp))
out = uni_task['output']
colors += list(np.unique(out))
shapes_input[0].append(inp.shape[0])
shapes_input[1].append(inp.shape[1])
shapes_output[0].append(out.shape[0])
shapes_output[1].append(out.shape[1])
all_colors = np.unique(colors)
min_k1 = int(np.floor(np.min(np.array(shapes_output[0])/np.array(shapes_input[0]))))
min_k2 = int(np.floor(np.min(np.array(shapes_output[1])/np.array(shapes_input[1]))))
max_k1 = int(np.ceil(np.max(np.array(shapes_output[0])/np.array(shapes_input[0]))))
max_k2 = int(np.ceil(np.max(np.array(shapes_output[1])/np.array(shapes_input[1]))))
max_shape = np.max([shapes_input])
config = {}
config['mink1'] = max(1, min(min(min_k1, 30//max_shape), 3))
config['mink2'] = max(1, min(min(min_k2, 30//max_shape), 3))
config['maxk1'] = max(1, min(min(max_k1, 30//max_shape), 3))
config['maxk2'] = max(1, min(min(max_k2, 30//max_shape), 3))
config['allow_make_smaller'] = False
config['allow_make_bigger'] = False
for uni_task in task['train']:
if uni_task['input'].shape[0] > uni_task['output'].shape[0] or \
uni_task['input'].shape[1] > uni_task['output'].shape[1]:
config['allow_make_smaller'] = True
if uni_task['input'].shape[0] < uni_task['output'].shape[0] or \
uni_task['input'].shape[1] < uni_task['output'].shape[1]:
config['allow_make_bigger'] = True
colors_out = []
changed_colors = []
inp_colors = []
for uni_task in task['train']:
inp = uni_task['input']
out = uni_task['output']
for i in range(min(inp.shape[0], out.shape[0])):
for j in range(min(inp.shape[1], out.shape[1])):
inp_colors.append(inp[i, j])
if out[i, j] != inp[i, j]:
colors_out.append(out[i, j])
changed_colors.append(inp[i, j])
inp_colors = np.unique(inp_colors)
changed_colors = np.unique(changed_colors)
config['ignore_colors'] = [c for c in inp_colors if not c in changed_colors]
config['possible_ignore_colors'] = np.array([c for c in all_colors if not c in config['ignore_colors']])
if len(colors_out) == 0:
colors_out = [0]
config['possible_colors_out'] = np.unique(colors_out)
return all_colors, config
def compute_parametrized_automata(input, hidden_i, rules):
output = np.zeros_like(input, dtype=int)
hidden_o = np.copy(hidden_i)
for i, j in product(range(input.shape[0]), range(input.shape[1])):
i_c = input[i, j]
i_nbh = nbh(input, i, j)
        # cells adjacent to the current one
i_direct_nbh = {k: v for k, v in i_nbh.items() if k in {(1, 0), (-1, 0), (0, 1), (0, -1)}}
i_indirect_nbh = {k: v for k, v in i_nbh.items() if k in {(1, 1), (-1, -1), (-1, 1), (1, -1)}}
is_top_b, is_bottom_b = i == 0, i == input.shape[0] - 1
is_left_b, is_right_b = j == 0, j == input.shape[1] - 1
is_b = is_top_b or is_bottom_b or is_left_b or is_right_b
if i_c > 0:
output[i, j] = i_c
for rule in rules:
if i_c in rule['ignore_colors']:
continue
if rule['type'] == 'copy_color_by_direction':
if rule['direction'] == 'bottom' or rule['direction'] == 'everywhere':
if not is_top_b and input[i - 1, j] in rule['copy_color'] and \
(i == 1 or input[i - 2, j] == rule['look_back_color']):
output[i, j] = input[i - 1, j]
break
if rule['direction'] == 'top' or rule['direction'] == 'everywhere':
if not is_bottom_b and input[i + 1, j] in rule['copy_color'] and \
(i == input.shape[0] - 2 or input[i + 2, j] == rule['look_back_color']):
output[i, j] = input[i + 1, j]
break
if rule['direction'] == 'right' or rule['direction'] == 'everywhere':
if not is_left_b and input[i, j - 1] in rule['copy_color'] and \
(j == 1 or input[i, j - 2] == rule['look_back_color']):
output[i, j] = input[i, j - 1]
break
if rule['direction'] == 'left' or rule['direction'] == 'everywhere':
if not is_right_b and input[i, j + 1] in rule['copy_color'] and \
(j == input.shape[1] - 2 or input[i, j + 2] == rule['look_back_color']):
output[i, j] = input[i, j + 1]
break
elif rule['type'] == 'corner_check':
color_nbh = rule['nbh_check_colors']
sum_nbh = 3
out_nbh = rule['nbh_check_out']
i_uplecorner_nbh = {k: v for k, v in i_nbh.items() if k in {(-1, -1), (-1, 0), (0, -1)}}
i_upricorner_nbh = {k: v for k, v in i_nbh.items() if k in {(-1, 1), (-1, 0), (0, 1)}}
i_dolecorner_nbh = {k: v for k, v in i_nbh.items() if k in {(1, -1), (1, 0), (0, -1)}}
i_doricorner_nbh = {k: v for k, v in i_nbh.items() if k in {(1, 1), (1, 0), (0, 1)}}
if sum(1 for v in i_nbh.values() if v in color_nbh) < 3:
continue
did_something = False
for corner_idx in [i_uplecorner_nbh, i_upricorner_nbh, i_dolecorner_nbh, i_doricorner_nbh]:
for color in color_nbh:
if sum(1 for v in corner_idx.values() if v == color) == sum_nbh:
output[i, j] = out_nbh
did_something = True
break
if did_something:
break
if did_something:
break
elif rule['type'] == 'nbh_check':
color_nbh = rule['nbh_check_colors']
sum_nbh = rule['nbh_check_sum']
out_nbh = rule['nbh_check_out']
proper_nbhs = i_nbh.values()
if sum(1 for v in proper_nbhs if v in color_nbh) > sum_nbh:
output[i, j] = out_nbh
break
elif rule['type'] == 'direct_check':
color_nbh = rule['nbh_check_colors']
sum_nbh = rule['nbh_check_sum']
out_nbh = rule['nbh_check_out']
proper_nbhs = i_direct_nbh.values()
if sum(1 for v in proper_nbhs if v in color_nbh) > sum_nbh:
output[i, j] = out_nbh
break
elif rule['type'] == 'indirect_check':
color_nbh = rule['nbh_check_colors']
sum_nbh = rule['nbh_check_sum']
out_nbh = rule['nbh_check_out']
proper_nbhs = i_indirect_nbh.values()
if sum(1 for v in proper_nbhs if v in color_nbh) > sum_nbh:
output[i, j] = out_nbh
break
elif rule['type'] == 'color_distribution':
directions = ['top', 'bottom', 'left', 'right', 'top_left', 'bottom_left', 'top_right', 'bottom_right']
not_border_conditions = \
[
not is_top_b,
not is_bottom_b,
not is_left_b,
not is_right_b,
not is_top_b and not is_left_b,
not is_bottom_b and not is_left_b,
not is_top_b and not is_right_b,
not is_bottom_b and not is_right_b
]
index_from = \
[
(i - 1, j),
(i + 1, j),
(i, j - 1),
(i, j + 1),
(i - 1, j - 1),
(i + 1, j - 1),
(i - 1, j + 1),
(i + 1, j + 1)
]
did_something = False
for i_dir, direction in enumerate(directions):
if rule['direction'] == direction:
if not_border_conditions[i_dir]:
if (rule['check_in_empty'] == 1 and input[index_from[i_dir]] > 0) or \
(rule['check_in_empty'] == 0 and input[index_from[i_dir]] == rule['color_in']):
output[i, j] = rule['color_out']
did_something = True
break
if did_something:
break
return output, hidden_o
def get_connectivity_info(color: np.array, ignore_black = False, von_neumann_only = False, edge_for_difcolors = False):
    # UnionFind structure allows us to detect all connected areas in near-linear time.
class UnionFind:
def __init__(self) -> None:
self.area = np.ones(color.size)
self.parent = np.arange(color.size)
def find(self, x: int) -> int:
if self.parent[x] != x:
self.parent[x] = self.find(self.parent[x])
return self.parent[x]
def union(self, u: int, v: int) -> None:
root_u, root_v = self.find(u), self.find(v)
if root_u != root_v:
area_u, area_v = self.area[root_u], self.area[root_v]
if area_u < area_v:
root_u, root_v = root_v, root_u
self.parent[root_v] = root_u
self.area[root_u] = area_u + area_v
union_find = UnionFind()
neighbours = [[-1, 0], [0, -1], [1, 0], [0, 1]]
if not von_neumann_only:
neighbours.extend([[-1, -1], [1, -1], [1, 1], [-1, 1]])
nrows, ncols = color.shape
for i in range(nrows):
for j in range(ncols):
for s, t in neighbours:
u, v = i + s, j + t
if u >= 0 and u < nrows and v >= 0 and v < ncols and \
(color[u, v] == color[i, j] or (edge_for_difcolors and (color[u, v]>0) == (color[i, j]>0))):
union_find.union(u * ncols + v, i * ncols + j)
    # for every cell (optionally skipping black), record the connected component it belongs to
communities = defaultdict(list)
for i, j in product(range(nrows), range(ncols)):
if not ignore_black or color[i, j] > 0:
communities[union_find.find(i * ncols + j)].append((i, j))
# the result is always sorted for consistency
communities = sorted(communities.values(), key = lambda area: (len(area), area))
return communities
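# Quick sanity check on a made-up 2x2 grid: the two diagonal cells of colour 1
# form a single community because diagonal neighbours are included by default,
# and the black cells are dropped when ignore_black=True.
assert get_connectivity_info(np.array([[1, 0], [0, 1]]), ignore_black=True) == [[(0, 0), (1, 1)]]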
def get_graph_communities(im, ignore_black=False):
G = nx.Graph()
I, J = im.shape
for i in range(I):
for j in range(J):
if ignore_black and im[i, j] == 0:
continue
G.add_node((i, j))
edges = []
if j >= 1:
if im[i, j] == im[i, j - 1]:
edges.append(((i, j), (i, j - 1)))
if j < J - 1:
if im[i, j] == im[i, j + 1]:
edges.append(((i, j), (i, j + 1)))
if i >= 1:
if im[i, j] == im[i - 1, j]:
edges.append(((i, j), (i - 1, j)))
if j >= 1:
if im[i, j] == im[i - 1, j - 1]:
edges.append(((i, j), (i - 1, j - 1)))
if j < J - 1:
if im[i, j] == im[i - 1, j + 1]:
edges.append(((i, j), (i - 1, j + 1)))
if i < I - 1:
if im[i, j] == im[i + 1, j]:
edges.append(((i, j), (i + 1, j)))
if j >= 1:
if im[i, j] == im[i + 1, j - 1]:
edges.append(((i, j), (i + 1, j - 1)))
if j < J - 1:
if im[i, j] == im[i + 1, j + 1]:
edges.append(((i, j), (i + 1, j + 1)))
G.add_edges_from(edges)
communities = list(nx.community.k_clique_communities(G, 2))
communities = [list(com) for com in communities]
for i in range(I):
for j in range(J):
i_nbh = nbh(im, i, j)
if sum(1 for v in i_nbh.values() if v == im[i, j]) == 0:
communities.append([(i, j)])
return communities
def apply_rule(input, hidden_i, rule):
output = np.zeros_like(input, dtype=int)
# print(type(input))
# print(input.shape)
hidden = np.zeros_like(input)
output[:, :] = input[:, :]
if rule['type'] == 'macro_multiply_k':
output = np.tile(output, rule['k'])
elif rule['type'] == 'flip':
if rule['how'] == 'ver':
output = output[::-1, :]
elif rule['how'] == 'hor':
output = output[:, ::-1]
elif rule['type'] == 'reduce':
skip_row = np.zeros(input.shape[0])
for i in range(1, input.shape[0]):
skip_row[i] = (input[i] == input[i-1]).all() or (input[i] == rule['skip_color']).all()
if (input[0] == rule['skip_color']).all():
skip_row[0] = 1
if np.sum(skip_row==0)>0:
output = input[skip_row == 0]
skip_column = np.zeros(input.shape[1])
for i in range(1, input.shape[1]):
skip_column[i] = (input[:, i] == input[:, i-1]).all() or (input[:, i] == rule['skip_color']).all()
if (input[:, 0] == rule['skip_color']).all():
skip_column[0] = 1
if np.sum(skip_column==0)>0:
output = output[:, skip_column == 0]
elif rule['type'] == 'rotate':
output = np.rot90(output, rule['rotations_count'])
elif rule['type'] == 'micro_multiply_by':
if rule['how_many'] == 'size':
k = output.shape[0]
else:
k = rule['how_many']
output = np.repeat(output, k, axis=0)
output = np.repeat(output, k, axis=1)
elif rule['type'] == 'macro_multiply_by':
if rule['how_many'] == 'both':
k = (2, 2)
elif rule['how_many'] == 'hor':
k = (1, 2)
elif rule['how_many'] == 'ver':
k = (2, 1)
output = np.tile(output, k)
if input.shape[0] == input.shape[1]:
for i in range(k[0]):
for j in range(k[1]):
sub = output[i * input.shape[0]: (i + 1) * input.shape[0],
j * input.shape[1]: (j + 1) * input.shape[1]]
sub_rotated = np.rot90(sub, rule['rotates'][i * 2 + j])
output[i * input.shape[0]: (i + 1) * input.shape[0],
j * input.shape[1]: (j + 1) * input.shape[1]] = sub_rotated
for i in range(k[0]):
for j in range(k[1]):
sub = output[i * input.shape[0]: (i + 1) * input.shape[0], j * input.shape[1]: (j + 1) * input.shape[1]]
if 'ver' in rule['flips'][i * 2 + j]:
sub = sub[::-1, :]
if 'hor' in rule['flips'][i * 2 + j]:
sub = sub[:, ::-1]
output[i * input.shape[0]: (i + 1) * input.shape[0], j * input.shape[1]: (j + 1) * input.shape[1]] = sub
elif rule['type'] == 'distribute_from_border':
hidden = np.zeros_like(input)
for i in range(1, input.shape[0] - 1):
if output[i, 0] in rule['colors']:
if not output[i, input.shape[1] - 1] in rule['colors'] or output[i, input.shape[1] - 1] == output[i, 0]:
output[i] = output[i, 0]
for j in range(1, input.shape[1] - 1):
if output[0, j] in rule['colors']:
if not output[input.shape[0] - 1, j] in rule['colors'] or output[input.shape[0] - 1, j] == output[0, j]:
output[:, j] = output[0, j]
elif rule['type'] == 'color_for_inners':
hidden = np.zeros_like(input)
changed = 1
while changed == 1:
changed = 0
for i, j in product(range(input.shape[0]), range(input.shape[1])):
i_c = input[i, j]
if i_c > 0 or hidden[i, j] == 1:
continue
if i == 0 or i == input.shape[0] - 1 or j == 0 or j == input.shape[1] - 1:
hidden[i, j] = 1
changed = 1
continue
i_nbh = nbh(hidden, i, j)
                # cells adjacent to the current one
i_direct_nbh = {k: v for k, v in i_nbh.items() if k in {(1, 0), (-1, 0), (0, 1), (0, -1)}}
if sum(1 for v in i_direct_nbh.values() if v == 1) > 0:
hidden[i, j] = 1
changed = 1
output[((hidden == 0).astype(np.int) * (input == 0).astype(np.int)) == 1] = rule['color_out']
hidden = np.copy(hidden)
elif rule['type'] == 'draw_lines':
hidden = np.zeros_like(input)
if rule['direction'] == 'everywhere':
directions = ['top', 'bottom', 'left', 'right', 'top_left', 'bottom_left', 'top_right', 'bottom_right']
elif rule['direction'] == 'horizontal':
directions = ['left', 'right']
elif rule['direction'] == 'vertical':
directions = ['top', 'bottom']
elif rule['direction'] == 'horver':
directions = ['top', 'bottom', 'left', 'right']
elif rule['direction'] == 'diagonal':
directions = ['top_left', 'bottom_left', 'top_right', 'bottom_right']
else:
directions = [rule['direction']]
possible_directions = ['top', 'bottom', 'left', 'right',
'top_left', 'bottom_left', 'top_right', 'bottom_right']
index_change = \
[
[-1, 0],
[1, 0],
(0, -1),
(0, 1),
(-1, -1),
(+1, -1),
(-1, +1),
(+1, +1)
]
for i_dir, direction in enumerate(possible_directions):
if direction in directions:
idx_ch = index_change[i_dir]
for i in range(input.shape[0]):
for j in range(input.shape[1]):
if input[i, j] == rule['start_by_color']:
tmp_i = i + idx_ch[0]
tmp_j = j + idx_ch[1]
while 0 <= tmp_i < input.shape[0] and \
0 <= tmp_j < input.shape[1] and \
input[tmp_i, tmp_j] == rule['not_stop_by_color']:
output[tmp_i, tmp_j] = rule['with_color']
tmp_i += idx_ch[0]
tmp_j += idx_ch[1]
elif rule['type'] == 'draw_line_to':
hidden = np.zeros_like(input)
index_change = \
[
[-1, 0],
[1, 0],
(0, -1),
(0, 1),
]
for i, j in product(range(input.shape[0]), range(input.shape[1])):
if input[i, j] != rule['start_by_color']:
continue
number_0 = np.sum(output[:i] == rule['direction_color'])
number_1 = np.sum(output[(i + 1):] == rule['direction_color'])
number_2 = np.sum(output[:, :j] == rule['direction_color'])
number_3 = np.sum(output[:, (j + 1):] == rule['direction_color'])
i_dir = np.argmax([number_0, number_1, number_2, number_3])
# print([number_0, number_1, number_2, number_3])
# 1/0
idx_ch = index_change[i_dir]
tmp_i = i + idx_ch[0]
tmp_j = j + idx_ch[1]
while 0 <= tmp_i < input.shape[0] and \
0 <= tmp_j < input.shape[1] and \
(input[tmp_i, tmp_j] in [rule['not_stop_by_color'], rule['not_stop_by_color_and_skip']]):
skip_color = rule['not_stop_by_color_and_skip']
if skip_color == 0 or input[tmp_i, tmp_j] != skip_color:
output[tmp_i, tmp_j] = rule['with_color']
tmp_i += idx_ch[0]
tmp_j += idx_ch[1]
elif rule['type'] == 'distribute_colors':
non_zero_rows = []
non_zero_columns = []
color_for_row = np.zeros(input.shape[0])
color_for_column = np.zeros(input.shape[1])
for i in range(input.shape[0]):
row = input[i]
colors, counts = np.unique(row, return_counts=True)
good_colors = np.array([c in rule['colors'] for c in colors])
if not good_colors.any():
continue
colors = colors[good_colors]
counts = counts[good_colors]
best_color = colors[np.argmax(counts)]
color_for_row[i] = best_color
non_zero_rows.append(i)
for j in range(input.shape[1]):
row = input[:, j]
colors, counts = np.unique(row, return_counts=True)
good_colors = np.array([c in rule['colors'] for c in colors])
if not good_colors.any():
continue
colors = colors[good_colors]
counts = counts[good_colors]
best_color = colors[np.argmax(counts)]
color_for_column[j] = best_color
non_zero_columns.append(j)
if rule['horizontally'] == 1:
for i in non_zero_rows:
output[i] = color_for_row[i]
if rule['vertically'] == 1:
for j in non_zero_columns:
output[:, j] = color_for_column[j]
for i in non_zero_rows:
for j in non_zero_columns:
if input[i, j] == 0:
output[i, j] = rule['intersect']
hidden = np.copy(hidden_i)
elif rule['type'] == 'unity':
hidden = np.copy(hidden_i)
if rule['mode'] == 'vertical':
for j in range(input.shape[1]):
last_color_now = np.zeros(10, dtype=np.int) - 1
for i in range(input.shape[0]):
if not input[i, j] in rule['ignore_colors'] and last_color_now[input[i, j]] >= 0:
if rule['with_color'] == 0:
output[(last_color_now[input[i, j]] + 1):i, j] = input[i, j]
else:
output[(last_color_now[input[i, j]] + 1):i, j] = rule['with_color']
last_color_now[input[i, j]] = i
elif not input[i, j] in rule['ignore_colors']:
last_color_now[input[i, j]] = i
elif rule['mode'] == 'horizontal':
for i in range(input.shape[0]):
last_color_now = np.zeros(10, dtype=np.int) - 1
for j in range(input.shape[1]):
if not input[i, j] in rule['ignore_colors'] and last_color_now[input[i, j]] >= 0:
if rule['with_color'] == 0:
output[i, (last_color_now[input[i, j]] + 1):j] = input[i, j]
else:
output[i, (last_color_now[input[i, j]] + 1):j] = rule['with_color']
last_color_now[input[i, j]] = j
elif not input[i, j] in rule['ignore_colors']:
last_color_now[input[i, j]] = j
elif rule['mode'] == 'horver':
for j in range(input.shape[1]):
last_color_now = np.zeros(10, dtype=np.int) - 1
for i in range(input.shape[0]):
if not input[i, j] in rule['ignore_colors'] and last_color_now[input[i, j]] >= 0:
if rule['with_color'] == 0:
output[(last_color_now[input[i, j]] + 1):i, j] = input[i, j]
else:
output[(last_color_now[input[i, j]] + 1):i, j] = rule['with_color']
last_color_now[input[i, j]] = i
elif not input[i, j] in rule['ignore_colors']:
last_color_now[input[i, j]] = i
for i in range(input.shape[0]):
last_color_now = np.zeros(10, dtype=np.int) - 1
for j in range(input.shape[1]):
if not input[i, j] in rule['ignore_colors'] and last_color_now[input[i, j]] >= 0:
if rule['with_color'] == 0:
output[i, (last_color_now[input[i, j]] + 1):j] = input[i, j]
else:
output[i, (last_color_now[input[i, j]] + 1):j] = rule['with_color']
last_color_now[input[i, j]] = j
elif not input[i, j] in rule['ignore_colors']:
last_color_now[input[i, j]] = j
elif rule['mode'] == 'diagonal':
for diag_id in range(-input.shape[0] - 1, input.shape[1] + 1):
last_color_now_x = np.zeros(10, dtype=np.int) - 1
last_color_now_y = np.zeros(10, dtype=np.int) - 1
for i, j in zip(np.arange(input.shape[0]), diag_id + np.arange(input.shape[0])):
if 0 <= i < input.shape[0] and 0 <= j < input.shape[1]:
if not input[i, j] in rule['ignore_colors'] and last_color_now_x[input[i, j]] >= 0:
if rule['with_color'] == 0:
output[np.arange(last_color_now_x[input[i, j]] + 1, i), np.arange(
last_color_now_y[input[i, j]] + 1, j)] = input[i, j]
else:
output[np.arange(last_color_now_x[input[i, j]] + 1, i), np.arange(
last_color_now_y[input[i, j]] + 1, j)] = rule[
'with_color']
last_color_now_x[input[i, j]] = i
last_color_now_y[input[i, j]] = j
elif not input[i, j] in rule['ignore_colors']:
last_color_now_x[input[i, j]] = i
last_color_now_y[input[i, j]] = j
reflected_input = input[:, ::-1]
output = output[:, ::-1]
for diag_id in range(-reflected_input.shape[0] - 1, reflected_input.shape[1] + 1):
last_color_now_x = np.zeros(10, dtype=np.int) - 1
last_color_now_y = np.zeros(10, dtype=np.int) - 1
for i, j in zip(np.arange(reflected_input.shape[0]), diag_id + np.arange(reflected_input.shape[0])):
if 0 <= i < reflected_input.shape[0] and 0 <= j < reflected_input.shape[1]:
if not reflected_input[i, j] in rule['ignore_colors'] and last_color_now_x[
reflected_input[i, j]] >= 0:
if rule['with_color'] == 0:
output[np.arange(last_color_now_x[reflected_input[i, j]] + 1, i), np.arange(
last_color_now_y[reflected_input[i, j]] + 1, j)] = reflected_input[i, j]
else:
output[np.arange(last_color_now_x[reflected_input[i, j]] + 1, i), np.arange(
last_color_now_y[reflected_input[i, j]] + 1, j)] = rule[
'with_color']
last_color_now_x[reflected_input[i, j]] = i
last_color_now_y[reflected_input[i, j]] = j
elif not reflected_input[i, j] in rule['ignore_colors']:
last_color_now_x[reflected_input[i, j]] = i
last_color_now_y[reflected_input[i, j]] = j
output = output[:, ::-1]
elif rule['type'] == 'split_by_H':
hidden = np.copy(hidden_i)
if output.shape[0] >= 2:
part1 = output[:int(np.floor(output.shape[0] / 2))]
part2 = output[int(np.ceil(output.shape[0] / 2)):]
output = np.zeros_like(part1)
if rule['merge_rule'] == 'or':
output[part1 > 0] = part1[part1 > 0]
output[part2 > 0] = part2[part2 > 0]
elif rule['merge_rule'] == 'equal':
idx = np.logical_and(np.logical_and(part1 > 0, part2 > 0), part1 == part2)
output[idx] = part1[idx]
elif rule['merge_rule'] == 'and':
idx = np.logical_and(part1 > 0, part2 > 0)
output[idx] = part1[idx]
elif rule['merge_rule'] == 'xor':
idx = np.logical_xor(part1 > 0, part2 > 0)
output[idx] = part1[idx]
elif rule['type'] == 'split_by_W':
hidden = np.copy(hidden_i)
if output.shape[1] >= 2:
part1 = output[:, :int(np.floor(output.shape[1] / 2))]
part2 = output[:, int(np.ceil(output.shape[1] / 2)):]
output = np.zeros_like(part1)
if rule['merge_rule'] == 'or':
output[part1 > 0] = part1[part1 > 0]
output[part2 > 0] = part2[part2 > 0]
elif rule['merge_rule'] == 'equal':
idx = np.logical_and(np.logical_and(part1 > 0, part2 > 0), part1 == part2)
output[idx] = part1[idx]
elif rule['merge_rule'] == 'and':
idx = np.logical_and(part1 > 0, part2 > 0)
output[idx] = part1[idx]
elif rule['merge_rule'] == 'xor':
idx = np.logical_xor(part1 > 0, part2 > 0)
output[idx] = part1[idx]
elif rule['type'] == 'map_color':
hidden = np.copy(hidden_i)
output[output == rule['color_in']] = rule['color_out']
elif rule['type'] == 'crop_empty':
hidden = np.copy(hidden_i)
nonzerosi = np.max((output != 0).astype(np.int), axis=1)
nonzerosj = np.max((output != 0).astype(np.int), axis=0)
# print(nonzerosi)
# print(nonzerosj)
if np.max(nonzerosi) == 0 or np.max(nonzerosj) == 0:
output = output * 0
else:
mini = np.min(np.arange(output.shape[0])[nonzerosi == 1])
maxi = np.max(np.arange(output.shape[0])[nonzerosi == 1])
minj = np.min(np.arange(output.shape[1])[nonzerosj == 1])
maxj = np.max(np.arange(output.shape[1])[nonzerosj == 1])
output = output[mini:(maxi + 1), minj:(maxj + 1)]
elif rule['type'] == 'crop_figure':
hidden = np.copy(hidden_i)
communities = get_connectivity_info(output, ignore_black=True, edge_for_difcolors=rule['dif_c_edge'])
if len(communities) == 0:
output = np.zeros_like(output)
else:
if rule['mode'] == 'biggest':
biggest = list(communities[np.argmax([len(list(com)) for com in communities])])
else:
biggest = list(communities[np.argmin([len(list(com)) for com in communities])])
biggest = np.array(biggest)
min_bx = np.min(biggest[:, 0])
min_by = np.min(biggest[:, 1])
biggest[:, 0] -= min_bx
biggest[:, 1] -= min_by
output = np.zeros((np.max(biggest[:, 0]) + 1, np.max(biggest[:, 1]) + 1), dtype=np.int)
for i in range(biggest.shape[0]):
output[tuple(biggest[i])] = input[(min_bx + biggest[i][0], min_by + biggest[i][1])]
elif rule['type'] == 'make_holes':
hidden = np.copy(hidden_i)
for i in range(output.shape[0]):
for j in range(output.shape[1]):
i_nbh = nbh(output, i, j)
proper_nbhs = i_nbh.values()
for color in range(1, 10):
if sum(1 for v in proper_nbhs if v == color) == 8:
output[i, j] = 0
break
elif rule['type'] == 'gravity':
changed_smth = 1
hidden = np.copy(hidden_i)
im = output
if rule['gravity_type'] == 'figures':
communities = get_connectivity_info(im, ignore_black=True)
else:
communities = []
for i in range(output.shape[0]):
for j in range(output.shape[1]):
if output[i, j] > 0:
communities.append([[i, j]])
directions = []
for com in communities:
community = list(com)
color_fig = output[community[0][0], community[0][1]]
if rule['look_at_what_to_move'] == 1 and color_fig != rule['color_what']:
directions.append('None')
continue
xs = [p[0] for p in community]
ys = [p[1] for p in community]
if rule['direction_type'] == 'border':
direction = rule['direction_border']
elif rule['direction_type'] == 'color':
color = rule['direction_color']
xmin, xmax = np.min(xs), np.max(xs)
ymin, ymax = np.min(ys), np.max(ys)
number_0 = np.sum(output[:xmin] == color)
number_1 = np.sum(output[(xmax + 1):] == color)
number_2 = np.sum(output[:, :ymin] == color)
number_3 = np.sum(output[:, (ymax + 1):] == color)
direction = ['top', 'bottom', 'left', 'right'][np.argmax([number_0, number_1, number_2, number_3])]
directions.append(direction)
already_moved = np.zeros(len(communities))
while changed_smth > 0:
changed_smth = 0
for i, com in enumerate(communities):
community = list(com)
color_fig = output[community[0][0], community[0][1]]
xs = [p[0] for p in community]
ys = [p[1] for p in community]
direction = directions[i]
if direction == 'top':
toper = np.array([[p[0] - 1, p[1]] for p in community if (p[0] - 1, p[1]) not in community])
xs = np.array([p[0] for p in toper])
ys = np.array([p[1] for p in toper])
if np.min(xs) < 0:
continue
if (output[xs, ys] == 0).all() and (rule['steps_limit']==1 or already_moved[i]==0):
changed_smth = 1
already_moved[i]=1
com_xs = np.array([p[0] for p in community])
com_ys = np.array([p[1] for p in community])
output[com_xs, com_ys] = 0
output[com_xs - 1, com_ys] = color_fig
communities[i] = [(p[0] - 1, p[1]) for p in community]
if direction == 'bottom':
toper = np.array([[p[0] + 1, p[1]] for p in community if (p[0] + 1, p[1]) not in community])
xs = np.array([p[0] for p in toper])
ys = np.array([p[1] for p in toper])
if np.max(xs) == input.shape[0]:
continue
if (output[xs, ys] == 0).all() and (rule['steps_limit']==1 or already_moved[i]==0):
changed_smth = 1
already_moved[i]=1
com_xs = np.array([p[0] for p in community])
com_ys = np.array([p[1] for p in community])
output[com_xs, com_ys] = 0
output[com_xs + 1, com_ys] = color_fig
communities[i] = [(p[0] + 1, p[1]) for p in community]
if direction == 'left':
toper = np.array([[p[0], p[1] - 1] for p in community if (p[0], p[1] - 1) not in community])
xs = np.array([p[0] for p in toper])
ys = np.array([p[1] for p in toper])
if np.min(ys) < 0:
continue
if (output[xs, ys] == 0).all() and (rule['steps_limit']==1 or already_moved[i]==0):
changed_smth = 1
already_moved[i]=1
com_xs = np.array([p[0] for p in community])
com_ys = np.array([p[1] for p in community])
output[com_xs, com_ys] = 0
output[com_xs, com_ys - 1] = color_fig
communities[i] = [(p[0], p[1] - 1) for p in community]
if direction == 'right':
toper = np.array([[p[0], p[1] + 1] for p in community if (p[0], p[1] + 1) not in community])
xs = np.array([p[0] for p in toper])
ys = np.array([p[1] for p in toper])
if np.max(ys) == input.shape[1]:
continue
if (output[xs, ys] == 0).all() and (rule['steps_limit']==1 or already_moved[i]==0):
changed_smth = 1
already_moved[i]=1
com_xs = np.array([p[0] for p in community])
com_ys = np.array([p[1] for p in community])
output[com_xs, com_ys] = 0
output[com_xs, com_ys + 1] = color_fig
communities[i] = [(p[0], p[1] + 1) for p in community]
return output, hidden
def compute_metrics(prediction_grid, answer_grid):
n_metrics = 11
def get_metrics(prediction, answer):
prediction_empty = (prediction == 0).astype(np.int)
answer_empty = (answer == 0).astype(np.int)
right = (prediction == answer).astype(np.int)
# empty_right = (prediction_empty == answer_empty).astype(np.int)
#
accuracy = np.mean(right)
# accuracy_empty = np.mean(empty_right)
# precision = 1 - np.mean((1 - prediction_empty) * (1 - right))
# recall = 1 - np.mean((1 - answer_empty) * (1 - right))
# precision_empty = 1 - np.mean((1 - prediction_empty) * (1 - empty_right))
# recall_empty = 1 - np.mean((1 - answer_empty) * (1 - empty_right))
# return [accuracy,
# accuracy_empty,
# precision, recall,
# precision_empty, recall_empty
# ][:n_metrics]
color_rights = []
for color in range(10):
idx = answer != color
# print(idx.astype(np.int))
color_right = float((np.logical_or(idx, right).all() and not (prediction[idx]==color).any()))
color_rights.append(color_right)
#print(color_rights)
#print(color_rights)
#1/0
# right = (prediction == answer).astype(np.int)
# empty_right = (prediction_empty == answer_empty).astype(np.int)
#
# accuracy = np.mean(right)
# accuracy_empty = np.mean(empty_right)
# precision = 1 - np.mean((1 - prediction_empty) * (1 - right))
# recall = 1 - np.mean((1 - answer_empty) * (1 - right))
# precision_empty = 1 - np.mean((1 - prediction_empty) * (1 - empty_right))
# recall_empty = 1 - np.mean((1 - answer_empty) * (1 - empty_right))
return [accuracy] + color_rights
#print(prediction_grid.shape, answer_grid.shape)
if prediction_grid.shape == answer_grid.shape:
# print(prediction_grid)
# print(answer_grid)
mets = get_metrics(prediction_grid, answer_grid) + [1]
#print(mets)
return mets
# elif prediction_grid.shape[0] >= answer_grid.shape[0] and prediction_grid.shape[1] >= answer_grid.shape[1]:
# metrics = np.zeros((prediction_grid.shape[0] - answer_grid.shape[0] + 1,
# prediction_grid.shape[1] - answer_grid.shape[1] + 1, n_metrics))
# for i in range(prediction_grid.shape[0] - answer_grid.shape[0] + 1):
# for j in range(prediction_grid.shape[1] - answer_grid.shape[1] + 1):
# prediction = prediction_grid[i:(i + answer_grid.shape[0]), j:(j + answer_grid.shape[1])]
# metrics[i, j] = get_metrics(prediction, answer_grid)
#
# maxi, maxj = np.unravel_index(metrics[:, :, 0].argmax(), metrics[:, :, 0].shape)
# # mean_metrics = list(np.mean(np.mean(metrics, axis=0), axis=0)/2 + np.array(metrics[maxi, maxj])/2)
# size_proportion = answer_grid.shape[0] * answer_grid.shape[1] / prediction_grid.shape[0] / \
# prediction_grid.shape[1]
# metrics = metrics[maxi, maxj]
# return list(metrics) + [size_proportion]
#
# elif prediction_grid.shape[0] <= answer_grid.shape[0] and prediction_grid.shape[1] <= answer_grid.shape[1]:
# metrics = np.zeros((answer_grid.shape[0] - prediction_grid.shape[0] + 1,
# answer_grid.shape[1] - prediction_grid.shape[1] + 1, n_metrics))
# for i in range(answer_grid.shape[0] - prediction_grid.shape[0] + 1):
# for j in range(answer_grid.shape[1] - prediction_grid.shape[1] + 1):
# answer = answer_grid[i:(i + prediction_grid.shape[0]), j:(j + prediction_grid.shape[1])]
# metrics[i, j] = get_metrics(prediction_grid, answer)
#
# maxi, maxj = np.unravel_index(metrics[:, :, 0].argmax(), metrics[:, :, 0].shape)
# # mean_metrics = list(np.mean(np.mean(metrics, axis=0), axis=0)/2 + np.array(metrics[maxi, maxj])/2)
# size_proportion = answer_grid.shape[0] * answer_grid.shape[1] / prediction_grid.shape[0] / \
# prediction_grid.shape[1]
# metrics = metrics[maxi, maxj]
# return list(metrics) + [1/size_proportion]
# elif prediction_grid.shape[0] >= answer_grid.shape[0] and prediction_grid.shape[1] >= answer_grid.shape[1]:
# maxi, maxj = 0, 0
# maxcommon = 0
#
# for i in range(prediction_grid.shape[0] - answer_grid.shape[0] + 1):
# for j in range(prediction_grid.shape[1] - answer_grid.shape[1] + 1):
# for i_check, j_check in product(range(answer_grid.shape[0]), range(answer_grid.shape[1])):
# if prediction_grid[i + i_check, j + j_check] != answer_grid[i_check, j_check]:
# common = i_check * j_check
# break
# if i_check == answer_grid.shape[0] - 1 and j_check == answer_grid.shape[1] - 1:
# common = i_check * j_check
#
# if common > maxcommon:
# maxi = i
# maxj = j
# maxcommon = common
# if common == answer_grid.shape[0] * answer_grid.shape[1]:
# break
#
# metrics = get_metrics(prediction_grid[maxi:(maxi + answer_grid.shape[0]),
# maxj:(maxj + answer_grid.shape[1])], answer_grid)
#
# modified_pred = np.zeros_like(prediction_grid)
# modified_pred[:] = prediction_grid[:]
# modified_pred[maxi:(maxi + answer_grid.shape[0]), maxj:(maxj + answer_grid.shape[1])] = 0
# size_proportion = answer_grid.shape[0] * answer_grid.shape[1] / prediction_grid.shape[0] / prediction_grid.shape[1]
# #print(np.mean(modified_pred==0))
# return list(size_proportion*np.array(metrics)) + [1.0]
#
# elif prediction_grid.shape[0] <= answer_grid.shape[0] and prediction_grid.shape[1] <= answer_grid.shape[1]:
# maxi, maxj = 0, 0
# maxcommon = 0
#
# for i in range(answer_grid.shape[0] - prediction_grid.shape[0] + 1):
# for j in range(answer_grid.shape[1] - prediction_grid.shape[1] + 1):
# for i_check, j_check in product(range(prediction_grid.shape[0]), range(prediction_grid.shape[1])):
# #print(i_check, j_check)
# if answer_grid[i + i_check, j + j_check] != prediction_grid[i_check, j_check]:
# common = i_check * j_check
# break
# if i_check == prediction_grid.shape[0] - 1 and j_check == prediction_grid.shape[1] - 1:
# common = i_check * j_check
#
# if common > maxcommon:
# maxi = i
# maxj = j
# maxcommon = common
# if common == prediction_grid.shape[0] * prediction_grid.shape[1]:
# break
#
# metrics = get_metrics(answer_grid[maxi:(maxi + prediction_grid.shape[0]),
# maxj:(maxj + prediction_grid.shape[1])], prediction_grid)
#
# modified_pred = np.zeros_like(answer_grid)
# modified_pred[:] = answer_grid[:]
# modified_pred[maxi:(maxi + prediction_grid.shape[0]), maxj:(maxj + prediction_grid.shape[1])] = 0
# size_proportion = prediction_grid.shape[0] * prediction_grid.shape[1] / answer_grid.shape[0] / answer_grid.shape[1]
# return list(size_proportion*np.array(metrics)) + [1.0]
return list(np.array(get_metrics(answer_grid, answer_grid)) * 0) + [0]
def validate_automata(task_global, params, n_iter_max, n_hidden):
def validate(task):
inp = task['input']
out = trace_param_automata(inp, params, n_iter_max, n_hidden)[-1][0]
metrics = compute_metrics(out, task['output'])
return metrics
metrics = []
for task in task_global['train']:
metrics.append(validate(task))
mean_metrics = list(np.round(np.mean(metrics, axis=0), 3))
min_metrics = list(np.round(np.min(metrics, axis=0), 3))
return tuple(mean_metrics + list(np.array(metrics)[:, 0].reshape(-1)))#tuple(mean_metrics + min_metrics)
def product_better(a, b):
""" Return True iff the two tuples a and b respect a<b for the partial order. """
a = np.array(a)
b = np.array(b)
    return (a >= b).all() and (a > b).any()
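# Illustrative examples: product_better((0.9, 0.9), (0.9, 0.8)) is True (>= everywhere, > somewhere),
# while product_better((0.9, 0.7), (0.8, 0.8)) is False because the two tuples are incomparable.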
def generate_random_ca(all_colors, best_candidates, temp, config, length=1):
rules = []
for _ in range(length):
rules.append(get_random_ca_rule(all_colors, best_candidates, temp, config))
return rules
def generate_random_global(all_colors, best_candidates, temp, config, length=1):
rules = []
for _ in range(length):
rules.append(get_random_global_rule(all_colors, best_candidates, temp, config))
return rules
def generate_population(all_colors, config, size=64, length=1):
population = []
for i in range(size):
split_rule = get_random_split_rule(all_colors, {}, 0, config)
merge_rule = get_random_merge_rule(all_colors, {}, 0, config)
global_rules = generate_random_global(all_colors, {}, 0, config, np.random.choice(2, p=[0.2, 0.8]))
ca_rules = generate_random_ca(all_colors, {}, 0, config, np.random.choice(2, p=[0.2, 0.8]))
population.append([global_rules, ca_rules, split_rule, merge_rule])
return population
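# Each individual in the population is a 4-element list [global_rules, ca_rules, split_rule, merge_rule];
# the two rule lists start with 0 or 1 random rules (probability 0.2 / 0.8) and can grow later via mutation.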
from pathlib import Path
import json
train_path = data_path / 'training'
valid_path = data_path / 'evaluation'
test_path = data_path / 'test'
submission_path = data_path / 'public_submission.csv'
train_tasks = { task.stem: json.load(task.open()) for task in train_path.iterdir() }
valid_tasks = { task.stem: json.load(task.open()) for task in valid_path.iterdir() }
test_tasks = { task.stem: json.load(task.open()) for task in test_path.iterdir() }
train_task_ids = np.sort(list(train_tasks.keys()))
valid_task_ids = np.sort(list(valid_tasks.keys()))
test_task_ids = np.sort(list(test_tasks.keys()))
from functools import partial
from itertools import product
from sklearn.preprocessing import MinMaxScaler
def change_color(colors_in, colors_out, grid):
out_grid = np.zeros_like(grid)
out_grid[:] = grid[:]
for i in range(grid.shape[0]):
for j in range(grid.shape[1]):
for color_in, color_out in zip(colors_in, colors_out):
if grid[i, j] == color_in:
out_grid[i, j] = color_out
break
return out_grid
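# Illustrative example on a hypothetical grid: change_color([1, 2], [2, 1], np.array([[1, 2], [0, 1]]))
# swaps colours 1 and 2, returning [[2, 1], [0, 2]]; cells whose colour is not in colors_in are kept.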
def reduce_grid(grid_rows, grid_columns, color, grid):
    out_grid = np.zeros((len(grid_rows), len(grid_columns)), dtype=int)
for i, j in product(range(len(grid_rows)), range(len(grid_columns))):
out_grid[i, j] = grid[grid_rows[i][0], grid_columns[j][0]]
return out_grid
def unreduce_grid(line_rows, line_columns, n, m, grid_rows, grid_columns, color, grid):
    out_grid = np.zeros((n, m), dtype=int)
for i in range(len(line_rows)):
out_grid[line_rows[i]] = color
for j in range(len(line_columns)):
out_grid[:, line_columns[j]] = color
for i, j in product(range(len(grid_rows)), range(len(grid_columns))):
if grid[i, j] != 0:
for i_gr_row in list(grid_rows[i]):
for j_gr_col in list(grid_columns[j]):
out_grid[i_gr_row, j_gr_col] = grid[i, j]
return out_grid
def get_color_features(input_grid):
colors = np.unique(input_grid)
colors_numbers = np.array([np.mean(input_grid == color) for color in colors]).reshape((-1, 1))
# communities_1 = get_graph_communities(input_grid)
#
# communities_2 = get_connectivity_info(input_grid)
#
# communities_1 = sorted([sorted(com) for com in communities_1])
# communities_2 = sorted([sorted(com) for com in communities_2])
#
# assert all((a == b) for a, b in zip(communities_1, communities_2))
# colors_communities = [np.sum([input_grid[list(com)[0]] == color for com in communities]) / len(communities) for
# color in colors]
#colors_communities = np.array(colors_communities).reshape((-1, 1))
colors_borders = np.array([np.mean(input_grid[0] == color) for color in colors]).reshape((-1, 1))
colors_borders += np.array([np.mean(input_grid[-1] == color) for color in colors]).reshape((-1, 1))
colors_borders += np.array([np.mean(input_grid[:, 0] == color) for color in colors]).reshape((-1, 1))
colors_borders += np.array([np.mean(input_grid[:, -1] == color) for color in colors]).reshape((-1, 1))
colors_borders /= np.sum(colors_borders)
colors_features = np.concatenate([colors_numbers, colors_borders], axis=1)
return colors_features, colors
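# For each colour present in the grid this returns two features: its overall pixel share and its
# normalised share of the four border rows/columns; a background colour typically dominates both.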
def get_train_color_features(task):
colors_in_train = []
colors_in_each_train = []
for uni_task in task['train']:
inp = uni_task['input']
colors_unique, color_numbers = np.unique(inp, return_counts=True)
colors_in_train += list(colors_unique)
colors_in_each_train.append(colors_unique)
max_color_task = np.argmax([clrs.shape[0] for clrs in colors_in_each_train])
colors = colors_in_each_train[max_color_task]
input_grid = task['train'][max_color_task]['input']
train_colors_features, _ = get_color_features(input_grid)
scaler = MinMaxScaler()
train_colors_features = scaler.fit_transform(train_colors_features)
sums = np.sum(train_colors_features, axis=1)
train_colors_features = train_colors_features[np.argsort(sums)[::-1]]
return train_colors_features, scaler, np.unique(colors_in_train)
def build_mapping(task, config):
reverse_functions = []
for part in ['train', 'test']:
for uni_task in task[part]:
if part == 'test':
reverse_functions.append({})
if config['reduce_grid']:
can_reduce_grid = True
for uni_task in task['train']:
if uni_task['input'].shape != uni_task['output'].shape:
can_reduce_grid = False
break
inp = uni_task['input']
colors_rows = []
line_rows = []
for i in range(inp.shape[0]):
if (inp[i] == inp[i][0]).all():
colors_rows.append(inp[i][0])
line_rows.append(i)
row_colors, row_counts = np.unique(colors_rows, return_counts=True)
colors_columns = []
line_columns = []
for i in range(inp.shape[1]):
if (inp[:, i] == inp[0, i]).all():
colors_columns.append(inp[0, i])
line_columns.append(i)
column_colors, column_counts = np.unique(colors_columns, return_counts=True)
if row_colors.shape[0] != 1 or column_colors.shape[0] != 1 or \
row_counts[0] < 2 or column_counts[0] < 2:
can_reduce_grid = False
break
line_rows.append(inp.shape[0])
line_rows = [-1] + line_rows
line_columns.append(inp.shape[1])
line_columns = [-1] + line_columns
for i in range(len(line_rows) - 1):
if (line_rows[i] + 1) < line_rows[i + 1]:
for j in range(len(line_columns) - 1):
if (line_columns[j] + 1) < line_columns[j + 1]:
color = inp[line_rows[i] + 1][line_columns[j] + 1]
if not (inp[(line_rows[i] + 1):(line_rows[i + 1]),
(line_columns[j] + 1):(line_columns[j + 1])] == color).all():
can_reduce_grid = False
break
for i in range(1, len(line_rows) - 1):
if not (uni_task['input'][line_rows[i]] == uni_task['output'][line_rows[i]]).all():
can_reduce_grid = False
break
for j in range(1, len(line_columns) - 1):
if not (uni_task['input'][:, line_columns[j]] == uni_task['output'][:, line_columns[j]]).all():
can_reduce_grid = False
break
if not can_reduce_grid:
break
if can_reduce_grid:
for part in ['train', 'test']:
for i_task, uni_task in enumerate(task[part]):
inp = uni_task['input']
colors_rows = []
line_rows = []
for i in range(inp.shape[0]):
if (inp[i] == inp[i][0]).all():
colors_rows.append(inp[i][0])
line_rows.append(i)
row_colors, row_counts = np.unique(colors_rows, return_counts=True)
colors_columns = []
line_columns = []
for i in range(inp.shape[1]):
if (inp[:, i] == inp[0, i]).all():
colors_columns.append(inp[0, i])
line_columns.append(i)
column_colors, column_counts = np.unique(colors_columns, return_counts=True)
line_rows.append(inp.shape[0])
line_rows = [-1] + line_rows
line_columns.append(inp.shape[1])
line_columns = [-1] + line_columns
grid_rows = []
grid_columns = []
for i in range(len(line_rows) - 1):
if (line_rows[i] + 1) < line_rows[i + 1]:
grid_rows.append(np.arange(line_rows[i] + 1, line_rows[i + 1]))
for j in range(len(line_columns) - 1):
if (line_columns[j] + 1) < line_columns[j + 1]:
grid_columns.append(np.arange(line_columns[j] + 1, line_columns[j + 1]))
uni_task['input'] = reduce_grid(grid_rows, grid_columns, row_colors[0], inp)
if part == 'train':
uni_task['output'] = reduce_grid(grid_rows, grid_columns, row_colors[0], uni_task['output'])
if part == 'test':
reverse_functions[i_task]['unreduce_grid'] = partial(unreduce_grid, line_rows[1:-1],
line_columns[1:-1], inp.shape[0],
inp.shape[1],
grid_rows, grid_columns, row_colors[0])
if config['map_color']:
go_map_color = True
train_colors_features, scaler, unique_train_colors = get_train_color_features(task)
for uni_task in task['test']:
inp = uni_task['input']
colors_test = list(np.unique(inp))
for color in colors_test:
if not color in unique_train_colors:
go_map_color = True
if go_map_color:
colors_in_all = [[], []]
colors_out_all = [[], []]
for i_part, part in enumerate(['train', 'test']):
for i_task, uni_task in enumerate(task[part]):
input_grid = uni_task['input']
colors_features, colors = get_color_features(input_grid)
proper_colors = list(np.arange(train_colors_features.shape[0]))
colors_features = scaler.transform(colors_features)
colors_in = []
colors_out = []
for i, color in enumerate(colors):
color_features = colors_features[i].reshape((1, -1))
distances = np.sum(np.power(train_colors_features - color_features, 2), axis=1)
closests = list(np.argsort(distances))
for closest in closests:
if closest in proper_colors:
proper_colors.remove(closest)
colors_in.append(color)
colors_out.append(closest)
break
if part == 'train':
colors_in_all[i_part].append(colors_in)
colors_out_all[i_part].append(colors_out)
if part == 'test':
colors_in_all[i_part].append(colors_out)
colors_out_all[i_part].append(colors_in)
reverse_functions[i_task]['train_colors_in'] = colors_out
reverse_functions[i_task]['train_colors_out'] = colors_in
unique_test_colors = []
for i_task, uni_task in enumerate(task['train']):
output_grid = uni_task['output']
colors = np.unique(output_grid)
for color in colors:
if not color in unique_train_colors:
unique_test_colors.append(color)
unique_test_colors = np.unique(unique_test_colors)
colors_out = 9 - np.arange(unique_test_colors.shape[0])
for part in ['train', 'test']:
for i_task, uni_task in enumerate(task[part]):
if part == 'train':
uni_task['input'] = change_color(colors_in_all[0][i_task], colors_out_all[0][i_task],
uni_task['input'])
colors_in_all[0][i_task] += list(unique_test_colors)
colors_out_all[0][i_task] += list(colors_out)
uni_task['output'] = change_color(colors_in_all[0][i_task], colors_out_all[0][i_task],
uni_task['output'])
if part == 'test':
reverse_functions[i_task]['test_colors_in'] = list(colors_out)
reverse_functions[i_task]['test_colors_out'] = list(unique_test_colors)
if config['find_wall']:
for i_part, part in enumerate(['train', 'test']):
for i_task, uni_task in enumerate(task[part]):
input_grid = uni_task['input']
colors_features, colors = get_color_features(input_grid)
sums = np.sum(colors_features, axis=1)
color_wall = colors[np.argsort(sums)[::-1][0]]
#print(color_wall)
if color_wall == 0:
continue
colors_in = [0, color_wall]
colors_out = [color_wall, 0]
uni_task['input'] = change_color(colors_in, colors_out, input_grid)
if part == 'train':
uni_task['output'] = change_color(colors_in, colors_out, uni_task['output'])
if part == 'test':
reverse_functions[i_task]['return_wall'] = partial(change_color, colors_out,
colors_in)
return task, reverse_functions
def update_pool(task, best_candidates, candidate, num_params):
start = time.time()
score = validate_automata(task, candidate, 25, 1)
is_uncomp = True
updated_keys = False
best_candidates_items = list(best_candidates.items())
for best_score, best_candidates_score in best_candidates_items:
if product_better(score, best_score):
# Remove previous best candidate and add the new one
del best_candidates[best_score]
best_candidates[score] = [candidate]
is_uncomp = False # The candidates are comparable
updated_keys = True
if product_better(best_score, score):
is_uncomp = False # The candidates are comparable
if is_uncomp: # The two candidates are uncomparable
best_candidates[score].append(candidate)
best_candidates[score] = sorted(best_candidates[score], key=lambda x: len(x[0]) + len(x[1]))
if len(best_candidates[score]) > num_params:
best_candidates[score] = [cand for cand in best_candidates[score] if
(len(cand[0]) + len(cand[1])) <= len(best_candidates[score][0][0]) + len(best_candidates[score][0][1]) + 2]
# best_candidates[score] = best_candidates[score][:num_params]
return updated_keys
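# update_pool maintains best_candidates as a Pareto front of score tuples: keys dominated under
# product_better are removed, incomparable scores coexist, and candidates that share a score are kept
# sorted (and pruned) by total rule count.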
def generate_asexual_part(best_candidates, temp, part, generate_func, all_colors, config, alpha_mutate_rule_same_type):
if type(part) == list:
if np.random.rand() < (1 / (len(part) + 1))**0.75:
part.append(generate_func(all_colors, best_candidates, temp, config))
else:
index = np.random.randint(len(part))
if np.random.rand() < 0.3:
part = part[:index] + part[(index + 1):]
else:
r_type = None
if np.random.rand() < alpha_mutate_rule_same_type:
r_type = part[index]['type']
if np.random.rand() < 0.5:
part[index] = generate_func(all_colors, best_candidates, temp, config, r_type)
else:
part = part[:index] + [generate_func(all_colors, best_candidates, temp, config, r_type)] + part[index:]
else:
part = generate_func(all_colors, best_candidates, temp, config)
return part
def generate_sexual_part(best_candidates, temp, first, second, generate_func, all_colors, config, alpha_sexual_mutate,
alpha_mutate_rule_same_type, alpha_mutate_rule_same_type_one_parameter):
if type(first) == list:
if len(first) == 0 and len(second) == 0:
child = []
elif len(first) == 0:
split2 = np.random.randint(len(second))
if np.random.rand() <= 0.5:
child = second[split2:]
else:
child = second[:split2]
elif len(second) == 0:
split1 = np.random.randint(len(first))
if np.random.rand() <= 0.5:
child = first[split1:]
else:
child = first[:split1]
else:
split1 = np.random.randint(len(first))
split2 = np.random.randint(len(second))
if np.random.rand() <= 0.5:
child = first[:split1] + second[split2:]
else:
child = second[:split2] + first[split1:]
if np.random.rand() < alpha_sexual_mutate:
index = np.random.randint(len(child) + 1)
if index == len(child):
child.append(generate_func(all_colors, best_candidates, temp, config))
else:
r_type = None
same_type = np.random.rand() < alpha_mutate_rule_same_type
one_param_modification = np.random.rand() < alpha_mutate_rule_same_type_one_parameter
if same_type:
r_type = child[index]['type']
same_type_rule = generate_func(all_colors, best_candidates, temp, config, r_type)
if not one_param_modification:
child[index] = same_type_rule
else:
key = random.choice(list(child[index].keys()))
child[index][key] = same_type_rule[key]
else:
if np.random.rand() < 0.5:
child[index] = generate_func(all_colors, best_candidates, temp, config)
else:
child = child[:index] + [generate_func(all_colors, best_candidates, temp, config, r_type)] + child[
index:]
else:
if np.random.rand() < 0.5:
child = copy.deepcopy(first)
else:
child = copy.deepcopy(second)
return child
def generate_asexual_child(best_candidates, temp, parent, all_colors, config, alpha_mutate_rule_same_type):
child = copy.deepcopy(parent)
gen_functions = [get_random_global_rule, get_random_ca_rule, get_random_split_rule, get_random_merge_rule]
idx_to_mutate = np.random.choice(len(child), p =[0.4, 0.4, 0.1, 0.1])
child[idx_to_mutate] = generate_asexual_part(best_candidates, temp, child[idx_to_mutate], gen_functions[idx_to_mutate],
all_colors, config, alpha_mutate_rule_same_type)
return child
def generate_sexual_child(best_candidates, temp, first, second, all_colors, config, alpha_sexual_mutate,
alpha_mutate_rule_same_type, alpha_mutate_rule_same_type_one_parameter):
gen_functions = [get_random_global_rule, get_random_ca_rule, get_random_split_rule, get_random_merge_rule]
what_to_mutate = np.random.choice(len(gen_functions), p=[0.5, 0.5, 0.0, 0.0])
child = []
for idx_to_mutate, gen_func in enumerate(gen_functions):
child.append(generate_sexual_part(best_candidates, temp, first[idx_to_mutate], second[idx_to_mutate],
gen_func, all_colors, config,
(what_to_mutate==idx_to_mutate) * alpha_sexual_mutate, alpha_mutate_rule_same_type,
alpha_mutate_rule_same_type_one_parameter))
return child
def post_solved_process(task, solved, all_colors, config, reverse_functions, config_mapping):
test_preds = []
best_candidates = defaultdict(list)
update_pool(task, best_candidates, solved, 1)
start_time = time.time()
while time.time() - start_time < 30:
best_scores = list(best_candidates.keys())
first_score = random.choice(best_scores)
idx = np.random.choice(len(list(best_candidates[first_score])))
first = list(best_candidates[first_score])[idx]
child = generate_asexual_child(best_candidates, 0.5, first, all_colors, config, 0.)
update_pool(task, best_candidates, child, 1)
train_colors_features, scaler, _ = get_train_color_features(task)
print(list(best_candidates.values())[0][0])
for i_task, uni_task in enumerate(task['test']):
predictions = []
for solved in list(best_candidates.values())[0]:
if reverse_functions[i_task].get('train_colors_in', None):
inp = uni_task['input']
colors_unique, color_numbers = np.unique(inp, return_counts=True)
input_grid = uni_task['input']
colors_features, colors = get_color_features(input_grid)
colors_features = scaler.transform(colors_features)
colors_in = []
colors_out = []
if colors_unique.shape[0] <= train_colors_features.shape[0]:
proper_colors = list(np.arange(train_colors_features.shape[0]))
for i, color in enumerate(colors):
color_features = colors_features[i].reshape((1, -1))
distances = np.sum(np.power(train_colors_features - color_features, 2), axis=1)
closests = list(np.argsort(distances))
for closest in closests:
if closest in proper_colors:
proper_colors.remove(closest)
colors_in.append(color)
colors_out.append(closest)
break
colors_in += list(reverse_functions[i_task]['train_colors_out'])
colors_out += list(reverse_functions[i_task]['train_colors_in'])
input_task = change_color(colors_in, colors_out, uni_task['input'])
trace = trace_param_automata(input_task, solved, 25, 0)
t_pred = trace[-1][0]
if not reverse_functions[i_task].get('unreduce_grid', None) is None:
t_pred = reverse_functions[i_task]['unreduce_grid'](t_pred)
if not reverse_functions[i_task].get('train_colors_in', None) is None:
colors_in = reverse_functions[i_task]['train_colors_in'] + reverse_functions[i_task][
'test_colors_in']
colors_out = reverse_functions[i_task]['train_colors_out'] + reverse_functions[i_task][
'test_colors_out']
t_pred = change_color(colors_in, colors_out, t_pred)
predictions.append(t_pred)
else:
closests_to = [[] for _ in range(train_colors_features.shape[0])]
for i, color in enumerate(colors):
color_features = colors_features[i].reshape((1, -1))
distances = np.sum(np.power(train_colors_features - color_features, 2), axis=1)
closest = np.argsort(distances)[0]
closests_to[closest].append(color)
for i in range(len(closests_to)):
if len(closests_to[i]) == 0:
closests_to[i] = [-1]
answers = []
for color_map in product(*closests_to):
                    input_task = np.zeros_like(uni_task['input'])
from typing import Union
import numpy as np
import pytest
from galois_field.core.ElementInGFpn import ElementInGFpn
from galois_field.core.types import Fp, Fpn
@pytest.mark.parametrize('coeffs, p, mod_coeffs, expected_coeffs', [
(np.array([4, 3, 2, 1]), 2, np.array([1, 1, 1]), [1, 0]),
(np.array([4, 3, 2, 1]), 5, np.array([1, 0, 2]), [4, 0]),
(np.array([2, 1]), 11, np.array([1, 0, 1]), [2, 1])
])
def test_ElementInGFpn_init(coeffs, p, mod_coeffs, expected_coeffs):
result = ElementInGFpn(coeffs, p, mod_coeffs)
assert result.coeffs == expected_coeffs
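# Worked example for the first case above: 4x^3 + 3x^2 + 2x + 1 reduces mod p = 2 to x^2 + 1, and
# x^2 + 1 modulo the irreducible polynomial x^2 + x + 1 over GF(2) leaves remainder x, i.e. [1, 0].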
@pytest.mark.parametrize('coeffs, p, mod_coeffs, expected', [
(np.array([0]), 2, np.array([1, 1, 1]), '0'),
(np.array([4, 3, 2, 1]), 2, np.array([1, 1, 1]), '1x'),
(np.array([4, 3, 2, 1]), 5, np.array([1, 0, 2]), '4x'),
(np.array([2, 1]), 11, np.array([1, 0, 1]), '2x + 1')
])
def test_ElementInGFpn_str(coeffs, p, mod_coeffs, expected):
result = ElementInGFpn(coeffs, p, mod_coeffs)
assert str(result) == expected
@pytest.mark.parametrize('coeffs, p, mod_coeffs, expected', [
(np.array([4, 3, 2, 1]), 2, np.array([1, 1, 1]),
'ElementInGFpn([1, 0], 2, [1, 1, 1])'),
(np.array([4, 3, 2, 1]), 5, np.array([1, 0, 2]),
'ElementInGFpn([4, 0], 5, [1, 0, 2])'),
(np.array([2, 1]), 11, np.array([1, 0, 1]),
'ElementInGFpn([2, 1], 11, [1, 0, 1])')
])
def test_ElementInGFpn_repr(coeffs, p, mod_coeffs, expected):
result = ElementInGFpn(coeffs, p, np.poly1d(mod_coeffs))
assert repr(result) == expected
@pytest.mark.parametrize('coeffs1, coeffs2, p, mod_coeffs, expected_coeffs', [
(np.array([1, 2, 3, 4]), np.array([1, 2, 3, 4]), 5,
np.array([1, 0, 0, 0, 2]), [2, 4, 1, 3]),
(np.array([1, 2, 3, 4]), np.array([1, 2, 3, 4]),
11, np.array([1, 0, 1]), [4, 4]),
(3, np.array([1, 2, 3, 4]), 5,
np.array([1, 0, 0, 0, 2]), [1, 2, 3, 2]),
(np.array([1, 2, 3, 4]), 2,
11, np.array([1, 0, 1]), [2, 4])
])
def test_GFpn_add(coeffs1: Fpn, coeffs2: Fpn,
p: int, mod_coeffs: Fpn, expected_coeffs):
if isinstance(coeffs1, int):
el1 = coeffs1
else:
el1 = ElementInGFpn(coeffs1, p, np.poly1d(mod_coeffs))
if isinstance(coeffs2, int):
el2 = coeffs2
else:
el2 = ElementInGFpn(coeffs2, p, np.poly1d(mod_coeffs))
result = el1 + el2
print(result)
assert result.coeffs == expected_coeffs
@pytest.mark.parametrize('coeffs1, coeffs2, p, mod_coeffs, expected_coeffs', [
(np.array([1, 2, 3, 4]), np.array([4, 3, 2, 1]),
5, np.array([1, 0, 0, 0, 2]), [2, 4, 1, 3]),
(np.array([1, 2, 3, 4]), [4, 3, 2, 1], 11, np.array([1, 0, 1]), [4, 4]),
(np.array([1, 2, 3, 4]), 3,
5, np.array([1, 0, 0, 0, 2]), [1, 2, 3, 1]),
(4, [4, 3, 2, 1], 11, np.array([1, 0, 1]), [2, 6])
])
def test_GFpn_sub(coeffs1: Fpn, coeffs2: Fpn,
p: int, mod_coeffs: Fpn, expected_coeffs):
if isinstance(coeffs1, int):
el1 = coeffs1
else:
el1 = ElementInGFpn(coeffs1, p, np.poly1d(mod_coeffs))
if isinstance(coeffs2, int):
el2 = coeffs2
else:
el2 = ElementInGFpn(coeffs2, p, np.poly1d(mod_coeffs))
result = el1 - el2
assert result.coeffs == expected_coeffs
@pytest.mark.parametrize('coeffs1, coeffs2, p, mod_coeffs, expected_coeffs', [
(np.array([1, 2, 3, 4]), np.array([1, 2]), 5,
np.array([1, 0, 0, 0, 2]), [4, 2, 0, 1]),
(np.array([1, 2]), np.array([1, 2, 3, 4]),
11, np.array([1, 0, 1]), [6, 2]),
(np.array([1, 2, 3, 4]), 15, 11, np.array([1, 0, 0, 1, 2]), [4, 8, 1, 5]),
(15, np.array([1, 2, 3, 4]), 11, np.array([1, 0, 0, 1, 2]), [4, 8, 1, 5]),
])
def test_GFpn_mul(coeffs1: Union[Fpn, Fp],
coeffs2: Union[Fpn, Fp],
p: int, mod_coeffs: Fpn, expected_coeffs):
if isinstance(coeffs1, Fp):
el1 = coeffs1
else:
el1 = ElementInGFpn(coeffs1, p, np.poly1d(mod_coeffs))
if isinstance(coeffs2, Fp):
el2 = coeffs2
else:
el2 = ElementInGFpn(coeffs2, p, np.poly1d(mod_coeffs))
result = el1 * el2
assert result.coeffs == expected_coeffs
@pytest.mark.parametrize("coeffs1, coeffs2, p, mod_coeffs, expected_coeffs", [
(np.array([1, 1]), np.array([4]), 11, np.array([1, 0, 1]), [3, 3]),
(np.array([1, 2]), np.array([1, 1]), 5, np.array([1, 0, 2]), [3, 3]),
(2, np.array([1, 1]), 7, np.array([1, 0, 0, 1, 1]), [5, 2, 5, 0]),
(np.array([1, 1]), 5, 7, np.array([1, 0, 0, 1, 1]), [3, 3])
])
def test_GFpn_div(coeffs1: Union[Fpn, Fp],
coeffs2: Union[Fpn, Fp], p, mod_coeffs, expected_coeffs):
"""poly^{-1} = expected (mod mod_poly)"""
if isinstance(coeffs1, Fp):
el1 = coeffs1
else:
el1 = ElementInGFpn(coeffs1, p, np.poly1d(mod_coeffs))
if isinstance(coeffs2, Fp):
el2 = coeffs2
else:
el2 = ElementInGFpn(coeffs2, p, np.poly1d(mod_coeffs))
result = el1 / el2
assert result.coeffs == expected_coeffs
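# Worked example for the first case above: dividing by the constant 4 in GF(11^2) multiplies by its
# inverse 3 (since 4 * 3 = 12 = 1 mod 11), so (x + 1) / 4 = 3x + 3, i.e. coefficients [3, 3].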
@pytest.mark.parametrize("el, exp, expected_coeffs", [
    (ElementInGFpn(np.array([4]), 11, np.poly1d([1, 0, 1])
import time
import wandb
import os
import numpy as np
from itertools import chain
import torch
from onpolicy.utils.util import update_linear_schedule
from onpolicy.runner.shared.base_runner import Runner
def _t2n(x):
return x.detach().cpu().numpy()
class HNSRunner(Runner):
def __init__(self, config):
super(HNSRunner, self).__init__(config)
def run(self):
self.warmup()
start = time.time()
episodes = int(self.num_env_steps) // self.episode_length // self.n_rollout_threads
for episode in range(episodes):
if self.use_linear_lr_decay:
self.trainer.policy.lr_decay(episode, episodes)
env_infos = {}
env_infos['max_box_move_prep'] = []
env_infos['max_box_move'] = []
env_infos['num_box_lock_prep'] = []
env_infos['num_box_lock'] = []
env_infos['max_ramp_move_prep'] = []
env_infos['max_ramp_move'] = []
env_infos['num_ramp_lock_prep'] = []
env_infos['num_ramp_lock'] = []
env_infos['food_eaten_prep'] = []
env_infos['food_eaten'] = []
env_infos['lock_rate'] = []
env_infos['activated_sites'] = []
discard_episode = 0
success = 0
trials = 0
for step in range(self.episode_length):
# Sample actions
values, actions, action_log_probs, rnn_states, rnn_states_critic = self.collect(step)
                # Observe reward and next obs
obs, share_obs, rewards, dones, infos, _ = self.envs.step(actions)
for done, info in zip(dones, infos):
if done:
if "discard_episode" in info.keys() and info['discard_episode']:
discard_episode += 1
else:
trials += 1
if "success" in info.keys() and info['success']:
success += 1
for k in env_infos.keys():
if k in info.keys():
env_infos[k].append(info[k])
data = obs, share_obs, rewards, dones, infos, \
values, actions, action_log_probs, \
rnn_states, rnn_states_critic
# insert data into buffer
self.insert(data)
# compute return and update network
self.compute()
train_infos = self.train()
# post process
total_num_steps = (episode + 1) * self.episode_length * self.n_rollout_threads
# save model
if (episode % self.save_interval == 0 or episode == episodes - 1):
self.save()
# log information
if episode % self.log_interval == 0:
end = time.time()
print("\n Env {} Algo {} Exp {} updates {}/{} episodes, total num timesteps {}/{}, FPS {}.\n"
.format(self.env_name,
self.algorithm_name,
self.experiment_name,
episode,
episodes,
total_num_steps,
self.num_env_steps,
int(total_num_steps / (end - start))))
if self.env_name == "HideAndSeek" :
for hider_id in range(self.all_args.num_hiders):
agent_k = 'hider%i/average_step_rewards' % hider_id
train_infos[agent_k] = np.mean(self.buffer.rewards[:, :, hider_id])
for seeker_id in range(self.all_args.num_seekers):
agent_k = 'seeker%i/average_step_rewards' % seeker_id
train_infos[agent_k] = np.mean(self.buffer.rewards[:, :, self.all_args.num_hiders+seeker_id])
if self.env_name == "BoxLocking" or self.env_name == "BlueprintConstruction":
train_infos['average_step_rewards'] = np.mean(self.buffer.rewards)
success_rate = success/trials if trials > 0 else 0.0
print("success rate is {}.".format(success_rate))
if self.use_wandb:
wandb.log({'success_rate': success_rate}, step=total_num_steps)
wandb.log({'discard_episode': discard_episode}, step=total_num_steps)
else:
self.writter.add_scalars('success_rate', {'success_rate': success_rate}, total_num_steps)
self.writter.add_scalars('discard_episode', {'discard_episode': discard_episode}, total_num_steps)
self.log_env(env_infos, total_num_steps)
self.log_train(train_infos, total_num_steps)
# eval
if episode % self.eval_interval == 0 and self.use_eval:
self.eval(total_num_steps)
def warmup(self):
# reset env
obs, share_obs, _ = self.envs.reset()
share_obs = share_obs if self.use_centralized_V else obs
self.buffer.share_obs[0] = share_obs.copy()
self.buffer.obs[0] = obs.copy()
@torch.no_grad()
def collect(self, step):
self.trainer.prep_rollout()
value, action, action_log_prob, rnn_state, rnn_state_critic \
            = self.trainer.policy.get_actions(np.concatenate(self.buffer.share_obs[step])
import time
import ray
import cv2
import models
import copy
import torch
import numpy as np
import torch.nn as nn
import time
@ray.remote
class Trainer:
def __init__(self,checkpoint,replay_buffer,share_storage) -> None:
# self.eval_model=nn.DataParallel(models.Model()) #multi-GPU
# self.target_model=nn.DataParallel(models.Model()) #multi-GPU
self.eval_model=models.Model()
self.target_model=models.Model()
# self.eval_model.module.set_weights(copy.deepcopy(checkpoint["weights"])) #multi-GPU
# self.target_model.module.set_weights(copy.deepcopy(checkpoint["weights"])) #multi-GPU
self.eval_model.set_weights(copy.deepcopy(checkpoint["weights"]))
self.target_model.set_weights(copy.deepcopy(checkpoint["weights"]))
self.eval_model.cuda()
self.target_model.cuda()
self.replay_buffer=replay_buffer
self.share_storage=share_storage
self.gamma=checkpoint['gamma']
self.tau=checkpoint["tau"]
self.batch_size=checkpoint['batch_size']
self.training_step=checkpoint['training_step']
self.trained_step=checkpoint['max_training_step']
self.replace_target_iter=checkpoint['replace_target_iter']
self.flag=True
self.learn_step_counter=1
self.loss_fn=torch.nn.SmoothL1Loss(reduction="none")
self.optimizer=torch.optim.Adam(self.eval_model.parameters(),lr=checkpoint['lr'])
print('trainer init done')
def continous_update_weights(self):
print('wait train')
while not ray.get(self.share_storage.get_info.remote('start_training')):
time.sleep(0.1)
print('start train-----------------------------------------------------')
batch=self.replay_buffer.get_batch.remote(self.batch_size)
while True:
if self.flag:
batch_=self.replay_buffer.get_batch.remote(self.batch_size)
batch=ray.get(batch)
tree_idx,abs_error=self.update_weights(batch)
self.flag = not self.flag
else:
batch=self.replay_buffer.get_batch.remote(self.batch_size)
batch_=ray.get(batch_)
tree_idx,abs_error=self.update_weights(batch_)
self.flag = not self.flag
self.replay_buffer.batch_update.remote(tree_idx,abs_error)
self.learn_step_counter=self.learn_step_counter%self.replace_target_iter
if self.learn_step_counter==0:
# self.share_storage.set_info.remote({"weights": copy.deepcopy(self.eval_model.module.get_weights())})
self.share_storage.set_info.remote({"weights": copy.deepcopy(self.eval_model.get_weights())})
self.share_storage.save_checkpoint.remote()
print('net_replace!!!!')
for target_param, param in zip(self.target_model.parameters(), self.eval_model.parameters()):
target_param.data.copy_(self.tau * param + (1 - self.tau) * target_param)
self.learn_step_counter+=1
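    # Note on the loop above: the self.flag ping-pong double-buffers replay batches - while one batch
    # is being trained on, the next replay_buffer.get_batch.remote() call is already in flight, so the
    # Ray object transfer overlaps with the GPU update.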
def update_weights(self,batch):
tree_idx, batch_obs, batch_act, batch_reward, batch_obs_, batch_done, ISWeights=copy.deepcopy(batch)
batch_obs=torch.FloatTensor(np.stack(batch_obs)).permute(0,3,1,2).cuda()
batch_act=torch.LongTensor(np.vstack(batch_act)).cuda()
batch_reward=torch.FloatTensor(np.vstack(batch_reward)).cuda()
batch_obs_=torch.FloatTensor(np.stack(batch_obs_)).permute(0,3,1,2).cuda()
        batch_done=torch.BoolTensor(np.vstack(batch_done))
import time
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
def fft_window(tnum, nfft, window, overlap):
# IN : full length of time series, nfft, window name, overlap ratio
# OUT : bins, 1 x nfft window function
# use overlapping
bins = int(np.fix((int(tnum/nfft) - overlap)/(1.0 - overlap)))
# window function
if window == 'rectwin': # overlap = 0.5
win = np.ones(nfft)
elif window == 'hann': # overlap = 0.5
win = np.hanning(nfft)
elif window == 'hamm': # overlap = 0.5
win = np.hamming(nfft)
elif window == 'kaiser': # overlap = 0.62
win = np.kaiser(nfft, beta=30)
elif window == 'HFT248D': # overlap = 0.84
z = 2*np.pi/nfft*np.arange(0,nfft)
win = 1 - 1.985844164102*np.cos(z) + 1.791176438506*np.cos(2*z) - 1.282075284005*np.cos(3*z) + \
0.667777530266*np.cos(4*z) - 0.240160796576*np.cos(5*z) + 0.056656381764*np.cos(6*z) - \
0.008134974479*np.cos(7*z) + 0.000624544650*np.cos(8*z) - 0.000019808998*np.cos(9*z) + \
0.000000132974*np.cos(10*z)
return bins, win
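# Example: tnum = 10000, nfft = 1000, overlap = 0.5 gives bins = fix((10 - 0.5) / 0.5) = 19
# half-overlapping segments, each tapered by the chosen nfft-point window.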
def fftbins(x, dt, nfft, window, overlap, detrend=0, full=0):
# IN : 1 x tnum data
# OUT : bins x faxis fftdata
tnum = len(x)
bins, win = fft_window(tnum, nfft, window, overlap)
win_factor = np.mean(win**2) # window factors
# make an x-axis #
ax = np.fft.fftfreq(nfft, d=dt) # full 0~fN -fN~-f1
if np.mod(nfft, 2) == 0: # even nfft
ax = np.hstack([ax[0:int(nfft/2)], -(ax[int(nfft/2)]), ax[int(nfft/2):nfft]])
if full == 1: # full shift to -fN ~ 0 ~ fN
ax = np.fft.fftshift(ax)
else: # half 0~fN
ax = ax[0:int(nfft/2+1)]
# make fftdata
if full == 1: # full shift to -fN ~ 0 ~ fN
if np.mod(nfft, 2) == 0: # even nfft
fftdata = np.zeros((bins, nfft+1), dtype=np.complex_)
else: # odd nfft
fftdata = np.zeros((bins, nfft), dtype=np.complex_)
else: # half 0 ~ fN
fftdata = np.zeros((bins, int(nfft/2+1)), dtype=np.complex_)
for b in range(bins):
idx1 = int(b*np.fix(nfft*(1 - overlap)))
idx2 = idx1 + nfft
sx = x[idx1:idx2]
if detrend == 0:
sx = signal.detrend(sx, type='constant') # subtract mean
elif detrend == 1:
sx = signal.detrend(sx, type='linear')
sx = sx * win # apply window function
# get fft
SX = np.fft.fft(sx, n=nfft)/nfft # divide by the length
if np.mod(nfft, 2) == 0: # even nfft
SX = np.hstack([SX[0:int(nfft/2)], np.conj(SX[int(nfft/2)]), SX[int(nfft/2):nfft]])
if full == 1: # shift to -fN ~ 0 ~ fN
SX = np.fft.fftshift(SX)
else: # half 0 ~ fN
SX = SX[0:int(nfft/2+1)]
fftdata[b,:] = SX
return ax, fftdata, win_factor
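# Illustrative usage (a sketch; `x` is assumed to be a 1-D signal sampled every `dt` seconds):
#   ax, XX, win_factor = fftbins(x, dt, nfft=512, window='hann', overlap=0.5)
# With full=0 the returned `ax` is the one-sided frequency axis and XX has shape (bins, nfft/2 + 1);
# the estimators below (cross_power, coherence, cross_phase) average bin-wise products over axis 0.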
def cwt(x, dt, df, detrend=0, full=1):
# detrend signal
if detrend == 0:
x = signal.detrend(x, type='constant') # subtract mean
elif detrend == 1:
x = signal.detrend(x, type='linear')
# make a t-axis
tnum = len(x)
nfft = nextpow2(tnum) # power of 2
t = np.arange(nfft)*dt
# make a f-axis with constant df
s0 = 2.0*dt # the smallest scale
ax = np.arange(0.0, 1.0/(1.03*s0), df) # 1.03 for the Morlet wavelet function
# scales
old_settings = np.seterr(divide='ignore')
sj = 1.0/(1.03*ax)
np.seterr(**old_settings)
dj = np.log2(sj/s0) / np.arange(len(sj)) # dj; necessary for reconstruction
sj[0] = tnum*dt/2.0
dj[0] = 0 # remove infinity point due to fmin = 0
# Morlet wavelet function (unnormalized)
omega0 = 6.0 # nondimensional wavelet frequency
wf0 = lambda eta: np.pi**(-1.0/4) * np.exp(1.0j*omega0*eta) * np.exp(-1.0/2*eta**2)
ts = np.sqrt(2)*sj # e-folding time for Morlet wavelet with omega0 = 6; significance level
# FFT of signal
X = np.fft.fft(x, n=nfft)/nfft
# calculate CWT
snum = len(sj)
cwtdata = np.zeros((nfft, snum), dtype=np.complex_)
for j, s in enumerate(sj):
# nondimensional time axis at time scale s
eta = t/s
# FFT of the normalized wavelet function
W = np.fft.fft( np.conj( wf0(eta - np.mean(eta))*np.sqrt(dt/s) ) )
        # Wavelet transform at scale s for all n times
cwtdata[:,j] = np.conj(np.fft.fftshift(np.fft.ifft(X * W) * nfft)) # phase direction correct
# full size
if full == 1:
cwtdata = np.hstack([np.fliplr(np.conj(cwtdata)), cwtdata[:,1:]]) # real x only
ax = np.hstack([-ax[::-1], ax[1:]])
return ax, cwtdata[0:tnum,:], dj, ts
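# Note: the 1.03 factor is the scale-to-Fourier-frequency conversion for the Morlet wavelet with
# omega0 = 6 (Fourier period is roughly 1.03 * scale), so `ax` is an equivalent-frequency axis with
# constant spacing df rather than the usual dyadic scale axis.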
def cross_power(XX, YY, win_factor):
Pxy = np.mean(XX * np.conjugate(YY), 0)
Pxy = np.abs(Pxy).real / win_factor
return Pxy
def coherence(XX, YY):
# normalization outside loop
# Pxy = np.mean(XX * np.conjugate(YY), 0)
# Pxx = np.mean(XX * np.conjugate(XX), 0).real
# Pyy = np.mean(YY * np.conjugate(YY), 0).real
# Gxy = np.abs(Pxy).real / np.sqrt(Pxx * Pyy)
# normalization inside loop
bins = XX.shape[0]
val = np.zeros(XX.shape, dtype=np.complex_)
for i in range(bins):
X = XX[i,:]
Y = YY[i,:]
Pxx = X * np.matrix.conjugate(X)
Pyy = Y * np.matrix.conjugate(Y)
val[i,:] = X*np.matrix.conjugate(Y) / np.sqrt(Pxx*Pyy)
# average over bins
Gxy = np.mean(val, 0)
Gxy = np.abs(Gxy).real
return Gxy
def cross_phase(XX, YY):
Pxy = np.mean(XX * np.conjugate(YY), 0)
Axy = np.arctan2(Pxy.imag, Pxy.real).real
return Axy
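# cross_power, coherence and cross_phase all take the (bins x nfreq) arrays returned by fftbins and
# average over the bins axis; coherence normalises each bin by sqrt(Pxx * Pyy), so its magnitude
# lies in [0, 1].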
def correlation(XX, YY, win_factor):
bins = XX.shape[0]
nfreq = XX.shape[1]
val = np.zeros(XX.shape, dtype=np.complex_)
for b in range(bins):
X = XX[b,:]
Y = YY[b,:]
        val[b,:] = np.fft.ifftshift(X*np.matrix.conjugate(Y))
import numpy as np
# ---------------------------------------------- Data-set generation ----------------------------------------------
# Draw uniform random samples from the hyper-cube [region_a, region_b]^n, where n is the number of variables.
def rand_it(batch_size, variable_dim, region_a, region_b):
    # np.random.rand() returns one or more samples drawn from the uniform distribution over [0, 1);
    # np.random.rand(3, 2) returns a 3-by-2 random matrix with entries in [0, 1), 1 itself excluded.
x_it = (region_b - region_a) * np.random.rand(batch_size, variable_dim) + region_a
x_it = x_it.astype(np.float32)
return x_it
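# Example: rand_it(4, 2, -1.0, 1.0) returns a (4, 2) float32 array drawn uniformly from [-1, 1)^2.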
def rand_bd_1D(batch_size, variable_dim, region_a, region_b):
    # np.asarray converts its input to an ndarray; when the input is a list, later changes to the
    # list do not affect the converted array.
    # reshape(-1, 1): the new shape must be compatible with the old one; a -1 entry tells NumPy to
    # infer that dimension from the remaining ones.
region_a = float(region_a)
region_b = float(region_b)
if variable_dim == 1:
x_left_bd = np.ones(shape=[batch_size, variable_dim], dtype=np.float32) * region_a
x_right_bd = np.ones(shape=[batch_size, variable_dim], dtype=np.float32) * region_b
return x_left_bd, x_right_bd
else:
return
def rand_bd_2D(batch_size, variable_dim, region_a, region_b):
    # np.asarray converts its input to an ndarray; when the input is a list, later changes to the
    # list do not affect the converted array.
    # reshape(-1, 1): a -1 entry tells NumPy to infer that dimension from the remaining ones.
    # np.random.random((100, 50)) generates a 100-by-50 matrix of random floats in [0, 1);
    # np.random.random([100, 50]) behaves identically.
region_a = float(region_a)
region_b = float(region_b)
if variable_dim == 2:
        x_left_bd = (region_b-region_a) * np.random.random([batch_size, 2]) + region_a  # the raw floats are drawn uniformly from [0, 1)
for ii in range(batch_size):
x_left_bd[ii, 0] = region_a
x_right_bd = (region_b - region_a) * np.random.random([batch_size, 2]) + region_a
for ii in range(batch_size):
x_right_bd[ii, 0] = region_b
y_bottom_bd = (region_b - region_a) * np.random.random([batch_size, 2]) + region_a
for ii in range(batch_size):
y_bottom_bd[ii, 1] = region_a
y_top_bd = (region_b - region_a) * np.random.random([batch_size, 2]) + region_a
for ii in range(batch_size):
y_top_bd[ii, 1] = region_b
x_left_bd = x_left_bd.astype(np.float32)
x_right_bd = x_right_bd.astype(np.float32)
y_bottom_bd = y_bottom_bd.astype(np.float32)
y_top_bd = y_top_bd.astype(np.float32)
return x_left_bd, x_right_bd, y_bottom_bd, y_top_bd
else:
return
def rand_bd_3D(batch_size, variable_dim, region_a, region_b):
    # np.asarray converts its input to an ndarray; when the input is a list, later changes to the
    # list do not affect the converted array.
    # reshape(-1, 1): a -1 entry tells NumPy to infer that dimension from the remaining ones.
region_a = float(region_a)
region_b = float(region_b)
if variable_dim == 3:
bottom_bd = (region_b - region_a) * np.random.rand(batch_size, 3) + region_a
for ii in range(batch_size):
bottom_bd[ii, 2] = region_a
top_bd = (region_b - region_a) * np.random.rand(batch_size, 3) + region_a
for ii in range(batch_size):
top_bd[ii, 2] = region_b
left_bd = (region_b - region_a) * np.random.rand(batch_size, 3) + region_a
for ii in range(batch_size):
left_bd[ii, 1] = region_a
right_bd = (region_b - region_a) * np.random.rand(batch_size, 3) + region_a
for ii in range(batch_size):
right_bd[ii, 1] = region_b
front_bd = (region_b - region_a) * np.random.rand(batch_size, 3) + region_a
for ii in range(batch_size):
front_bd[ii, 0] = region_b
behind_bd = (region_b - region_a) * np.random.rand(batch_size, 3) + region_a
for ii in range(batch_size):
behind_bd[ii, 0] = region_a
bottom_bd = bottom_bd.astype(np.float32)
top_bd = top_bd.astype(np.float32)
left_bd = left_bd.astype(np.float32)
right_bd = right_bd.astype(np.float32)
front_bd = front_bd.astype(np.float32)
behind_bd = behind_bd.astype(np.float32)
return bottom_bd, top_bd, left_bd, right_bd, front_bd, behind_bd
else:
return
def rand_bd_4D(batch_size, variable_dim, region_a, region_b):
    # np.asarray converts its input to an ndarray; when the input is a list, later changes to the
    # list do not affect the converted array.
    # reshape(-1, 1): a -1 entry tells NumPy to infer that dimension from the remaining ones.
region_a = float(region_a)
region_b = float(region_b)
variable_dim = int(variable_dim)
x0a = (region_b - region_a) * np.random.rand(batch_size, variable_dim) + region_a
for ii in range(batch_size):
x0a[ii, 0] = region_a
x0b = (region_b - region_a) * np.random.rand(batch_size, variable_dim) + region_a
for ii in range(batch_size):
x0b[ii, 0] = region_b
x1a = (region_b - region_a) * np.random.rand(batch_size, variable_dim) + region_a
for ii in range(batch_size):
x1a[ii, 1] = region_a
x1b = (region_b - region_a) * np.random.rand(batch_size, variable_dim) + region_a
for ii in range(batch_size):
x1b[ii, 1] = region_b
x2a = (region_b - region_a) * np.random.rand(batch_size, variable_dim) + region_a
for ii in range(batch_size):
x2a[ii, 2] = region_a
x2b = (region_b - region_a) * np.random.rand(batch_size, variable_dim) + region_a
for ii in range(batch_size):
x2b[ii, 2] = region_b
x3a = (region_b - region_a) * np.random.rand(batch_size, variable_dim) + region_a
for ii in range(batch_size):
x3a[ii, 3] = region_a
x3b = (region_b - region_a) * np.random.rand(batch_size, variable_dim) + region_a
for ii in range(batch_size):
x3b[ii, 3] = region_b
x0a = x0a.astype(np.float32)
x0b = x0b.astype(np.float32)
x1a = x1a.astype(np.float32)
x1b = x1b.astype(np.float32)
x2a = x2a.astype(np.float32)
x2b = x2b.astype(np.float32)
x3a = x3a.astype(np.float32)
x3b = x3b.astype(np.float32)
return x0a, x0b, x1a, x1b, x2a, x2b, x3a, x3b
def rand_bd_5D(batch_size, variable_dim, region_a, region_b):
    # np.asarray converts its input to an ndarray; when the input is a list, later changes to the
    # list do not affect the converted array.
    # reshape(-1, 1): a -1 entry tells NumPy to infer that dimension from the remaining ones.
region_a = float(region_a)
region_b = float(region_b)
if variable_dim == 5:
        x0a = (region_b - region_a) * np.random.rand(batch_size, 5) + region_a
#! /usr/bin/env python3
import numpy as np
def computeWeightedMRecons(NeighborValues,NeighborWeights,TrainingInfo):
# Weighted Mode Computation
if TrainingInfo.FeatReconMethod=='DWM':
ClassLabels = np.unique(NeighborValues)
ClassWeightSums = np.zeros((np.shape(NeighborWeights)[0],np.shape(ClassLabels)[0]))
for i in range(0,np.shape(ClassLabels)[0]):
            TempFeats=np.zeros((np.shape(NeighborWeights)
import numpy as np
import numpy.testing as npt
import nitime.timeseries as ts
import pytest
def test_get_time_unit():
number = 4
npt.assert_equal(ts.get_time_unit(number), None)
list_of_numbers = [4, 5, 6]
npt.assert_equal(ts.get_time_unit(list_of_numbers), None)
for tu in ['ps', 's', 'D']:
time_point = ts.TimeArray([4], time_unit=tu)
npt.assert_equal(ts.get_time_unit(time_point), tu)
list_of_time = [ts.TimeArray(4, time_unit=tu), ts.TimeArray(5, time_unit=tu)]
npt.assert_equal(ts.get_time_unit(list_of_time), tu)
# Go crazy, we don't mind:
list_of_lists = [[ts.TimeArray(4, time_unit=tu),
ts.TimeArray(5, time_unit=tu)],
[ts.TimeArray(4, time_unit=tu),
ts.TimeArray(5, time_unit=tu)]]
npt.assert_equal(ts.get_time_unit(list_of_lists), tu)
time_arr = ts.TimeArray([4, 5], time_unit=tu)
npt.assert_equal(ts.get_time_unit(time_arr), tu)
def test_TimeArray():
time1 = ts.TimeArray(list(range(100)), time_unit='ms')
time2 = time1 + time1
npt.assert_equal(time2.time_unit, 'ms')
time1 = ts.TimeArray(10 ** 6)
npt.assert_equal(time1.__repr__(), '1000000.0 s')
#TimeArray can't be more than 1-d:
with pytest.raises(ValueError) as e_info:
ts.TimeArray(np.zeros((2, 2)))
dt = ts.TimeArray(0.001, time_unit='s')
tt = ts.TimeArray([dt])
npt.assert_equal(dt, tt)
t1 = ts.TimeArray([0, 1, 2, 3])
t2 = ts.TimeArray([ts.TimeArray(0),
ts.TimeArray(1),
ts.TimeArray(2),
ts.TimeArray(3)])
npt.assert_equal(t1, t2)
def test_TimeArray_math():
"Addition and subtraction should convert to TimeArray units"
time1 = ts.TimeArray(list(range(10)), time_unit='ms')
time2 = ts.TimeArray(list(range(1,11)), time_unit='ms')
# units should be converted to whatever units the array has
time3 = time1 + 1
npt.assert_equal(time2,time3)
time4 = time2 - 1
npt.assert_equal(time1,time4)
# floats should also work
time3 = time1 + 1.0
npt.assert_equal(time2,time3)
time4 = time2 - 1.0
npt.assert_equal(time1,time4)
# test the r* versions
time3 = 1 + time1
npt.assert_equal(time2,time3)
time4 = 1 - time2
npt.assert_equal(-time1,time4)
# floats should also work
time3 = 1.0 + time1
npt.assert_equal(time2,time3)
time4 = 1.0 - time2
npt.assert_equal(-time1,time4)
timeunits = ts.TimeArray(list(range(10)), time_unit='s')
timeunits.convert_unit('ms')
# now, math with non-TimeArrays should be based on the new time_unit
# here the range() list gets converted to a TimeArray with the same units
# as timeunits (which is now 'ms')
tnew = timeunits + list(range(10))
npt.assert_equal(tnew, timeunits+time1) # recall that time1 was 0-10ms
def test_TimeArray_comparison():
"Comparison with unitless quantities should convert to TimeArray units"
time = ts.TimeArray(list(range(10)), time_unit='ms')
npt.assert_equal(time < 5 , [True]*5+[False]*5)
npt.assert_equal(time > 5 , [False]*6+[True]*4)
npt.assert_equal(time <= 5, [True]*6+[False]*4)
npt.assert_equal(time >= 5, [False]*5+[True]*5)
npt.assert_equal(time == 5, [False]*5+[True] + [False]*4)
time.convert_unit('s')
# now all of time is < 1 in the new time_unit
npt.assert_equal(time < 5 , [True]*10)
npt.assert_equal(time > 5 , [False]*10)
npt.assert_equal(time <= 5, [True]*10)
npt.assert_equal(time >= 5, [False]*10)
npt.assert_equal(time == 5, [False]*10)
def test_TimeArray_init_int64():
"""Make sure that we can initialize TimeArray with an array of ints"""
time = ts.TimeArray(np.int64(1))
npt.assert_equal(time.__repr__(), '1.0 s')
pass
def test_TimeArray_init_list():
"""Initializing with a list that contains TimeArray should work.
"""
for t in [0.001, ts.TimeArray(0.001, time_unit='s')]:
tl = [t]
ta = ts.TimeArray(t, time_unit='s')
tla = ts.TimeArray(tl, time_unit='s')
npt.assert_(ta, tla)
def test_TimeArray_repr():
"""
>>> a = ts.TimeArray([1.1,2,3])
>>> a
TimeArray([ 1.1, 2. , 3. ], time_unit='s')
>>> t = ts.TimeArray(a,time_unit='ms')
>>> t
TimeArray([ 1100., 2000., 3000.], time_unit='ms')
>>> t[0]
1100.0 ms
"""
def test_TimeArray_copyflag():
"""Testing the setting of the copy-flag, where that makes sense"""
#These two should both generate a TimeArray, with one picosecond.
#This one holds time_unit='s'
t1 = ts.TimeArray(np.array([1], dtype=np.int64), copy=False)
#This one holds time_unit='ps':
t2 = ts.TimeArray(1, time_unit='ps')
t3 = ts.TimeArray(t2, copy=False)
npt.assert_equal(t1, t2)
npt.assert_equal(t2.ctypes.data, t3.ctypes.data)
def test_TimeArray_new():
for unit in ['ns', 'ms', 's', None]:
for flag in [True, False]:
#list -doesn't make sense to set copy=True
time2 = ts.TimeArray(list(range(5)), time_unit=unit, copy=True)
#numpy array (float) - doesn't make sense to set copy=True
time2f = ts.TimeArray(np.arange(5.), time_unit=unit, copy=True)
#TimeArray
time3 = ts.TimeArray(time2, time_unit=unit, copy=flag)
#integer
time4 = ts.TimeArray(5, time_unit=unit, copy=True)
#float
time5 = ts.TimeArray(5.0, time_unit=unit, copy=True)
npt.assert_equal(time2, time2f)
npt.assert_equal(time2, time3)
time3[0] += 100
if flag:
npt.assert_(time2[0] != time3[0])
else:
npt.assert_(time2[0] == time3[0])
npt.assert_equal(time2[1:], time3[1:])
npt.assert_equal(time4, time5)
def test_TimeArray_bool():
time1 = ts.TimeArray([1, 2, 3], time_unit='s')
time2 = ts.TimeArray([1000, 2000, 3000], time_unit='ms')
bool_arr = np.ones(time1.shape, dtype=bool)
npt.assert_equal(time1, time2)
npt.assert_equal(bool_arr, time1 == time2)
npt.assert_(type(time1 == time2) is not ts.TimeArray)
def test_TimeArray_convert_unit():
"""
>>> a = ts.TimeArray([1,2,3,4])
>>> a.convert_unit('ms')
>>> a
TimeArray([ 1000., 2000., 3000., 4000.], time_unit='ms')
>>> a.time_unit
'ms'
>>> b = ts.TimeArray([1,2,3,4],'s')
>>> a==b
array([ True, True, True, True], dtype=bool)
"""
def test_TimeArray_div():
#divide singelton by singleton:
a = 2.0
b = 6.0
time1 = ts.TimeArray(a, time_unit='s')
time2 = ts.TimeArray(b, time_unit='s')
div1 = a / b
#This should eliminate the units and return a float, not a TimeArray:
div2 = time1 / time2
npt.assert_equal(div1, div2)
#Divide a TimeArray by a singelton:
a = np.array([1, 2, 3])
b = 6.0
time1 = ts.TimeArray(a, time_unit='s')
time2 = ts.TimeArray(b, time_unit='s')
div1 = a / b
#This should eliminate the units and return a float array, not a TimeArray:
div2 = time1 / time2
npt.assert_equal(div1, div2)
#Divide a TimeArray by another TimeArray:
a = np.array([1, 2, 3])
b = np.array([2, 2, 2]).astype(float) # TimeArray division is float division!
time1 = ts.TimeArray(a, time_unit='s')
time2 = ts.TimeArray(b, time_unit='s')
div1 = a / b
#This should eliminate the units and return a float array, not a TimeArray:
div2 = time1 / time2
npt.assert_equal(div1, div2)
def test_TimeArray_index_at():
time1 = ts.TimeArray(list(range(10)), time_unit='ms')
for i in range(5):
# The return value is always an array, so we keep it for multiple tests
i_arr = np.array(i)
# Check 'closest' indexing mode first
idx = time1.index_at(i)
npt.assert_equal(idx, i_arr)
# If we index with seconds/1000, results shouldn't vary
idx_secs = time1.index_at(ts.TimeArray(i / 1000., time_unit='s'))
npt.assert_equal(idx_secs, i_arr)
# If we now change the tolerance
# In this case, it should still return
idx = time1.index_at(i + 0.1, tol=0.1)
npt.assert_equal(idx, i_arr)
# But with a smaller tolerance, we should get no indices
idx = time1.index_at(i + 0.1, tol=0.05)
npt.assert_equal(idx, np.array([]))
# Now, check before/after modes
idx = time1.index_at(i + 0.1, mode='before')
npt.assert_equal(idx, i_arr)
idx = time1.index_at(i + 0.1, mode='after')
npt.assert_equal(idx, i_arr + 1)
def test_TimeArray_at():
time1 = ts.TimeArray(list(range(10)), time_unit='ms')
for i in range(10):
this = time1.at(i)
i_ms = ts.TimeArray(i / 1000.)
npt.assert_equal(this, ts.TimeArray(i, time_unit='ms'))
this_secs = time1.at(i_ms)
npt.assert_equal(this_secs, ts.TimeArray(i, time_unit='ms'))
seconds_array = ts.TimeArray(time1, time_unit='s')
this_secs = seconds_array.at(i / 1000.)
npt.assert_equal(this_secs, ts.TimeArray(i, time_unit='ms'))
all = time1.at(i_ms, tol=10)
npt.assert_equal(all, time1)
if i > 0 and i < 9:
this_secs = time1.at(i_ms, tol=1)
npt.assert_equal(this_secs,
ts.TimeArray([i - 1, i, i + 1], time_unit='ms'))
def test_TimeArray_at2():
time1 = ts.TimeArray(list(range(10)), time_unit='ms')
for i in [1]:
i_ms = ts.TimeArray(i / 1000.)
this_secs = time1.at(i_ms, tol=1)
npt.assert_equal(this_secs,
ts.TimeArray([i - 1, i, i + 1], time_unit='ms'))
def test_UniformTime_index_at():
time1 = ts.UniformTime(t0=1000, length=10, sampling_rate=1000, time_unit='ms')
mask = [False] * 10
for i in range(10):
idx = time1.index_at(ts.TimeArray(1000 + i, time_unit='ms'))
npt.assert_equal(idx, np.array(i))
mask[i] = True
mask_idx = time1.index_at(ts.TimeArray(1000 + i, time_unit='ms'),
boolean=True)
npt.assert_equal(mask_idx, mask)
if i > 0 and i < 9:
mask[i - 1] = True
mask[i + 1] = True
mask_idx = time1.index_at(
ts.TimeArray([999 + i, 1000 + i, 1001 + i],
time_unit='ms'), boolean=True)
npt.assert_equal(mask_idx, mask)
mask[i - 1] = False
mask[i + 1] = False
mask[i] = False
#XXX Need to write these tests:
#Test the unit conversion:
#
#def test_TimeArray_unit_conversion():
#Test the overloaded __getitem__ and __setitem:
#
def test_TimeArray_getset():
t1 = ts.TimeSeries(data = np.random.rand(2, 3, 4), sampling_rate=1)
npt.assert_equal(t1[0],t1.data[...,0])
def test_UniformTime():
tuc = ts.time_unit_conversion
for unit, duration in zip(['ns', 'ms', 's', None],
[2 * 10 ** 9, 2 * 10 ** 6, 100, 20]):
t1 = ts.UniformTime(duration=duration, sampling_rate=1,
time_unit=unit)
t2 = ts.UniformTime(duration=duration, sampling_rate=20,
time_unit=unit)
#The following two tests verify that first-last are equal to the
#duration, but it is unclear whether that is really the behavior we
#want, because the t_i held by a TimeSeries is the left
#(smaller) side of the time-duration defined by the bin
#The difference between the first and last item is the duration:
#npt.assert_equal(t1[-1]-t1[0],
# ts.TimeArray(duration,time_unit=unit))
#Duration doesn't depend on the sampling rate:
#npt.assert_equal(t1[-1]-t2[0],
# ts.TimeArray(duration,time_unit=unit))
a = ts.UniformTime(duration=10, sampling_rate=1)
b = ts.UniformTime(a, time_unit=unit)
npt.assert_equal(a.sampling_interval, b.sampling_interval)
npt.assert_equal(a.sampling_rate, b.sampling_rate)
b = ts.UniformTime(a, duration=2 * duration, time_unit=unit)
npt.assert_equal(a.sampling_interval, b.sampling_interval)
npt.assert_equal(a.sampling_rate, b.sampling_rate)
b = ts.UniformTime(a, length=100, time_unit=unit)
npt.assert_equal(a.sampling_interval, b.sampling_interval)
npt.assert_equal(a.sampling_rate, b.sampling_rate)
b = ts.UniformTime(a, length=100, time_unit=unit)
npt.assert_equal(a.sampling_interval, b.sampling_interval)
npt.assert_equal(a.sampling_rate, b.sampling_rate)
b = ts.UniformTime(a, length=100, duration=duration, time_unit=unit)
c = ts.UniformTime(length=100, duration=duration, time_unit=unit)
npt.assert_equal(c, b)
b = ts.UniformTime(sampling_interval=1, duration=10, time_unit=unit)
c = ts.UniformTime(sampling_rate=tuc['s'] / tuc[unit],
length=10, time_unit=unit)
npt.assert_equal(c, b)
#This should raise a value error, because the duration is shorter than
#the sampling_interval:
with pytest.raises(ValueError) as e_info:
ts.UniformTime(dict(sampling_interval=10, duration=1))
#Time objects can be initialized with other time objects setting the
#duration, sampling_interval and sampling_rate:
a = ts.UniformTime(length=1, sampling_rate=1)
with pytest.raises(ValueError) as e_info:
ts.UniformTime(dict(data=a, sampling_rate=10, sampling_interval=.1))
b = ts.UniformTime(duration=2 * a.sampling_interval,
sampling_rate=2 * a.sampling_rate)
npt.assert_equal(ts.Frequency(b.sampling_rate),
ts.Frequency(2 * a.sampling_rate))
npt.assert_equal(b.sampling_interval,
ts.TimeArray(0.5 * a.sampling_rate))
b = ts.UniformTime(duration=10,
sampling_interval=a.sampling_interval)
npt.assert_equal(b.sampling_rate, a.sampling_rate)
b = ts.UniformTime(duration=10,
sampling_rate=a.sampling_rate)
npt.assert_equal(b.sampling_interval, a.sampling_interval)
    # make sure that t0 and the other attributes are copied
a = ts.UniformTime(length=1, sampling_rate=1)
b = a.copy()
npt.assert_equal(b.duration, a.duration)
npt.assert_equal(b.sampling_rate, a.sampling_rate)
npt.assert_equal(b.sampling_interval, a.sampling_interval)
npt.assert_equal(b.t0, a.t0)
def test_UniformTime_repr():
"""
>>> time1 = ts.UniformTime(sampling_rate=1000,time_unit='ms',length=3)
>>> time1.sampling_rate
1000.0 Hz
>>> time1
UniformTime([ 0., 1., 2.], time_unit='ms')
>>> time2= ts.UniformTime(sampling_rate=1000,time_unit='s',length=3)
>>> time2.sampling_rate
1000.0 Hz
>>> time2
UniformTime([ 0. , 0.001, 0.002], time_unit='s')
>>> a = ts.UniformTime(length=5,sampling_rate=1,time_unit='ms')
>>> b = ts.UniformTime(a)
>>> b
UniformTime([ 0., 1000., 2000., 3000., 4000.], time_unit='ms')
>>> a
UniformTime([ 0., 1000., 2000., 3000., 4000.], time_unit='ms')
>>> b = ts.UniformTime(a,time_unit='s')
>>> b
UniformTime([ 0., 1., 2., 3., 4.], time_unit='s')
>>> a = ts.UniformTime(length=1,sampling_rate=2)
>>> b = ts.UniformTime(length=10,sampling_interval=a.sampling_interval)
>>> b.sampling_rate
2.0 Hz
"""
def test_Frequency():
"""Test frequency representation object"""
tuc = ts.time_unit_conversion
for unit in ['ns', 'ms', 's', None]:
f = ts.Frequency(1, time_unit=unit)
npt.assert_equal(f.to_period(), tuc[unit])
f = ts.Frequency(1000, time_unit=unit)
npt.assert_equal(f.to_period(), tuc[unit] / 1000)
f = ts.Frequency(0.001, time_unit=unit)
npt.assert_equal(f.to_period(), tuc[unit] * 1000)
def test_TimeSeries():
"""Testing the initialization of the uniform time series object """
#Test initialization with duration:
tseries1 = ts.TimeSeries([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], duration=10)
tseries2 = ts.TimeSeries([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], sampling_interval=1)
npt.assert_equal(tseries1.time, tseries2.time)
#downsampling:
t1 = ts.UniformTime(length=8, sampling_rate=2)
#duration is the same, but we're downsampling to 1Hz
tseries1 = ts.TimeSeries(data=[1, 2, 3, 4], time=t1, sampling_rate=1)
#If you didn't explicitly provide the rate you want to downsample to, that
#is an error:
with pytest.raises(ValueError) as e_info:
ts.TimeSeries(data=[1, 2, 3, 4], time=t1)
tseries2 = ts.TimeSeries(data=[1, 2, 3, 4], sampling_rate=1)
tseries3 = ts.TimeSeries(data=[1, 2, 3, 4], sampling_rate=1000,
time_unit='ms')
#you can specify the sampling_rate or the sampling_interval, to the same
#effect, where specifying the sampling_interval is in the units of that
#time-series:
tseries4 = ts.TimeSeries(data=[1, 2, 3, 4], sampling_interval=1,
time_unit='ms')
npt.assert_equal(tseries4.time, tseries3.time)
#The units you use shouldn't matter - time is time:
tseries6 = ts.TimeSeries(data=[1, 2, 3, 4],
sampling_interval=0.001,
time_unit='s')
npt.assert_equal(tseries6.time, tseries3.time)
#And this too - perverse, but should be possible:
tseries5 = ts.TimeSeries(data=[1, 2, 3, 4],
sampling_interval=ts.TimeArray(0.001,
time_unit='s'),
time_unit='ms')
npt.assert_equal(tseries5.time, tseries3.time)
#initializing with a UniformTime object:
t = ts.UniformTime(length=3, sampling_rate=3)
data = [1, 2, 3]
tseries7 = ts.TimeSeries(data=data, time=t)
npt.assert_equal(tseries7.data, data)
data = [1, 2, 3, 4]
#If the data is not the right length, that should throw an error:
with pytest.raises(ValueError) as e_info:
ts.TimeSeries(data=data, time=t)
# test basic arithmetic with TimeSeries
tseries1 = ts.TimeSeries([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], sampling_rate=1)
tseries2 = tseries1 + 1
npt.assert_equal(tseries1.data + 1, tseries2.data)
npt.assert_equal(tseries1.time, tseries2.time)
tseries2 -= 1
npt.assert_equal(tseries1.data, tseries2.data)
npt.assert_equal(tseries1.time, tseries2.time)
tseries2 = tseries1 * 2
npt.assert_equal(tseries1.data * 2, tseries2.data)
npt.assert_equal(tseries1.time, tseries2.time)
tseries2 = tseries2 / 2
npt.assert_equal(tseries1.data, tseries2.data)
npt.assert_equal(tseries1.time, tseries2.time)
tseries_nd1 = ts.TimeSeries(np.random.randn(3, 100), sampling_rate=1)
tseries_nd2 = ts.TimeSeries(np.random.randn(3, 100), sampling_rate=1)
npt.assert_equal((tseries_nd1 + tseries_nd2).data,
tseries_nd1.data + tseries_nd2.data)
npt.assert_equal((tseries_nd1 - tseries_nd2).data,
tseries_nd1.data - tseries_nd2.data)
npt.assert_equal((tseries_nd1 * tseries_nd2).data,
tseries_nd1.data * tseries_nd2.data)
npt.assert_equal((tseries_nd1 / tseries_nd2).data,
tseries_nd1.data / tseries_nd2.data)
def test_TimeSeries_repr():
"""
>>> t=ts.UniformTime(length=3,sampling_rate=3)
>>> tseries1 = ts.TimeSeries(data=[3,5,8],time=t)
>>> t.sampling_rate
3.0 Hz
>>> tseries1.sampling_rate
3.0 Hz
>>> tseries1 = ts.TimeSeries(data=[3,5,8],sampling_rate=3)
>>> tseries1.time
UniformTime([ 0. , 0.3333, 0.6667], time_unit='s')
>>> tseries1.sampling_rate
3.0 Hz
>>> tseries1.sampling_interval
0.333333333333 s
>>> a = ts.UniformTime(length=1,sampling_rate=2)
>>> b = ts.TimeSeries(data=[1,2,3],sampling_interval=a.sampling_interval)
>>> b.sampling_rate
2.0 Hz
>>> a = ts.UniformTime(length=1,sampling_rate=1)
>>> b = ts.TimeSeries(data=[1,2,3],sampling_interval=a.sampling_interval)
>>> b.sampling_rate
1.0 Hz
"""
def test_Epochs():
tms = ts.TimeArray(data=list(range(100)), time_unit='ms')
tmin = ts.TimeArray(data=list(range(100)), time_unit='m')
tsec = ts.TimeArray(data=list(range(100)), time_unit='s')
utms = ts.UniformTime(length=100, sampling_interval=1, time_unit='ms')
utmin = ts.UniformTime(length=100, sampling_interval=1, time_unit='m')
utsec = ts.UniformTime(length=100, sampling_interval=1, time_unit='s')
tsms = ts.TimeSeries(data=list(range(100)), sampling_interval=1, time_unit='ms')
tsmin = ts.TimeSeries(data=list(range(100)), sampling_interval=1, time_unit='m')
tssec = ts.TimeSeries(data=list(range(100)), sampling_interval=1, time_unit='s')
# one millisecond epoch
e1ms = ts.Epochs(0, 1, time_unit='ms')
e09ms = ts.Epochs(0.1, 1, time_unit='ms')
msg = "Seems like a problem with copy=False in TimeArray constructor."
npt.assert_equal(e1ms.duration, ts.TimeArray(1, time_unit='ms'), msg)
# one day
e1d = ts.Epochs(0, 1, time_unit='D')
npt.assert_equal(e1d.duration, ts.TimeArray(1, time_unit='D'), msg)
e1ms_ar = ts.Epochs([0, 0], [1, 1], time_unit='ms')
for t in [tms, tmin, tsec, utms, utmin, utsec]:
# the sample time arrays are all at least 1ms long, so this should
# return a timearray that has exactly one time point in it
npt.assert_equal(len(t.during(e1ms)), 1)
# this time epoch should not contain any point
npt.assert_equal(len(t.during(e09ms)), 0)
# make sure, slicing doesn't change the class
npt.assert_equal(type(t), type(t.during(e1ms)))
for t in [tsms, tsmin, tssec]:
# the sample time series are all at least 1ms long, so this should
# return a timeseries that has exactly one time point in it
npt.assert_equal(len(t.during(e1ms)), 1)
# make sure, slicing doesn't change the class
npt.assert_equal(type(t), type(t.during(e1ms)))
# same thing but now there's an array of epochs
e2 = ts.Epochs([0, 10], [10, 20], time_unit=t.time_unit)
# make sure, slicing doesn't change the class for array of epochs
npt.assert_equal(type(t), type(t.during(e2)))
# Indexing with an array of epochs (all of which are the same length)
npt.assert_equal(t[e2].data.shape, (2, 10))
npt.assert_equal(len(t.during(e2)), 10)
npt.assert_equal(t[e2].data.ndim, 2)
# check the data at some timepoints (a dimension was added)
npt.assert_equal(t[e2][0], (0, 10))
npt.assert_equal(t[e2][1], (1, 11))
# check the data for each epoch
npt.assert_equal(t[e2].data[0], list(range(10)))
npt.assert_equal(t[e2].data[1], list(range(10, 20)))
npt.assert_equal(t[e2].duration, e2[0].duration)
# slice with Epochs of different length (not supported for timeseries,
# raise error, though future jagged array implementation could go here)
ejag = ts.Epochs([0, 10], [10, 40], time_unit=t.time_unit)
# next line is the same as t[ejag]
with pytest.raises(ValueError) as e_info:
t.__getitem__(ejag)
# if an epoch lies entirely between samples in the timeseries,
# return an empty array
eshort = ts.Epochs(2.5, 2.7, time_unit=t.time_unit)
npt.assert_equal(len(t[eshort].data), 0)
e1ms_outofrange = ts.Epochs(200, 300, time_unit=t.time_unit)
# assert that with the epoch moved outside of the time range of our
# data, slicing with the epoch now yields an empty array
with pytest.raises(ValueError) as e_info:
t.during(e=e1ms_outofrange)
# the sample timeseries are all shorter than a day, so this should
# raise an error (instead of padding, or returning a shorter than
# expected array).
with pytest.raises(ValueError) as e_info:
t.during(e=e1d)
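# Illustrative helper (not one of the original tests): indexing a TimeSeries with
# an array of equal-length Epochs adds a leading "epoch" dimension to the data,
# mirroring the assertions on e2 above.
def _epoch_indexing_example():
    tser = ts.TimeSeries(data=list(range(100)), sampling_interval=1, time_unit='s')
    eps = ts.Epochs([0, 50], [10, 60], time_unit='s')
    return tser[eps].data.shape  # (2, 10): one row of 10 samples per epoch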
def test_basic_slicing():
t = ts.TimeArray(list(range(4)))
for x in range(3):
ep = ts.Epochs(.5,x+.5)
npt.assert_equal(len(t[ep]), x)
# epoch starts before timeseries
npt.assert_equal(len(t[ts.Epochs(-1,3)]), len(t)-1)
# epoch ends after timeseries
npt.assert_equal(len(t[ts.Epochs(.5,5)]), len(t)-1)
# epoch starts before and ends after timeseries
npt.assert_equal(len(t[ts.Epochs(-1,100)]), len(t))
ep = ts.Epochs(20,100)
npt.assert_equal(len(t[ep]), 0)
def test_Events():
# time has to be one-dimensional
with pytest.raises(ValueError) as e_info:
ts.Events(np.zeros((2, 2)))
t = ts.TimeArray([1, 2, 3], time_unit='ms')
x = [1, 2, 3]
y = [2, 4, 6]
z = [10., 20., 30.]
i0 = [0, 0, 1]
i1 = [0, 1, 2]
for unit in [None, 's', 'ns', 'D']:
# events with data
ev1 = ts.Events(t, time_unit=unit, i=x, j=y, k=z)
# events with indices
ev2 = ts.Events(t, time_unit=unit, indices=[i0, i1])
# events with indices and labels
ev3 = ts.Events(t, time_unit=unit, labels=['trial', 'other'],
indices=[i0, i1])
# Note that the length of indices and labels has to be identical:
with pytest.raises(ValueError) as e_info:
ts.Events(t, time_unit=unit,
labels=['trial', 'other'], indices=[i0])  # only one of the indices!
# make sure the time is retained
npt.assert_equal(ev1.time, t)
import numpy as np
from scipy.sparse import load_npz
import glob,os
import matplotlib as mpl
mpl.use('Agg')
from matplotlib import pyplot as plt
mpl.style.use('ggplot')
import seaborn as sns
import argparse
def bin_results(R,P0,n=50,y0=None):
P = np.copy(P0)
lower,upper = np.percentile(P,1),np.percentile(P,99)
P[P<lower] = lower
P[P>upper] = upper
xs = np.argsort(P)
bins = [P[xs[i]] for i in range(0,len(P),int(len(P)/n))]
indices = np.digitize(P,bins)
x = np.array([np.average(P[indices == i]) for i in set(indices)])
if y0 is not None:
y = np.array([np.average(R[indices == i] < y0) for i in set(indices)])
else:
y = np.array([np.average(R[indices == i]) for i in set(indices)])
return x,y
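# Small synthetic illustration (not part of the original script): bin a noisy
# score/result relationship into ~20 points, as done for the summary figure below.
def _demo_bin_results():
    P = np.random.rand(5000)                   # e.g. predicted values
    R = P + np.random.normal(0, 0.1, P.shape)  # e.g. observed results
    return bin_results(R, P, n=20)             # x = mean P per bin, y = mean R per bin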
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--viral-load-matrix', help='Path to viral load matrix (individuals x time points)')
parser.add_argument('--resultspath', help='Path to save summary figure')
parser.add_argument('--start-time', help='Beginning of time range to analyze',type=int)
parser.add_argument('--end-time', help='End of time range to analyze',type=int)
parser.add_argument('--LOD', help='Limit of detection',type=float,default=100)
parser.add_argument('--min-resources', help='Minimum number of resources to consider',type=int,default=12)
args,_ = parser.parse_known_args()
ViralLoad = load_npz(args.viral_load_matrix)
timepoints = np.load(args.viral_load_matrix.replace('viral_loads.npz', 'timepoints.npy'))
e0 = np.average([np.average(v.data > args.LOD) for v in ViralLoad[:,args.start_time:args.end_time].T if len(v.data)>0])
Results = []
unique_nm = []
FP = glob.glob(os.path.join(args.resultspath, 'Recall_combined.*.npy'))
for fp in FP:
cond = fp.split('.')[-2]
n = int(cond.split('_')[0].split('-')[1])
m = int(cond.split('_')[1].split('-')[1])
q = int(cond.split('_')[2].split('-')[1])
q = min(m,q) # filenames sometimes screwy
sensitivity = np.load('%s/Recall_combined.%s.npy' % (args.resultspath,cond))
efficiency = np.load('%s/Eff_avg.%s.npy' % (args.resultspath,cond))
avg_sensitivity = sensitivity[args.start_time:args.end_time]
avg_efficiency = efficiency[args.start_time:args.end_time]
#effective_m = n/avg_efficiency # stage (i) pools + stage (ii) validation tests
Results.append((n,m,q,avg_sensitivity,avg_efficiency))
#print(Results[-1],m/Results[-1][3]*Results[-1][4])
if (n >= args.min_resources) and (m >= args.min_resources):
unique_nm += [n,m]
unique_nm = np.unique(unique_nm)
X = np.zeros((len(unique_nm),len(unique_nm)))
fig = plt.figure(num=None, figsize=(8, 8))
f = open('%s/summary.resource_t0-%d_t1-%d.tmp.csv' % (args.resultspath, args.start_time, args.end_time),'w')
_ = f.write(','.join(['Sample budget','Test budget','Design samples','Design pools','Design sample split','Design runs per day','Design effectiveness']) + '\n')
for i,n_swabs in enumerate(unique_nm):
for j,m_kits in enumerate(unique_nm):
nm_min = min(n_swabs,m_kits)
best = (e0*nm_min,nm_min,nm_min,1,1)
for n,m,q,sens,eff in Results:
m_eff = n/eff # stage (i) pools + stage (ii) validation tests
n_tests = np.minimum(n_swabs/n, m_kits/m_eff)
if np.average(n_tests)
import logging
import os
import fitsio
import numpy as np
import pandas as pd
from astropy.io import fits
from lightkurve.correctors.designmatrix import _spline_basis_vector
from scipy.sparse import csr_matrix, hstack, lil_matrix, vstack
from fbpca import pca
from astropy.stats import sigma_clip
from . import PACKAGEDIR
from .version import __version__
log = logging.getLogger(__name__)
class BackDrop(object):
"""
Class to create background corrections for TESS.
`tess-backdrop` fits a simple, three part model:
1. A 2D, low order polynomial to remove the bulk background
2. A 2D b-spline to remove high spatial frequency noise
3. A model for strap offsets.
Parameters
----------
fnames: list of str, or list of astropy.io.fits objects
Input TESS FFIs
npoly: int
The order of polynomial to fit to the data in both x and y dimension.
Recommend ~4.
nrad: int
The order of polynomial to fit to the data in radius from the boresight
nknots: int
Number of knots to fit in each dimension. Recommend ~40.
degree: int
Degree of spline to fit.
nb: int
Number of bins to downsample to for polynomial fit.
cutout_size : int
Size of cut out to use. Default is 2048, full FFI
"""
def __init__(
self,
fnames=None,
npoly=5,
nrad=5,
nknots=40,
degree=2,
nb=8,
cutout_size=2048,
max_batch_number=20,
min_batch_size=5,
# reference_frame=0,
):
"""
Parameters
----------
fnames: list of str, or list of astropy.io.fits objects
Input TESS FFIs
npoly: int
The order of polynomial to fit to the data in both x and y dimension.
Recommend ~4.
nknots: int
Number of knots to fit in each dimension. Recommend ~40.
degree: int
Degree of spline to fit.
nb: int
Number of bins to downsample to for polynomial fit.
"""
self.npoly = npoly
self.nrad = nrad
self.nknots = nknots
self.degree = degree
self.fnames = fnames
self.nknots = nknots
self.max_batch_number = max_batch_number
self.min_batch_size = min_batch_size
self.nb = nb
self.cutout_size = cutout_size
# self.reference_frame = reference_frame
# if self.fnames is not None:
# self.reference_image = self.fnames[self.reference_frame]
if self.fnames is not None:
if isinstance(self.fnames, (str, list)):
self.fnames = np.asarray(self.fnames)
self.fnames = np.sort(self.fnames)
if len(self.fnames) >= 15:
log.info("Finding bad frames")
self.bad_frames, self.quality = _find_bad_frames(
self.fnames, cutout_size=self.cutout_size
)
else:
self.bad_frames = np.zeros(len(self.fnames), bool)
if (
len(self.fnames[~self.bad_frames]) / self.max_batch_number
< self.min_batch_size
):
self.batches = np.array_split(
self.fnames[~self.bad_frames],
np.max(
[1, len(self.fnames[~self.bad_frames]) // self.min_batch_size]
),
)
else:
self.batches = np.array_split(
self.fnames[~self.bad_frames], self.max_batch_number
)
else:
self.bad_frames = None
self.knots_wbounds = _get_knots(
np.arange(self.cutout_size), nknots=nknots, degree=degree
)
def _build_mask(self):
"""Builds a boolean mask for the input image stack which
1. Masks out very bright pixels (>1500 counts) or pixels with a sharp gradient (>30 counts)
2. Masks out pixels where there is a consistently outlying gradient in the image
3. Masks out saturated columns, including a wide halo, and a whisker mask.
Returns
-------
soft_mask : np.ndarray of bools
"soft" mask of pixels that, on average, have steep gradients
sat_mask : np.ndarray of bools
Mask where pixels that are not saturated are True
"""
# average = np.zeros((self.cutout_size, self.cutout_size))
# weights = np.zeros((self.cutout_size, self.cutout_size))
diff = None
# diff_ar = np.zeros(len(self.fnames))
# sat_mask = None
# hard_mask = np.zeros((self.cutout_size, self.cutout_size), dtype=bool)
med_image = np.zeros((len(self.batches), self.cutout_size, self.cutout_size))
self.odd_mask = csr_matrix((len(self.fnames), 2048 ** 2), dtype=bool)
for bdx, batch in enumerate(self.batches):
if len(batch) == 0:
med_image[bdx, :, :] = np.nan
else:
batch_count = 0
for fdx, fname in enumerate(batch):
fdx += np.where(self.fnames == batch[0])[0][0]
with fits.open(fname, lazy_load_hdus=True) as hdu:
if not hasattr(self, "sector"):
self.sector = int(fname.split("-s")[1].split("-")[0])
self.camera = hdu[1].header["camera"]
self.ccd = hdu[1].header["ccd"]
if self.ccd in [1, 3]:
self.bore_pixel = [2048, 2048]
elif self.ccd in [2, 4]:
self.bore_pixel = [2048, 0]
log.info(
f"Building mask s{self.sector} c{self.camera} ccd{self.ccd}"
)
if not np.all(
[
self.sector == int(fname.split("-s")[1].split("-")[0]),
self.camera == hdu[1].header["camera"],
self.ccd == hdu[1].header["ccd"],
]
):
raise ValueError(
"All files must have same sector, camera, ccd."
)
# Bad frames do not count.
if self.bad_frames[fdx]:
continue
data = fitsio.read(fname)[
: self.cutout_size, 45 : self.cutout_size + 45
]
k = (data > 0) & (data < 1500)
# Blown out frames do not count.
if (~k).sum() / (self.cutout_size ** 2) > 0.05:
self.bad_frames[fdx] = True
continue
data -= np.median(data[::16, ::16])
if diff is None:
diff = data.copy()
else:
diff -= data
# diff_ar[fdx] = (np.abs(diff) > 200).sum() / (
# self.cutout_size ** 2
# )
if (np.abs(diff) > 200).sum() / (self.cutout_size ** 2) > 0.2:
diff = data.copy()
self.bad_frames[fdx] = True
continue
diff = data.copy()
med_image[bdx, :, :] += data
batch_count += 1
# grad = np.gradient(data)
# hard_mask |= np.abs(np.hypot(*grad)) > 50
if batch_count > 0:
med_image[bdx, :, :] /= batch_count
else:
med_image[bdx, :, :] = np.nan
med_image = np.nanmedian(med_image, axis=0)
self.average_image = med_image
del med_image
self.average_image -= np.nanmedian(self.average_image)
self._straps_removed_from_average = False
soft_mask = np.hypot(*np.gradient(self.average_image)) > 10 # | (weights == 0)
sat_mask = get_saturation_mask(self.average_image, cutout_size=self.cutout_size)
# asteroid_mask = np.zeros((self.cutout_size, self.cutout_size), dtype=bool)
#
# binsize = 128
# for fdx, fname in enumerate(self.fnames):
# data = fitsio.read(fname)[: self.cutout_size, 45 : self.cutout_size + 45]
# data -= np.median(data)
# data -= self.average_image
# m = (data > 500) | (data < 0) & soft_mask
# if self.cutout_size > binsize:
# check = np.asarray(
# [
# m[idx::binsize, jdx::binsize]
# for idx in range(binsize)
# for jdx in range(binsize)
# ]
# ).sum(axis=0) / (binsize ** 2)
# else:
# check = m
# if (check > 0.05).any():
# self.odd_mask[fdx] = csr_matrix(m.ravel())
# check = np.kron(
# check,
# np.ones(
# (binsize, binsize),
# dtype=int,
# ),
# )
# grad = np.gradient(np.abs(data))
# asteroid_mask |= (np.hypot(*grad) > 30) & (check < 0.05)
#
# import pdb
# import matplotlib.pyplot as plt
#
# pdb.set_trace()
# del data
# # I don't care about dividing by zero here
# with np.errstate(divide="ignore", invalid="ignore"):
# average /= weights
# soft_mask = (np.hypot(*np.gradient(average)) > 10) | (weights == 0)
# soft_mask = (average > 20) | (weights == 0)
# soft_mask |= np.any(np.gradient(soft_mask.astype(float)), axis=0) != 0
# This makes the soft mask slightly more generous
def enlarge_mask(mask):
m = np.zeros((self.cutout_size, self.cutout_size))
m[1:-1, 1:-1] += mask[:-2, 1:-1].astype(int)
m[1:-1, 1:-1] += mask[2:, 1:-1].astype(int)
m[1:-1, 1:-1] += mask[1:-1, :-2].astype(int)
m[1:-1, 1:-1] += mask[1:-1, 2:].astype(int)
mask |= m >= 3
enlarge_mask(soft_mask)
# enlarge_mask(asteroid_mask)
# self.star_mask = ~(hard_mask | asteroid_mask | soft_mask | ~sat_mask)
self.star_mask = ~(soft_mask | ~sat_mask)
self.sat_mask = sat_mask
# We don't need all these pixels, it's too many to store for every frame.
# Instead we'll just save 5000 of them.
if (soft_mask & sat_mask).sum() > 5000:
s = np.random.choice((soft_mask & sat_mask).sum(), size=5000, replace=False)
l = np.asarray(np.where(soft_mask & sat_mask))
l = l[:, s]
self.jitter_mask = np.zeros((self.cutout_size, self.cutout_size), bool)
self.jitter_mask[l[0], l[1]] = True
else:
self.jitter_mask = np.copy((soft_mask & sat_mask))
# fname = self.fnames[len(self.fnames) // 2]
# data = fitsio.read(fname)[: self.cutout_size, 45 : self.cutout_size + 45]
# grad = np.asarray(np.gradient(data))
# self.median_data = data[self.jitter_mask]
# self.median_gradient = grad[:, self.jitter_mask]
self.median_data = self.average_image[self.jitter_mask]
self.median_gradient = np.asarray(np.gradient(self.average_image))[
:, self.jitter_mask
]
return soft_mask, sat_mask # , diff_ar
def _build_matrices(self):
"""Allocate the matrices to fit the background.
When we want to build the matrices to evaluate this background model,
we will be able to do so in a slightly more efficient way."""
log.info(f"Building matrices s{self.sector} c{self.camera} ccd{self.ccd}")
row, column = np.mgrid[: self.cutout_size, : self.cutout_size]
c, r = column / self.cutout_size - 0.5, row / self.cutout_size - 0.5
crav = c.ravel()
rrav = r.ravel()
self._poly_X = np.asarray(
[
crav ** idx * rrav ** jdx
for idx in np.arange(self.npoly)
for jdx in np.arange(self.npoly)
]
).T
c, r = (column - self.bore_pixel[1]) / 2048, (row - self.bore_pixel[0]) / 2048
crav = c.ravel()
rrav = r.ravel()
rad = (crav ** 2 + rrav ** 2)[:, None] ** 0.5
self._poly_X = np.hstack(
[self._poly_X, np.hstack([rad ** idx for idx in np.arange(1, self.nrad)])]
)
# nice wide priors for polynomial
self._poly_prior_sigma = np.ones(self._poly_X.shape[1]) * 3000
self._poly_prior_mu = np.zeros(self._poly_X.shape[1])
def expand_poly(x, crav, points):
points = np.arange(0, 2048 + 512, 512)
return np.hstack(
[
x
* (
(((crav + 0.5) * self.cutout_size) >= p1)
& (((crav + 0.5) * self.cutout_size) < p2)
)[:, None]
for p1, p2 in zip(points[:-1], points[1:])
if (
(((crav + 0.5) * self.cutout_size) >= p1)
& (((crav + 0.5) * self.cutout_size) < p2)
).any()
]
)
# self._poly_X = expand_poly(self._poly_X, crav, points)
del (
row,
column,
c,
r,
)
del crav, rrav
row, column = (
np.mgrid[: self.cutout_size // self.nb, : self.cutout_size // self.nb]
* self.nb
)
row, column = row + self.nb / 2, column + self.nb / 2
c, r = column / self.cutout_size - 0.5, row / self.cutout_size - 0.5
crav = c.ravel()
rrav = r.ravel()
self.weights = np.sum(
[
self.star_mask[idx :: self.nb, jdx :: self.nb]
for idx in range(self.nb)
for jdx in range(self.nb)
],
axis=0,
)
self._poly_X_down = np.asarray(
[
crav ** idx * rrav ** jdx
for idx in np.arange(self.npoly)
for jdx in np.arange(self.npoly)
]
).T
c, r = (column - self.bore_pixel[1]) / 2048, (row - self.bore_pixel[0]) / 2048
crav = c.ravel()
rrav = r.ravel()
rad = (crav ** 2 + rrav ** 2)[:, None] ** 0.5
self._poly_X_down = np.hstack(
[
self._poly_X_down,
np.hstack([rad ** idx for idx in np.arange(1, self.nrad)]),
]
)
# self._poly_X_down = expand_poly(self._poly_X_down, crav, points)
del (
row,
column,
c,
r,
)
del crav, rrav
self.poly_sigma_w_inv = self._poly_X_down[self.weights.ravel() != 0].T.dot(
self._poly_X_down[self.weights.ravel() != 0]
)
self.poly_sigma_w_inv += np.diag(1 / self._poly_prior_sigma ** 2)
e = lil_matrix((self.cutout_size, self.cutout_size * self.cutout_size))
for idx in range(self.cutout_size):
e[idx, np.arange(self.cutout_size) * self.cutout_size + idx] = 1
self._strap_X = e.T.tocsr()
del e
self._spline_X = self._get_spline_matrix(np.arange(self.cutout_size))
self.X = hstack([self._spline_X, self._strap_X], format="csr")
# We'll sacrifice some memory here for speed later.
# self.XT = self.X.T.tocsr()
# self.Xm = self.X[self.star_mask.ravel()].tocsr()
self.XmT = self.X[self.star_mask.ravel()].T.tocsr()
self.prior_mu = np.zeros(self._spline_X.shape[1] + self._strap_X.shape[1])
self.prior_sigma = (
np.ones(self._spline_X.shape[1] + self._strap_X.shape[1]) * 40
)
self.prior_sigma[: self._spline_X.shape[1]] *= 10
self.sigma_w_inv = self.XmT.dot(
self.X[self.star_mask.ravel()].tocsr()
) + np.diag(1 / self.prior_sigma ** 2)
if not self._straps_removed_from_average:
log.info(
f"Correcting average image s{self.sector} c{self.camera} ccd{self.ccd}"
)
self._straps_removed_from_average = True
fit_results = self._fit_frame(self.average_image)
self.average_image -= fit_results[2][None, :]
self.average_image -= self._poly_X.dot(fit_results[0]).reshape(
(self.cutout_size, self.cutout_size)
)
self.average_image -= self._spline_X.dot(fit_results[1].ravel()).reshape(
(self.cutout_size, self.cutout_size)
)
def _get_spline_matrix(self, xc, xr=None):
"""Helper function to make a 2D spline matrix in a fairly memory efficient way."""
def _X(x):
matrices = [
csr_matrix(
_spline_basis_vector(x, self.degree, idx, self.knots_wbounds)
)
for idx in np.arange(-1, len(self.knots_wbounds) - self.degree - 1)
]
X = vstack(matrices, format="csr").T
return X
if xr is None:
xr = xc
Xc = _X(xc)
Xcf = vstack([Xc for idx in range(len(xr))]).tocsr()
Xr = _X(xr)
Xrf = (
hstack([Xr for idx in range(len(xc))])
.reshape((Xcf.shape[0], Xc.shape[1]))
.tocsr()
)
Xf = hstack([Xrf.multiply(X.T) for X in Xcf.T]).tocsr()
return Xf
def __repr__(self):
if hasattr(self, "sector"):
return (
f"BackDrop [Sector {self.sector}, Camera {self.camera}, CCD {self.ccd}]"
)
return "BackDrop"
def fit_model(self):
"""Fit the tess-backdrop model to the files specified by `fnames`."""
if not hasattr(self, "star_mask"):
_ = self._build_mask()
if not hasattr(self, "_poly_X"):
self._build_matrices()
if not hasattr(self, "poly_w"):
self.poly_w, self.spline_w, self.strap_w, self.t_start, self.jitter = (
# np.zeros(
# (
# len(self.fnames),
# self.npoly
# * self.npoly
# * (np.arange(0, 2048 + 512, 512) < self.cutout_size).sum(),
# )
# ),
# np.zeros((len(self.fnames), self.npoly, self.npoly)),
np.zeros((len(self.fnames), self.npoly * self.npoly + self.nrad - 1)),
np.zeros((len(self.fnames), self.nknots, self.nknots)),
np.zeros((len(self.fnames), self.cutout_size)),
np.zeros(len(self.fnames)),
np.zeros((len(self.fnames), self.jitter_mask.sum())),
)
log.info(f"Building frames s{self.sector} c{self.camera} ccd{self.ccd}")
points = np.linspace(0, len(self.fnames), 12, dtype=int)
for idx, fname in enumerate(self.fnames):
if self.t_start[idx] != 0:
continue
if idx in points:
log.info(
f"Running frames s{self.sector} c{self.camera} ccd{self.ccd} {np.where(points == idx)[0][0] * 10}%"
)
if idx != 0:
self.save(package_jitter_comps=False)
with fits.open(fname, lazy_load_hdus=True) as hdu:
if not np.all(
[
self.sector == int(fname.split("-s")[1].split("-")[0]),
self.camera == hdu[1].header["camera"],
self.ccd == hdu[1].header["ccd"],
]
):
raise ValueError(
f"FFI image is not part of Sector {self.sector}, Camera {self.camera}, CCD {self.ccd}"
)
self.t_start[idx] = hdu[0].header["TSTART"]
data = (
fitsio.read(fname)[: self.cutout_size, 45 : self.cutout_size + 45]
- self.average_image
)
(
self.poly_w[idx, :],
self.spline_w[idx, :],
self.strap_w[idx, :],
self.jitter[idx, :],
) = self._fit_frame(data)
self.save()
# # Smaller version of jitter for use later
# bad = sigma_clip(np.gradient(self.jitter_pix, axis=1).std(axis=1), sigma=5).mask
# _, med, std = sigma_clipped_stats(self.jitter_pix[~bad], axis=0)
# j = (np.copy(self.jitter_pix) - med) / std
# j[j > 10] = 0
# U, s, V = pca(j[~bad], 20, n_iter=100)
# X = np.zeros((self.jitter_pix.shape[0], U.shape[1]))
# X[~bad] = np.copy(U)
# self.jitter = X
# self.jitter = np.copy(self.jitter_pix)
def _fit_frame(self, data):
"""Helper function to fit a model to an individual frame."""
avg = np.sum(
[
data[idx :: self.nb, jdx :: self.nb]
* self.star_mask[idx :: self.nb, jdx :: self.nb]
for idx in range(self.nb)
for jdx in range(self.nb)
],
axis=0,
)
# I don't care about dividing by zero here
with np.errstate(divide="ignore", invalid="ignore"):
avg /= self.weights
B = self._poly_X_down[self.weights.ravel() != 0].T.dot(
avg.ravel()[self.weights.ravel() != 0]
)
B += self._poly_prior_mu / self._poly_prior_sigma ** 2
poly_w = np.linalg.solve(self.poly_sigma_w_inv, B)
# iterate once
for count in [0, 1]:
m = self._poly_X_down.dot(poly_w).reshape(
(self.cutout_size // self.nb, self.cutout_size // self.nb)
)
k = np.abs(avg - m) < 300
k = (self.weights.ravel() != 0) & k.ravel()
poly_sigma_w_inv = self._poly_X_down[k].T.dot(self._poly_X_down[k])
poly_sigma_w_inv += np.diag(1 / self._poly_prior_sigma ** 2)
B = self._poly_X_down[k].T.dot(avg.ravel()[k])
B += self._poly_prior_mu / self._poly_prior_sigma ** 2
poly_w = np.linalg.solve(poly_sigma_w_inv, B)
res = data - self._poly_X.dot(poly_w).reshape(
(self.cutout_size, self.cutout_size)
)
res[(np.hypot(*np.gradient(np.abs(res))) > 30) | (np.abs(res) > 500)] *= 0
# The spline and strap components should be small
# sigma_w_inv = self.XT[:, star_mask.ravel()].dot(
# self.X[star_mask.ravel()]
# ).toarray() + np.diag(1 / prior_sigma ** 2)
B = (
self.XmT.dot(res.ravel()[self.star_mask.ravel()])
+ self.prior_mu / self.prior_sigma ** 2
)
w = np.linalg.solve(self.sigma_w_inv, B)
spline_w = w[: self._spline_X.shape[1]].reshape((self.nknots, self.nknots))
strap_w = w[self._spline_X.shape[1] :]
jitter_pix = res[self.jitter_mask]
return (
poly_w, # .reshape((self.npoly, self.npoly)),
spline_w,
strap_w,
jitter_pix,
)
def _get_jitter(self, data):
"""Get the jitter correction somehow..."""
# Use jitter mask.
raise NotImplementedError
def save(self, output_dir=None, package_jitter_comps=True):
"""
Save a model fit to the tess-backdrop data directory.
Will create a fits file containing the following extensions
- Primary
- T_START: The time array for each background solution
- KNOTS: Knot spacing in row and column
- SPLINE_W: Solution to the spline model. Has shape (ntimes x nknots x nknots)
- STRAP_W: Solution to the strap model. Has shape (ntimes x self.cutout_size)
- POLY_W: Solution to the polynomial model. Has shape (ntimes x (npoly*npoly + nrad - 1))
"""
log.info(f"Saving s{self.sector} c{self.camera} ccd{self.ccd}")
# if not hasattr(self, "star_mask"):
# raise ValueError(
# "It does not look like you have regenerated a tess_backdrop model, I do not think you want to save."
# )
hdu0 = fits.PrimaryHDU()
s = np.argsort(self.t_start)
cols = [
fits.Column(
name="T_START", format="D", unit="BJD - 2457000", array=self.t_start[s]
)
]
if hasattr(self, "quality"):
cols.append(
fits.Column(
name="QUALITY",
format="D",
unit="BJD - 2457000",
array=self.quality[s],
)
)
hdu1 = fits.BinTableHDU.from_columns(cols)
cols = [
fits.Column(name="KNOTS", format="D", unit="PIX", array=self.knots_wbounds)
]
hdu2 = fits.BinTableHDU.from_columns(cols)
hdu3 = fits.ImageHDU(self.spline_w[s], name="spline_w")
hdu4 = fits.ImageHDU(self.strap_w[s], name="strap_w")
hdu5 = fits.ImageHDU(self.poly_w[s], name="poly_w")
hdul = fits.HDUList([hdu0, hdu1, hdu2, hdu3, hdu4, hdu5])
hdul[0].header["ORIGIN"] = "tess-backdrop"
hdul[0].header["AUTHOR"] = "<EMAIL>"
hdul[0].header["VERSION"] = __version__
for key in ["sector", "camera", "ccd", "nknots", "npoly", "nrad", "degree"]:
hdul[0].header[key] = getattr(self, key)
fname = (
f"tessbackdrop_sector{self.sector}_camera{self.camera}_ccd{self.ccd}.fits"
)
if output_dir is None:
dir = f"{PACKAGEDIR}/data/sector{self.sector:03}/camera{self.camera:02}/ccd{self.ccd:02}/"
if not os.path.isdir(dir):
os.makedirs(dir)
hdul.writeto(dir + fname, overwrite=True)
else:
hdul.writeto(output_dir + fname, overwrite=True)
hdu0 = fits.PrimaryHDU()
hdu1 = fits.ImageHDU(self.jitter[s], name="jitter_pix")
hdul = fits.HDUList([hdu0, hdu1])
hdul[0].header["ORIGIN"] = "tess-backdrop"
hdul[0].header["AUTHOR"] = "<EMAIL>"
hdul[0].header["VERSION"] = __version__
for key in ["sector", "camera", "ccd", "nknots", "npoly", "nrad", "degree"]:
hdul[0].header[key] = getattr(self, key)
fname = f"tessbackdrop_jitter_sector{self.sector}_camera{self.camera}_ccd{self.ccd}.fits"
if output_dir is None:
dir = f"{PACKAGEDIR}/data/sector{self.sector:03}/camera{self.camera:02}/ccd{self.ccd:02}/"
hdul.writeto(dir + fname, overwrite=True)
else:
hdul.writeto(output_dir + fname, overwrite=True)
if package_jitter_comps:
self._package_jitter_comps()
if self.jitter_comps is not None:
hdu0 = fits.PrimaryHDU()
hdu1 = fits.ImageHDU(self.jitter_comps[s], name="jitter_pix")
hdul = fits.HDUList([hdu0, hdu1])
hdul[0].header["ORIGIN"] = "tess-backdrop"
hdul[0].header["AUTHOR"] = "<EMAIL>"
hdul[0].header["VERSION"] = __version__
for key in [
"sector",
"camera",
"ccd",
"nknots",
"npoly",
"nrad",
"degree",
]:
hdul[0].header[key] = getattr(self, key)
fname = f"tessbackdrop_jitter_components_sector{self.sector}_camera{self.camera}_ccd{self.ccd}.fits"
if output_dir is None:
dir = f"{PACKAGEDIR}/data/sector{self.sector:03}/camera{self.camera:02}/ccd{self.ccd:02}/"
hdul.writeto(dir + fname, overwrite=True)
else:
hdul.writeto(output_dir + fname, overwrite=True)
hdu0 = fits.PrimaryHDU()
hdu1 = fits.ImageHDU(self.star_mask.astype(int), name="STARMASK")
hdu2 = fits.ImageHDU(self.sat_mask.astype(int), name="SATMASK")
hdu3 = fits.ImageHDU(self.average_image, name="AVGIMG")
hdul = fits.HDUList([hdu0, hdu1, hdu2, hdu3])
hdul[0].header["ORIGIN"] = "tess-backdrop"
hdul[0].header["AUTHOR"] = "<EMAIL>"
hdul[0].header["VERSION"] = __version__
for key in ["sector", "camera", "ccd", "nknots", "npoly", "nrad", "degree"]:
hdul[0].header[key] = getattr(self, key)
fname = f"tessbackdrop_masks_sector{self.sector}_camera{self.camera}_ccd{self.ccd}.fits"
if output_dir is None:
dir = f"{PACKAGEDIR}/data/sector{self.sector:03}/camera{self.camera:02}/ccd{self.ccd:02}/"
hdul.writeto(dir + fname, overwrite=True)
else:
hdul.writeto(output_dir + fname, overwrite=True)
def load(self, sector, camera, ccd, full_jitter=False):
"""
Load a model fit to the tess-backdrop data directory.
Parameters
----------
sector: int
TESS sector number
camera: int
TESS camera number
ccd: int
TESS CCD number
"""
dir = f"{PACKAGEDIR}/data/sector{sector:03}/camera{camera:02}/ccd{ccd:02}/"
if not os.path.isdir(dir):
raise ValueError(
f"No solutions exist for Sector {sector}, Camera {camera}, CCD {ccd}."
)
fname = f"tessbackdrop_sector{sector}_camera{camera}_ccd{ccd}.fits"
with fits.open(dir + fname, lazy_load_hdus=True) as hdu:
for key in ["sector", "camera", "ccd", "nknots", "npoly", "nrad", "degree"]:
setattr(self, key, hdu[0].header[key])
self.t_start = hdu[1].data["T_START"]
if "QUALITY" in hdu[1].data.names:
self.quality = hdu[1].data["QUALITY"]
self.knots_wbounds = hdu[2].data["KNOTS"]
self.spline_w = hdu[3].data
self.strap_w = hdu[4].data
self.poly_w = hdu[5].data
if full_jitter:
fname = f"tessbackdrop_jitter_sector{sector}_camera{camera}_ccd{ccd}.fits"
with fits.open(dir + fname, lazy_load_hdus=True) as hdu:
self.jitter = hdu[1].data
fname = f"tessbackdrop_jitter_components_sector{sector}_camera{camera}_ccd{ccd}.fits"
if os.path.isfile(dir + fname):
with fits.open(dir + fname, lazy_load_hdus=True) as hdu:
self.jitter_comps = hdu[1].data
fname = f"tessbackdrop_masks_sector{sector}_camera{camera}_ccd{ccd}.fits"
if os.path.isfile(dir + fname):
with fits.open(dir + fname, lazy_load_hdus=True) as hdu:
self.star_mask = hdu[1].data
self.sat_mask = hdu[2].data
self.average_image = hdu[3].data
if self.ccd in [1, 3]:
self.bore_pixel = [2048, 2048]
elif self.ccd in [2, 4]:
self.bore_pixel = [2048, 0]
def list_available(self):
"""List the sectors, cameras and CCDs that
are available to you via the `load` method.
If there is a sector that is not available that you need,
you can create a solution using the TESS FFIs, and then use the
`save` method."""
df = pd.DataFrame(columns=["Sector", "Camera", "CCD"])
idx = 0
for sector in np.arange(200):
for camera in np.arange(1, 5):
for ccd in np.arange(1, 5):
dir = f"{PACKAGEDIR}/data/sector{sector:03}/camera{camera:02}/ccd{ccd:02}/"
if not os.path.isdir(dir):
continue
fname = f"tessbackdrop_sector{sector}_camera{camera}_ccd{ccd}.fits"
if os.path.isfile(dir + fname):
df.loc[idx] = np.hstack([sector, camera, ccd])
idx += 1
return df
def build_correction(self, column, row, times=None):
"""Build a background correction for a given column, row and time array.
Parameters
----------
column : 1D np.ndarray of ints
Array between 0 and 2048 indicating the column number.
NOTE: Columns in TESS FFIs and TPFs are offset by 45 pixels, and usually
are between 45 and 2093.
row : 1D np.ndarray of ints
Array between 0 and 2048 indicating the row number.
times: None, list of ints, or np.ndarray of floats
Times to evaluate the background model at. If none, will evaluate at all
the times for available FFIs. If array of ints, will use those indexes to the original FFIs.
Otherwise, must be an np.ndarray of floats for the T_START time of the FFI.
Returns
-------
bkg : np.ndarray
3D array with shape (ntimes x nrow x ncolumn) containing the background
estimate for the input column, row and times.
"""
if not hasattr(self, "spline_w"):
raise ValueError(
"tess-backdrop does not have any backdrop information. Do you need to `load` a backdrop file?"
)
if times is None:
tdxs = np.arange(len(self.t_start))
else:
if not hasattr(times, "__iter__"):
times = [times]
if np.all([isinstance(i, (int, np.int64)) for i in times]):
tdxs = times
elif not np.in1d(np.round(times, 6), np.round(self.t_start, 6)).all():
raise ValueError(
"tess-backdrop can not estimate some times in the input `times` array."
"No background information at that time."
)
else:
tdxs = np.asarray(
[
np.where(np.round(self.t_start, 6) == np.round(t, 6))[0][0]
for t in times
]
)
c, r = np.meshgrid(column, row)
c, r = c / self.cutout_size - 0.5, r / self.cutout_size - 0.5
self._poly_X = np.asarray(
[
c.ravel() ** idx * r.ravel() ** jdx
for idx in np.arange(self.npoly)
for jdx in np.arange(self.npoly)
]
).T
c, r = np.meshgrid(column, row)
c, r = (c - self.bore_pixel[1]) / 2048, (r - self.bore_pixel[0]) / 2048
crav = c.ravel()
rrav = r.ravel()
rad = (crav ** 2 + rrav ** 2)[:, None] ** 0.5
self._poly_X = np.hstack(
[self._poly_X, np.hstack([rad ** idx for idx in np.arange(1, self.nrad)])]
)
del c, r, crav, rrav
self._spline_X = self._get_spline_matrix(column, row)
bkg = np.zeros((len(tdxs), len(row), len(column)))
for idx, tdx in enumerate(tdxs):
poly = self._poly_X.dot(self.poly_w[tdx].ravel()).reshape(
(row.shape[0], column.shape[0])
)
spline = self._spline_X.dot(self.spline_w[tdx].ravel()).reshape(
(row.shape[0], column.shape[0])
)
strap = self.strap_w[tdx][column][None, :] * np.ones(row.shape[0])[:, None]
bkg[idx, :, :] = poly + spline + strap
return bkg
def correct_tpf(self, tpf, exptime=None):
"""Returns a TPF with the background corrected
Parameters
----------
tpf : lk.TargetPixelFile
Target Pixel File object. Must be a TESS target pixel file, and must
be a 30 minute cadence.
exptime : float, None
The exposure time between each cadence. If None, will be generated from the data
Returns
-------
corrected_tpf : lk.TargetPixelFile
New TPF object, with the TESS background removed.
"""
if exptime is None:
exptime = np.median(np.diff(tpf.time.value))
if exptime < 0.02:
raise ValueError(
"tess_backdrop can only correct 30 minute cadence FFIs currently."
)
if tpf.mission.lower() != "tess":
raise ValueError("tess_backdrop can only correct TESS TPFs.")
if not hasattr(self, "sector"):
self.load(sector=tpf.sector, camera=tpf.camera, ccd=tpf.ccd)
else:
if (
(self.sector != tpf.sector)
| (self.camera != tpf.camera)
| (self.ccd != tpf.ccd)
):
self.load(sector=tpf.sector, camera=tpf.camera, ccd=tpf.ccd)
tdxs = [
np.argmin(np.abs((self.t_start - t) + exptime))
for t in tpf.time.value
if (np.min(np.abs((self.t_start - t) + exptime)) < exptime)
]
bkg = self.build_correction(
np.arange(tpf.shape[2]) + tpf.column,
np.arange(tpf.shape[1]) + tpf.row,
times=tdxs,
)
return tpf - bkg
def _package_jitter_comps(self):
"""Helper function for packaging up jitter components into different
time scale components.
"""
# We'll hard code the number of PCA components for now
if self.jitter.shape[0] < 40:
self.jitter_comps = None
return
if self.jitter.shape[1] < 50:
self.jitter_comps = self.jitter.copy()
return
npca_components = 30
box = np.ones(20) / 20
X = []
breaks = (
np.where(np.diff(self.t_start) > np.median(np.diff(self.t_start) * 10))[0]
+ 1
)
breaks = np.hstack([0, breaks, len(self.t_start)])
for x1, x2 in zip(breaks[:-1], breaks[1:]):
jitter = self.jitter[x1:x2].copy()
jitter -= np.median(jitter, axis=0)
jitter_smooth = np.zeros(jitter.shape)
for idx in range(jitter.shape[1]):
y = jitter[:, idx].copy()
jitter_smooth[:, idx] = np.convolve(y, box, mode="same")
mask = sigma_clip(jitter - jitter_smooth).mask
jitter[mask] = 0
for idx in range(jitter.shape[1]):
y = jitter[:, idx].copy()
jitter_smooth[:, idx] = np.convolve(y, box, mode="same")
box = np.ones(60) / 60
jitter_smooth2 = np.zeros(jitter.shape)
for idx in range(jitter.shape[1]):
y = jitter_smooth[:, idx].copy()
jitter_smooth2[:, idx] = np.convolve(y, box, mode="same")
short = (
self.jitter[x1:x2].copy()
- np.median(self.jitter[x1:x2], axis=0)
- jitter_smooth
)
medium = jitter_smooth - jitter_smooth2
long = jitter_smooth2
X1 = np.hstack(
[
pca(short, npca_components, n_iter=10)[0],
pca(medium, npca_components, n_iter=10)[0],
pca(long, npca_components, n_iter=10)[0],
]
)
X1 = np.hstack(
[X1[:, idx::npca_components] for idx in range(npca_components)]
)
X.append(X1)
self.jitter_comps = np.vstack(X)
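# Illustrative end-to-end sketch (hypothetical FFI paths, not part of the original
# module): fit the three-part background model, then evaluate the correction for a
# small cut-out region.
def _example_backdrop_workflow(ffi_paths):
    bd = BackDrop(fnames=ffi_paths, cutout_size=2048)
    bd.fit_model()                # fits polynomial + spline + strap model and saves as it goes
    column = np.arange(100, 120)  # FFI columns after the 45 pixel offset
    row = np.arange(200, 220)
    return bd.build_correction(column, row)  # (ntimes, nrow, ncolumn) background estimate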
def _get_knots(x, nknots, degree):
"""Find the b-spline knot spacing for an input array x, number of knots and degree
Parameters
----------
x : np.ndarray
Input vector to create the b-spline for
nknots : int
Number of knots to use in the b-spline
degree : int
Degree of the b-spline
Returns
-------
knots_wbounds : np.ndarray
The knot locations for the input x.
"""
knots = np.asarray(
[s[-1] for s in np.array_split(np.argsort(x), nknots - degree)[:-1]]
)
knots = [np.mean([x[k], x[k + 1]]) for k in knots]
knots = np.append(np.append(x.min(), knots), x.max())
knots = np.unique(knots)
knots_wbounds = np.append(
np.append([x.min()] * (degree - 1), knots), [x.max()] * (degree)
)
return knots_wbounds
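# Quick check (not in the original module): for a uniform pixel grid the knots are
# roughly evenly spaced, with repeated boundary knots so the spline is well defined
# at the edges.
def _demo_get_knots():
    knots = _get_knots(np.arange(2048), nknots=40, degree=2)
    return knots  # spans 0 to 2047, with repeated boundary values at each end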
def _find_saturation_column_centers(mask):
"""
Finds the center point of saturation columns.
Parameters
----------
mask : np.ndarray of bools
Mask where True indicates a pixel is saturated
Returns
-------
centers : np.ndarray
Array of the centers in XY space for all the bleed columns
"""
centers = []
radii = []
idxs = np.where(mask.any(axis=0))[0]
for idx in idxs:
line = mask[:, idx]
seq = []
val = line[0]
jdx = 0
while jdx <= len(line):
while line[jdx] == val:
jdx += 1
if jdx >= len(line):
break
if jdx >= len(line):
break
seq.append(jdx)
val = line[jdx]
w = np.array_split(line, seq)
v = np.array_split(np.arange(len(line)), seq)
coords = [(idx, v1.mean().astype(int)) for v1, w1 in zip(v, w) if w1.all()]
rads = [len(v1) / 2 for v1, w1 in zip(v, w) if w1.all()]
for coord, rad in zip(coords, rads):
centers.append(coord)
radii.append(rad)
centers = np.asarray(centers)
radii = np.asarray(radii)
return centers, radii
def get_saturation_mask(data, whisker_width=40, cutout_size=2048):
"""
Finds a mask that will remove saturated pixels, and any "whiskers".
Parameters
----------
data : np.ndarray of shape (2048 x 2048)
Input TESS FFI
Returns
-------
sat_mask: np.ndarray of bools
The mask for saturated pixels. False where pixels are saturated.
"""
sat_cols = (np.abs(np.gradient(data)[1]) > 1e4) | (data > 1e5)
centers, radii = _find_saturation_column_centers(sat_cols)
whisker_mask = np.zeros((cutout_size, cutout_size), bool)
for idx in np.arange(-2, 2):
for jdx in np.arange(-whisker_width // 2, whisker_width // 2):
a1 = np.max([np.zeros(len(centers)), centers[:, 1] - idx], axis=0)
a1 = np.min([np.ones(len(centers)) * cutout_size - 1, a1], axis=0)
b1 = np.max([np.zeros(len(centers)), centers[:, 0] - jdx], axis=0)
b1 = np.min([np.ones(len(centers)) * cutout_size - 1, b1], axis=0)
whisker_mask[a1.astype(int), b1.astype(int)] = True
sat_mask = np.copy(sat_cols)
sat_mask |= np.gradient(sat_mask.astype(float), axis=0) != 0
for count in range(4):
sat_mask |= np.gradient(sat_mask.astype(float), axis=1) != 0
sat_mask |= whisker_mask
X, Y = np.mgrid[:cutout_size, :cutout_size]
jdx = 0
kdx = 0
for jdx in range(8):
for kdx in range(8):
k = (
(centers[:, 1] > jdx * 256 - radii.max() - 1)
& (centers[:, 1] <= (jdx + 1) * 256 + radii.max() + 1)
& (centers[:, 0] > kdx * 256 - radii.max() - 1)
& (centers[:, 0] <= (kdx + 1) * 256 + radii.max() + 1)
)
if not (k).any():
continue
for idx in np.where(k)[0]:
x, y = (
X[jdx * 256 : (jdx + 1) * 256, kdx * 256 : (kdx + 1) * 256]
- centers[idx][1],
Y[jdx * 256 : (jdx + 1) * 256, kdx * 256 : (kdx + 1) * 256]
- centers[idx][0],
)
sat_mask[
jdx * 256 : (jdx + 1) * 256, kdx * 256 : (kdx + 1) * 256
] |= np.hypot(x, y) < (np.min([radii[idx], 70]))
# for idx in tqdm(range(len(centers)), desc="Building Saturation Mask"):
# sat_mask |= np.hypot(X - centers[idx][1], Y - centers[idx][0]) < (radii[idx])
return ~sat_mask
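# Minimal sketch (synthetic frame, not part of the original module): a single
# saturated bleed column should come back masked (False) together with its halo.
def _demo_saturation_mask(cutout_size=256):
    img = np.random.normal(100.0, 10.0, (cutout_size, cutout_size))
    img[80:180, 128] = 2e5  # fake saturated column
    mask = get_saturation_mask(img, cutout_size=cutout_size)
    return mask  # False where pixels are saturated or in the whisker region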
def _std_iter(x, mask, sigma=3, n_iters=3):
"""Iteratively finds the standard deviation of an array after sigma clipping
Parameters
----------
x : np.ndarray
Array with average of zero
mask : np.ndarray of bool
Mask of same size as x, where True indicates a point to be masked.
sigma : int or float
The standard deviation at which to clip
n_iters : int
Number of iterations
"""
m = mask.copy()
for iter in range(n_iters):
std = np.std(x[~m])
m |= np.abs(x) > (std * sigma)
return std
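# Quick illustration (synthetic data, not in the original module): the clipped
# standard deviation stays close to the inlier scatter despite injected outliers.
def _demo_std_iter():
    x = np.random.normal(0.0, 1.0, 5000)
    x[:50] += 50.0  # outliers
    return _std_iter(x, np.zeros_like(x, dtype=bool), sigma=3, n_iters=3)  # ~1.0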
def _find_bad_frames(fnames, cutout_size=2048, corner_check=False):
"""Identifies frames that probably have a lot of scattered lightkurve
If quality flags are available, will use TESS quality flags.
If unavailable, or if `corner_check=True`, loads the 30x30 pixel corner
region of every frame, and uses them to find frames that have a lot of
scattered light.
"""
quality = np.zeros(len(fnames), int)
warned = False
log.info("Extracting quality")
for idx, fname in enumerate(fnames):
try:
quality[idx] = fitsio.read_header(fname, 1)["DQUALITY"]
except KeyError:
if warned is False:
log.warning("Quality flags are missing.")
warned = True
continue
bad = (quality & (2048 | 175)) != 0
if warned | corner_check:
log.info("Using corner check")
corner = np.zeros((4, len(fnames)))
for tdx, fname in enumerate(fnames):
corner[0, tdx] = fitsio.read(fname)[:30, 45 : 45 + 30].mean()
corner[1, tdx] = fitsio.read(fname)[-30:, 45 : 45 + 30].mean()
corner[2, tdx] = fitsio.read(fname)[
:30, 45 + cutout_size - 30 : 45 + cutout_size
].mean()
corner[3, tdx] = fitsio.read(fname)[
-30:, 45 + cutout_size - 30 - 1 : 45 + cutout_size
].mean()
c = corner.T - np.median(corner, axis=1)
c /= np.std(c, axis=0)
bad = (np.abs(c)
import pandas as pd
import numpy as np
import pdb
import sys
import os
#######################################
# creates validation table in CSV format
#
# this script assumes download of lake_surface_temp_preds.csv from
# the data release (https://www.sciencebase.gov/catalog/item/60341c3ed34eb12031172aa6)
#
##########################################################
#load data
df = pd.read_csv("lake_surface_temp_preds.csv")
vals = df['wtemp_ERA5'].values[:]
df['wtemp_ERA5'] = vals[:]+3.47
df['wtemp_ERA5b'] = vals[:]
pdb.set_trace()
site_ids = np.unique(df['site_id'].values)
meta = pd.read_csv("../../metadata/lake_metadata.csv")
meta = meta[meta['num_obs']>0]
#calculate error per site
# err_per_site_ea = [np.abs((df[df['site_id']==i_d]['wtemp_EALSTM']-df[df['site_id']==i_d]['wtemp_obs'])).mean()for i_d in site_ids]
err_per_site_ea = [np.sqrt(((df[df['site_id']==i_d]['wtemp_EALSTM']-df[df['site_id']==i_d]['wtemp_obs'])**2).mean())for i_d in site_ids]
err_per_site_LM = [np.sqrt(np.nanmean((df[df['site_id']==i_d]['wtemp_LM']-df[df['site_id']==i_d]['wtemp_obs'])**2)) for i_d in site_ids]
err_per_site_e5 = [np.sqrt(((df[df['site_id']==i_d]['wtemp_ERA5']-df[df['site_id']==i_d]['wtemp_obs'])**2).mean())for i_d in site_ids]
err_per_site_e5b = [np.sqrt(((df[df['site_id']==i_d]['wtemp_ERA5b']-3.46-df[df['site_id']==i_d]['wtemp_obs'])**2).mean())for i_d in site_ids]
err_per_site = pd.DataFrame()
err_per_site['site_id'] = site_ids
err_per_site['RMSE_EA'] = err_per_site_ea
print("median err_per_site EALSTM: ",np.median(err_per_site['RMSE_EA'].values))
err_per_site['RMSE_ERA5'] = err_per_site_e5
print("median err_per_site ERA5*: ",np.median(err_per_site['RMSE_ERA5'].values))
err_per_site['RMSE_ERA5b'] = err_per_site_e5b
print("median err_per_site ERA5: ",np.median(err_per_site['RMSE_ERA5b'].values))
err_per_site['RMSE_LM'] = err_per_site_LM
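# Generic helper (illustrative, not in the original script): per-site RMSE for any
# prediction column, equivalent to the list comprehensions above.
def _rmse_per_site(frame, pred_col, obs_col='wtemp_obs'):
    return [np.sqrt(np.nanmean((frame[frame['site_id'] == i_d][pred_col]
                                - frame[frame['site_id'] == i_d][obs_col]) ** 2))
            for i_d in np.unique(frame['site_id'].values)]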
#calc bias per site in 4 different ranges
t1 = df[df['wtemp_obs']<=10]
sites_t1 = np.unique(t1['site_id'].values)
t2 = df[(df['wtemp_obs']>=10)&(df['wtemp_obs']<=20)]
sites_t2 = np.unique(t2['site_id'].values)
t3 = df[(df['wtemp_obs']>=20)&(df['wtemp_obs']<=30)]
sites_t3 = np.unique(t3['site_id'].values)
t4 = df[(df['wtemp_obs']>=30)]
sites_t4 = np.unique(t4['site_id'].values)
print("calc bias per site t1")
bias_per_site_ea_t1 = [np.sqrt(((t1[t1['site_id']==i_d]['wtemp_EALSTM']-t1[t1['site_id']==i_d]['wtemp_obs'])**2).mean())for i_d in sites_t1]
bias_per_site_lm_t1 = [np.sqrt(np.nanmean((t1[t1['site_id']==i_d]['wtemp_LM']-t1[t1['site_id']==i_d]['wtemp_obs'])**2)) for i_d in sites_t1]
bias_per_site_e5_t1 = [np.sqrt(((t1[t1['site_id']==i_d]['wtemp_ERA5']-t1[t1['site_id']==i_d]['wtemp_obs'])**2).mean())for i_d in sites_t1]
bias_per_site_e5b_t1 = [np.sqrt(((t1[t1['site_id']==i_d]['wtemp_ERA5b']-t1[t1['site_id']==i_d]['wtemp_obs'])**2).mean())for i_d in sites_t1]
bias_per_site_t1 = pd.DataFrame()
bias_per_site_t1['site_id'] = sites_t1
bias_per_site_t1['EA'] = bias_per_site_ea_t1
bias_per_site_t1['E5'] = bias_per_site_e5_t1
bias_per_site_t1['E5b'] = bias_per_site_e5b_t1
bias_per_site_t1['LM'] = bias_per_site_lm_t1
bias_per_site_t1.to_feather("bias_per_site_t1")
# print("calc bias per site t2")
# bias_per_site_ea_t2 = [np.sqrt(((t2[t2['site_id']==i_d]['wtemp_EALSTM']-t2[t2['site_id']==i_d]['wtemp_obs'])**2).mean())for i_d in sites_t2]
# bias_per_site_lm_t2 = [np.sqrt(np.nanmean((t1[t1['site_id']==i_d]['wtemp_LM']-t2[t2['site_id']==i_d]['wtemp_obs'])**2)) for i_d in sites_t2]
# bias_per_site_e5_t2 = [np.sqrt(((t2[t2['site_id']==i_d]['wtemp_ERA5']-t2[t2['site_id']==i_d]['wtemp_obs'])**2).mean())for i_d in sites_t2]
# bias_per_site_e5b_t2 = [np.sqrt(((t2[t2['site_id']==i_d]['wtemp_ERA5b']-t2[t2['site_id']==i_d]['wtemp_obs'])**2).mean())for i_d in sites_t2]
# bias_per_site_t2 = pd.DataFrame()
# bias_per_site_t2['site_id'] = sites_t2
# bias_per_site_t2['EA'] = bias_per_site_ea_t2
# bias_per_site_t2['E5'] = bias_per_site_e5_t2
# bias_per_site_t2['E5b'] = bias_per_site_e5b_t2
# bias_per_site_t2['LM'] = bias_per_site_lm_t2
# bias_per_site_t2.to_feather("bias_per_site_t2")
# print("calc bias per site t3")
# bias_per_site_ea_t3 = [np.sqrt(((t3[t3['site_id']==i_d]['wtemp_EALSTM']-t3[t3['site_id']==i_d]['wtemp_obs'])**2).mean())for i_d in sites_t3]
# bias_per_site_lm_t3 = [np.sqrt(np.nanmean((t3[t3['site_id']==i_d]['wtemp_LM']-t3[t3['site_id']==i_d]['wtemp_obs'])**2)) for i_d in sites_t3]
# bias_per_site_e5_t3 = [np.sqrt(((t3[t3['site_id']==i_d]['wtemp_ERA5']-t3[t3['site_id']==i_d]['wtemp_obs'])**2).mean())for i_d in sites_t3]
# bias_per_site_e5b_t3 = [np.sqrt(((t3[t3['site_id']==i_d]['wtemp_ERA5b']-t3[t3['site_id']==i_d]['wtemp_obs'])**2).mean())for i_d in sites_t3]
# bias_per_site_t3 = pd.DataFrame()
# bias_per_site_t3['site_id'] = sites_t3
# bias_per_site_t3['EA'] = bias_per_site_ea_t3
# bias_per_site_t3['E5'] = bias_per_site_e5_t3
# bias_per_site_t3['E5b'] = bias_per_site_e5b_t3
# bias_per_site_t3['LM'] = bias_per_site_lm_t3
# bias_per_site_t3.to_feather("bias_per_site_t3")
# print("calc bias per site t4")
# bias_per_site_ea_t4 = [np.sqrt(((t4[t4['site_id']==i_d]['wtemp_EALSTM']-t4[t4['site_id']==i_d]['wtemp_obs'])**2).mean())for i_d in sites_t4]
# bias_per_site_lm_t4 = [np.sqrt(np.nanmean((t1[t1['site_id']==i_d]['wtemp_LM']-t4[t4['site_id']==i_d]['wtemp_obs'])**2)) for i_d in sites_t4]
# bias_per_site_e5_t4 = [np.sqrt(((t4[t4['site_id']==i_d]['wtemp_ERA5']-t4[t4['site_id']==i_d]['wtemp_obs'])**2).mean())for i_d in sites_t4]
# bias_per_site_e5b_t4 = [np.sqrt(((t4[t4['site_id']==i_d]['wtemp_ERA5b']-t4[t4['site_id']==i_d]['wtemp_obs'])**2).mean())for i_d in sites_t4]
# bias_per_site_t4 = pd.DataFrame()
# bias_per_site_t4['site_id'] = sites_t4
# bias_per_site_t4['EA'] = bias_per_site_ea_t4
# bias_per_site_t4['E5'] = bias_per_site_e5_t4
# bias_per_site_t4['E5b'] = bias_per_site_e5b_t4
# bias_per_site_t4['LM'] = bias_per_site_lm_t4
# bias_per_site_t4.to_feather("bias_per_site_t4")
err_per_site.to_feather("./err_per_site")
err_per_site = pd.read_feather('./err_per_site')
bias_per_site_t1 = pd.read_feather("bias_per_site_t1")
bias_per_site_t2 = pd.read_feather("bias_per_site_t2")
bias_per_site_t3 = pd.read_feather("bias_per_site_t3")
bias_per_site_t4 = pd.read_feather("bias_per_site_t4")
ov_RMSE_EA = np.sqrt(((df['wtemp_EALSTM']-df['wtemp_obs'])**2).mean())
ov_RMSE_E5 = np.sqrt(((df['wtemp_ERA5']-df['wtemp_obs'])**2).mean())
ov_RMSE_E5b = np.sqrt(((df['wtemp_ERA5b']-3.46-df['wtemp_obs'])**2).mean())
ov_RMSE_LM = np.sqrt(np.nanmean((df['wtemp_LM']-df['wtemp_obs'])**2))
md_RMSE_EA = np.median(err_per_site['RMSE_EA'])
md_RMSE_E5 = np.median(err_per_site['RMSE_ERA5'])
md_RMSE_E5b = np.median(err_per_site['RMSE_ERA5b'])
md_RMSE_LM = np.nanmedian(err_per_site['RMSE_LM'])
pdb.set_trace()
#row label
rows = ['EA-LSTM', 'ERA5*','ERA5', 'LM']
ha = 10000
lakes_area1 = meta[(meta['area_m2']< 10*ha)]['site_id'].values
lakes_area2 = meta[(meta['area_m2'] > 10*ha)&(meta['area_m2']< 100*ha)]['site_id'].values #n=69548
lakes_area3 = meta[(meta['area_m2'] > 100*ha)&(meta['area_m2']< 1000*ha)]['site_id'].values #n=8631
lakes_area4 = meta[(meta['area_m2'] > 1000*ha)]['site_id'].values #n=1451
EA_a1_RMSE = np.median(err_per_site[np.isin(site_ids,lakes_area1)]['RMSE_EA'])
E5_a1_RMSE = np.median(err_per_site[np.isin(site_ids,lakes_area1)]['RMSE_ERA5'])
E5b_a1_RMSE = np.median(err_per_site[np.isin(site_ids,lakes_area1)]['RMSE_ERA5b'])
LM_a1_RMSE = np.nanmedian(err_per_site[np.isin(site_ids,lakes_area1)]['RMSE_LM'])
EA_a2_RMSE = np.median(err_per_site[np.isin(site_ids,lakes_area2)]['RMSE_EA'])
E5_a2_RMSE = np.median(err_per_site[np.isin(site_ids,lakes_area2)]['RMSE_ERA5'])
E5b_a2_RMSE = np.median(err_per_site[np.isin(site_ids,lakes_area2)]['RMSE_ERA5b'])
LM_a2_RMSE = np.nanmedian(err_per_site[np.isin(site_ids,lakes_area2)]['RMSE_LM'])
EA_a3_RMSE = np.median(err_per_site[np.isin(site_ids,lakes_area3)]['RMSE_EA'])
E5_a3_RMSE = np.median(err_per_site[np.isin(site_ids,lakes_area3)]['RMSE_ERA5'])
E5b_a3_RMSE = np.median(err_per_site[np.isin(site_ids,lakes_area3)]['RMSE_ERA5b'])
LM_a3_RMSE = np.nanmedian(err_per_site[np.isin(site_ids,lakes_area3)]['RMSE_LM'])
#!/usr/bin/env python3
import numpy as np
import unittest
import warnings
from context import lir
from lir.transformers import InstancePairing
warnings.simplefilter("error")
class TestPairing(unittest.TestCase):
def test_pairing(self):
X = np.arange(30).reshape(10, 3)
y = np.concatenate([np.arange(5), np.arange(5)])
pairing = InstancePairing()
X_pairs, y_pairs = pairing.transform(X, y)
self.assertEqual(np.sum(y_pairs == 1), 5, 'number of same source pairs')
self.assertEqual(np.sum(y_pairs == 0), 2*(8+6+4+2), 'number of different source pairs')
pairing = InstancePairing(different_source_limit='balanced')
X_pairs, y_pairs = pairing.transform(X, y)
self.assertEqual(np.sum(y_pairs == 1), 5, 'number of same source pairs')
from ccores import constants, wav
import numpy as np
from scipy.ndimage.measurements import label
from scipy import ndimage
import xarray as xr
import ipdb
import os
class dataset(object):
def __init__(self, dataname):
if dataname in constants.NAMES:
dic = constants.NAMES[dataname]
else:
print('Dataset not found')
return
self.name = dataname
self.res = dic['dx']
self.dist = dic['dist']
self.nb = dic['nb']
self.start = dic['start']
self.Tcut = dic['Tcut']
self.Twav = dic['Twav']
obj = wav.wavelet(self.res, self.dist, self.nb, start=self.start)
self.scales = obj.scales
print('Initialised wavelet with scales: ', self.scales)
def read_img(self, torig, lon, lat, edge_smoothing=False, dynamic_background=False, min_area = False):
"""
Filters clouds by a set area threshold and prepares the image for wavelet analysis by adjusting the background temperature
and smoothing cloud edges.
torig: numpy array, cloud top temperature data
lon: 1d numpy array, longitude or x
lat: 1d numpy array, latitude or y
edge_smoothing: optional cloud edge smoothing via gaussian filter - can help in case of excessive core
identification at cloud edges (default: False)
dynamic_background: optional dynamical background temperature according to coldest pixel in image -
can help in case of excessive core identification at cloud edges (default: False)
min_area: optional minimum area threshold for identified clouds. If false, minimum is defined by the minimum
core scale (default: False)
:return: filtered cloud top temperatures with adjusted background temperature
"""
londiff = lon[0:-1]-lon[1::]
if not np.allclose(londiff, np.zeros_like(londiff)+londiff[0]):
print('Please provide regular grid coordinates.')
self.original = torig
t = torig.copy()
t[t >= self.Tcut] = 0
t[np.isnan(t)] = 0
outt = t.copy()
print('outmin', np.nanmin(outt), np.nanmax(outt))
labels, numL = label(outt)
u, inv = np.unique(labels, return_inverse=True)
n = np.bincount(inv)
# some approximate minimum cloud scales: 3 pixels across in any direction for minimum wavelet
min_diameter_cloud = 3 * self.res
#set optional minimum cloud area threshold
if min_area:
mincloud = min_area
else:
mincloud = (np.pi * min_diameter_cloud**2)
# min number of pixels in circular cloud
pix_nb = mincloud / self.res**2 # ~ 500km2 cloud = 20 pixel at 5km res
badinds = u[(n < pix_nb)]
goodinds = u[n >= pix_nb]
area_img = np.zeros_like(outt)
for bi in badinds:
inds = np.where(labels == bi)
outt[inds] = 0
for bi in goodinds:
inds = np.where(labels==bi)
area_img[inds]= int(len(inds[0]))#*self.res**2
#detect edge for optional edge smoothing
outt[outt >= self.Twav] = 150
grad = np.gradient(outt)
outt[outt == 150] = np.nan
invalid = np.isnan(outt)
# T difference between cloud edge and background
if dynamic_background:
tdiff = np.nanmax(outt) - np.nanmin(outt)
xmin = 0.5*tdiff
else:
xmin = 10
outt[invalid] = self.Twav - xmin
if edge_smoothing:
nok = np.where(abs(grad[0]) > 80)
d = 2
i = nok[0]
j = nok[1]
for ii, jj in zip(i, j):
kern = outt[ii - d:ii + d + 1, jj - d:jj + d + 1]
outt[ii - d:ii + d + 1, jj - d:jj + d + 1] = ndimage.gaussian_filter(kern, 3, mode='nearest')
self.image = outt
self.minPixel = pix_nb
self.area = area_img
self.invalid = invalid
self.lon = lon
self.lat = lat
def applyWavelet(self, ge_thresh=0, fill=0.01, le_thresh=None):
"""
Applies the wavelet functions and handles wavelet coefficient filtering.
:param ge_thresh: greater-equal threshold for coefficient filtering.
:param fill: fill value for filtering thresholds
:param le_thresh: less-equal threshold for coefficient filtering.
:return: Wavelet coefficient and wavelet power attributes of the wavelet object.
"""
try:
tir = self.image.copy()
except NameError:
print('No image found to apply wavelet. Please read in an image first.')
return
tir[tir > 0] = 0
tir = tir - np.mean(tir)
obj = wav.wavelet(self.res, self.dist, self.nb, start=self.start)
coeffsTIR, powerTIR = obj.calc_coeffs(tir, ge_thresh=ge_thresh, fill=fill, le_thresh=le_thresh)
self.power = powerTIR
self.coeffs = coeffsTIR
del tir
def scaleWeighting(self, wtype='sum', data_tag='MSG'):
"""
Accesses the wavelet power filtering utility functions.
:param wtype: Defines method for wavelet power weighting and core identification
:param data_tag: Identifies input data if needed for wtype
:return: power from weighted scales
"""
if wtype not in constants.UTILS:
print('Method type not found. Choose one of existing power weighting methods (UTILS in constants.py) or add a new one.')
return
self.data_tag = data_tag
Sweighting = constants.UTILS[wtype]
self.scale_weighted = Sweighting(self)
return self.scale_weighted
def to_dataarray(self, filepath=None, date=None, CLOBBER=False, names=None):
"""
Optional data saving function. Saves wavelet power and storm-filtered tir to netCDF files.
:param filepath: outpath for save file
:param date: optional datetime.datetime date for timestamp in data array
:param CLOBBER: if True, overwrites existing file
:param names: [str, str] format, gives custom names to power and thermal infrared (tir) data arrays.
If False: ['power', 'tir']
:return: saves netcdf of xarray dataset with convective core power and original tir data
"""
new_savet = self.original.copy()
isnan = np.isnan(new_savet)
new_savet[isnan] = 0
sfactor=100
try:
new_savet = (np.round(new_savet, 2) * sfactor).astype(np.int16)
except TypeError:
print('TIR data is None, DataArray conversion failed. Return')
return
try:
new_power = (np.round(self.scale_weighted.copy(), 0)).astype(np.int16)
except TypeError:
            print('Scale-weighted power is None, DataArray conversion failed. Return')
return
#new_power = self.scale_weighted
if self.lat.ndim == 2:
latitudes = self.lat[:, 0]
else:
latitudes = self.lat
if self.lon.ndim == 2:
longitudes = self.lon[0, :]
else:
longitudes = self.lon
ds = xr.Dataset()
# latitudes = np.arange(len(latitudes)).astype(int)
# longitudes = np.arange(len(longitudes)).astype(int)
if date:
if new_power.ndim > 2:
try:
power_da = xr.DataArray(new_power[np.newaxis, :], coords={'time': date, 'scales': np.round(self.scales).astype(np.uint8),
'lat': latitudes, 'lon': longitudes}, # [np.newaxis, :]
dims=['time', 'scales', 'lat', 'lon'])
except ValueError:
ipdb.set_trace()
else:
try:
power_da = xr.DataArray(new_power[np.newaxis, :], coords={'time': date,'lat': latitudes, 'lon': longitudes},
dims=['time', 'lat', 'lon'])
except ValueError:
ipdb.set_trace()
tir_da = xr.DataArray(new_savet[np.newaxis, :], coords={'time': date, 'lat': latitudes, 'lon': longitudes}, # 'time': date,
dims=['time', 'lat', 'lon'])
else:
if new_power.ndim > 2:
try:
power_da = xr.DataArray(new_power,
                                            coords={'scales': np.round(self.scales).astype(np.uint8),
                                                    'lat': latitudes, 'lon': longitudes},
                                            dims=['scales', 'lat', 'lon'])
                except ValueError:
                    ipdb.set_trace()
import numpy as np
import matplotlib.pyplot as plt
from vo2mft.elHamiltonian import ElHamiltonian
def _sc_kpath(alat):
# k-path runs over simple cubic Brillouin zone:
# G-X-M-G-R-X-M-R.
labels = ["$\\Gamma$", "$X$", "$M$", "$\\Gamma$", "$R$", "$X$", "$M$", "$R$"]
# High-symmetry points in reciprocal lattice coordinates.
G = (0.0, 0.0, 0.0)
M = (0.5, 0.5, 0.0)
R = (0.5, 0.5, 0.5)
X = (0.0, 0.5, 0.0)
# Columns of Dlat = direct lattice vectors.
Dlat = np.array([[alat, 0.0, 0.0], [0.0, alat, 0.0], [0.0, 0.0, alat]])
# Rows of Rlat = reciprocal lattice vectors.
Rlat = 2.0 * np.pi * np.linalg.inv(Dlat)
# High-symmetry points in Cartesian coordinates.
Gc = np.dot(G, Rlat)
Mc = np.dot(M, Rlat)
Rc = np.dot(R, Rlat)
Xc = np.dot(X, Rlat)
kpath = (Gc, Xc, Mc, Gc, Rc, Xc, Mc, Rc)
return kpath, labels
def _interpolate_kpoints(kpath, kpoints_per_panel):
interpolated = []
for panel_index in range(len(kpath)):
# Take panel from kpath[i-1] to kpath[i].
if panel_index == 0:
continue
kstart = kpath[panel_index-1]
kstop = kpath[panel_index]
        step = np.subtract(kstop, kstart)
import grasping.annotation.utils as gau
if __name__ == '__main__':
import numpy as np
import robot_sim.end_effectors.grippers.robotiq85.robotiq85 as rtq85
import modeling.collision_model as cm
import visualization.panda.world as wd
base = wd.World(cam_pos=[.5, .5, .3], lookat_pos=[0, 0, 0])
gripper_s = rtq85.Robotiq85(enable_cc=True)
objcm = cm.CollisionModel("./objects/bunnysim.stl")
objcm.set_pos(np.array([.5,-.3,1.2]))
objcm.attach_to(base)
objcm.show_localframe()
grasp_info_list = gau.define_grasp_with_rotation(gripper_s,
objcm,
gl_jaw_center_pos=np.array([0, 0, 0]),
                                                     gl_jaw_center_z=np.array([1, 0, 0]),
import math
from matplotlib import text as mtext
import numpy as np
class CurvedText(mtext.Text):
"""
A text object that follows an arbitrary curve.
"""
def __init__(self, x, y, text, axes, **kwargs):
super(CurvedText, self).__init__(x[0],y[0],' ', **kwargs)
axes.add_artist(self)
##saving the curve:
self.__x = x
self.__y = y
self.__zorder = self.get_zorder()
##creating the text objects
self.__Characters = []
for c in text:
if c == ' ':
##make this an invisible 'a':
t = mtext.Text(0,0,'a')
t.set_alpha(0.0)
else:
t = mtext.Text(0,0,c, **kwargs)
#resetting unnecessary arguments
t.set_ha('center')
t.set_rotation(0)
t.set_zorder(self.__zorder +1)
self.__Characters.append((c,t))
axes.add_artist(t)
##overloading some member functions, to assure correct functionality
##on update
def set_zorder(self, zorder):
super(CurvedText, self).set_zorder(zorder)
self.__zorder = self.get_zorder()
for c,t in self.__Characters:
t.set_zorder(self.__zorder+1)
def draw(self, renderer, *args, **kwargs):
"""
        Overload of the Text.draw() function. Does not do
        any drawing itself, but updates the positions and rotation
        angles of self.__Characters.
"""
self.update_positions(renderer)
def update_positions(self,renderer):
"""
Update positions and rotations of the individual text elements.
"""
#preparations
##determining the aspect ratio:
##from https://stackoverflow.com/a/42014041/2454357
##data limits
xlim = self.axes.get_xlim()
ylim = self.axes.get_ylim()
## Axis size on figure
figW, figH = self.axes.get_figure().get_size_inches()
## Ratio of display units
_, _, w, h = self.axes.get_position().bounds
##final aspect ratio
aspect = ((figW * w)/(figH * h))*(ylim[1]-ylim[0])/(xlim[1]-xlim[0])
#points of the curve in figure coordinates:
x_fig,y_fig = (
np.array(l) for l in zip(*self.axes.transData.transform([
(i,j) for i,j in zip(self.__x,self.__y)
]))
)
#point distances in figure coordinates
x_fig_dist = (x_fig[1:]-x_fig[:-1])
y_fig_dist = (y_fig[1:]-y_fig[:-1])
r_fig_dist = np.sqrt(x_fig_dist**2+y_fig_dist**2)
#arc length in figure coordinates
l_fig = np.insert(np.cumsum(r_fig_dist),0,0)
#angles in figure coordinates
rads = np.arctan2((y_fig[1:] - y_fig[:-1]),(x_fig[1:] - x_fig[:-1]))
degs = np.rad2deg(rads)
rel_pos = 10
for c,t in self.__Characters:
#finding the width of c:
t.set_rotation(0)
t.set_va('center')
bbox1 = t.get_window_extent(renderer=renderer)
w = bbox1.width
h = bbox1.height
#ignore all letters that don't fit:
if rel_pos+w/2 > l_fig[-1]:
t.set_alpha(0.0)
rel_pos += w
continue
elif c != ' ':
t.set_alpha(1.0)
#finding the two data points between which the horizontal
#center point of the character will be situated
#left and right indices:
il = np.where(rel_pos+w/2 >= l_fig)[0][-1]
ir = np.where(rel_pos+w/2 <= l_fig)[0][0]
#if we exactly hit a data point:
if ir == il:
ir += 1
#how much of the letter width was needed to find il:
used = l_fig[il]-rel_pos
rel_pos = l_fig[il]
#relative distance between il and ir where the center
#of the character will be
fraction = (w/2-used)/r_fig_dist[il]
##setting the character position in data coordinates:
##interpolate between the two points:
x = self.__x[il]+fraction*(self.__x[ir]-self.__x[il])
y = self.__y[il]+fraction*(self.__y[ir]-self.__y[il])
#getting the offset when setting correct vertical alignment
#in data coordinates
t.set_va(self.get_va())
bbox2 = t.get_window_extent(renderer=renderer)
bbox1d = self.axes.transData.inverted().transform(bbox1)
bbox2d = self.axes.transData.inverted().transform(bbox2)
dr = np.array(bbox2d[0]-bbox1d[0])
#the rotation/stretch matrix
rad = rads[il]
rot_mat = np.array([
[math.cos(rad), math.sin(rad)*aspect],
[-math.sin(rad)/aspect, math.cos(rad)]
])
##computing the offset vector of the rotated character
drp = np.dot(dr,rot_mat)
#setting final position and rotation:
            t.set_position(np.array([x, y]) + drp)
            t.set_rotation(degs[il])
import numpy as np
import array
import os, sys
import re
import smpy.smpy as S
from scipy.interpolate import CubicSpline
from astropy.table import Table
from astropy import units as u
def f99_extinction(wave):
"""
Return Fitzpatrick 99 galactic extinction curve as a function of wavelength
"""
anchors_x = [0., 0.377, 0.820, 1.667, 1.828, 2.141, 2.433, 3.704, 3.846]
anchors_y = [0., 0.265, 0.829, 2.688, 3.055, 3.806, 4.315, 6.265, 6.591]
f99 = CubicSpline(anchors_x, anchors_y)
output_x = (1 / wave.to(u.micron)).value
return f99(output_x)
def process(catalog_in, translate_file, filter_file,
cat_format = 'ascii.commented_header',
id_col = 'id', flux_col = 'flux',
fluxerr_col = 'fluxerr', exclude_columns=None,
correct_extinction=True,
overwrite=True, verbose=False):
input_data = Table.read(catalog_in, format=cat_format)
if exclude_columns != None:
input_data.remove_columns(exclude_columns)
column_names = input_data.columns.keys()
ID = input_data[id_col]
flux_col_end = flux_col
fluxerr_col_end = fluxerr_col
try:
translate_init = Table.read(translate_file, format='ascii.no_header')
fnames = translate_init['col1']
fcodes = translate_init['col2']
flux_cols = np.array([a.startswith('F') for a in fcodes])
fluxerr_cols = np.array([a.startswith('E') for a in fcodes])
except:
raise
# Parse filter file with smpy - get central wavelengths back
filt_obj = S.LoadEAZYFilters(filter_file)
lambda_cs = np.zeros(flux_cols.sum())
f99_means = np.zeros(flux_cols.sum())
for il, line in enumerate(translate_init[flux_cols]):
filtnum = int(line['col2'][1:])-1
lambda_cs[il] = filt_obj.filters[filtnum].lambda_c.value
wave = filt_obj.filters[filtnum].wave
resp = filt_obj.filters[filtnum].response
f99_ext = f99_extinction(wave)
f99_means[il] = np.trapz(resp*f99_ext, wave.value) / np.trapz(resp, wave.value)
wl_order = np.argsort(lambda_cs)
flux_colnames_ordered = fnames[flux_cols][wl_order]
fluxerr_colnames_ordered = fnames[fluxerr_cols][wl_order]
f99_means_ordered = f99_means[wl_order]
fluxes = np.zeros((len(input_data), len(flux_colnames_ordered)))
fluxerrs = np.zeros((len(input_data), len(flux_colnames_ordered)))
for i in range(len(flux_colnames_ordered)):
if correct_extinction:
try:
extinctions = f99_means_ordered[i]*input_data['EBV']
flux_correction = 10**(extinctions/2.5)
isgood = (input_data[fluxerr_colnames_ordered[i]] > 0.)
input_data[flux_colnames_ordered[i]][isgood] *= flux_correction[isgood]
except:
raise
fluxes[:,i] = input_data[flux_colnames_ordered[i]]
fluxerrs[:,i] = input_data[fluxerr_colnames_ordered[i]]
fluxes = fluxes[:,1:-1]
fluxerrs = fluxerrs[:,1:-1]
fnames_full = flux_colnames_ordered[1:-1]
efnames_full = fluxerr_colnames_ordered[1:-1]
fnames = [filt.split('_')[0] for filt in fnames_full]
color_a = np.zeros(fluxes.shape)
    color_b = np.zeros(fluxes.shape)
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 31 15:15:12 2019
@author: bwc
"""
# standard imports
import numpy as np
import matplotlib.pyplot as plt
# custom imports
import apt_fileio
import peak_param_determination as ppd
from histogram_functions import bin_dat
import scipy.interpolate
import image_registration.register_images
#import sel_align_m2q_log_xcorr
import scaling_correction
import time
import m2q_calib
from voltage_and_bowl import do_voltage_and_bowl
import voltage_and_bowl
import colorcet as cc
import matplotlib._color_data as mcd
import matplotlib
FIGURE_SCALE_FACTOR = 2
def colorbar():
fig = plt.gcf()
ax = fig.gca()
#
# norm = matplotlib.colors.Normalize(vmin=0, vmax=1, clip=False)
#
fig.colorbar()
return None
def extents(f):
delta = f[1] - f[0]
return [f[0] - delta/2, f[-1] + delta/2]
def create_histogram(ys,cts_per_slice=2**10,y_roi=None,delta_y=0.1):
# even number
num_y = int(np.ceil(np.abs(np.diff(y_roi))/delta_y/2)*2)
num_x = int(ys.size/cts_per_slice)
xs = np.arange(ys.size)
N,x_edges,y_edges = np.histogram2d(xs,ys,bins=[num_x,num_y],range=[[1,ys.size],y_roi],density=False)
return (N,x_edges,y_edges)
def edges_to_centers(*edges):
centers = []
for es in edges:
centers.append((es[0:-1]+es[1:])/2)
if len(centers)==1:
centers = centers[0]
return centers
def plot_2d_histo(ax,N,x_edges,y_edges):
pos1 = ax.imshow(np.log10(1+1*np.transpose(N)), aspect='auto',
extent=extents(x_edges) + extents(y_edges), origin='lower', cmap=cc.cm.CET_L8,
interpolation='bicubic')
ax.set_xticks([0,100000,200000,300000,400000])# ax.set_xticklabels(["\n".join(x) for x in data.index])
return pos1
def steel():
# Load and subsample data (for easier processing)
fn = r"Q:\NIST_Projects\EUV_APT_IMS\BWC\R44_02203-v01.epos"
epos = apt_fileio.read_epos_numpy(fn)
epos = epos[100000::10]
# Voltage and bowl correct ToF data
p_volt = np.array([])
p_bowl = np.array([])
t_i = time.time()
tof_corr, p_volt, p_bowl = do_voltage_and_bowl(epos,p_volt,p_bowl)
print("time to voltage and bowl correct: "+str(time.time()-t_i)+" seconds")
# Only apply bowl correction
tof_bcorr = voltage_and_bowl.mod_geometric_bowl_correction(p_bowl,epos['tof'],epos['x_det'],epos['y_det'])
# Plot histogram for steel
fig = plt.figure(figsize=(FIGURE_SCALE_FACTOR*3.14961,FIGURE_SCALE_FACTOR*3.14961),num=1,dpi=100)
plt.clf()
ax1, ax2 = fig.subplots(2,1,sharex=True)
N,x_edges,y_edges = create_histogram(tof_bcorr,y_roi=[400,600],cts_per_slice=2**9,delta_y=0.25)
im = plot_2d_histo(ax1,N,x_edges,y_edges)
# plt.colorbar(im)
ax1.set(ylabel='flight time (ns)')
ax1twin = ax1.twinx()
ax1twin.plot(epos['v_dc'],'-',
linewidth=2,
color=mcd.XKCD_COLORS['xkcd:white'])
ax1twin.set(ylabel='applied voltage (volts)',ylim=[0, 6000],xlim=[0, 400000])
N,x_edges,y_edges = create_histogram(tof_corr,y_roi=[425,475],cts_per_slice=2**9,delta_y=0.25)
im = plot_2d_histo(ax2,N,x_edges,y_edges)
# plt.colorbar(im)
ax2.set(xlabel='ion sequence',ylabel='corrected flight time (ns)')
fig.tight_layout()
fig.savefig(r'Q:\users\bwc\APT\scale_corr_paper\metal_not_wandering.pdf', format='pdf', dpi=600)
return 0
def sio2_R45():
fn = r"Q:\NIST_Projects\EUV_APT_IMS\BWC\R45_data\R45_04472-v02_allVfromAnn.epos"
epos = apt_fileio.read_epos_numpy(fn)
epos = epos[25000:]
# Voltage and bowl correct ToF data
p_volt = np.array([])
p_bowl = np.array([])
t_i = time.time()
tof_corr, p_volt, p_bowl = do_voltage_and_bowl(epos,p_volt,p_bowl)
print("time to voltage and bowl correct: "+str(time.time()-t_i)+" seconds")
# Only apply bowl correction
tof_bcorr = voltage_and_bowl.mod_geometric_bowl_correction(p_bowl,epos['tof'],epos['x_det'],epos['y_det'])
# Plot histogram for sio2
fig = plt.figure(figsize=(FIGURE_SCALE_FACTOR*3.14961,FIGURE_SCALE_FACTOR*3.14961),num=2,dpi=100)
plt.clf()
ax1, ax2 = fig.subplots(2,1,sharex=True)
N,x_edges,y_edges = create_histogram(tof_bcorr,y_roi=[280,360],cts_per_slice=2**9,delta_y=.25)
im = plot_2d_histo(ax1,N,x_edges,y_edges)
plt.colorbar(im)
ax1.set(ylabel='flight time (ns)')
ax1twin = ax1.twinx()
ax1twin.plot(epos['v_dc'],'-',
linewidth=2,
color=mcd.XKCD_COLORS['xkcd:white'])
ax1twin.set(ylabel='applied voltage (volts)',ylim=[0000, 8000],xlim=[0, 400000])
N,x_edges,y_edges = create_histogram(tof_corr,y_roi=[280,360],cts_per_slice=2**9,delta_y=.25)
im = plot_2d_histo(ax2,N,x_edges,y_edges)
plt.colorbar(im)
ax2.set(xlabel='ion sequence',ylabel='corrected flight time (ns)')
fig.tight_layout()
fig.savefig(r'Q:\users\bwc\APT\scale_corr_paper\SiO2_NUV_wandering.pdf', format='pdf', dpi=600)
return 0
def sio2_R44():
fn = r"C:\Users\bwc\Documents\NetBeansProjects\R44_03200\recons\recon-v02\default\R44_03200-v02.epos"
epos = apt_fileio.read_epos_numpy(fn)
# Voltage and bowl correct ToF data
p_volt = np.array([])
p_bowl = np.array([])
t_i = time.time()
tof_corr, p_volt, p_bowl = do_voltage_and_bowl(epos,p_volt,p_bowl)
print("time to voltage and bowl correct: "+str(time.time()-t_i)+" seconds")
# Only apply bowl correction
tof_bcorr = voltage_and_bowl.mod_geometric_bowl_correction(p_bowl,epos['tof'],epos['x_det'],epos['y_det'])
# Plot histogram for sio2
fig = plt.figure(figsize=(FIGURE_SCALE_FACTOR*3.14961,FIGURE_SCALE_FACTOR*3.14961),num=3,dpi=100)
plt.clf()
ax1, ax2 = fig.subplots(2,1,sharex=True)
roi = [1400000,1800000]
N,x_edges,y_edges = create_histogram(tof_bcorr[roi[0]:roi[1]],y_roi=[300,310],cts_per_slice=2**7,delta_y=.2)
im = plot_2d_histo(ax1,N,x_edges,y_edges)
plt.colorbar(im)
ax1.set(ylabel='flight time (ns)')
ax1twin = ax1.twinx()
ax1twin.plot(epos['v_dc'][roi[0]:roi[1]],'-',
linewidth=2,
color=mcd.XKCD_COLORS['xkcd:white'])
ax1twin.set(ylabel='applied voltage (volts)',ylim=[0000, 7000],xlim=[0,None])
N,x_edges,y_edges = create_histogram(tof_corr[roi[0]:roi[1]],y_roi=[300,310],cts_per_slice=2**7,delta_y=0.2)
im = plot_2d_histo(ax2,N,x_edges,y_edges)
plt.colorbar(im)
ax2.set(xlabel='ion sequence',ylabel='corrected flight time (ns)')
ax2.set_xlim(0,roi[1]-roi[0])
fig.tight_layout()
fig.savefig(r'Q:\users\bwc\APT\scale_corr_paper\Figure_R44NUV.pdf', format='pdf', dpi=600)
return 0
def sio2_R20():
fn = r"Q:\NIST_Projects\EUV_APT_IMS\BWC\R20_07080-v01.epos"
epos = apt_fileio.read_epos_numpy(fn)
#epos = epos[165000:582000]
# Voltage and bowl correct ToF data
p_volt = np.array([])
p_bowl = np.array([])
t_i = time.time()
tof_corr, p_volt, p_bowl = do_voltage_and_bowl(epos,p_volt,p_bowl)
print("time to voltage and bowl correct: "+str(time.time()-t_i)+" seconds")
# Only apply bowl correction
tof_bcorr = voltage_and_bowl.mod_geometric_bowl_correction(p_bowl,epos['tof'],epos['x_det'],epos['y_det'])
# Plot histogram for sio2
fig = plt.figure(figsize=(FIGURE_SCALE_FACTOR*3.14961,FIGURE_SCALE_FACTOR*3.14961),num=4,dpi=100)
plt.clf()
ax1, ax2 = fig.subplots(2,1,sharex=True)
N,x_edges,y_edges = create_histogram(tof_bcorr,y_roi=[320,380],cts_per_slice=2**9,delta_y=.25)
plot_2d_histo(ax1,N,x_edges,y_edges)
ax1.set(ylabel='flight time (ns)')
ax1twin = ax1.twinx()
ax1twin.plot(epos['v_dc'],'-',
linewidth=2,
color=mcd.XKCD_COLORS['xkcd:white'])
ax1twin.set(ylabel='applied voltage (volts)',ylim=[0000, 5000],xlim=[0, 400000])
N,x_edges,y_edges = create_histogram(tof_corr,y_roi=[320,380],cts_per_slice=2**9,delta_y=.25)
plot_2d_histo(ax2,N,x_edges,y_edges)
ax2.set(xlabel='ion sequence',ylabel='corrected flight time (ns)')
fig.tight_layout()
fig.savefig(r'Q:\users\bwc\APT\scale_corr_paper\SiO2_EUV_wandering.pdf', format='pdf', dpi=600)
return 0
def corr_idea():
fig = plt.figure(num=5)
plt.close(fig)
fig = plt.figure(constrained_layout=True,figsize=(FIGURE_SCALE_FACTOR*3.14961,FIGURE_SCALE_FACTOR*1.5*3.14961),num=5,dpi=100)
gs = fig.add_gridspec(3, 1)
ax2 = fig.add_subplot(gs[:2, :])
ax1 = fig.add_subplot(gs[2, :])
def shaded_plot(ax,x,y,idx,col_idx=None):
if col_idx is None:
col_idx = idx
sc = 50
cols = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
xlim = ax.get_xlim()
idxs = np.nonzero((x>=xlim[0]) & (x<=xlim[1]))
ax.fill_between(x[idxs], y[idxs]+idx*sc, (idx-0.005)*sc, color=cols[col_idx])
# ax.plot(x,y+idx*sc, color='k')
return
fn = r"Q:\NIST_Projects\EUV_APT_IMS\BWC\R45_data\R45_04472-v02_allVfromAnn.epos"
epos = apt_fileio.read_epos_numpy(fn)
epos = epos[25000:]
# Voltage and bowl correct ToF data
p_volt = np.array([])
p_bowl = np.array([])
t_i = time.time()
tof_corr, p_volt, p_bowl = do_voltage_and_bowl(epos,p_volt,p_bowl)
print("time to voltage and bowl correct: "+str(time.time()-t_i)+" seconds")
# Plot histogram for sio2
# fig = plt.figure(figsize=(2*3.14961,2*3.14961),num=654321,dpi=100)
# plt.clf()
# ax2 = fig.subplots(1,1)
N,x_edges,y_edges = create_histogram(tof_corr,y_roi=[80,400],cts_per_slice=2**10,delta_y=0.0625)
#ax1.imshow(np.log10(1+1*np.transpose(N)), aspect='auto',
# extent=extents(x_edges) + extents(y_edges), origin='lower', cmap=cc.cm.CET_L8,
# interpolation='bilinear')
event_idx_range_ref = [0, 0+1024]
event_idx_range_mov = [124000, 124000+1024]
x_centers = edges_to_centers(x_edges)
idxs_ref = (x_centers>=event_idx_range_ref[0]) & (x_centers<=event_idx_range_ref[1])
idxs_mov = (x_centers>=event_idx_range_mov[0]) & (x_centers<=event_idx_range_mov[1])
ref_hist = np.sum(N[idxs_ref,:],axis=0)
mov_hist = np.sum(N[idxs_mov,:],axis=0)
y_centers = edges_to_centers(y_edges)
ax2.set(xlim=[290,320])
N,x_edges,y_edges = create_histogram(0.98*tof_corr,y_roi=[80,400],cts_per_slice=2**10,delta_y=0.0625)
mov_hist = np.sum(N[idxs_mov,:],axis=0)
shaded_plot(ax2,y_centers,mov_hist,2,2)
N,x_edges,y_edges = create_histogram(0.99*tof_corr,y_roi=[80,400],cts_per_slice=2**10,delta_y=0.0625)
mov_hist = np.sum(N[idxs_mov,:],axis=0)
shaded_plot(ax2,y_centers,ref_hist,3,3)
shaded_plot(ax2,y_centers,mov_hist,1,1)
N,x_edges,y_edges = create_histogram(1.0*tof_corr,y_roi=[80,400],cts_per_slice=2**10,delta_y=0.0625)
mov_hist = np.sum(N[idxs_mov,:],axis=0)
shaded_plot(ax2,y_centers,mov_hist,0,col_idx=0)
cs = np.linspace(0.975, 1.005, 256)
dp = np.zeros_like(cs)
for idx, c in enumerate(cs):
N,x_edges,y_edges = create_histogram(c*tof_corr,y_roi=[80,400],cts_per_slice=2**10,delta_y=0.0625)
mov_hist = np.sum(N[idxs_mov,:],axis=0)
dp[idx] = np.sum((mov_hist/np.sum(mov_hist))*(ref_hist/np.sum(ref_hist)))
ax1.set(xlim=[0.975, 1.005],ylim=[-0.1,1.1])
f = scipy.interpolate.interp1d(cs,dp/np.max(dp))
cols = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
xq = [0.98, 0.99017, 1.0]
for idx in [0,1,2]:
ax1.plot(xq[idx],f(xq[idx]),'o',markersize=14,color=cols[2-idx])
ax1.plot(cs,dp/np.max(dp),'k')
ax1.set_xlabel('correction factor, c')
ax1.set_ylabel('dot product (norm)')
ax2.set_xlabel('corrected time of flight (ns)')
ax2.set_ylabel('counts')
plt.pause(0.1)
fig.tight_layout()
fig.savefig(r'Q:\users\bwc\APT\scale_corr_paper\correction_idea.pdf', format='pdf', dpi=600)
return 0
def sio2_R45_corr():
fn = r"Q:\NIST_Projects\EUV_APT_IMS\BWC\R45_data\R45_04472-v03.epos"
epos = apt_fileio.read_epos_numpy(fn)
epos = epos[25000:]
epos = epos[:400000]
# Voltage and bowl correct ToF data
p_volt = np.array([])
p_bowl = np.array([])
t_i = time.time()
tof_corr, p_volt, p_bowl = do_voltage_and_bowl(epos,p_volt,p_bowl)
print("time to voltage and bowl correct: "+str(time.time()-t_i)+" seconds")
# fake_tof = np.sqrt((296/312)*epos['m2q']/1.393e-4)
m2q_roi=[0.8,80]
m2q_to_tof = 613/np.sqrt(59)
tof_roi = [m2q_roi[0]*m2q_to_tof, m2q_roi[1]*m2q_to_tof]
cts_per_slice=2**7
#m2q_roi = [0.9,190]
# tof_roi = [0, 1000]
t_start = time.time()
pointwise_scales,piecewise_scales = scaling_correction.get_all_scale_coeffs(tof_corr,
m2q_roi=tof_roi,
cts_per_slice=cts_per_slice,
max_scale=1.075)
t_end = time.time()
print('Total Time = ',t_end-t_start)
# fake_tof_corr = fake_tof/np.sqrt(pointwise_scales)
q_tof_corr = tof_corr/pointwise_scales
# m2q_corr = epos['m2q']/pointwise_scales
# Plot histogram for sio2
fig = plt.figure(figsize=(FIGURE_SCALE_FACTOR*3.14961,FIGURE_SCALE_FACTOR*3.14961),num=6,dpi=100)
plt.clf()
ax1, ax2 = fig.subplots(2,1,sharex=True)
N,x_edges,y_edges = create_histogram(tof_corr,y_roi=[280,360],cts_per_slice=2**9,delta_y=.25)
im = plot_2d_histo(ax1,N,x_edges,y_edges)
plt.colorbar(im)
ax1.set(ylabel='flight time (ns)')
ax1twin = ax1.twinx()
ax1twin.plot(pointwise_scales,'-',
linewidth=1,
color=mcd.XKCD_COLORS['xkcd:white'])
ax1twin.set(ylabel='correction factor, c',ylim=[0.98, 1.2],xlim=[0, 400000])
N,x_edges,y_edges = create_histogram(q_tof_corr,y_roi=[280,360],cts_per_slice=2**9,delta_y=.25)
im = plot_2d_histo(ax2,N,x_edges,y_edges)
plt.colorbar(im)
ax2.set(xlabel='ion sequence',ylabel='corrected flight time (ns)')
fig.tight_layout()
fig.savefig(r'Q:\users\bwc\APT\scale_corr_paper\SiO2_NUV_corrected.pdf', format='pdf', dpi=600)
return 0
def sio2_R45_histo():
def shaded_plot(ax,x,y,idx,col_idx=None,min_val=None):
if col_idx is None:
col_idx = idx
if min_val is None:
min_val = np.min(y)
sc = 150
cols = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
xlim = ax.get_xlim()
idxs = np.nonzero((x>=xlim[0]) & (x<=xlim[1]))
ax.fill_between(x[idxs], y[idxs], min_val, color=cols[col_idx],linestyle='None',lw=0)
# ax.plot(x,y+idx*sc, color='k')
return
fn = r"Q:\NIST_Projects\EUV_APT_IMS\BWC\R45_data\R45_04472-v03.epos"
epos = apt_fileio.read_epos_numpy(fn)
epos = epos[25000:]
epos = epos[:400000]
# Voltage and bowl correct ToF data
p_volt = np.array([])
p_bowl = np.array([])
t_i = time.time()
tof_corr, p_volt, p_bowl = do_voltage_and_bowl(epos,p_volt,p_bowl)
print("time to voltage and bowl correct: "+str(time.time()-t_i)+" seconds")
# fake_tof = np.sqrt((296/312)*epos['m2q']/1.393e-4)
m2q_roi=[0.8,80]
    m2q_to_tof = 613/np.sqrt(59)
import os
import numpy as np
def cat_dog_dataset_train_val(train_dir, num_val_img):
# train image
dog_img = np.array([i for i in os.listdir(train_dir) if 'dog' in i])
cat_img = np.array([i for i in os.listdir(train_dir) if 'cat' in i])
np.random.shuffle(dog_img)
    np.random.shuffle(cat_img)
"""Demonstrates imprecision when using ERAN for BMC.
Code adapted from the ERAN project: https://github.com/eth-sri/eran
"""
import sys
sys.path.insert(0, "../ELINA/python_interface/")
sys.path.insert(0, ".")
from PIL import Image
import math
import numpy as np
import matplotlib
import matplotlib.image
import os
from eran import ERAN
from fppoly import *
from elina_coeff import *
from elina_linexpr0 import *
from read_net_file import *
from analyzer import *
import tensorflow as tf
import csv
import time
import argparse
from timeit import default_timer as timer
from tqdm import tqdm
args = {
"complete": False,
"timeout_lp": 1,
"timeout_milp": 1,
"use_area_heuristic": True,
}
def main():
"""Runs the ERAN analysis on the pendulum_continuous model.
This happens in a few steps:
1. ERAN doesn't specifically support HardTanh layers, so we translate the
HardTanh into an equivalent set of ReLU layers using convert_htanh().
2. We then read the controller network into an ERAN model.
3. We use ERAN/DeepPoly to extract an abstract value describing the
       network's behavior over the initial set. Modulo floating-point imprecision
       handling, this abstract value is essentially two affine transforms A and B
       such that Ax <= f(x) <= Bx for all x in the initial set.
4. We compute Ax and Bx for a particular point (0.35, 0.35) right on the
edge of the initial set, and show that the range determined by DeepPoly
(even after applying a concrete HardTanh at the end) is wide enough to
mark that point as unsafe even on the first iteration.
"""
# (1) Translate the model into an equivalent one without HardTanh.
with_htanh_filename = sys.argv[1]
no_htanh_filename = "/ovol/pendulum_continuous.no_htanh.eran"
convert_htanh(with_htanh_filename, no_htanh_filename)
# (2) Read it into ERAN.
num_pixels = 2
model, _, _, _ = read_net(no_htanh_filename, num_pixels, False)
eran = ERAN(model)
# (3) Extract an abstract value over the initial set.
# (3a) Load model and init set into ERAN.
initLB = np.array([-0.35, -0.35])
initUB = np.array([0.35, 0.35])
nn = layers()
nn.specLB = initLB
nn.specUB = initUB
execute_list = eran.optimizer.get_deeppoly(initLB, initUB)
# NOTE: 9 is just a placeholder specnumber to tell it we're using
# ACAS.
analyzer = Analyzer(execute_list, nn, "deeppoly", args["timeout_lp"],
args["timeout_milp"], 9, args["use_area_heuristic"])
# (3b) Perform the analysis and extract the abstract values.
element, _, _ = analyzer.get_abstract0()
lexpr = get_lexpr_for_output_neuron(analyzer.man, element, 0)
uexpr = get_uexpr_for_output_neuron(analyzer.man, element, 0)
lexpr = np.array(extract_from_expr(lexpr))
uexpr = np.array(extract_from_expr(uexpr))
# (3c) Extract the output range for initLB based on the abstract value.
lower_bound, upper_bound = compute_output_range(initLB, lexpr, uexpr)
# Apply extra knowledge that -1 <= lower_bound <= upper_bound <= 1.
lower_bound = max(lower_bound, -1.)
upper_bound = min(upper_bound, 1.)
post_lower, post_upper = post_bounds(initLB, lower_bound, upper_bound)
post_lower, post_upper = post_lower.flatten(), post_upper.flatten()
lower_safe = np.min(post_lower) >= -0.35
    upper_safe = np.max(post_upper) <= 0.35
__version__ = "v1.1"
__license__ = "MIT"
__author__ = "<NAME>, PhD"
import sys
import numpy as np
import scipy as sp
from scipy.integrate import odeint
from PyQt5 import QtCore, QtGui, QtWidgets
from pyqtgraph import PlotWidget, plot
import pyqtgraph as pg
from sir import *
class SIR_QCvWidget(object):
def setupUi(self, QCvWidget):
QCvWidget.setObjectName("QCvWidget")
QCvWidget.resize(947, 812)
self.gridLayout_2 = QtWidgets.QGridLayout(QCvWidget)
self.gridLayout_2.setContentsMargins(11, 11, 11, 11)
self.gridLayout_2.setSpacing(6)
self.gridLayout_2.setObjectName("gridLayout_2")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setSpacing(6)
self.verticalLayout.setObjectName("verticalLayout")
#
# Graph widget
#
self.graphWidget = pg.PlotWidget(QCvWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.graphWidget.sizePolicy().hasHeightForWidth())
self.graphWidget.setSizePolicy(sizePolicy)
self.graphWidget.setObjectName("graphicsView")
self.verticalLayout.addWidget(self.graphWidget)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
self.verticalLayout.addItem(spacerItem)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setSpacing(6)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
#
# Group box model info
#
self.groupBox_model_info = QtWidgets.QGroupBox(QCvWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox_model_info.sizePolicy().hasHeightForWidth())
self.groupBox_model_info.setSizePolicy(sizePolicy)
self.groupBox_model_info.setObjectName("groupBox_model_info")
self.verticalLayoutWidget = QtWidgets.QWidget(self.groupBox_model_info)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(10, 270, 151, 81))
self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout_2.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
self.verticalLayout_2.setContentsMargins(11, 11, 11, 11)
self.verticalLayout_2.setSpacing(6)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.comboBox = QtWidgets.QComboBox(self.verticalLayoutWidget)
self.comboBox.setObjectName("comboBox")
self.comboBox.addItem("")
self.comboBox.addItem("")
self.comboBox.addItem("")
self.comboBox.addItem("")
self.verticalLayout_2.addWidget(self.comboBox)
self.pushButton_reset = QtWidgets.QPushButton(self.verticalLayoutWidget)
self.pushButton_reset.setObjectName("pushButton_reset")
self.verticalLayout_2.addWidget(self.pushButton_reset)
self.verticalLayoutWidget_3 = QtWidgets.QWidget(self.groupBox_model_info)
self.verticalLayoutWidget_3.setGeometry(QtCore.QRect(9, 29, 411, 231))
self.verticalLayoutWidget_3.setObjectName("verticalLayoutWidget_3")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_3)
self.verticalLayout_4.setContentsMargins(11, 11, 11, 11)
self.verticalLayout_4.setSpacing(6)
self.verticalLayout_4.setObjectName("verticalLayout_4")
#
# Second graph widget
#
self.graphWidget_2 = pg.PlotWidget(self.verticalLayoutWidget_3)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.graphWidget_2.sizePolicy().hasHeightForWidth())
self.graphWidget_2.setSizePolicy(sizePolicy)
self.graphWidget_2.setObjectName("graphicsView_2")
self.verticalLayout_4.addWidget(self.graphWidget_2)
self.gridLayoutWidget = QtWidgets.QWidget(self.groupBox_model_info)
self.gridLayoutWidget.setGeometry(QtCore.QRect(170, 270, 251, 80))
self.gridLayoutWidget.setObjectName("gridLayoutWidget")
self.gridLayout_6 = QtWidgets.QGridLayout(self.gridLayoutWidget)
self.gridLayout_6.setContentsMargins(11, 11, 11, 11)
self.gridLayout_6.setSpacing(6)
self.gridLayout_6.setObjectName("gridLayout_6")
self.label_base_rep = QtWidgets.QLabel(self.gridLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_base_rep.sizePolicy().hasHeightForWidth())
self.label_base_rep.setSizePolicy(sizePolicy)
self.label_base_rep.setText("")
self.label_base_rep.setAlignment(QtCore.Qt.AlignCenter)
self.label_base_rep.setObjectName("label_base_rep")
self.gridLayout_6.addWidget(self.label_base_rep, 0, 1, 1, 1)
self.label_base_rep_txt = QtWidgets.QLabel(self.gridLayoutWidget)
self.label_base_rep_txt.setObjectName("label_base_rep_txt")
self.gridLayout_6.addWidget(self.label_base_rep_txt, 0, 0, 1, 1)
self.label_immunity_txt = QtWidgets.QLabel(self.gridLayoutWidget)
self.label_immunity_txt.setObjectName("label_immunity_txt")
self.gridLayout_6.addWidget(self.label_immunity_txt, 1, 0, 1, 1)
self.label_immunity = QtWidgets.QLabel(self.gridLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_immunity.sizePolicy().hasHeightForWidth())
self.label_immunity.setSizePolicy(sizePolicy)
self.label_immunity.setText("")
self.label_immunity.setAlignment(QtCore.Qt.AlignCenter)
self.label_immunity.setObjectName("label_immunity")
self.gridLayout_6.addWidget(self.label_immunity, 1, 1, 1, 1)
self.horizontalLayout_2.addWidget(self.groupBox_model_info)
#
# Group box paramters
#
self.groupBox_model_prm = QtWidgets.QGroupBox(QCvWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox_model_prm.sizePolicy().hasHeightForWidth())
self.groupBox_model_prm.setSizePolicy(sizePolicy)
self.groupBox_model_prm.setObjectName("groupBox_model_prm")
self.gridLayout = QtWidgets.QGridLayout(self.groupBox_model_prm)
self.gridLayout.setContentsMargins(11, 11, 11, 11)
self.gridLayout.setSpacing(6)
self.gridLayout.setObjectName("gridLayout")
self.spinBox_mu_d = QtWidgets.QDoubleSpinBox(self.groupBox_model_prm)
self.spinBox_mu_d.setDecimals(3)
self.spinBox_mu_d.setMinimum(0.0)
self.spinBox_mu_d.setMaximum(1.0)
self.spinBox_mu_d.setSingleStep(0.001)
self.spinBox_mu_d.setProperty("value", 0.01)
self.spinBox_mu_d.setObjectName("spinBox_mu_d")
self.gridLayout.addWidget(self.spinBox_mu_d, 12, 2, 1, 1)
self.label_mu = QtWidgets.QLabel(self.groupBox_model_prm)
self.label_mu.setObjectName("label_mu")
self.gridLayout.addWidget(self.label_mu, 5, 1, 1, 1)
self.label_N = QtWidgets.QLabel(self.groupBox_model_prm)
self.label_N.setObjectName("label_N")
self.gridLayout.addWidget(self.label_N, 8, 1, 1, 1)
self.label_beta = QtWidgets.QLabel(self.groupBox_model_prm)
self.label_beta.setObjectName("label_beta")
self.gridLayout.addWidget(self.label_beta, 7, 1, 1, 1)
self.label_gamma = QtWidgets.QLabel(self.groupBox_model_prm)
self.label_gamma.setObjectName("label_gamma")
self.gridLayout.addWidget(self.label_gamma, 4, 1, 1, 1)
self.label_nu = QtWidgets.QLabel(self.groupBox_model_prm)
self.label_nu.setObjectName("label_nu")
self.gridLayout.addWidget(self.label_nu, 6, 1, 1, 1)
self.spinBox_N = QtWidgets.QSpinBox(self.groupBox_model_prm)
self.spinBox_N.setMaximum(100000000)
self.spinBox_N.setSingleStep(10000)
self.spinBox_N.setProperty("value", 83000000)
self.spinBox_N.setObjectName("spinBox_N")
self.gridLayout.addWidget(self.spinBox_N, 8, 2, 1, 1)
self.label_tmax = QtWidgets.QLabel(self.groupBox_model_prm)
self.label_tmax.setObjectName("label_tmax")
self.gridLayout.addWidget(self.label_tmax, 9, 1, 1, 1)
self.spinBox_gamma = QtWidgets.QDoubleSpinBox(self.groupBox_model_prm)
self.spinBox_gamma.setDecimals(3)
self.spinBox_gamma.setSingleStep(0.01)
self.spinBox_gamma.setProperty("value", 0.083)
self.spinBox_gamma.setObjectName("spinBox_gamma")
self.gridLayout.addWidget(self.spinBox_gamma, 4, 2, 1, 1)
self.spinBox_mu = QtWidgets.QDoubleSpinBox(self.groupBox_model_prm)
self.spinBox_mu.setDecimals(4)
self.spinBox_mu.setMaximum(0.1)
self.spinBox_mu.setSingleStep(0.0001)
self.spinBox_mu.setObjectName("spinBox_mu")
self.gridLayout.addWidget(self.spinBox_mu, 5, 2, 1, 1)
self.spinBox_nu = QtWidgets.QDoubleSpinBox(self.groupBox_model_prm)
self.spinBox_nu.setDecimals(4)
self.spinBox_nu.setMaximum(0.1)
self.spinBox_nu.setSingleStep(0.0001)
self.spinBox_nu.setObjectName("spinBox_nu")
self.gridLayout.addWidget(self.spinBox_nu, 6, 2, 1, 1)
self.spinBox_beta = QtWidgets.QDoubleSpinBox(self.groupBox_model_prm)
self.spinBox_beta.setSingleStep(0.01)
self.spinBox_beta.setProperty("value", 0.45)
self.spinBox_beta.setObjectName("spinBox_beta")
self.gridLayout.addWidget(self.spinBox_beta, 7, 2, 1, 1)
self.spinBox_tmax = QtWidgets.QSpinBox(self.groupBox_model_prm)
self.spinBox_tmax.setMinimum(5)
self.spinBox_tmax.setMaximum(5000)
self.spinBox_tmax.setSingleStep(5)
self.spinBox_tmax.setProperty("value", 365)
self.spinBox_tmax.setObjectName("spinBox_tmax")
self.gridLayout.addWidget(self.spinBox_tmax, 9, 2, 1, 1)
self.spinBox_i0 = QtWidgets.QDoubleSpinBox(self.groupBox_model_prm)
self.spinBox_i0.setMaximum(9999.99)
self.spinBox_i0.setProperty("value", 5.83)
self.spinBox_i0.setObjectName("spinBox_i0")
self.gridLayout.addWidget(self.spinBox_i0, 10, 2, 1, 1)
self.spinBox_r0 = QtWidgets.QDoubleSpinBox(self.groupBox_model_prm)
self.spinBox_r0.setMaximum(99999.99)
self.spinBox_r0.setSingleStep(0.01)
self.spinBox_r0.setObjectName("spinBox_r0")
self.gridLayout.addWidget(self.spinBox_r0, 11, 2, 1, 1)
self.label_i0 = QtWidgets.QLabel(self.groupBox_model_prm)
self.label_i0.setObjectName("label_i0")
self.gridLayout.addWidget(self.label_i0, 10, 1, 1, 1)
self.label_r0 = QtWidgets.QLabel(self.groupBox_model_prm)
self.label_r0.setObjectName("label_r0")
self.gridLayout.addWidget(self.label_r0, 11, 1, 1, 1)
self.label_mu_d = QtWidgets.QLabel(self.groupBox_model_prm)
self.label_mu_d.setObjectName("label_mu_d")
self.gridLayout.addWidget(self.label_mu_d, 12, 1, 1, 1)
self.label_a = QtWidgets.QLabel(self.groupBox_model_prm)
self.label_a.setObjectName("label_a")
self.gridLayout.addWidget(self.label_a, 13, 1, 1, 1)
self.spinBox_a = QtWidgets.QDoubleSpinBox(self.groupBox_model_prm)
self.spinBox_a.setDecimals(2)
self.spinBox_a.setMinimum(0.01)
self.spinBox_a.setSingleStep(0.01)
self.spinBox_a.setProperty("value", 1.0)
self.spinBox_a.setObjectName("spinBox_a")
self.gridLayout.addWidget(self.spinBox_a, 13, 2, 1, 1)
self.horizontalLayout_2.addWidget(self.groupBox_model_prm)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.gridLayout_2.addLayout(self.verticalLayout, 0, 0, 1, 1)
#
# Rename some widgets
#
self.retranslateUi(QCvWidget)
QtCore.QMetaObject.connectSlotsByName(QCvWidget)
#
# initial values
#
self.i0 = self.spinBox_i0.value()
self.e0 = 0.0
self.r0 = self.spinBox_r0.value()
self.s0 = self.spinBox_N.value() - self.i0 - self.r0
self.d0 = 0.0
self.y0 = [self.s0, self.i0, self.r0]
self.tspan = np.linspace(0, self.spinBox_tmax.value(), self.spinBox_tmax.value()*3)
self.beta = self.spinBox_beta.value()
self.gamma = self.spinBox_gamma.value()
self.mu = self.spinBox_mu.value()
self.nu = self.spinBox_nu.value()
self.N = self.spinBox_N.value()
self.mu_d = self.spinBox_mu_d.value()
self.a = 1/self.spinBox_a.value()
#
# Callbacks
#
self.spinBox_beta.valueChanged.connect(self.callback_change_generic_parameter)
self.spinBox_gamma.valueChanged.connect(self.callback_change_generic_parameter)
self.spinBox_mu.valueChanged.connect(self.callback_change_generic_parameter)
self.spinBox_nu.valueChanged.connect(self.callback_change_generic_parameter)
self.spinBox_N.valueChanged.connect(self.callback_change_generic_parameter)
self.spinBox_mu_d.valueChanged.connect(self.callback_change_generic_parameter)
self.spinBox_a.valueChanged.connect(self.callback_change_generic_parameter)
self.spinBox_tmax.valueChanged.connect(self.callback_change_tmax)
self.spinBox_i0.valueChanged.connect(self.callback_change_s0)
self.spinBox_r0.valueChanged.connect(self.callback_change_s0)
self.comboBox.currentIndexChanged.connect(self.callback_change_model_id)
self.pushButton_reset.clicked.connect(self.callback_reset_parameters)
#
# Local variables
#
self.initial_run = True
self.plot_s_ref = []
self.plot_e_ref = []
self.plot_i_ref = []
self.plot_r_ref = []
self.plot_d_ref = []
self.plot_N_ref = []
self.plot_repro_rate = []
self.plot_legend = []
self.solution = []
self.repro_rate = []
self.N_of_t = []
self.model_id = 0
#
# Start
#
self.callback_solve()
self.plot()
self.plot_diagnostics()
def retranslateUi(self, QCvWidget):
_translate = QtCore.QCoreApplication.translate
QCvWidget.setWindowTitle(_translate("QCvWidget", "SIR Models"))
self.groupBox_model_info.setTitle(_translate("QCvWidget", "model info"))
self.comboBox.setItemText(0, _translate("QCvWidget", "SIR model"))
self.comboBox.setItemText(1, _translate("QCvWidget", "SIRD model"))
self.comboBox.setItemText(2, _translate("QCvWidget", "SEIR model"))
self.comboBox.setItemText(3, _translate("QCvWidget", "SEIRD model"))
self.pushButton_reset.setText(_translate("QCvWidget", "reset values"))
self.label_base_rep_txt.setText(_translate("QCvWidget", "base reproduction number ="))
self.label_immunity_txt.setText(_translate("QCvWidget", "group immunity threshold ="))
self.groupBox_model_prm.setTitle(_translate("QCvWidget", "model parameters"))
self.label_mu.setText(_translate("QCvWidget", "natural mortality rate per day (mu)"))
self.label_N.setText(_translate("QCvWidget", "population size"))
self.label_beta.setText(_translate("QCvWidget", "infection rate (beta)"))
self.label_gamma.setText(_translate("QCvWidget", "recovery rate (gamma)"))
self.label_nu.setText(_translate("QCvWidget", "birth rate per person (nu)"))
self.label_tmax.setText(_translate("QCvWidget", "time span (time unit)"))
self.label_i0.setText(_translate("QCvWidget", "initial infections"))
self.label_r0.setText(_translate("QCvWidget", "initial recoveries"))
self.label_mu_d.setText(_translate("QCvWidget", "S(E)IRD only: disease mortality rate per day (mu_d)"))
self.label_a.setText(_translate("QCvWidget", "SEIR(D) only: medium latency time (days)"))
def callback_change_model_id(self, model_index):
self.model_id = model_index
self.callback_change_s0(0)
def callback_change_generic_parameter(self, new_value):
self.beta = self.spinBox_beta.value()
self.gamma = self.spinBox_gamma.value()
self.mu = self.spinBox_mu.value()
self.nu = self.spinBox_nu.value()
self.N = self.spinBox_N.value()
self.mu_d = self.spinBox_mu_d.value()
self.a = 1/self.spinBox_a.value()
self.callback_solve()
self.plot()
self.plot_diagnostics()
def callback_reset_parameters(self):
#
# Reset spinbox values
#
self.spinBox_beta.setValue(0.45)
self.spinBox_gamma.setValue(0.083)
self.spinBox_mu.setValue(0)
self.spinBox_nu.setValue(0)
self.spinBox_N.setValue(83000000)
self.spinBox_mu_d.setValue(0.01)
self.spinBox_a.setValue(1)
self.spinBox_tmax.setValue(365)
self.spinBox_i0.setValue(5.83)
self.spinBox_r0.setValue(0)
#
# Reset internal data
#
self.beta = self.spinBox_beta.value()
self.gamma = self.spinBox_gamma.value()
self.mu = self.spinBox_mu.value()
self.nu = self.spinBox_nu.value()
self.N = self.spinBox_N.value()
self.mu_d = self.spinBox_mu_d.value()
self.a = 1/self.spinBox_a.value()
if self.model_id == 0:
self.y0 = [self.s0, self.i0, self.r0]
elif self.model_id == 1:
self.y0 = [self.s0, self.i0, self.r0, self.d0]
elif self.model_id == 2:
self.y0 = [self.s0, self.e0, self.i0, self.r0]
elif self.model_id == 3:
self.y0 = [self.s0, self.e0, self.i0, self.r0, self.d0]
self.tspan = np.linspace(0, self.spinBox_tmax.value(), self.spinBox_tmax.value()*3)
self.i0 = self.spinBox_i0.value()
self.r0 = self.spinBox_r0.value()
self.s0 = self.spinBox_N.value() - self.i0 - self.r0
self.callback_solve()
self.plot()
self.plot_diagnostics()
def callback_change_tmax(self, new_value):
self.tspan = np.linspace(0, self.spinBox_tmax.value(), self.spinBox_tmax.value()*3)
self.callback_solve()
self.plot()
self.plot_diagnostics()
def callback_change_s0(self, dummy_new_value):
self.i0 = self.spinBox_i0.value()
self.r0 = self.spinBox_r0.value()
self.s0 = self.spinBox_N.value() - self.i0 - self.r0
if self.model_id == 0:
self.y0 = [self.s0, self.i0, self.r0]
elif self.model_id == 1:
self.y0 = [self.s0, self.i0, self.r0, self.d0]
elif self.model_id == 2:
self.y0 = [self.s0, self.e0, self.i0, self.r0]
elif self.model_id == 3:
self.y0 = [self.s0, self.e0, self.i0, self.r0, self.d0]
self.callback_solve()
self.plot()
self.plot_diagnostics()
def callback_solve(self):
if self.initial_run == False:
if float(pg.__version__[0:4]) < 0.11:
self.plot_legend.scene().removeItem(self.plot_legend)
else:
self.plot_legend.clear()
else:
# After first solve we need to set this to false
self.initial_run = False
if self.model_id == 0:
self.solution = odeint(SIR_function,
self.y0,
self.tspan,
args=(self.N, self.beta, self.gamma, self.mu, self.nu))
self.N_of_t = np.sum(self.solution,1)
print("SIR model solved...")
elif self.model_id == 1:
self.solution = odeint(SIRD_function,
self.y0,
self.tspan,
args=(self.N, self.beta, self.gamma, self.mu, self.nu, self.mu_d))
self.N_of_t = np.sum(self.solution[:,:-1],1)
print("SIRD model solved...")
elif self.model_id == 2:
self.solution = odeint(SEIR_function,
self.y0,
self.tspan,
args=(self.N, self.beta, self.gamma, self.mu, self.nu, self.a))
self.N_of_t = np.sum(self.solution,1)
print("SEIR model solved...")
elif self.model_id == 3:
self.solution = odeint(SEIRD_function,
self.y0,
self.tspan,
args=(self.N, self.beta, self.gamma, self.mu, self.nu, self.a, self.mu_d))
self.N_of_t = np.sum(self.solution[:,:-1],1)
print("SEIRD model solved...")
base_rep = f"{self.beta/self.gamma:.2f}"
herd_immununity_threshold = f"{1-1/(self.beta/self.gamma):.2f}"
self.label_base_rep.setText(base_rep)
self.label_immunity.setText(herd_immununity_threshold)
self.repro_rate = self.solution[:,0]/self.N_of_t * (self.beta/self.gamma)
def plot(self):
self.graphWidget.setBackground("w")
self.graphWidget.setLabel("left", "number of people", color="red", size=30)
self.graphWidget.setLabel("bottom", "time (days)", color="red", size=30)
self.graphWidget.showGrid(x=True, y=True)
self.graphWidget.setXRange(0, self.spinBox_tmax.value()*1.05, padding=0)
self.graphWidget.setYRange(0, np.max(self.N_of_t)*1.05, padding=0)
if self.model_id == 0:
self.plot_s_ref.clear()
self.plot_e_ref.clear()
self.plot_i_ref.clear()
self.plot_r_ref.clear()
self.plot_d_ref.clear()
self.plot_N_ref.clear()
self.graphWidget.addLegend(offset=(-10,10))
self.plot_legend = self.graphWidget.getPlotItem().legend
self.plot_s_ref = self.graphWidget.plot(self.tspan,
self.solution[:,0],
name="suspectible",
pen=pg.mkPen(color="b", width=3, style=QtCore.Qt.SolidLine))
self.plot_i_ref = self.graphWidget.plot(self.tspan,
self.solution[:,1],
name="infected",
pen=pg.mkPen(color="r", width=3, style=QtCore.Qt.SolidLine))
self.plot_r_ref = self.graphWidget.plot(self.tspan,
self.solution[:,2],
name="removed",
pen=pg.mkPen(color="g", width=3, style=QtCore.Qt.SolidLine))
self.plot_N_ref = self.graphWidget.plot(self.tspan,
self.N_of_t,
name="population (all)",
pen=pg.mkPen(color="y", width=3, style=QtCore.Qt.SolidLine))
elif self.model_id == 1:
self.plot_s_ref.clear()
self.plot_e_ref.clear()
self.plot_i_ref.clear()
self.plot_r_ref.clear()
self.plot_d_ref.clear()
self.plot_N_ref.clear()
self.graphWidget.addLegend(offset=(-10,10))
self.plot_legend = self.graphWidget.getPlotItem().legend
self.plot_s_ref = self.graphWidget.plot(self.tspan,
self.solution[:,0],
name="suspectible",
pen=pg.mkPen(color="b", width=3, style=QtCore.Qt.SolidLine))
self.plot_i_ref = self.graphWidget.plot(self.tspan,
self.solution[:,1],
name="infected",
pen=pg.mkPen(color="r", width=3, style=QtCore.Qt.SolidLine))
self.plot_r_ref = self.graphWidget.plot(self.tspan,
self.solution[:,2],
name="recovered",
pen=pg.mkPen(color="g", width=3, style=QtCore.Qt.SolidLine))
self.plot_d_ref = self.graphWidget.plot(self.tspan,
self.solution[:,3],
name="deaths",
pen=pg.mkPen(color="k", width=3, style=QtCore.Qt.SolidLine))
self.plot_N_ref = self.graphWidget.plot(self.tspan,
self.N_of_t,
name="population (all)",
pen=pg.mkPen(color="y", width=3, style=QtCore.Qt.SolidLine))
elif self.model_id == 2:
self.plot_s_ref.clear()
self.plot_e_ref.clear()
self.plot_i_ref.clear()
self.plot_r_ref.clear()
self.plot_d_ref.clear()
self.plot_N_ref.clear()
self.graphWidget.addLegend(offset=(-10,10))
self.plot_legend = self.graphWidget.getPlotItem().legend
self.plot_s_ref = self.graphWidget.plot(self.tspan,
self.solution[:,0],
name="suspectible",
pen=pg.mkPen(color="b", width=3, style=QtCore.Qt.SolidLine))
self.plot_e_ref = self.graphWidget.plot(self.tspan,
self.solution[:,1],
name="exposed (not infectious)",
pen=pg.mkPen(color="c", width=3, style=QtCore.Qt.SolidLine))
self.plot_i_ref = self.graphWidget.plot(self.tspan,
self.solution[:,2],
name="infectious",
pen=pg.mkPen(color="r", width=3, style=QtCore.Qt.SolidLine))
self.plot_r_ref = self.graphWidget.plot(self.tspan,
self.solution[:,3],
name="removed",
pen=pg.mkPen(color="g", width=3, style=QtCore.Qt.SolidLine))
self.plot_N_ref = self.graphWidget.plot(self.tspan,
self.N_of_t,
name="population (all)",
pen=pg.mkPen(color="y", width=3, style=QtCore.Qt.SolidLine))
elif self.model_id == 3:
self.plot_s_ref.clear()
self.plot_e_ref.clear()
self.plot_i_ref.clear()
self.plot_r_ref.clear()
self.plot_d_ref.clear()
self.plot_N_ref.clear()
self.graphWidget.addLegend(offset=(-10,10))
self.plot_legend = self.graphWidget.getPlotItem().legend
self.plot_s_ref = self.graphWidget.plot(self.tspan,
self.solution[:,0],
name="suspectible",
pen=pg.mkPen(color="b", width=3, style=QtCore.Qt.SolidLine))
self.plot_e_ref = self.graphWidget.plot(self.tspan,
self.solution[:,1],
name="exposed (not infectious)",
pen=pg.mkPen(color="c", width=3, style=QtCore.Qt.SolidLine))
self.plot_i_ref = self.graphWidget.plot(self.tspan,
self.solution[:,2],
name="infectious",
pen=pg.mkPen(color="r", width=3, style=QtCore.Qt.SolidLine))
self.plot_r_ref = self.graphWidget.plot(self.tspan,
self.solution[:,3],
name="removed",
pen=pg.mkPen(color="g", width=3, style=QtCore.Qt.SolidLine))
self.plot_d_ref = self.graphWidget.plot(self.tspan,
self.solution[:,4],
name="deaths",
pen=pg.mkPen(color="k", width=3, style=QtCore.Qt.SolidLine))
self.plot_N_ref = self.graphWidget.plot(self.tspan,
self.N_of_t,
name="population (all)",
pen=pg.mkPen(color="y", width=3, style=QtCore.Qt.SolidLine))
def plot_diagnostics(self):
self.graphWidget_2.setBackground("w")
self.graphWidget_2.setLabel("left", "reproduction number", color="red", size=30)
self.graphWidget_2.setLabel("bottom", "time (days)", color="red", size=30)
self.graphWidget_2.showGrid(x=True, y=True)
self.graphWidget_2.setXRange(0, self.spinBox_tmax.value()*1.05, padding=0)
        self.graphWidget_2.setYRange(0, np.max(self.repro_rate)*1.05, padding=0)
'''
This file contains the functions to generate and store a database of
interdependenct random networks
<NAME> - Last updated: 03/04/2018
'''
import Network_Data_Generator
import numpy as np
import os
import random
import math
import sys
import shutil
import networkx as nx
def topo_param(net_type, no_nodes):
if net_type == 'grid':
grid_size_x = np.random.randint(low=3, high=10)
grid_size_y = np.random.randint(low=3, high=10)
while grid_size_y*grid_size_x<=no_nodes:
grid_size_y += 1
while grid_size_y*grid_size_x>no_nodes:
grid_size_y -= 1
no_nodes = int(round(grid_size_x*grid_size_y))
topo_param = grid_size_x
mean_num_arcs = 2*no_nodes-grid_size_x-no_nodes//grid_size_x
assert isinstance(mean_num_arcs, int), 'number of arcs is not an integer'
assert mean_num_arcs>0, 'number of arcs is <= 0'
if net_type == 'random':
# Existence Probability of each arc in the random network
# The lower bound corresponds to the lower bound of the supercritical Regime
# and np.log(noNodes)/noNodes in the upper bound corresponds to the
# lower bound of the connected regime
prob_LB = 1.0/no_nodes
prob_UB = (np.log(no_nodes)/no_nodes+1)*0.5
arc_prob = np.random.uniform(low=prob_LB, high=prob_UB)
topo_param = arc_prob
mean_num_arcs = 0.5*no_nodes*(no_nodes-1)*arc_prob
if net_type == 'scalefree':
# Exponent of the powerlaw of node degee distribution
# whose bounds correspond to Ultra-Small World regime.
expLB = 2.001
expUB = 2.999
exp = np.random.uniform(low=expLB, high=expUB)
topo_param = exp
mean_num_arcs = no_nodes*(no_nodes**(1/(exp-1)))*0.5
if net_type == 'tree':
# The ratio of diameter to number of arcs=n-1
temp = []
for i in range(100):
G = nx.generators.trees.random_tree(no_nodes)
temp.append(nx.algorithms.distance_measures.diameter(G))
diam = random.choice(temp)
topo_param = diam/(no_nodes-1)
mean_num_arcs = no_nodes-1
if net_type == 'mpg':
topo_param = 0
mean_num_arcs = 3*no_nodes-6
return topo_param, no_nodes, mean_num_arcs
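# Hedged example of the helper above (numbers are illustrative): for a 30-node random graph,
#   p, n, m = topo_param('random', 30)
# returns the sampled arc probability p, the (possibly adjusted) node count n,
# and the expected number of arcs m = 0.5*n*(n-1)*p.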
# Input values
no_samples = 5 # Number of sample sets of network
no_config = 100 # Number of configurations
noZones = 4 # noZones by noZones tile of zones
paramError = 0.1
rootfolder = '/home/hesam/Desktop/Files/Generated_Network_Dataset_v4.1/' # Root folder where the database is
#'C:\\Users\\ht20\Documents\\Files\Generated_Network_Dataset_v3.1\\'
rootfolder += 'GeneralNetworks/' #'GridNetworks/' # choose relevant dataset folder
prefix = 'GEN'
if not os.path.exists(rootfolder):
os.makedirs(rootfolder)
# The text file which stores the information of each configurations
fileNameList = rootfolder+'List_of_Configurations.txt'
# Saving network data in the text files
fList = open(fileNameList,"a+")
header = 'Config Number\t No. Layers\t No. Nodes\t Topology Parameter\t Interconnection Prob'+\
'\t Damage Prob\t Resource Cap \t Net Types\n'
fList.write(header)
fList.close()
net_type ={}
no_nodes_dict = {}
int_prob_dict={}
topo_param_dict = {}
dam_prob_dict = {}
mean_dam_nodes = {}
mean_dam_arcs = {}
cnfg = 0
while cnfg<no_config:
write_config = True
# Number of layers
no_layers = np.random.randint(low=2, high=3)
# MEAN number of nodes of the random network (same for both networks)
no_nodes = np.random.randint(low=10, high=50)
# MEAN Existence Probability of each interconnection (among all possible pairs)
# in the interdependent random networks
int_prob = np.random.uniform(low=0.001, high=0.05)
# MEAN Probability of damage of each node or arc in the random networks
# Bounds are chosen roughly based on INDP data for Shelby county associated with
# M6 (0.05) - M9 (0.5) scenarios
dam_prob = np.random.uniform(low=0.05, high=0.5)
for k in range(1,no_layers+1):
# Choose a network type randomly
net_type[k] = random.choice(['grid','scalefree','random', 'tree', 'mpg'])
no_nodes_dict[k] = int(round(no_nodes*(1+np.random.normal(0, paramError))))
topo_param_dict[k], no_nodes_dict[k], mean_dam_arcs[k] = topo_param(net_type=net_type[k],
no_nodes=no_nodes_dict[k])
        dam_prob_dict[k] = dam_prob*(1+np.random.normal(0, paramError))
import numpy as np
import os,sys,time
import torch
import icp
# import camera
"icp for ios_logger and optitrack"
# Constants
N = 10                # number of points in the data set
num_tests = 10        # number of repeated test runs
dim = 3               # dimension of each data point
noise_sigma = .01     # standard deviation of the added noise
translation = .1      # maximum translation of the test set
rotation = .1         # maximum rotation angle of the test set
def rotation_matrix(axis, theta):
axis = axis/np.sqrt(np.dot(axis, axis))
a = np.cos(theta/2.)
    b, c, d = -axis*np.sin(theta/2.)
    # Euler-Rodrigues formula (assumed completion; the snippet was truncated here)
    return np.array([[a*a+b*b-c*c-d*d, 2*(b*c+a*d), 2*(b*d-a*c)],
                     [2*(b*c-a*d), a*a+c*c-b*b-d*d, 2*(c*d+a*b)],
                     [2*(b*d+a*c), 2*(c*d-a*b), a*a+d*d-b*b-c*c]])
import matplotlib.pyplot as plt
import numpy as np
def plot_image(image, shape=[256, 256], cmap="Greys_r"):
plt.imshow(image.reshape(shape), cmap=cmap, interpolation="nearest")
plt.axis("off")
plt.show()
def movingaverage(values,window):
    weights = np.repeat(1.0,window)/window
    return np.convolve(values, weights, 'valid')  # assumed completion of the truncated snippet
import torch
import torch.nn as nn
import numpy as np
import scipy.io as scio
import os
import matplotlib.pyplot as plt
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
torch.manual_seed(1)
np.random.seed(1)
lapl_op = [[[[ 0, 0, -1/12, 0, 0],
[ 0, 0, 4/3, 0, 0],
[-1/12, 4/3, -5, 4/3, -1/12],
[ 0, 0, 4/3, 0, 0],
[ 0, 0, -1/12, 0, 0]]]]
# ============ define relevant functions =============
# https://github.com/benmaier/reaction-diffusion/blob/master/gray_scott.ipynb
def apply_laplacian(mat, dx = 0.01):
# dx is inversely proportional to N
"""This function applies a discretized Laplacian
in periodic boundary conditions to a matrix
For more information see
https://en.wikipedia.org/wiki/Discrete_Laplace_operator#Implementation_via_operator_discretization
"""
    # the center cell carries weight -5 in this fourth-order stencil
    neigh_mat = -5*mat.copy()
# Each direct neighbor on the lattice is counted in
# the discrete difference formula
neighbors = [
( 4/3, (-1, 0) ),
( 4/3, ( 0,-1) ),
( 4/3, ( 0, 1) ),
( 4/3, ( 1, 0) ),
(-1/12, (-2, 0)),
(-1/12, (0, -2)),
(-1/12, (0, 2)),
(-1/12, (2, 0)),
]
# shift matrix according to demanded neighbors
# and add to this cell with corresponding weight
for weight, neigh in neighbors:
neigh_mat += weight * np.roll(mat, neigh, (0,1))
return neigh_mat/dx**2
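# Quick sanity check (hypothetical helper, added for clarity): the stencil
# weights sum to zero (-5 + 4*(4/3) + 4*(-1/12) = 0), so the periodic Laplacian
# of a constant field must vanish to numerical precision.
def _check_laplacian_of_constant(n=16, dx=0.01, tol=1e-8):
    residual = np.abs(apply_laplacian(np.ones((n, n)), dx)).max()
    assert residual < tol, "Laplacian of a constant field should be ~0"
    return residual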
# Define the update formula for chemicals A and B
def update(A, B, DA, DB, f, k, delta_t):
"""Apply the Gray-Scott update formula"""
# compute the diffusion part of the update
diff_A = DA * apply_laplacian(A)
diff_B = DB * apply_laplacian(B)
# Apply chemical reaction
reaction = A*B**2
diff_A -= reaction
diff_B += reaction
# Apply birth/death
diff_A += f * (1-A)
diff_B -= (k+f) * B
A += diff_A * delta_t
B += diff_B * delta_t
return A, B
def GetEachTerm(A, B, DA, DB, f, k, delta_t, dx):
lap_A = DA * apply_laplacian(A,dx)
lap_B = DB * apply_laplacian(B,dx)
# Apply chemical reaction
reaction = A * B ** 2
# Apply birth/death, linear term
lin_A = f * (1 - A)
lin_B = (k + f) * B
return lap_A, lap_B, reaction, lin_A, lin_B
def update_rk4(A0, B0, DA, DB, f, k, delta_t, dx):
"""Update with Runge-kutta-4 method
See https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods
"""
############# Stage 1 ##############
# compute the diffusion part of the update
diff_A = DA * apply_laplacian(A0, dx)
diff_B = DB * apply_laplacian(B0, dx)
# Apply chemical reaction
reaction = A0 * B0 ** 2
diff_A -= reaction
diff_B += reaction
# Apply birth/death
diff_A += f * (1 - A0)
diff_B -= (k + f) * B0
K1_a = diff_A
K1_b = diff_B
############# Stage 1 ##############
A1 = A0 + K1_a * delta_t/2.0
B1 = B0 + K1_b * delta_t/2.0
diff_A = DA * apply_laplacian(A1, dx)
diff_B = DB * apply_laplacian(B1, dx)
# Apply chemical reaction
reaction = A1 * B1 ** 2
diff_A -= reaction
diff_B += reaction
# Apply birth/death
diff_A += f * (1 - A1)
diff_B -= (k + f) * B1
K2_a = diff_A
K2_b = diff_B
############# Stage 2 ##############
A2 = A0 + K2_a * delta_t/2.0
B2 = B0 + K2_b * delta_t/2.0
diff_A = DA * apply_laplacian(A2, dx)
diff_B = DB * apply_laplacian(B2, dx)
# Apply chemical reaction
reaction = A2 * B2 ** 2
diff_A -= reaction
diff_B += reaction
# Apply birth/death
diff_A += f * (1 - A2)
diff_B -= (k + f) * B2
K3_a = diff_A
K3_b = diff_B
############# Stage 3 ##############
A3 = A0 + K3_a * delta_t
B3 = B0 + K3_b * delta_t
diff_A = DA * apply_laplacian(A3, dx)
diff_B = DB * apply_laplacian(B3, dx)
# Apply chemical reaction
reaction = A3 * B3 ** 2
diff_A -= reaction
diff_B += reaction
# Apply birth/death
diff_A += f * (1 - A3)
diff_B -= (k + f) * B3
K4_a = diff_A
K4_b = diff_B
# Final solution
A = A0 + delta_t*(K1_a+2*K2_a+2*K3_a+K4_a)/6.0
B = B0 + delta_t*(K1_b+2*K2_b+2*K3_b+K4_b)/6.0
return A, B
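# Consistency sketch (hypothetical helper, added for clarity): for a small time
# step a single forward-Euler step and a single RK4 step should differ only at
# O(delta_t**2); note that `update` mutates its inputs, hence the copies.
def _compare_euler_and_rk4_step(n=32, delta_t=1e-4, dx=0.01):
    A0, B0 = get_initial_A_and_B(n, random_influence=0.1)
    A_e, B_e = update(A0.copy(), B0.copy(), 0.16, 0.08, 0.06, 0.062, delta_t)
    A_r, B_r = update_rk4(A0.copy(), B0.copy(), 0.16, 0.08, 0.06, 0.062, delta_t, dx)
    return np.abs(A_e - A_r).max(), np.abs(B_e - B_r).max()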
def get_initial_A_and_B(N, random_influence = 0.2):
"""get the initial chemical concentrations"""
# get initial homogeneous concentrations
A = (1-random_influence) * np.ones((N,N))
B = np.zeros((N,N))
# put some noise on there
A += random_influence * np.random.random((N,N))
B += random_influence * np.random.random((N,N))
# initial disturbance
N1, N2, N3 = N//4-4, N//2, 3*N//4
r = int(N/10.0)
# initial disturbance 1
A[N1-r:N1+r, N1-r:N1+r] = 0.50
B[N1-r:N1+r, N1-r:N1+r] = 0.25
# # initial disturbance 2
# A[N1-r:N1+r, N3-r:N3+r] = 0.50
# B[N1-r:N1+r, N3-r:N3+r] = 0.25
#
# # initial disturbance 3
# A[N3-r:N3+r, N3-r:N3+r] = 0.50
# B[N3-r:N3+r, N3-r:N3+r] = 0.25
#
# # initial disturbance 4
# A[N3-r:N3+r, N1-r:N1+r] = 0.50
# B[N3-r:N3+r, N1-r:N1+r] = 0.25
# initial disturbance 5
A[N2-r:N2+r, N2-r:N2+r] = 0.50
B[N2-r:N2+r, N2-r:N2+r] = 0.25
#
# # initial disturbance 6
# A[N2-r:N2+r, N3-r:N3+r] = 0.50
# B[N2-r:N2+r, N3-r:N3+r] = 0.25
return A, B
def postProcess(output, N, xmin, xmax, ymin, ymax, num, batch, save_path):
''' num: Number of time step
'''
x = np.linspace(xmin, xmax, N+1)[:-1]
y = np.linspace(ymin, ymax, N+1)[:-1]
x_star, y_star = np.meshgrid(x, y)
u_pred = output[num, 0, :, :]
# v_star = true[num+25, 1, 1:-1, 1:-1]
v_pred = output[num, 1, :, :]
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(6, 3))
fig.subplots_adjust(hspace=0.3, wspace=0.3)
cf = ax[0].scatter(x_star, y_star, c=u_pred, alpha=0.95, edgecolors='none', cmap='hot', marker='s', s=2)
ax[0].axis('square')
ax[0].set_xlim([xmin, xmax])
ax[0].set_ylim([ymin, ymax])
cf.cmap.set_under('black')
cf.cmap.set_over('white')
ax[0].set_title('u-FDM')
fig.colorbar(cf, ax=ax[0], extend='both')
cf = ax[1].scatter(x_star, y_star, c=v_pred, alpha=0.95, edgecolors='none', cmap='hot', marker='s', s=2) #
ax[1].axis('square')
ax[1].set_xlim([xmin, xmax])
ax[1].set_ylim([ymin, ymax])
cf.cmap.set_under('black')
cf.cmap.set_over('white')
ax[1].set_title('v-FDM')
fig.colorbar(cf, ax=ax[1], extend='both')
# plt.draw()
plt.savefig(save_path + '/uv_[b=%d][t=%d].png'%(batch, num))
plt.close('all')
if __name__ == '__main__':
#################### generate data #####################
# =========== define model parameters ==========
# dt should be 1/2 of dx
# Diffusion coefficients
DA = 0.16 #2*10**-5
DB = 0.08 #DA/4
# define birth/death rates
f = 0.06 #1/25
k = 0.062 #3/50
# grid size
N = 256 # 128
# update in time
delta_t = 1.0 #1.0/2
# spatial step
dx = 1.0 #1.0 / N
    # initialize the chemical concentrations, random_influence=0
A, B = get_initial_A_and_B(N, random_influence = 0.0)
A_record = A.copy()[None,...]
B_record = B.copy()[None,...]
N_simulation_steps = 15000
for step in range(N_simulation_steps):
# Runge-kutta scheme
#A, B = update(A, B, DA, DB, f, k, delta_t)
A, B = update_rk4(A, B, DA, DB, f, k, delta_t, dx)
if step%5 ==0:
print(step)
A_record = np.concatenate((A_record, A[None,...]), axis=0)
            B_record = np.concatenate((B_record, B[None,...]), axis=0)
#!/usr/bin/env python3
from __future__ import print_function
from __future__ import division
import rospy
import rosbag
import math
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Quaternion
from sensor_msgs.msg import Imu
from tf.transformations import euler_from_quaternion, quaternion_from_euler
from mav_msgs.msg import Actuators
from waypoint_generation_library import WaypointGen
# TODO: make this critically damped by tuning the natural frequency
class PDControl(object):
""" Takes IMU and position data and publishes actuator commands based off a Proportional Derivative law"""
def __init__(self):
self.dlqrPublisher = rospy.Publisher("/uams/command/motor_speed", Actuators, queue_size=1)
# self.dlqrPublisher = rospy.Publisher("/neo11/command/motor_speed", Actuators, queue_size=1)
self.receivedImuQuat = Quaternion()
self.thrustConstant = 1.269e-05
self.momentConstant = 0.016754
self.g = 9.8 # [m/s^2]
self.m = 4.88 # [kg]
self.Ixx = 6.08870e-02 # [kg*m^2]
self.Iyy = 6.87913e-02 # [kg*m^2]
self.Izz = 1.48916e-01 # [kg*m^2]
gamma = self.thrustConstant / self.momentConstant
self.L = 0.2895 # [m]
# damping ratio (overdamped)
zeta = 2
zetaYaw = 1
# natural frequency
self.PI = 3.14159
wnAng = 13 # [rad/s]
wnAngYaw = 200
# attitude control gains calculation based on 2nd order system assumption
# proportional gain
# self.kpAngle = np.array(([self.Ixx * pow(wnAng, 2), # roll
# self.Iyy * pow(wnAng, 2), # pitch
# self.Izz * pow(wnAngYaw, 2)])) # yaw
# self.kpAngle = np.array([11.2, 11.2, 5713.2])
# self.kdAngle = np.array([ 1.12, 1.12, 16.56])
# self.kpAngle = np.array([11.2, 11.2, 5000])
# self.kdAngle = np.array([1.12, 1.12, 16.56])
self.kpAngle = np.array([20, 20, 5000])
self.kdAngle = np.array([11, 11, 160])
print(self.kpAngle)
# derivative gain
# self.kdAngle = np.array(([self.Ixx * zeta * wnAng, # roll
# self.Iyy * zeta * wnAng, # pitch
# self.Izz * 0.5 * zetaYaw * wnAngYaw])) # yaw
print(self.kdAngle)
# position control gains hand-tuned
# proportional gain
self.kpPos = np.array(([0.1, 0.1, 1]))
# derivative gain
self.kdPos = np.array(([0.1, 0.1, 1]))
# variable to keep track of the previous error in each state
self.prevRPYErr = np.zeros((3, 1))
# self.speedAllocationMatrix = np.array([[self.thrustConstant, self.thrustConstant, self.thrustConstant,
# self.thrustConstant, self.thrustConstant, self.thrustConstant],
# [0.5 * self.L * self.thrustConstant,
# self.L * self.thrustConstant,
# 0.5 * self.L * self.thrustConstant,
# -0.5 * self.L * self.thrustConstant,
# (-1) * self.L * self.thrustConstant,
# -0.5 * self.L * self.thrustConstant],
# [(0.5) * math.sqrt(3) * self.L * self.thrustConstant,
# 0,
# (-0.5) * math.sqrt(3) * self.L * self.thrustConstant,
# (-0.5) * math.sqrt(3) * self.L * self.thrustConstant,
# 0,
# (0.5) * math.sqrt(3) * self.L * self.thrustConstant],
# [(-1) * self.momentConstant, self.momentConstant,
# (-1) * self.momentConstant,
# self.momentConstant, (-1) * self.momentConstant, self.momentConstant]])
self.speedAllocationMatrix = np.array([[self.thrustConstant, self.thrustConstant, self.thrustConstant,
self.thrustConstant, self.thrustConstant, self.thrustConstant],
[(0.5) * self.L * self.thrustConstant, self.L * self.thrustConstant,
(0.5) * self.L * self.thrustConstant, (-0.5) * self.L * self.thrustConstant,
(-1) * self.L * self.thrustConstant, (-0.5) * self.L * self.thrustConstant],
[-0.5*(3 ** 0.5) * self.L * self.thrustConstant, 0,
0.5*(3 ** 0.5) * self.L * self.thrustConstant, 0.5*(3 ** 0.5) * self.L * self.thrustConstant,
0, -0.5*(3 ** 0.5) * self.L * self.thrustConstant],
[self.momentConstant, (-1) * self.momentConstant,
self.momentConstant, (-1) * self.momentConstant,
self.momentConstant, (-1) * self.momentConstant]])
# variable to check whether first pass has been completed to start calculating "dt"
self.firstPass = False
# first pass dt corresponding to 100 hz controller
self.firstPassDt = 0.01
# time now subtracted by start time
self.startTime = rospy.get_time()
# previous time placeholder
self.prevTime = 0
# generate the waypoints
WaypointGeneration = WaypointGen()
self.waypoints, self.desVel, self.desAcc, self.timeVec = WaypointGeneration.waypoint_calculation()
# deadbands [x-pos, y-pos, z-pos, yaw]
self.waypointDeadband = np.array(([0.3, 0.3, 0.5, 5 * self.PI / 180]))
pass
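    def allocation_matrix_rank_check(self):
        """Hypothetical sanity check (added for clarity, not part of the
        original node): the 4x6 speed allocation matrix must have full row
        rank so every commanded thrust/torque vector can be realized by some
        combination of the six rotor speeds."""
        assert np.linalg.matrix_rank(self.speedAllocationMatrix) == 4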
def state_update(self, odomInput):
""" Generate state vector from odometry input"""
# create state vector
state = np.zeros((12, 1))
# position
state[0] = odomInput.pose.pose.position.x
state[1] = odomInput.pose.pose.position.y
state[2] = odomInput.pose.pose.position.z
# velocity
state[3] = odomInput.twist.twist.linear.x
state[4] = odomInput.twist.twist.linear.y
state[5] = odomInput.twist.twist.linear.z
# angular position
[roll, pitch, yaw] = euler_from_quaternion([odomInput.pose.pose.orientation.x,
odomInput.pose.pose.orientation.y,
odomInput.pose.pose.orientation.z,
odomInput.pose.pose.orientation.w])
state[6] = roll
state[7] = pitch
state[8] = yaw
# angular rate
state[9] = odomInput.twist.twist.angular.x
state[10] = odomInput.twist.twist.angular.y
state[11] = odomInput.twist.twist.angular.z
# if a nan is seen then set it to 0
for i in range(0, len(state)):
if np.isnan(state[i]):
state[i] = 0
self.ctrl_update(state)
def calc_error(self, state):
""" Find the desired state given the trajectory and PD gains and calculate current error"""
# calculate the time difference
# time now subtracted by start time
currTime = rospy.get_time() - self.startTime
# time difference
if not self.firstPass:
dt = self.firstPassDt
self.firstPass = True
else:
dt = currTime - self.prevTime
# get_time() is pretty unreliable...
if dt <= 0.0001:
dt = 0.01
# find the closest index in timeVec corresponding to the current time
nearestIdx = np.searchsorted(self.timeVec, currTime)
if nearestIdx >= np.size(self.timeVec):
nearestIdx = np.size(self.timeVec) - 1
# desired linear acceleration calculation
posErr = np.array(([self.waypoints[nearestIdx, 0] - state[0, 0],
self.waypoints[nearestIdx, 1] - state[1, 0],
self.waypoints[nearestIdx, 2] - state[2, 0]]))
rateErr = np.array(([self.desVel[nearestIdx, 0] - state[3, 0],
self.desVel[nearestIdx, 1] - state[4, 0],
self.desVel[nearestIdx, 2] - state[5, 0]]))
        # desired linear acceleration
desiredLinAcc = np.array(([self.desAcc[nearestIdx, 0] + self.kpPos[0] * posErr[0] + self.kdPos[0] * rateErr[0],
self.desAcc[nearestIdx, 1] + self.kpPos[1] * posErr[1] + self.kdPos[1] * rateErr[1],
self.desAcc[nearestIdx, 2] + self.kpPos[2] * posErr[2] + self.kdPos[2] * rateErr[2]]))
desiredZAcc = desiredLinAcc[2]
        # desired RPY angles (desired roll, pitch, yaw)
rpyDes = np.array(([(1 / self.g) * (
desiredLinAcc[0] * np.sin(self.waypoints[nearestIdx, 3]) - desiredLinAcc[1] * np.cos(
self.waypoints[nearestIdx, 3])),
(1 / self.g) * (desiredLinAcc[0] * np.cos(self.waypoints[nearestIdx, 3]) + desiredLinAcc[
                                1] * np.sin(self.waypoints[nearestIdx, 3])),
                            # assumed completion (the snippet is truncated here): desired yaw taken directly from the waypoint
                            self.waypoints[nearestIdx, 3]]))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 26 2:59pm 2020
Simulating a time series similar to NGC 300 X-1, but with a larger S/N...
"""
from __future__ import division, print_function
import numpy as np
from astropy.io import fits
import matplotlib.pyplot as plt
from PyAstronomy.pyasl import foldAt
import Lv0_dirs,Lv2_dj_lsp,Lv2_swift_lc,Lv2_phase
from matplotlib.backends.backend_pdf import PdfPages
import os
from scipy import stats
from tqdm import tqdm
import subprocess
import pathlib
from stingray.pulse.pulsar import pulse_phase,phase_exposure,fold_events
from astropy.utils import iers
iers.conf.auto_download = False
def rebin_lc(corr_lc_files,corr_bg_files,bg_scale,tbin):
rebinned_time = []
rebinned_rate = []
rebinned_errs = []
rebinned_fracexp = []
completeness = []
times,rates,errors,fracexp = Lv2_swift_lc.get_bgsub(corr_lc_files,corr_bg_files,bg_scale)
trunc_times = times-times[0]
time_bins = np.arange(0,trunc_times[-1]+tbin,tbin)
print('Rebinning...')
for i in tqdm(range(len(time_bins)-1)):
time_interval = trunc_times[(trunc_times>=time_bins[i])&(trunc_times<time_bins[i+1])]
rate_interval = rates[(trunc_times>=time_bins[i])&(trunc_times<time_bins[i+1])]
error_interval = errors[(trunc_times>=time_bins[i])&(trunc_times<time_bins[i+1])]
fracexp_interval = fracexp[(trunc_times>=time_bins[i])&(trunc_times<time_bins[i+1])]
comp = len(time_interval)/(tbin/10)
if len(time_interval) != 0:# and comp >= 0.99:
mean_time = np.mean(time_interval)
mean_rate = np.mean(rate_interval)
            mean_error = np.sqrt(np.sum(error_interval**2))/np.size(error_interval)  # propagated error of the mean rate
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016 MIT Probabilistic Computing Project
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import numpy as np
import sklearn.decomposition
from cgpm.cgpm import CGpm
from cgpm.utils import general as gu
from cgpm.utils import mvnormal as multivariate_normal
class FactorAnalysis(CGpm):
"""Factor analysis model with continuous latent variables z in a low
dimensional space. The generative model for a vector x is
z ~ Normal(0, I) where z \in R^L.
e ~ Normal(0, Psi) where Psi = diag(v_1,...,v_D)
    x = W.z + mux + e where W \in R^(DxL) and mux \in R^D, learned by EM.
From standard results (Murphy Section 12.1)
z ~ Normal(0, I) Prior.
x|z ~ Normal(W.z + mux, Psi) Likelihood.
x ~ Normal(mux, W.W'+Psi) Marginal.
z|x ~ Normal(m, S) Posterior.
S = inv(I + W'.inv(Psi).W) (covariance)
m = S(W'.inv(Psi).(x-mux)) (mean)
The full joint distribution over [z,x] is then
The mean of [z,x] is [0, mux]
The covariance of [z,x] is (in block form)
I W'
(LxL) (LxD)
W W.W' + Psi
(DxL) (DxD)
where the covariance W' is computed directly
cov(z,x) = cov(z, W.z + mux + e)
= cov(z, W.z) + cov(z, mux) + cov(z, e)
= cov(z, W.z)
= cov(z,z).W'
= I*W'
= W'
Exercise: Confirm that expression for posterior z|x is consistent with
conditioning directly on the joint [z,x] using Schur complement
(Hint: see test suite).
The latent variables are exposed as output variables, but may not be
incorporated.
"""
def __init__(self, outputs, inputs, L=None, distargs=None, params=None,
rng=None):
# Default parameter settings.
if params is None:
params = {}
if distargs is None:
distargs = {}
# Entropy.
if rng is None:
rng = gu.gen_rng(1)
# No inputs.
if inputs:
raise ValueError('FactorAnalysis rejects inputs: %s.' % inputs)
# Correct outputs.
if len(outputs) < 2:
raise ValueError('FactorAnalysis needs >= 2 outputs: %s.' % outputs)
if len(set(outputs)) != len(outputs):
raise ValueError('Duplicate outputs: %s.' % outputs)
# Find low dimensional space.
if L is None:
raise ValueError('Specify latent dimension L: %s.' % L)
if L == 0:
raise ValueError('Latent dimension at least 1: %s.' % L)
if 'outputs' in distargs and any(s != 'numerical'
for s in distargs['outputs']['stattypes']):
raise ValueError('Factor non-numerical outputs: %s.' % distargs)
# Observable and latent variable indexes.
D = len(outputs[:-L])
if D < L:
raise ValueError(
'Latent dimension exceeds observed dimension: (%s,%s)'
% (outputs[:-L], outputs[-L:]))
# Parameters.
mux = params.get('mux', np.zeros(D))
Psi = params.get('Psi', np.eye(D))
W = params.get('W', np.zeros((D,L)))
# Build the object.
self.rng = rng
# Dimensions.
self.L = L
self.D = D
# Variable indexes.
self.outputs = outputs
self.observables = outputs[:-self.L]
self.latents = set(outputs[-self.L:])
self.inputs = []
self.output_mapping = {c:i for i,c in enumerate(self.outputs)}
# Dataset.
self.data = OrderedDict()
self.N = 0
# Parameters of Factor Analysis.
self.mux = np.asarray(mux)
self.Psi = np.asarray(Psi)
self.W = np.asarray(W)
# Parameters of joint distribution [x,z].
self.mu, self.cov = self.joint_parameters()
# Internal factor analysis model.
self.fa = None
def incorporate(self, rowid, observation, inputs=None):
# No duplicate observation.
if rowid in self.data:
raise ValueError('Already observed: %d.' % rowid)
# No inputs.
if inputs:
raise ValueError('No inputs allowed: %s.' % inputs)
if not observation:
raise ValueError('No observation specified: %s.' % observation)
# No unknown variables.
if any(q not in self.outputs for q in observation):
raise ValueError('Unknown variables: (%s,%s).'
% (observation, self.outputs))
# No latent variables.
if any(q in self.latents for q in observation):
raise ValueError('Cannot incorporate latent vars: (%s,%s,%s).'
% (observation, self.outputs, self.latents))
# Incorporate observed observable variables.
x = [observation.get(i, np.nan) for i in self.observables]
# Update dataset and counts.
self.data[rowid] = x
self.N += 1
def unincorporate(self, rowid):
try:
del self.data[rowid]
except KeyError:
raise ValueError('No such observation: %d.' % rowid)
self.N -= 1
def logpdf(self, rowid, targets, constraints=None, inputs=None):
# XXX Deal with observed rowid.
constraints = self.populate_constraints(rowid, targets, constraints)
if inputs:
raise ValueError('Prohibited inputs: %s' % (inputs,))
if not targets:
raise ValueError('No targets: %s' % (targets,))
if any(q not in self.outputs for q in targets):
raise ValueError('Unknown targets: %s' % (targets,))
if any(q in constraints for q in targets):
raise ValueError('Duplicate variable: %s, %s'
% (targets, constraints,))
# Reindex variables.
targets_r = self.reindex(targets)
constraints_r = self.reindex(constraints)
# Retrieve conditional distribution.
muG, covG = FactorAnalysis.mvn_condition(
self.mu, self.cov, targets_r.keys(), constraints_r)
# Compute log density.
x = np.array(targets_r.values())
return multivariate_normal.logpdf(x, muG, covG)
def simulate(self, rowid, targets, constraints=None, inputs=None, N=None):
# XXX Deal with observed rowid.
constraints = self.populate_constraints(rowid, targets, constraints)
if inputs:
raise ValueError('Prohibited inputs: %s' % (inputs,))
if not targets:
raise ValueError('No targets: %s' % (targets,))
if any(q not in self.outputs for q in targets):
raise ValueError('Unknown targets: %s' % (targets,))
if any(q in constraints for q in targets):
raise ValueError('Duplicate variable: %s, %s'
% (targets, constraints,))
# Reindex variables.
targets_r = self.reindex(targets)
constraints_r = self.reindex(constraints)
# Retrieve conditional distribution.
muG, covG = FactorAnalysis.mvn_condition(
self.mu, self.cov, targets_r, constraints_r)
# Generate samples.
sample = self.rng.multivariate_normal(mean=muG, cov=covG, size=N)
def get_sample(samp):
if isinstance(samp, float):
samp = [samp]
assert len(targets) == len(samp)
return dict(zip(targets, samp))
return get_sample(sample) if N is None else map(get_sample, sample)
def logpdf_score(self):
def compute_logpdf(x):
assert len(x) == self.D
targets = {i:v for i,v in enumerate(x) if not np.isnan(v)}
return self.logpdf(None, targets)
return sum(compute_logpdf(x) for x in self.data)
def transition(self, N=None):
X = np.asarray(self.data.values())
# Only run inference on observations without missing entries.
self.fa = sklearn.decomposition.FactorAnalysis(n_components=self.L)
self.fa.fit(X[~np.any(np.isnan(X), axis=1)])
        assert (self.L, self.D) == self.fa.components_.shape
# Update parameters of Factor Analysis.
        self.Psi = np.diag(self.fa.noise_variance_)
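# Numerical sketch (hypothetical module-level helper, added for clarity):
# confirm the class docstring's posterior z|x, with S = inv(I + W'.inv(Psi).W)
# and m = S.W'.inv(Psi).(x - mux), against direct conditioning of the joint
# Gaussian over [z, x] via the Schur complement.
def _check_posterior_against_schur_complement(D=4, L=2, seed=0):
    rng = np.random.RandomState(seed)
    W = rng.randn(D, L)
    mux = rng.randn(D)
    Psi = np.diag(rng.rand(D) + 0.1)
    x = rng.randn(D)
    # Closed-form posterior from the class docstring.
    S = np.linalg.inv(np.eye(L) + W.T.dot(np.linalg.inv(Psi)).dot(W))
    m = S.dot(W.T).dot(np.linalg.inv(Psi)).dot(x - mux)
    # Direct conditioning on the joint covariance [[I, W'], [W, W.W' + Psi]].
    cov_xx = W.dot(W.T) + Psi
    m_schur = W.T.dot(np.linalg.solve(cov_xx, x - mux))
    S_schur = np.eye(L) - W.T.dot(np.linalg.solve(cov_xx, W))
    assert np.allclose(m, m_schur) and np.allclose(S, S_schur)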
# - For the record (and future updates):
# - This code was used to generate tractor and sweep file subsets for testing.
# - The hardcoded paths are for NERSC, but you can swap out any
# - legacy survey data release path as needed.
# ADM Currently use DR6 files. Should update to DR8 at some point.
import os
import numpy as np
from time import time
from glob import glob
import fitsio
start = time()
# ADM to test the skies code, we need to mimic a survey directory for a brick
sd = '/global/project/projectdirs/cosmo/data/legacysurvey'
dr = 'dr6'
codir = 'coadd'
blobdir = 'metrics'
brick = '0959p805'
prebrick = brick[:3]
bands = ['g', 'z']
# ADM tear everything down, first
os.system('rm -rf {}'.format(dr))
rootdir = dr
os.system('mkdir {}'.format(rootdir))
rootdir += '/{}'.format(blobdir)
os.system('mkdir {}'.format(rootdir))
rootdir += '/{}'.format(prebrick)
os.system('mkdir {}'.format(rootdir))
os.system('cp {}/{}/blobs*{}* {}'.format(sd, rootdir, brick, rootdir))
rootdir = dr
rootdir += '/{}'.format(codir)
os.system('mkdir {}'.format(rootdir))
rootdir += '/{}'.format(prebrick)
os.system('mkdir {}'.format(rootdir))
rootdir += '/{}'.format(brick)
os.system('mkdir {}'.format(rootdir))
for band in bands:
    if band not in ('g', 'z') or brick != '0959p805':
msg = "brick 0959p805, bands g,z chosen as their (DR6) files are small!"
raise ValueError(msg)
os.system('cp {}/{}/*{}-image-{}* {}'.format(sd, rootdir, brick, band, rootdir))
os.system('cp {}/{}/*{}-invvar-{}* {}'.format(sd, rootdir, brick, band, rootdir))
os.system('cp {}/{}/*{}-nexp-{}* {}'.format(sd, rootdir, brick, band, rootdir))
# ADM make a simplified survey bricks file for this data release
brickfile = '{}/survey-bricks-{}.fits.gz'.format(dr, dr)
sbfile = '{}/{}'.format(sd, brickfile)
brickinfo = fitsio.read(sbfile)
# ADM remember that fitsio reads things in as bytes, so convert to unicode
bricknames = brickinfo['brickname'].astype('U')
wbrick = np.where(bricknames == brick)
import pdb
import os
import sys
import copy
import json
import pickle
import argparse
import datetime
from itertools import product
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import torch
import analysis
import numpy as np
from torch.utils.data import DataLoader
# Global dependencies
from OAI.config import Y_COLS, CONCEPTS_WO_KLG, CONCEPTS_BALANCED, CLASSES_PER_COLUMN, OUTPUTS_DIR, BASE_DIR, \
EST_TIME_PER_EXP, N_DATALOADER_WORKERS, CACHE_LIMIT, TRANSFORM_STATISTICS_TRAIN
# Models and dataset
from OAI.models import ModelXtoC, ModelXtoC_SENN, ModelOracleCtoY, ModelXtoChat_ChatToY, ModelXtoCtoY, ModelXtoY, \
ModelXtoCY, ModelXtoYWithAuxC
from OAI.dataset import load_non_image_data, load_data_from_different_splits, PytorchImagesDataset, \
get_image_cache_for_split
# ----------------- Training Experiments -----------------
def train_X_to_C(args, dataset_kwargs, model_kwargs):
dataloaders, datasets, dataset_sizes = load_data_from_different_splits(**dataset_kwargs)
# ---- Model fitting ----
if args.use_senn_model:
model = ModelXtoC_SENN(model_kwargs)
else:
model = ModelXtoC(model_kwargs)
results = model.fit(dataloaders=dataloaders, dataset_sizes=dataset_sizes)
# ---- Save results ----
save_model_results(model, results, args, dataset_kwargs, model_kwargs)
def train_oracle_C_to_y_and_test_on_Chat(args, dataset_kwargs, model_kwargs):
params = json.loads(args.oracle_C_to_y_model_params) if args.oracle_C_to_y_model_params else {}
y_cols = Y_COLS
C_cols = args.C_cols
TRAIN_SPLIT = 'train'
TEST_SPLIT = 'test'
# ---- Training Oracle C -> y ----
C_train, y_train = load_non_image_data(TRAIN_SPLIT, C_cols, y_cols,
transform_statistics=TRANSFORM_STATISTICS_TRAIN,
zscore_C=dataset_kwargs['zscore_C'],
zscore_Y=dataset_kwargs['zscore_Y'],
return_CY_only=True,
merge_klg_01=True,
truncate_C_floats=True,
shuffle_Cs=False)
y_train = np.squeeze(y_train, axis=1)
model_CtoY = ModelOracleCtoY(model_type=args.oracle_C_to_y_model, new_kwargs=params)
model_CtoY.fit(C_train, y_train)
# ---- Dataset to pass to ModelXtoC to generate Chats ----
limit = 500 if args.use_small_subset else CACHE_LIMIT
cache_test = get_image_cache_for_split(TEST_SPLIT, limit=limit)
dataset = PytorchImagesDataset(dataset=TEST_SPLIT,
transform_statistics=TRANSFORM_STATISTICS_TRAIN,
C_cols=dataset_kwargs['C_cols'],
y_cols=dataset_kwargs['y_cols'],
zscore_C=dataset_kwargs['zscore_C'],
zscore_Y=dataset_kwargs['zscore_Y'],
cache=cache_test,
truncate_C_floats=True,
data_proportion=dataset_kwargs['data_proportion'],
shuffle_Cs=False,
merge_klg_01=True,
transform='None',
use_small_subset=dataset_kwargs['use_small_subset'],
downsample_fraction=dataset_kwargs['downsample_fraction'])
dataloader = DataLoader(dataset,
batch_size=dataset_kwargs['batch_size'],
shuffle=False,
num_workers=N_DATALOADER_WORKERS)
# Get y_test information separately
C_test, y_test = load_non_image_data(TEST_SPLIT, C_cols, y_cols,
transform_statistics=TRANSFORM_STATISTICS_TRAIN,
zscore_C=dataset_kwargs['zscore_C'],
zscore_Y=dataset_kwargs['zscore_Y'],
return_CY_only=True,
merge_klg_01=True,
truncate_C_floats=True,
shuffle_Cs=False)
y_test = np.squeeze(y_test, axis=1)
dataloaders = {TEST_SPLIT: dataloader}
if args.use_small_subset:
dataset_sizes = {TEST_SPLIT: 500}
else:
dataset_sizes = {TEST_SPLIT: len(dataset)}
# ---- Restore pretrained C model to get Chats ----
results = {}
for pretrained_path in model_kwargs['pretrained_paths']:
model_kwargs['pretrained_path'] = pretrained_path
# Sanity check
y_train_hat = model_CtoY.predict(C_train)
y_train_rmse = np.sqrt(np.mean((y_train - y_train_hat) ** 2))
y_test_hat = model_CtoY.predict(C_test)
y_test_rmse = np.sqrt(np.mean((y_test - y_test_hat) ** 2))
if args.eval_model == 'X_to_C':
model_class = ModelXtoC
elif args.eval_model == 'X_to_C_to_y':
model_class = ModelXtoCtoY
model_XtoC = model_class(model_kwargs)
metrics = model_XtoC.train_or_eval_dataset(dataloaders, dataset_sizes, TEST_SPLIT)
# C_true = np.array([metrics['%s_%s_true' % (TEST_SPLIT, C)] for C in C_cols]) # Same as C_test
        C_hat = np.stack([metrics['%s_%s_pred' % (TEST_SPLIT, C)] for C in C_cols], axis=1)
import xarray as xr
import matplotlib.pyplot as plt
import numpy as np
import os
import os.path
class aero:
def __init__(self,ymdh_ini,fcst):
cef=xr.open_dataset("gfs_ctrl.nc")
print("hello")
self.hybrid_coefs=[cef.vcoord[0,1:65],cef.vcoord[1,1:65]]
ymdh_str=str(ymdh_ini)
y=ymdh_str[0:4]
m=ymdh_str[4:6]
d=ymdh_str[6:8]
h=ymdh_str[8:10]
url="https://noaa-gefs-pds.s3.amazonaws.com/gefs."+y+m+d+"/"+h+"/chem/pgrb2ap5/gefs.chem.t"+h+"z.a3d_0p50.f"+str(fcst).zfill(3)+".grib2"
self.url=url
def download(self):
self.f=self.url.split('/')[-1]
if not os.path.isfile(self.f):
            os.system("wget "+self.url)
def sig2p(self):
p0=1000
hyam=self.hybrid_coefs[0]
hybm=self.hybrid_coefs[1]
psfc=1013 #xr.full_like(dust[0,:,:],1013)
p=hyam/100.0+hybm*psfc
self.p=p
def extract_dust(self):
gefs=xr.open_dataset(self.f,engine='pynio')
tmp1=gefs.PMTC_P48_L105_GLL0_A62001
tmp2=gefs.PMTC_P48_L105_GLL0_A62001_1
tmp3=gefs.PMTC_P48_L105_GLL0_A62001_2
tmp4=gefs.PMTF_P48_L105_GLL0_A62001
tmp5=gefs.PMTF_P48_L105_GLL0_A62001_1
dust=sum([tmp1,tmp2,tmp3,tmp4,tmp5])
dust_size=(tmp1*5+tmp2*9+tmp3*11+tmp4*1+tmp5*3)/dust
dust=dust.assign_coords({"lv_HYBL0":("lv_HYBL0",self.p[::-1].values)})
dust_size=dust_size.assign_coords({"lv_HYBL0":("lv_HYBL0",self.p[::-1].values)})
        pnew = np.arange(1000, 90, -10)
"""Filter design.
"""
from __future__ import division, print_function, absolute_import
import warnings
import numpy
from numpy import (atleast_1d, poly, polyval, roots, real, asarray, allclose,
resize, pi, absolute, logspace, r_, sqrt, tan, log10,
arctan, arcsinh, sin, exp, cosh, arccosh, ceil, conjugate,
zeros, sinh, append, concatenate, prod, ones, array)
from numpy import mintypecode
import numpy as np
from scipy import special, optimize
from scipy.special import comb
from scipy.misc import factorial
from numpy.polynomial.polynomial import polyval as npp_polyval
import math
__all__ = ['findfreqs', 'freqs', 'freqz', 'tf2zpk', 'zpk2tf', 'normalize',
'lp2lp', 'lp2hp', 'lp2bp', 'lp2bs', 'bilinear', 'iirdesign',
'iirfilter', 'butter', 'cheby1', 'cheby2', 'ellip', 'bessel',
'band_stop_obj', 'buttord', 'cheb1ord', 'cheb2ord', 'ellipord',
'buttap', 'cheb1ap', 'cheb2ap', 'ellipap', 'besselap',
'BadCoefficients',
'tf2sos', 'sos2tf', 'zpk2sos', 'sos2zpk', 'group_delay']
class BadCoefficients(UserWarning):
"""Warning about badly conditioned filter coefficients"""
pass
abs = absolute
# Numerical tolerance used by `group_delay` below (of order double-precision
# machine epsilon); assumed value, added so the excerpt is self-contained.
EPSILON = 2e-16
def findfreqs(num, den, N):
"""
Find array of frequencies for computing the response of an analog filter.
Parameters
----------
num, den : array_like, 1-D
The polynomial coefficients of the numerator and denominator of the
transfer function of the filter or LTI system. The coefficients are
ordered from highest to lowest degree.
N : int
The length of the array to be computed.
Returns
-------
w : (N,) ndarray
A 1-D array of frequencies, logarithmically spaced.
Examples
--------
Find a set of nine frequencies that span the "interesting part" of the
frequency response for the filter with the transfer function
H(s) = s / (s^2 + 8s + 25)
>>> from scipy import signal
>>> signal.findfreqs([1, 0], [1, 8, 25], N=9)
array([ 1.00000000e-02, 3.16227766e-02, 1.00000000e-01,
3.16227766e-01, 1.00000000e+00, 3.16227766e+00,
1.00000000e+01, 3.16227766e+01, 1.00000000e+02])
"""
ep = atleast_1d(roots(den)) + 0j
tz = atleast_1d(roots(num)) + 0j
if len(ep) == 0:
ep = atleast_1d(-1000) + 0j
ez = r_['-1',
numpy.compress(ep.imag >= 0, ep, axis=-1),
numpy.compress((abs(tz) < 1e5) & (tz.imag >= 0), tz, axis=-1)]
integ = abs(ez) < 1e-10
hfreq = numpy.around(numpy.log10(numpy.max(3 * abs(ez.real + integ) +
1.5 * ez.imag)) + 0.5)
lfreq = numpy.around(numpy.log10(0.1 * numpy.min(abs(real(ez + integ)) +
2 * ez.imag)) - 0.5)
w = logspace(lfreq, hfreq, N)
return w
def freqs(b, a, worN=None, plot=None):
"""
Compute frequency response of analog filter.
Given the M-order numerator `b` and N-order denominator `a` of an analog
filter, compute its frequency response::
b[0]*(jw)**M + b[1]*(jw)**(M-1) + ... + b[M]
H(w) = ----------------------------------------------
a[0]*(jw)**N + a[1]*(jw)**(N-1) + ... + a[N]
Parameters
----------
b : array_like
Numerator of a linear filter.
a : array_like
Denominator of a linear filter.
worN : {None, int, array_like}, optional
If None, then compute at 200 frequencies around the interesting parts
of the response curve (determined by pole-zero locations). If a single
integer, then compute at that many frequencies. Otherwise, compute the
response at the angular frequencies (e.g. rad/s) given in `worN`.
plot : callable, optional
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqs`.
Returns
-------
w : ndarray
The angular frequencies at which `h` was computed.
h : ndarray
The frequency response.
See Also
--------
freqz : Compute the frequency response of a digital filter.
Notes
-----
Using Matplotlib's "plot" function as the callable for `plot` produces
    unexpected results; this plots the real part of the complex transfer
function, not the magnitude. Try ``lambda w, h: plot(w, abs(h))``.
Examples
--------
>>> from scipy.signal import freqs, iirfilter
>>> b, a = iirfilter(4, [1, 10], 1, 60, analog=True, ftype='cheby1')
>>> w, h = freqs(b, a, worN=np.logspace(-1, 2, 1000))
>>> import matplotlib.pyplot as plt
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.xlabel('Frequency')
>>> plt.ylabel('Amplitude response [dB]')
>>> plt.grid()
>>> plt.show()
"""
if worN is None:
w = findfreqs(b, a, 200)
elif isinstance(worN, int):
N = worN
w = findfreqs(b, a, N)
else:
w = worN
w = atleast_1d(w)
s = 1j * w
h = polyval(b, s) / polyval(a, s)
if plot is not None:
plot(w, h)
return w, h
def freqz(b, a=1, worN=None, whole=False, plot=None):
"""
Compute the frequency response of a digital filter.
Given the M-order numerator `b` and N-order denominator `a` of a digital
filter, compute its frequency response::
jw -jw -jwM
jw B(e ) b[0] + b[1]e + .... + b[M]e
H(e ) = ---- = -----------------------------------
jw -jw -jwN
A(e ) a[0] + a[1]e + .... + a[N]e
Parameters
----------
b : array_like
numerator of a linear filter
a : array_like
denominator of a linear filter
worN : {None, int, array_like}, optional
If None (default), then compute at 512 frequencies equally spaced
around the unit circle.
If a single integer, then compute at that many frequencies.
If an array_like, compute the response at the frequencies given (in
radians/sample).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to 2*pi radians/sample.
plot : callable
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqz`.
Returns
-------
w : ndarray
The normalized frequencies at which `h` was computed, in
radians/sample.
h : ndarray
The frequency response.
Notes
-----
Using Matplotlib's "plot" function as the callable for `plot` produces
    unexpected results; this plots the real part of the complex transfer
function, not the magnitude. Try ``lambda w, h: plot(w, abs(h))``.
Examples
--------
>>> from scipy import signal
>>> b = signal.firwin(80, 0.5, window=('kaiser', 8))
>>> w, h = signal.freqz(b)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.title('Digital filter frequency response')
>>> ax1 = fig.add_subplot(111)
>>> plt.plot(w, 20 * np.log10(abs(h)), 'b')
>>> plt.ylabel('Amplitude [dB]', color='b')
>>> plt.xlabel('Frequency [rad/sample]')
>>> ax2 = ax1.twinx()
>>> angles = np.unwrap(np.angle(h))
>>> plt.plot(w, angles, 'g')
>>> plt.ylabel('Angle (radians)', color='g')
>>> plt.grid()
>>> plt.axis('tight')
>>> plt.show()
"""
b, a = map(atleast_1d, (b, a))
if whole:
lastpoint = 2 * pi
else:
lastpoint = pi
if worN is None:
N = 512
w = numpy.linspace(0, lastpoint, N, endpoint=False)
elif isinstance(worN, int):
N = worN
w = numpy.linspace(0, lastpoint, N, endpoint=False)
else:
w = worN
w = atleast_1d(w)
zm1 = exp(-1j * w)
h = polyval(b[::-1], zm1) / polyval(a[::-1], zm1)
if plot is not None:
plot(w, h)
return w, h
def group_delay(system, w=None, whole=False):
r"""Compute the group delay of a digital filter.
The group delay measures by how many samples amplitude envelopes of
various spectral components of a signal are delayed by a filter.
It is formally defined as the derivative of continuous (unwrapped) phase::
d jw
D(w) = - -- arg H(e)
dw
Parameters
----------
system : tuple of array_like (b, a)
Numerator and denominator coefficients of a filter transfer function.
w : {None, int, array-like}, optional
If None (default), then compute at 512 frequencies equally spaced
around the unit circle.
If a single integer, then compute at that many frequencies.
If array, compute the delay at the frequencies given
(in radians/sample).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to ``2*pi`` radians/sample.
Returns
-------
w : ndarray
The normalized frequencies at which the group delay was computed,
in radians/sample.
gd : ndarray
The group delay.
Notes
-----
The similar function in MATLAB is called `grpdelay`.
If the transfer function :math:`H(z)` has zeros or poles on the unit
circle, the group delay at corresponding frequencies is undefined.
When such a case arises the warning is raised and the group delay
is set to 0 at those frequencies.
For the details of numerical computation of the group delay refer to [1]_.
    .. versionadded:: 0.16.0
See Also
--------
freqz : Frequency response of a digital filter
References
----------
    .. [1] Richard G. Lyons, "Understanding Digital Signal Processing,
3rd edition", p. 830.
Examples
--------
>>> from scipy import signal
>>> b, a = signal.iirdesign(0.1, 0.3, 5, 50, ftype='cheby1')
>>> w, gd = signal.group_delay((b, a))
>>> import matplotlib.pyplot as plt
>>> plt.title('Digital filter group delay')
>>> plt.plot(w, gd)
>>> plt.ylabel('Group delay [samples]')
>>> plt.xlabel('Frequency [rad/sample]')
>>> plt.show()
"""
if w is None:
w = 512
if isinstance(w, int):
if whole:
w = np.linspace(0, 2 * pi, w, endpoint=False)
else:
w = np.linspace(0, pi, w, endpoint=False)
w = np.atleast_1d(w)
b, a = map(np.atleast_1d, system)
c = np.convolve(b, a[::-1])
cr = c * np.arange(c.size)
z = np.exp(-1j * w)
num = np.polyval(cr[::-1], z)
den = np.polyval(c[::-1], z)
singular = np.absolute(den) < 10 * EPSILON
if np.any(singular):
warnings.warn(
"The group delay is singular at frequencies [{0}], setting to 0".
format(", ".join("{0:.3f}".format(ws) for ws in w[singular]))
)
gd = np.zeros_like(w)
gd[~singular] = np.real(num[~singular] / den[~singular]) - a.size + 1
return w, gd
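def _group_delay_spot_check():
    # Hypothetical spot check (added for clarity): a pure two-sample delay,
    # H(z) = z**-2, has a constant group delay of exactly 2 samples, which the
    # cr/c construction above should reproduce at every frequency.
    w, gd = group_delay(([0., 0., 1.], [1.]))
    assert np.allclose(gd, 2.0)
    return w, gd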
def _cplxreal(z, tol=None):
"""
Split into complex and real parts, combining conjugate pairs.
The 1D input vector `z` is split up into its complex (`zc`) and real (`zr`)
elements. Every complex element must be part of a complex-conjugate pair,
which are combined into a single number (with positive imaginary part) in
the output. Two complex numbers are considered a conjugate pair if their
real and imaginary parts differ in magnitude by less than ``tol * abs(z)``.
Parameters
----------
z : array_like
Vector of complex numbers to be sorted and split
tol : float, optional
Relative tolerance for testing realness and conjugate equality.
Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for
float64)
Returns
-------
zc : ndarray
Complex elements of `z`, with each pair represented by a single value
having positive imaginary part, sorted first by real part, and then
by magnitude of imaginary part. The pairs are averaged when combined
to reduce error.
zr : ndarray
Real elements of `z` (those having imaginary part less than
`tol` times their magnitude), sorted by value.
Raises
------
ValueError
If there are any complex numbers in `z` for which a conjugate
cannot be found.
See Also
--------
_cplxpair
Examples
--------
>>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
>>> zc, zr = _cplxreal(a)
    >>> print(zc)
    [ 1.+1.j 2.+1.j 2.+1.j 2.+2.j]
    >>> print(zr)
    [ 1. 3. 4.]
"""
z = atleast_1d(z)
if z.size == 0:
return z, z
elif z.ndim != 1:
raise ValueError('_cplxreal only accepts 1D input')
if tol is None:
# Get tolerance from dtype of input
tol = 100 * np.finfo((1.0 * z).dtype).eps
# Sort by real part, magnitude of imaginary part (speed up further sorting)
z = z[np.lexsort((abs(z.imag), z.real))]
# Split reals from conjugate pairs
real_indices = abs(z.imag) <= tol * abs(z)
zr = z[real_indices].real
if len(zr) == len(z):
# Input is entirely real
return array([]), zr
# Split positive and negative halves of conjugates
z = z[~real_indices]
zp = z[z.imag > 0]
zn = z[z.imag < 0]
if len(zp) != len(zn):
raise ValueError('Array contains complex value with no matching '
'conjugate.')
# Find runs of (approximately) the same real part
same_real = np.diff(zp.real) <= tol * abs(zp[:-1])
diffs = numpy.diff(concatenate(([0], same_real, [0])))
run_starts = numpy.where(diffs > 0)[0]
run_stops = numpy.where(diffs < 0)[0]
# Sort each run by their imaginary parts
for i in range(len(run_starts)):
start = run_starts[i]
stop = run_stops[i] + 1
for chunk in (zp[start:stop], zn[start:stop]):
chunk[...] = chunk[np.lexsort([abs(chunk.imag)])]
# Check that negatives match positives
if any(abs(zp - zn.conj()) > tol * abs(zn)):
raise ValueError('Array contains complex value with no matching '
'conjugate.')
# Average out numerical inaccuracy in real vs imag parts of pairs
zc = (zp + zn.conj()) / 2
return zc, zr
def _cplxpair(z, tol=None):
"""
Sort into pairs of complex conjugates.
Complex conjugates in `z` are sorted by increasing real part. In each
pair, the number with negative imaginary part appears first.
If pairs have identical real parts, they are sorted by increasing
imaginary magnitude.
Two complex numbers are considered a conjugate pair if their real and
imaginary parts differ in magnitude by less than ``tol * abs(z)``. The
pairs are forced to be exact complex conjugates by averaging the positive
and negative values.
Purely real numbers are also sorted, but placed after the complex
conjugate pairs. A number is considered real if its imaginary part is
smaller than `tol` times the magnitude of the number.
Parameters
----------
z : array_like
1-dimensional input array to be sorted.
tol : float, optional
Relative tolerance for testing realness and conjugate equality.
Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for
float64)
Returns
-------
y : ndarray
Complex conjugate pairs followed by real numbers.
Raises
------
ValueError
If there are any complex numbers in `z` for which a conjugate
cannot be found.
See Also
--------
_cplxreal
Examples
--------
>>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
>>> z = _cplxpair(a)
>>> print(z)
[ 1.-1.j 1.+1.j 2.-1.j 2.+1.j 2.-1.j 2.+1.j 2.-2.j 2.+2.j 1.+0.j
3.+0.j 4.+0.j]
"""
z = atleast_1d(z)
if z.size == 0 or np.isrealobj(z):
return np.sort(z)
if z.ndim != 1:
raise ValueError('z must be 1-dimensional')
zc, zr = _cplxreal(z, tol)
# Interleave complex values and their conjugates, with negative imaginary
# parts first in each pair
zc = np.dstack((zc.conj(), zc)).flatten()
z = np.append(zc, zr)
return z
def tf2zpk(b, a):
r"""Return zero, pole, gain (z, p, k) representation from a numerator,
denominator representation of a linear filter.
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
Returns
-------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
Notes
-----
If some values of `b` are too close to 0, they are removed. In that case,
a BadCoefficients warning is emitted.
The `b` and `a` arrays are interpreted as coefficients for positive,
descending powers of the transfer function variable. So the inputs
:math:`b = [b_0, b_1, ..., b_M]` and :math:`a =[a_0, a_1, ..., a_N]`
can represent an analog filter of the form:
.. math::
H(s) = \frac
{b_0 s^M + b_1 s^{(M-1)} + \cdots + b_M}
{a_0 s^N + a_1 s^{(N-1)} + \cdots + a_N}
or a discrete-time filter of the form:
.. math::
H(z) = \frac
{b_0 z^M + b_1 z^{(M-1)} + \cdots + b_M}
{a_0 z^N + a_1 z^{(N-1)} + \cdots + a_N}
This "positive powers" form is found more commonly in controls
engineering. If `M` and `N` are equal (which is true for all filters
generated by the bilinear transform), then this happens to be equivalent
to the "negative powers" discrete-time form preferred in DSP:
.. math::
H(z) = \frac
{b_0 + b_1 z^{-1} + \cdots + b_M z^{-M}}
{a_0 + a_1 z^{-1} + \cdots + a_N z^{-N}}
Although this is true for common filters, remember that this is not true
in the general case. If `M` and `N` are not equal, the discrete-time
transfer function coefficients must first be converted to the "positive
powers" form before finding the poles and zeros.
"""
b, a = normalize(b, a)
b = (b + 0.0) / a[0]
a = (a + 0.0) / a[0]
k = b[0]
b /= b[0]
z = roots(b)
p = roots(a)
return z, p, k
def zpk2tf(z, p, k):
"""
Return polynomial transfer function representation from zeros and poles
Parameters
----------
z : array_like
Zeros of the transfer function.
p : array_like
Poles of the transfer function.
k : float
System gain.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
"""
z = atleast_1d(z)
k = atleast_1d(k)
if len(z.shape) > 1:
temp = poly(z[0])
b = zeros((z.shape[0], z.shape[1] + 1), temp.dtype.char)
if len(k) == 1:
k = [k[0]] * z.shape[0]
for i in range(z.shape[0]):
b[i] = k[i] * poly(z[i])
else:
b = k * poly(z)
a = atleast_1d(poly(p))
# Use real output if possible. Copied from numpy.poly, since
# we can't depend on a specific version of numpy.
if issubclass(b.dtype.type, numpy.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = numpy.asarray(z, complex)
pos_roots = numpy.compress(roots.imag > 0, roots)
neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots))
if len(pos_roots) == len(neg_roots):
if numpy.all(numpy.sort_complex(neg_roots) ==
numpy.sort_complex(pos_roots)):
b = b.real.copy()
if issubclass(a.dtype.type, numpy.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = numpy.asarray(p, complex)
pos_roots = numpy.compress(roots.imag > 0, roots)
neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots))
if len(pos_roots) == len(neg_roots):
if numpy.all(numpy.sort_complex(neg_roots) ==
numpy.sort_complex(pos_roots)):
a = a.real.copy()
return b, a
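def _zpk_roundtrip_spot_check():
    # Hypothetical spot check (added for clarity): converting a simple digital
    # filter to zeros/poles/gain and back should recover the original
    # (already-normalized) coefficients to floating-point accuracy.
    b, a = [1.0, 2.0, 1.0], [1.0, -0.5, 0.06]
    z, p, k = tf2zpk(b, a)
    b2, a2 = zpk2tf(z, p, k)
    assert np.allclose(b, b2) and np.allclose(a, a2)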
def tf2sos(b, a, pairing='nearest'):
"""
Return second-order sections from transfer function representation
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
pairing : {'nearest', 'keep_odd'}, optional
The method to use to combine pairs of poles and zeros into sections.
See `zpk2sos`.
Returns
-------
sos : ndarray
Array of second-order filter coefficients, with shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
See Also
--------
zpk2sos, sosfilt
Notes
-----
It is generally discouraged to convert from TF to SOS format, since doing
so usually will not improve numerical precision errors. Instead, consider
designing filters in ZPK format and converting directly to SOS. TF is
converted to SOS by first converting to ZPK format, then converting
ZPK to SOS.
.. versionadded:: 0.16.0
"""
return zpk2sos(*tf2zpk(b, a), pairing=pairing)
def sos2tf(sos):
"""
Return a single transfer function from a series of second-order sections
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
Notes
-----
.. versionadded:: 0.16.0
"""
sos = np.asarray(sos)
b = [1.]
a = [1.]
n_sections = sos.shape[0]
for section in range(n_sections):
b = np.polymul(b, sos[section, :3])
a = np.polymul(a, sos[section, 3:])
return b, a
def sos2zpk(sos):
"""
Return zeros, poles, and gain of a series of second-order sections
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
Notes
-----
.. versionadded:: 0.16.0
"""
sos = np.asarray(sos)
n_sections = sos.shape[0]
z = np.empty(n_sections*2, np.complex128)
p = np.empty(n_sections*2, np.complex128)
k = 1.
for section in range(n_sections):
zpk = tf2zpk(sos[section, :3], sos[section, 3:])
z[2*section:2*(section+1)] = zpk[0]
p[2*section:2*(section+1)] = zpk[1]
k *= zpk[2]
return z, p, k
def _nearest_real_complex_idx(fro, to, which):
"""Get the next closest real or complex element based on distance"""
assert which in ('real', 'complex')
order = np.argsort(np.abs(fro - to))
mask = np.isreal(fro[order])
if which == 'complex':
mask = ~mask
return order[np.where(mask)[0][0]]
def zpk2sos(z, p, k, pairing='nearest'):
"""
Return second-order sections from zeros, poles, and gain of a system
Parameters
----------
z : array_like
Zeros of the transfer function.
p : array_like
Poles of the transfer function.
k : float
System gain.
pairing : {'nearest', 'keep_odd'}, optional
The method to use to combine pairs of poles and zeros into sections.
See Notes below.
Returns
-------
sos : ndarray
Array of second-order filter coefficients, with shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
See Also
--------
sosfilt
Notes
-----
The algorithm used to convert ZPK to SOS format is designed to
minimize errors due to numerical precision issues. The pairing
algorithm attempts to minimize the peak gain of each biquadratic
section. This is done by pairing poles with the nearest zeros, starting
with the poles closest to the unit circle.
*Algorithms*
The current algorithms are designed specifically for use with digital
    filters. (The output coefficients are not correct for analog filters.)
The steps in the ``pairing='nearest'`` and ``pairing='keep_odd'``
algorithms are mostly shared. The ``nearest`` algorithm attempts to
minimize the peak gain, while ``'keep_odd'`` minimizes peak gain under
the constraint that odd-order systems should retain one section
    as first order. The algorithm steps are as follows:
As a pre-processing step, add poles or zeros to the origin as
necessary to obtain the same number of poles and zeros for pairing.
If ``pairing == 'nearest'`` and there are an odd number of poles,
add an additional pole and a zero at the origin.
The following steps are then iterated over until no more poles or
zeros remain:
1. Take the (next remaining) pole (complex or real) closest to the
unit circle to begin a new filter section.
2. If the pole is real and there are no other remaining real poles [#]_,
add the closest real zero to the section and leave it as a first
order section. Note that after this step we are guaranteed to be
left with an even number of real poles, complex poles, real zeros,
and complex zeros for subsequent pairing iterations.
3. Else:
1. If the pole is complex and the zero is the only remaining real
zero*, then pair the pole with the *next* closest zero
(guaranteed to be complex). This is necessary to ensure that
there will be a real zero remaining to eventually create a
first-order section (thus keeping the odd order).
2. Else pair the pole with the closest remaining zero (complex or
real).
3. Proceed to complete the second-order section by adding another
pole and zero to the current pole and zero in the section:
1. If the current pole and zero are both complex, add their
conjugates.
2. Else if the pole is complex and the zero is real, add the
conjugate pole and the next closest real zero.
3. Else if the pole is real and the zero is complex, add the
conjugate zero and the real pole closest to those zeros.
4. Else (we must have a real pole and real zero) add the next
real pole closest to the unit circle, and then add the real
zero closest to that pole.
.. [#] This conditional can only be met for specific odd-order inputs
with the ``pairing == 'keep_odd'`` method.
.. versionadded:: 0.16.0
Examples
--------
Design a 6th order low-pass elliptic digital filter for a system with a
sampling rate of 8000 Hz that has a pass-band corner frequency of
1000 Hz. The ripple in the pass-band should not exceed 0.087 dB, and
the attenuation in the stop-band should be at least 90 dB.
In the following call to `signal.ellip`, we could use ``output='sos'``,
but for this example, we'll use ``output='zpk'``, and then convert to SOS
format with `zpk2sos`:
>>> from scipy import signal
>>> z, p, k = signal.ellip(6, 0.087, 90, 1000/(0.5*8000), output='zpk')
Now convert to SOS format.
>>> sos = signal.zpk2sos(z, p, k)
The coefficients of the numerators of the sections:
>>> sos[:, :3]
array([[ 0.0014154 , 0.00248707, 0.0014154 ],
[ 1. , 0.72965193, 1. ],
[ 1. , 0.17594966, 1. ]])
The symmetry in the coefficients occurs because all the zeros are on the
unit circle.
The coefficients of the denominators of the sections:
>>> sos[:, 3:]
array([[ 1. , -1.32543251, 0.46989499],
[ 1. , -1.26117915, 0.6262586 ],
[ 1. , -1.25707217, 0.86199667]])
The next example shows the effect of the `pairing` option. We have a
system with three poles and three zeros, so the SOS array will have
    shape (2, 6). This means there is, in effect, an extra pole and an extra
zero at the origin in the SOS representation.
>>> z1 = np.array([-1, -0.5-0.5j, -0.5+0.5j])
>>> p1 = np.array([0.75, 0.8+0.1j, 0.8-0.1j])
With ``pairing='nearest'`` (the default), we obtain
>>> signal.zpk2sos(z1, p1, 1)
array([[ 1. , 1. , 0.5 , 1. , -0.75, 0. ],
[ 1. , 1. , 0. , 1. , -1.6 , 0.65]])
    The first section has the zeros {-0.5-0.5j, -0.5+0.5j} and the poles
{0, 0.75}, and the second section has the zeros {-1, 0} and poles
{0.8+0.1j, 0.8-0.1j}. Note that the extra pole and zero at the origin
have been assigned to different sections.
With ``pairing='keep_odd'``, we obtain:
>>> signal.zpk2sos(z1, p1, 1, pairing='keep_odd')
array([[ 1. , 1. , 0. , 1. , -0.75, 0. ],
[ 1. , 1. , 0.5 , 1. , -1.6 , 0.65]])
The extra pole and zero at the origin are in the same section.
The first section is, in effect, a first-order section.
"""
# TODO in the near future:
# 1. Add SOS capability to `filtfilt`, `freqz`, etc. somehow (#3259).
# 2. Make `decimate` use `sosfilt` instead of `lfilter`.
# 3. Make sosfilt automatically simplify sections to first order
# when possible. Note this might make `sosfiltfilt` a bit harder (ICs).
# 4. Further optimizations of the section ordering / pole-zero pairing.
# See the wiki for other potential issues.
valid_pairings = ['nearest', 'keep_odd']
if pairing not in valid_pairings:
raise ValueError('pairing must be one of %s, not %s'
% (valid_pairings, pairing))
if len(z) == len(p) == 0:
return array([[k, 0., 0., 1., 0., 0.]])
# ensure we have the same number of poles and zeros, and make copies
p = np.concatenate((p, np.zeros(max(len(z) - len(p), 0))))
z = np.concatenate((z, np.zeros(max(len(p) - len(z), 0))))
n_sections = (max(len(p), len(z)) + 1) // 2
sos = zeros((n_sections, 6))
if len(p) % 2 == 1 and pairing == 'nearest':
p = np.concatenate((p, [0.]))
        z = np.concatenate((z, [0.]))
import pickle
import pytest
from io import BytesIO
from copy import copy, deepcopy
from pathlib import Path
from _helpers import gamr_skip, create_model, assert_models_equal
from itertools import product
from anndata import AnnData
from cellrank.tl import Lineage
from cellrank._key import Key
from cellrank.ul.models import GAM, GAMR, FittedModel, SKLearnModel
from cellrank.ul.models._utils import (
_OFFSET_KEY,
NormMode,
_rankdata,
_get_offset,
_extract_data,
_get_knotlocs,
)
from cellrank.ul.models._base_model import FailedModel, UnknownModelError
from cellrank.ul.models._pygam_model import GamDistribution, GamLinkFunction, _gams
import numpy as np
from pygam import ExpectileGAM
from scipy.stats import rankdata
from sklearn.svm import SVR
class TestModel:
def test_wrong_type(self):
with pytest.raises(TypeError):
SKLearnModel(0, SVR())
def test_initialize(self, adata: AnnData):
model = create_model(adata)
assert isinstance(model.model, SVR)
def test_prepare_invalid_gene(self, adata_cflare):
model = create_model(adata_cflare)
with pytest.raises(KeyError):
model.prepare("foo", "0")
def test_prepare_invalid_lineage(self, adata_cflare):
model = create_model(adata_cflare)
with pytest.raises(KeyError):
model.prepare(adata_cflare.var_names[0], "foo")
def test_prepare_invalid_data_key(self, adata_cflare):
model = create_model(adata_cflare)
with pytest.raises(KeyError):
model.prepare(adata_cflare.var_names[0], "0", data_key="foo")
def test_prepare_invalid_time_key(self, adata_cflare):
model = create_model(adata_cflare)
with pytest.raises(KeyError):
model.prepare(adata_cflare.var_names[0], "0", time_key="foo")
def test_prepare_invalid_time_range(self, adata_cflare):
model = create_model(adata_cflare)
with pytest.raises(ValueError):
model.prepare(adata_cflare.var_names[0], "0", time_range=(0, 1, 2))
def test_prepare_normal_run(self, adata_cflare):
model = create_model(adata_cflare)
model = model.prepare(adata_cflare.var_names[0], "0")
assert isinstance(model.x, np.ndarray)
assert isinstance(model.w, np.ndarray)
assert isinstance(model.y, np.ndarray)
assert isinstance(model.x_test, np.ndarray)
assert len(model.x_test) == 200
assert model.y_test is None
assert model.conf_int is None
def test_prepare_n_test_points(self, adata_cflare):
model = create_model(adata_cflare)
model = model.prepare(adata_cflare.var_names[0], "0", n_test_points=300)
assert len(model.x_test) == 300
def test_predict(self, adata_cflare):
model = create_model(adata_cflare)
model = model.prepare(adata_cflare.var_names[0], "0").fit()
y_hat = model.predict()
assert isinstance(model.y_test, np.ndarray)
assert len(model.x_test) == len(model.y_test)
assert y_hat is model.y_test
assert model.conf_int is None
def test_confidence_interval(self, adata_cflare):
model = create_model(adata_cflare)
model = model.prepare(adata_cflare.var_names[0], "0").fit()
_ = model.predict()
ci = model.confidence_interval()
assert isinstance(model.conf_int, np.ndarray)
assert len(model.y_test) == len(model.conf_int)
assert ci is model.conf_int
def test_model_1_lineage(self, adata_cflare):
adata_cflare.obsm[Key.obsm.abs_probs(False)] = Lineage(
np.ones((adata_cflare.n_obs, 1)), names=["foo"]
)
model = create_model(adata_cflare)
model = model.prepare(adata_cflare.var_names[0], "foo", n_test_points=100).fit()
_ = model.predict()
assert model.x_test.shape == (100, 1)
xtest, xall = model.x_test, model.x_all
np.testing.assert_allclose(
np.r_[xtest[0], xtest[-1]], np.r_[np.min(xall), np.max(xall)]
)
def test_prepare_resets_fields(self, adata_cflare: AnnData):
g = GAM(adata_cflare)
_ = g.prepare(adata_cflare.var_names[0], "0").fit()
_ = g.predict()
_ = g.confidence_interval()
_ = g.prepare(adata_cflare.var_names[1], "0").fit()
assert isinstance(g.x_test, np.ndarray)
assert g.y_test is None
assert g.x_hat is None
assert g.y_hat is None
assert g.conf_int is None
class TestUtils:
def test_extract_data_wrong_type(self):
with pytest.raises(TypeError):
_ = _extract_data(None)
def test_extract_data_raw_None(self, adata: AnnData):
adata = AnnData(adata.X, raw=None)
with pytest.raises(ValueError):
_ = _extract_data(adata, use_raw=True)
def test_extract_data_invalid_layer(self, adata: AnnData):
with pytest.raises(KeyError):
_extract_data(adata, layer="foo", use_raw=False)
def test_extract_data_normal_run(self, adata: AnnData):
X = _extract_data(adata, use_raw=False)
assert X is adata.X
def test_extract_data_normal_run_layer(self, adata: AnnData):
ms = _extract_data(adata, layer="Ms", use_raw=False)
assert ms is adata.layers["Ms"]
def test_extract_data_normal_run_raw(self, adata: AnnData):
raw = _extract_data(adata, use_raw=True, layer="Ms")
assert raw is adata.raw.X
def test_rank_data_dummy_array(self):
x = np.ones((100,))
np.testing.assert_array_equal(_rankdata(x), rankdata(x))
def test_rank_data_empty(self):
x = np.empty(shape=(0,))
np.testing.assert_array_equal(_rankdata(x), rankdata(x))
@pytest.mark.parametrize("method", ["average", "min", "max", "dense", "ordinal"])
def test_rank_data(self, method: str):
x = np.random.normal(size=(10,))
np.testing.assert_array_equal(_rankdata(x, method=method), rankdata(x, method=method))
def test_rank_data_invalid_method(self):
with pytest.raises(AssertionError):
_rankdata(np.random.normal(size=(10,)), method="foobar")
def test_get_knots_invalid_n_knots(self):
with pytest.raises(ValueError):
_get_knotlocs([0, 1, 2], 0)
def test_get_knots_non_finite_values(self):
x = np.array([0, 1, 2, 3], dtype=np.float64)
x[-1] = np.inf
with pytest.raises(ValueError):
_get_knotlocs(x, 1)
def test_get_knots_wrong_shape(self):
with pytest.raises(ValueError):
_get_knotlocs(np.array([0, 1, 2, 3]).reshape((2, 2)), 1)
def test_get_knots_only_same_value(self):
with pytest.raises(ValueError):
_get_knotlocs(np.array([42] * 10), 1)
def test_get_knots_empty_pseudotime(self):
with pytest.raises(ValueError):
_get_knotlocs(np.array([]), 2)
def test_get_knots_uniform(self):
expected = np.linspace(0, 5, 3, endpoint=True)
actual = _get_knotlocs(np.array([3, 5, 4, 0]), 3, uniform=True)
np.testing.assert_array_equal(actual, expected)
def test_get_knots_uniform_1_knot(self):
actual = _get_knotlocs(np.array([3, 5, 4, 0]), 1, uniform=True)
np.testing.assert_array_equal(actual, [5])
import numpy as np
import mrcfile
import math
from statistics import mean
import sys
from numpy.fft import fftn, ifftn, ifftshift
import argparse
import os
from write_mrc import write_mrc
from scipy.spatial import cKDTree
from loc_spiral import open_map_mask, cart_2_sph, spiral_filter_vect, spiral_filter
# locBSharpen - method for local sharpening by B-factor correction
# Method to improve the contrast and interpretability of cryo-EM maps at
# high resolution, especially for maps affected by heterogeneous
# resolutions and SNRs
# INPUT PARAMETERS:
# map: cryo-EM map to be processed. This map should be the average of
# the half maps obtained after refinement or the final map without any
# filtering, postprocessing (masking, b-factor correction).
# mask: Tight binary map that differentiates between the macromolecule (1)
# and the background (0).
# pixel_size: sampling rate of the cryo-EM map in Angstroms.
# minRes: minimum resolution of the sweeping resolution range in Angstroms.
# Usually a value of 25-35 Angstroms is good.
# maxRes: maximum resolution of the sweeping resolution range. Provide here
# the obtained FSC global resolution value in Angstroms.
# threshold: noise significance in the comparison with noise. Excellent
# values are 0.9 or 0.95.
# bw: bandwidth of the bandpass filter applied in Fourier Space. For high
# quality maps with global resolution (<4 Angstroms) and showing homogeneous
# distribution of resolutions a value of 3.5-5 is good. For maps presenting
# heterogeneous distributions of resolutions, or worse global resolution,
# values between 6-8 provide excellent results.
# OUTPUT PARAMETERS:
# M: Improved output map
# W: Sum of all amplitude maps during the resolution sweeping that can be
# considered as a quality map representing the amount of signal
# If this method was useful to you, please cite the following paper:
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, "Local computational methods to
# improve the interpretability and analysis of cryo-EM maps" BioRxiv,
# doi: https://doi.org/10.1101/2020.05.11.088013 (2020)
# <NAME>, 01/06/20
# <EMAIL>
# Copyright 2020, Universidad Complutense de Madrid
# $ Revision: 1.0.0.0
# $ Date: 01/06/20
# Copyright 2020 <NAME> @UCM
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The eco option is an approximation for the spiral phase transform that
# works fine in all the cases tested and saves a lot of computational
# resources. In case you use a computer with limited RAM memory (<32Gb and
# box sizes smaller than 400x400x400px) use the eco option. If not, it is
# more appropriate to use eco = False.
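# A minimal usage sketch (illustrative only): the file names, sampling rate and
# resolution limits below are assumptions, not values from the original; only the
# call signature follows the function defined next.
#
#   with mrcfile.open('halfmap_average.mrc') as mrc:
#       vol = mrc.data.astype(np.float32)
#   with mrcfile.open('tight_mask.mrc') as mrc:
#       msk = mrc.data.astype(np.float32)
#   M, W = loc_b_sharpen(vol, msk, pixel_size=1.05, min_res=30.0, max_res=3.5,
#                        threshold=0.9, bandwidth=5.0, eco=True)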
def loc_b_sharpen(map, mask, pixel_size, min_res, max_res, threshold, bandwidth, eco = True):
if not np.any(mask == 0):
print('Your mask contains no zero points. Please enter a threshold for binarization')
binary_thresh = float(input('Threshold: '))
bin_mask = mask.copy()
bin_mask[bin_mask <= binary_thresh] = 0
mask = bin_mask
M = np.multiply(map, 0)
W = np.multiply(map, 0)
[n, n1, n2] = np.shape(map)
max_freq = math.floor(pixel_size * n / max_res)
min_freq = math.floor(pixel_size * n / min_res)
# We create the necessary stuff for the bank of filters once:
[NR, NC, NZ] = np.shape(map)
[u, v, w] = np.meshgrid(range(1, NC + 1), range(1, NR + 1), range(1, NZ + 1))
# Temporal Variables
## Rich - calculating the mean of all entries in a grid is just shape/2
u0 = math.floor(n/2)
v0 = math.floor(n/2)
w0 = math.floor(n/2)
u = u - u0
v = v - v0
w = w - w0
# Circular mask to be used in the quantile evaluation
# NOTE: this line is truncated in the source; the radial-distance expression below is
# a reconstruction from the meshgrid defined above (an assumption, not the original code).
mask_c = np.power((np.power(u, 2) + np.power(v, 2) + np.power(w, 2)), 0.5)
import numpy as np
import matplotlib.pylab as plt
import sys
def run():
visualizeTarget = sys.argv[1]
print(visualizeTarget)
if(visualizeTarget=='step'):
x=np.arange(-5.0,5.0,0.1)
y=step(x)
plt.plot(x,y)
plt.ylim(-0.1,1.1)
plt.show()
elif(visualizeTarget=='sigmoid'):
x=np.arange(-5.0,5.0,0.1)
y=sigmoid(x)
plt.plot(x,y)
plt.ylim(-0.1,1.1)
plt.show()
elif(visualizeTarget=='relu'):
x=np.arange(-5.0,5.0,0.1)
y=relu(x)
plt.plot(x,y)
# plt.ylim(-0.1,1.1)
plt.show()
elif(visualizeTarget=='all'):
x=np.arange(-5.0,5.0,0.1)
y=step(x)
plt.plot(x,y)
# plt.ylim(-0.1,1.1)
x=np.arange(-5.0,5.0,0.1)
y=sigmoid(x)
plt.plot(x,y)
x=np.arange(-5.0,5.0,0.1)
y=relu(x)
plt.plot(x,y)
# plt.ylim(-0.1,3.0)
plt.show()
# for x in sys.argv:
# print(x)
class variable():
def __init__(self, value):
self.data = value
pass
def read(self):
return self.data
def test():
v = variable(424)
print(v.read() == 424)
a = np.array([2,3,1,4,2])
print(a)
print(sigmoid(a))
def TestSimpleANDGate():
print('simple AND gate test')
print(SimpleANDGate(0,0))
print(SimpleANDGate(0,1))
print(SimpleANDGate(1,0))
print(SimpleANDGate(1,1))
def SimpleANDGate(x1,x2):
w1,w2,theta = 0.5,0.5,0.7
tmp = x1*w1+x2*w2
if(tmp<=theta): return 0
elif(tmp>theta): return 1
def TestANDGate():
print('and gate test')
print(ANDGate(0,0))
print(ANDGate(0,1))
print(ANDGate(1,0))
print(ANDGate(1,1))
def ANDGate(x1,x2):
x = np.array([x1,x2])
# The remainder of this function is truncated in the source; the weighted-sum-with-bias
# form below mirrors SimpleANDGate above and is a reconstruction, not the original code.
w = np.array([0.5,0.5])
b = -0.7
tmp = np.sum(w*x)+b
if(tmp<=0): return 0
else: return 1
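# step(), sigmoid() and relu() are called in run() and test() above but their
# definitions are missing from this excerpt; the standard definitions below are added
# as an assumption so the module runs end to end.
def step(x):
    return np.array(x > 0, dtype=int)
def sigmoid(x):
    return 1 / (1 + np.exp(-x))
def relu(x):
    return np.maximum(0, x)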
#!/usr/bin/env python
from __future__ import print_function, division
import math
import glob
import aplpy
import numpy as np
import itertools
import multiprocessing as mp
from astropy.wcs.utils import proj_plane_pixel_scales
from astropy import wcs
import astropy.units as u
from astropy.stats import sigma_clipped_stats
from astropy.nddata.utils import Cutout2D
from astropy.io import fits
from astropy.visualization import ZScaleInterval
from astropy.coordinates import SkyCoord
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from matplotlib.collections import PatchCollection
from scipy.optimize import curve_fit
from photutils import aperture_photometry, CircularAperture
from photutils import Background2D, MedianBackground, make_source_mask
from qso_toolbox import utils as ut
from qso_toolbox import catalog_tools as ct
from qso_toolbox import photometry_tools as pt
def show_rectangles(fig, xw, yw, width, height, angle=0, layer=False,
zorder=None, coords_frame='world', **kwargs):
"""
Overlay rectangles on the current plot.
ATTENTION! THIS IS A MODIFIED VERSION OF THE ORIGINAL APLPY ROUTINE THAT
CORRECTLY ROTATES THE RECTANGLE AROUND ITS CENTER POSITION.
see https://github.com/aplpy/aplpy/pull/327
Parameters
----------
xw : list or `~numpy.ndarray`
The x positions of the centers of the rectangles (in world coordinates)
yw : list or `~numpy.ndarray`
The y positions of the centers of the rectangles (in world coordinates)
width : int or float or list or `~numpy.ndarray`
The width of the rectangle (in world coordinates)
height : int or float or list or `~numpy.ndarray`
The height of the rectangle (in world coordinates)
angle : int or float or list or `~numpy.ndarray`, optional
rotation in degrees (anti-clockwise). Default
angle is 0.0.
layer : str, optional
The name of the rectangle layer. This is useful for giving
custom names to layers (instead of rectangle_set_n) and for
replacing existing layers.
coords_frame : 'pixel' or 'world'
The reference frame in which the coordinates are defined. This is
used to interpret the values of ``xw``, ``yw``, ``width``, and
``height``.
kwargs
Additional keyword arguments (such as facecolor, edgecolor, alpha,
or linewidth) are passed to Matplotlib
:class:`~matplotlib.collections.PatchCollection` class, and can be
used to control the appearance of the rectangles.
"""
xw, yw, width, height, angle = aplpy.core.uniformize_1d(xw, yw, width,
height, angle)
if 'facecolor' not in kwargs:
kwargs.setdefault('facecolor', 'none')
if layer:
fig.remove_layer(layer, raise_exception=False)
if coords_frame not in ['pixel', 'world']:
raise ValueError("coords_frame should be set to 'pixel' or 'world'")
# While we could plot the shape using the get_transform('world') mode
# from WCSAxes, the issue is that the rotation angle is also measured in
# world coordinates so will not be what the user is expecting. So we
# allow the user to specify the reference frame for the coordinates and
# for the rotation.
if coords_frame == 'pixel':
x, y = xw, yw
w = width
h = height
a = angle
transform = fig.ax.transData
else:
x, y = fig.world2pixel(xw, yw)
pix_scale = aplpy.core.proj_plane_pixel_scales(fig._wcs)
sx, sy = pix_scale[fig.x], pix_scale[fig.y]
w = width / sx
h = height / sy
a = angle
transform = fig.ax.transData
# x = x - w / 2.
# y = y - h / 2.
#
# patches = []
# for i in range(len(x)):
# patches.append(Rectangle((x[i], y[i]), width=w[i], height=h[i],
# angle=a[i]))
xp = x - w / 2.
yp = y - h / 2.
radeg = np.pi / 180
xr = (xp - x) * np.cos((angle) * radeg) - (yp - y) * np.sin(
(angle) * radeg) + x
yr = (xp - x) * np.sin((angle) * radeg) + (yp - y) * np.cos(
(angle) * radeg) + y
patches = []
for i in range(len(xr)):
patches.append(
Rectangle((xr[i], yr[i]), width=w[i], height=h[i], angle=a[i]))
# Due to bugs in matplotlib, we need to pass the patch properties
# directly to the PatchCollection rather than use match_original.
p = PatchCollection(patches, transform=transform, **kwargs)
if zorder is not None:
p.zorder = zorder
c = fig.ax.add_collection(p)
if layer:
rectangle_set_name = layer
else:
fig._rectangle_counter += 1
rectangle_set_name = 'rectangle_set_' + str(fig._rectangle_counter)
fig._layers[rectangle_set_name] = c
return fig
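# Example (illustrative; the FITS file name and coordinates are placeholders, not part
# of the original code):
#   gc = aplpy.FITSFigure('cutout.fits')
#   gc.show_grayscale()
#   show_rectangles(gc, xw=150.1, yw=2.2, width=30./3600., height=10./3600.,
#                   angle=45., edgecolor='red', coords_frame='world')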
# ------------------------------------------------------------------------------
# Plotting functions for image_cutouts
# ------------------------------------------------------------------------------
def open_image(filename, ra, dec, fov, image_folder_path, verbosity=0):
"""Opens an image defined by the filename with a fov of at least the
specified size (in arcseconds).
:param filename:
:param ra:
:param dec:
:param fov:
:param image_folder_path:
:param verbosity:
:return:
"""
filenames_available = glob.glob(filename)
file_found = False
open_file_fov = None
file_path = None
if len(filenames_available) > 0:
for filename in filenames_available:
try:
file_fov = int(filename.split("_")[3].split(".")[0][3:])
except:
file_fov = 9999999
if fov <= file_fov:
data, hdr = fits.getdata(filename, header=True)
file_found = True
file_path =filename
open_file_fov = file_fov
if file_found:
if verbosity > 0:
print("Opened {} with a fov of {} "
"arcseconds".format(file_path, open_file_fov))
return data, hdr, file_path
else:
if verbosity > 0:
print("File {} in folder {} not found. Target with RA {}"
" and Decl {}".format(filename, image_folder_path,
ra, dec))
return None, None, None
def make_mult_png_fig(ra, dec, surveys, bands,
fovs, apertures, square_sizes, image_folder_path, mag_list=None,
magerr_list=None, sn_list=None,
forced_mag_list=None, forced_magerr_list=None,
forced_sn_list=None, n_col=3,
n_sigma=3, color_map_name='viridis',
add_info_label=None, add_info_value=None, verbosity=0):
"""Create a figure to plot cutouts for one source in all specified surveys
and bands.
:param ra: float
Right Ascension of the target
:param dec: float
Declination of the target
:param surveys: list of strings
List of survey names, length has to be equal to bands and fovs
:param bands: list of strings
List of band names, length has to be equal to surveys and fovs
:param fovs: list of floats
Field of view in arcseconds of image cutouts, length has to be equal to
surveys, bands and apertures.
:param apertures: list of floats
List of apertures in arcseconds for forced photometry calculated,
length has to be equal to surveys, bands and fovs
:param square_sizes: list of floats
List of square aperture sizes in arcseconds (the error-region boxes plotted around the target), length has to be equal to surveys, bands and fovs
:param image_folder_path: string
Path to the directory where all the images are stored
:param mag_list: list of floats
List of magnitudes for each survey/band
:param magerr_list: list of floats
List of magnitude errors for each survey/band
:param sn_list: list of floats
List of S/N for each survey/band
:param forced_mag_list: list of floats
List of forced magnitudes for each survey/band
:param forced_magerr_list: list of floats
List of forced magnitude errors for each survey/band
:param forced_sn_list: list of floats
List of forced S/N for each survey/band
:param n_col: int
Number of columns
:param n_sigma: int
Number of sigmas for the sigma-clipping routine that creates the
boundaries for the color map.
:param color_map_name: string
Name of the color map
:param add_info_value : string
Value for additional information added to the title of the figure
:param add_info_label : string
Label for additional information added to the title of the figure
:param verbosity:
Verbosity > 0 will print verbose statements during the execution
:return: matplotlib.figure
Figure with the plot.
"""
n_images = len(surveys)
n_row = int(math.ceil(n_images / n_col))
fig = plt.figure(figsize=(5*n_col, 5*n_row))
fig = _make_mult_png_axes(fig, n_row, n_col, ra, dec, surveys, bands,
fovs, apertures, square_sizes, image_folder_path, mag_list,
magerr_list, sn_list,
forced_mag_list, forced_magerr_list,
forced_sn_list, n_sigma, color_map_name, verbosity)
coord_name = ut.coord_to_name(np.array([ra]),
np.array([dec]),
epoch="J")
if add_info_label is None or add_info_value is None:
fig.suptitle(coord_name[0])
else:
fig.suptitle(coord_name[0]+' '+add_info_label+'='+add_info_value)
return fig
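# Example (illustrative values; the survey/band names and folder must match cutout
# files that already exist on disk or were downloaded beforehand):
#   fig = make_mult_png_fig(ra=150.1, dec=2.2,
#                           surveys=['desdr1', 'desdr1'], bands=['r', 'z'],
#                           fovs=[50, 50], apertures=[2.0, 2.0], square_sizes=[10, 10],
#                           image_folder_path='./cutouts')
#   fig.savefig('cutouts_example.png')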
def _make_mult_png_axes(fig, n_row, n_col, ra, dec, surveys, bands,
fovs, apertures, square_sizes, image_folder_path, mag_list=None,
magerr_list=None, sn_list=None,
forced_mag_list=None, forced_magerr_list=None,
forced_sn_list=None,
n_sigma=3, color_map_name='viridis', verbosity=0):
""" Create axes components to plot one source in all specified surveys
and bands.
:param fig: matplotlib.figure
Figure
:param n_row: int
Number of rows
:param n_col: int
Number of columns
:param ra: float
Right Ascension of the target
:param dec: float
Declination of the target
:param surveys: list of strings
List of survey names, length has to be equal to bands and fovs
:param bands: list of strings
List of band names, length has to be equal to surveys and fovs
:param fovs: list of floats
Field of view in arcseconds of image cutouts, length has to be equal to
surveys, bands and apertures.
:param apertures: list of floats
List of apertures in arcseconds for forced photometry calculated,
length has to be equal to surveys, bands and fovs
:param square_sizes: list of floats
List of square aperture sizes in arcseconds (the error-region boxes plotted around the target), length has to be equal to surveys, bands and fovs
:param image_folder_path: string
Path to the directory where all the images are stored
:param mag_list: list of floats
List of magnitudes for each survey/band
:param magerr_list: list of floats
List of magnitude errors for each survey/band
:param sn_list: list of floats
List of S/N for each survey/band
:param forced_mag_list: list of floats
List of forced magnitudes for each survey/band
:param forced_magerr_list: list of floats
List of forced magnitude errors for each survey/band
:param forced_sn_list: list of floats
List of forced S/N for each survey/band
:param n_col: int
Number of columns
:param n_sigma: int
Number of sigmas for the sigma-clipping routine that creates the
boundaries for the color map.
:param color_map_name: string
Name of the color map
:param verbosity:
Verbosity > 0 will print verbose statements during the execution
:return: matplotlib.figure
Figure with the plot.
"""
for idx, survey in enumerate(surveys):
band = bands[idx]
fov = fovs[idx]
aperture = apertures[idx]
size = square_sizes[idx]
if mag_list is not None:
catmag = mag_list[idx]
else:
catmag = None
if magerr_list is not None:
caterr = magerr_list[idx]
else:
caterr = None
if sn_list is not None:
catsn = sn_list[idx]
else:
catsn = None
if forced_mag_list is not None:
forced_mag = forced_mag_list[idx]
else:
forced_mag = None
if forced_magerr_list is not None:
forced_magerr = forced_magerr_list[idx]
else:
forced_magerr = None
if forced_sn_list is not None:
forced_sn = forced_sn_list[idx]
else:
forced_sn = None
# Get the correct filename, accept larger fovs
coord_name = ut.coord_to_name(np.array([ra]), np.array([dec]),
epoch="J")
filename = image_folder_path + '/' + coord_name[0] + "_" + survey + "_" + \
band + "*fov*.fits"
data, hdr, file_path = open_image(filename, ra, dec, fov,
image_folder_path,
verbosity)
if data is not None and hdr is not None:
file_found = True
else:
file_found = False
# Old plotting routine to modify, currently it only plots images for
# surveys and bands that it could open, no auto download implemented
if file_found:
wcs_img = wcs.WCS(hdr)
pixcrd = wcs_img.wcs_world2pix(ra, dec, 0)
positions = (np.float(pixcrd[0]), np.float(pixcrd[1]))
overlap = True
if verbosity >= 4:
print("[DIAGNOSTIC] Image file shape {}".format(data.shape))
try:
img_stamp = Cutout2D(data, positions, size=fov * u.arcsec,
wcs=wcs_img)
if verbosity >= 4:
print("[DIAGNOSTIC] Cutout2D file shape {}".format(
img_stamp.shape))
except:
print("Source not in image")
overlap = False
img_stamp = None
if img_stamp is not None:
if overlap:
img_stamp = img_stamp.data
hdu = fits.ImageHDU(data=img_stamp, header=hdr)
axs = aplpy.FITSFigure(hdu, figure=fig,
subplot=(n_row, n_col, idx + 1),
north=True)
# Check if input color map name is a color map, else use viridis
try:
cm = plt.get_cmap(color_map_name)
except ValueError:
print('Color map argument is not a color map. Setting '
'default: viridis')
cm = plt.get_cmap('viridis')
color_map_name = 'viridis'
# Sigma-clipping of the color scale
mean = np.mean(img_stamp[~np.isnan(img_stamp)])
std = np.std(img_stamp[~np.isnan(img_stamp)])
upp_lim = mean + n_sigma * std
low_lim = mean - n_sigma * std
axs.show_colorscale(vmin=low_lim, vmax=upp_lim,
cmap=color_map_name)
# Plot circular aperture (forced photometry flux)
(yy, xx) = img_stamp.shape
circx = (xx * 0.5) # + 1
circy = (yy * 0.5) # + 1
aper_pix = aperture_inpixels(aperture, hdr)
circle = plt.Circle((circx, circy), aper_pix, color='r', fill=False,
lw=1.5)
fig.gca().add_artist(circle)
# Plot rectangular aperture (error region)
rect_inpixels = aperture_inpixels(size, hdr)
square = plt.Rectangle((circx - rect_inpixels * 0.5,
circy - rect_inpixels * 0.5),
rect_inpixels, rect_inpixels,
color='r', fill=False, lw=1.5)
fig.gca().add_artist(square)
# Create forced photometry label
if (forced_mag is not None):
if (forced_sn is not None) & (forced_magerr is not None):
forcedlabel = r'${0:s} = {1:.2f} \pm {2:.2f} (SN=' \
r'{3:.1f})$'.format(band + "_{forced}",
forced_mag,
forced_magerr,
forced_sn)
elif forced_magerr is not None:
forcedlabel = r'${0:s} = {1:.2f} \pm {2:.2f}$'.format(
band + "_{forced}", forced_mag, forced_magerr)
else:
forcedlabel = r'${0:s} = {1:.2f}$'.format(
band + "_{forced}", forced_mag)
fig.gca().text(0.03, 0.16, forcedlabel, color='black',
weight='bold', fontsize='large',
bbox=dict(facecolor='white', alpha=0.6),
transform=fig.gca().transAxes)
# Create catalog magnitude label
if catmag is not None:
if (catsn is not None) & (caterr is not None):
maglabel = r'${0:s} = {1:.2f} \pm {2:.2f} (SN=' \
r'{3:.2f})$'.format(
band + "_{cat}", catmag, caterr, catsn)
elif caterr is not None:
maglabel = r'${0:s} = {1:.2f} \pm {2:.2f}$'.format(
band + "_{cat}", catmag, caterr)
else:
maglabel = r'${0:s} = {1:.2f}$'.format(
band + "_{cat}", catmag)
fig.gca().text(0.03, 0.04, maglabel, color='black',
weight='bold',
fontsize='large',
bbox=dict(facecolor='white', alpha=0.6),
transform=fig.gca().transAxes)
fig.gca().set_title(survey + " " + band)
return fig
# ------------------------------------------------------------------------------
# Finding Chart plotting routine
# ------------------------------------------------------------------------------
def make_finding_charts(table, ra_column_name, dec_column_name,
target_column_name, survey, band,
aperture, fov, image_folder_path,
offset_table=None,
offset_id = 0,
offset_focus = False,
offset_ra_column_name=None,
offset_dec_column_name=None,
pos_angle_column_name=None,
offset_mag_column_name=None,
offset_id_column_name=None,
# offset_finding_chart=True,
label_position='bottom',
slit_width=None,
slit_length=None,
format ='pdf',
auto_download=False, verbosity=0):
"""Create and save finding charts plots for all targets in the input table.
:param table: pandas.core.frame.DataFrame
Dataframe with targets to plot finding charts for
:param ra_column_name: string
Right ascension column name
:param dec_column_name: string
Declination column name
:param target_column_name: string
Name of the target identifier column
:param survey: string
Survey name
:param band: string
Passband name
:param aperture: float
Aperture to plot in arcseconds
:param fov: float
Field of view in arcseconds
:param image_folder_path: string
Path to where the image will be stored
:param offset_table: pandas.core.frame.DataFrame
Pandas dataframe with offset star information for all targets
:param offset_id: int
Integer indicating the primary offset from the offset table
:param offset_focus: boolean
Boolean to indicate whether offset star will be in the center or not
:param offset_ra_column_name: string
Offset star dataframe right ascension column name
:param offset_dec_column_name: string
Offset star dataframe declination column name
:param pos_angle_column_name: string
Offset star dataframe position angle column name
:param offset_mag_column_name: string
Offset star dataframe magnitude column name
:param offset_id_column_name: string
Offset star dataframe identifier column name
:param label_position: string
String that defines the label position for the offset stars.
Possible label positions are ["left", "right", "top", "bottom",
"topleft"]
:param slit_width: float
Slit width in arcseconds.
:param slit_length: float
Slit length in arcseconds
:param format: string
A string indicating in which format the finding charts are saved.
Possible formats: 'pdf', 'png'
:param auto_download: boolean
Boolean to indicate whether images should be automatically downloaded.
:param verbosity:
Verbosity > 0 will print verbose statements during the execution.
"""
surveys = [survey]
bands = [band]
fovs = [fov]
print(offset_table)
print(table)
for idx in table.index:
ra = table.loc[idx, ra_column_name]
dec = table.loc[idx, dec_column_name]
target_name = table.loc[idx, target_column_name]
if offset_table is not None:
offset_target = offset_table.query('target_name=="{}"'.format(
target_name))
# Set position angle
if len(offset_target) > 0:
if pos_angle_column_name is not None:
position_angle = offset_target.loc[offset_target.index[0],
pos_angle_column_name]
else:
target_coords = SkyCoord(ra=ra, dec=dec,
unit=(u.deg, u.deg),
frame='icrs')
offset_coords = SkyCoord(ra=offset_target.loc[:,
offset_ra_column_name].values,
dec=offset_target.loc[:,
offset_dec_column_name].values,
unit=(u.deg, u.deg),
frame='icrs')
# Calculate position angles(East of North)
pos_angles = offset_coords.position_angle(target_coords).to(
u.deg)
# Take position angle to offset_id star in list
position_angle = pos_angles[offset_id].to(u.deg).value
else:
position_angle = 0
offset_target = None
else:
offset_target = None
position_angle = 0
if offset_target is not None:
offset_target.reset_index(inplace=True, drop=True)
if auto_download:
if offset_focus:
ct.get_photometry(offset_target.loc[[0]],
offset_ra_column_name,
offset_dec_column_name,
surveys,
bands,
image_folder_path,
fovs,
# n_jobs=1,
verbosity=verbosity)
else:
ct.get_photometry(table.loc[[idx]],
ra_column_name,
dec_column_name,
surveys,
bands,
image_folder_path,
fovs,
# n_jobs=1,
verbosity=verbosity)
fig = make_finding_chart(ra, dec, survey, band, aperture, fov,
image_folder_path,
offset_df=offset_target,
offset_id=offset_id,
offset_focus=offset_focus,
offset_ra_column_name=offset_ra_column_name,
offset_dec_column_name=offset_dec_column_name,
offset_mag_column_name=offset_mag_column_name,
offset_id_column_name=offset_id_column_name,
label_position=label_position,
slit_width=slit_width,
slit_length=slit_length,
position_angle=position_angle,
verbosity=verbosity)
if format == 'pdf':
fig.save('fc_{}.pdf'.format(target_name), transparent=False)
if format == 'png':
fig.save('fc_{}.png'.format(target_name), transparent=False)
print('{} created'.format('fc_{}'.format(target_name)))
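# Example (illustrative; the dataframe and column names below are assumptions that
# simply match the keyword arguments documented above):
#   targets = pd.DataFrame({'name': ['J1000+0212'], 'ra': [150.1], 'dec': [2.2]})
#   make_finding_charts(targets, 'ra', 'dec', 'name',
#                       survey='desdr1', band='z', aperture=2.0, fov=150,
#                       image_folder_path='./cutouts',
#                       slit_width=1.0, slit_length=10.0,
#                       format='pdf', auto_download=True)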
def make_finding_chart(ra, dec, survey, band, aperture, fov,
image_folder_path,
offset_df=None,
offset_id=0,
offset_focus=False,
offset_ra_column_name=None,
offset_dec_column_name=None,
offset_mag_column_name=None,
offset_id_column_name=None,
label_position='bottom',
slit_width=None, slit_length=None,
position_angle=None, verbosity=0):
"""Make the finding chart figure and return it.
This is an internal function, but can be used to create one finding chart.
:param ra: float
Right ascension of the target in decimal degrees
:param dec: float
Declination of the target in decimal degrees
:param survey: string
Survey name
:param band: string
Passband name
:param aperture: float
Size of the plotted aperture in arcseconds
:param fov: float
Field of view in arcseconds
:param image_folder_path: string
Path to where the image will be stored
:param offset_df: pandas.core.frame.DataFrame
Pandas dataframe with offset star information
:param offset_id: int
Integer indicating the primary offset from the offset table
:param offset_focus: boolean
Boolean to indicate whether offset star will be in the center or not
:param offset_ra_column_name: string
Offset star dataframe right ascension column name
:param offset_dec_column_name: string
Offset star dataframe declination column name
:param offset_mag_column_name: string
Offset star dataframe magnitude column name
:param offset_id_column_name: string
Offset star dataframe identifier column name
:param label_position: string
String that defines the label position for the offset stars.
Possible label positions are ["left", "right", "top", "bottom",
"topleft"]
:param slit_width: float
Slit width in arcseconds.
:param slit_length: float
Slit length in arcseconds
:param position_angle:
Position angle for the observation.
:param verbosity:
Verbosity > 0 will print verbose statements during the execution.
:return: matplotlib.figure
Return the matplotlib figure of the finding chart.
"""
if offset_focus:
im_ra = offset_df.loc[offset_id, offset_ra_column_name]
im_dec = offset_df.loc[offset_id, offset_dec_column_name]
else:
im_ra = ra
im_dec = dec
coord_name = ut.coord_to_name(np.array([im_ra]), np.array([im_dec]),
epoch="J")
filename = image_folder_path + '/' + coord_name[0] + "_" + survey + "_" + \
band + "*.fits"
data, hdr, file_path = open_image(filename, im_ra, im_dec,
fov,
image_folder_path,
verbosity=verbosity)
# Reproject data if position angle is specified
if position_angle != 0:
hdr['CRPIX1'] = int(hdr['NAXIS1'] / 2.)
hdr['CRPIX2'] = int(hdr['NAXIS2'] / 2.)
hdr['CRVAL1'] = im_ra
hdr['CRVAL2'] = im_dec
new_hdr = hdr.copy()
pa_rad = np.deg2rad(position_angle)
# TODO: Note that the rotation definition here reflects one axis
# TODO: to make sure that it is a rotated version of north up east left
# TODO: both 001 components have a negative sign!
new_hdr['PC001001'] = -np.cos(pa_rad)
new_hdr['PC001002'] = np.sin(pa_rad)
new_hdr['PC002001'] = np.sin(pa_rad)
new_hdr['PC002002'] = np.cos(pa_rad)
from reproject import reproject_interp
data, footprint = reproject_interp((data, hdr),
new_hdr,
shape_out=[hdr['NAXIS1'],
hdr['NAXIS2']])
hdr = new_hdr
if data is not None:
# Plotting routine from here on.
hdu = fits.PrimaryHDU(data, hdr)
# De-rotate image along the position angle
fig = aplpy.FITSFigure(hdu)
if fov is not None:
fig.recenter(im_ra, im_dec, radius=fov / 3600. * 0.5)
try:
zscale = ZScaleInterval()
z1, z2 = zscale.get_limits(data)
fig.show_grayscale(vmin=z1, vmax=z2)
except Exception as e:
print('Exception encountered: {}'.format(str(e)))
fig.show_grayscale(pmin=10, pmax=99)
fig.add_scalebar(fov/4/3600., '{:.1f} arcmin'.format(fov/4/60.),
color='black',
font='serif',
linewidth=4)
if slit_length is not None and slit_width is not None:
if position_angle == 0:
_plot_slit(fig, im_ra, im_dec, slit_length, slit_width,
position_angle)
else:
_plot_slit(fig, im_ra, im_dec, slit_length, slit_width,
0)
if offset_df is not None and offset_ra_column_name is not None and \
offset_dec_column_name is not None and offset_mag_column_name is \
not None and offset_id_column_name is not None:
print("[INFO] Generating offsets for {}".format(filename))
_plot_offset_stars(fig, ra, dec, offset_df, fov,
offset_id,
offset_ra_column_name,
offset_dec_column_name,
offset_mag_column_name,
offset_id_column_name,
label_position=label_position)
_plot_info_box(fig, ra, dec, offset_df, offset_ra_column_name,
offset_dec_column_name, offset_mag_column_name)
fig.show_circles(xw=ra, yw=dec, radius=aperture / 3600., edgecolor='red',
alpha=1, lw=3)
fig.axis_labels.set_xtext('Right Ascension')
fig.axis_labels.set_ytext('Declination')
c = SkyCoord(ra=ra, dec=dec, unit=(u.degree, u.degree))
title = 'RA= {0} ; DEC = {1}'.format(
c.ra.to_string(precision=3, sep=":", unit=u.hour),
c.dec.to_string(precision=3, sep=":", unit=u.degree, alwayssign=True))
plt.title(title)
fig.add_grid()
fig.grid.show()
fig.set_theme('publication')
return fig
def _plot_slit(fig, ra, dec, slit_length, slit_width, position_angle):
# slit_label = 'PA=${0:.2f}$deg\n \n'.format(position_angle)
# slit_label += 'width={0:.1f}"; length={1:.1f}"'.format(
# slit_width, slit_length)
fig = show_rectangles(fig, ra, dec, slit_width / 3600., slit_length / 3600.,
edgecolor='w', lw=1.0, angle=position_angle,
coords_frame='world')
# if position_angle > 0 and position_angle < 180:
# angle_offset = 180
# dec_offset = 0
# else:
# angle_offset = 0
# dec_offset = 0
# fig.add_label(ra, dec + dec_offset, slit_label,
# rotation=position_angle + angle_offset + 90,
# size='large', color='w')
position_dict = {"left": [8, 0], "right": [-8, 0], "top": [0, 5],
"bottom": [0, -5], "topleft": [8, 5]}
def _plot_offset_stars(fig, ra, dec, offset_df, fov, offset_id,
ra_column_name,
dec_column_name,
mag_column_name,
id_column_name,
label_position="left"):
# Check if star is in image
radius = fov / 25. / 3600.
ra_pos, dec_pos = position_dict[label_position]
fig.show_circles(xw=offset_df.loc[offset_id, ra_column_name],
yw=offset_df.loc[offset_id, dec_column_name],
radius=radius * 0.5,
edgecolor='blue',
lw=3)
fig.show_rectangles(offset_df.drop(offset_id)[ra_column_name],
offset_df.drop(offset_id)[dec_column_name],
radius, radius, edgecolor='blue', lw=1)
abc_dict = {0: 'A', 1: 'B', 2: 'C', 3: 'D', 4: 'E'}
for num, idx in enumerate(offset_df.index):
ra_off = offset_df.loc[idx, ra_column_name]
dec_off = offset_df.loc[idx, dec_column_name]
target_coords = SkyCoord(ra=ra, dec=dec,
unit=(u.deg, u.deg),
frame='icrs')
offset_coords = SkyCoord(ra=ra_off,
dec=dec_off, unit=(u.deg, u.deg),
frame='icrs')
separation = offset_coords.separation(target_coords).to(u.arcsecond)
label = '{}'.format(abc_dict[num])
if separation.value <= fov/2.:
if idx == offset_id:
fig.add_label(ra_off + ra_pos * 5 / 3600. / 3.,
dec_off + dec_pos * 5 / 3600. / 3., label,
color='blue', size='x-large',
verticalalignment='center', family='serif')
else:
fig.add_label(ra_off + ra_pos * radius/5., dec_off + dec_pos *
radius/5., label,
color='blue', size='large',
verticalalignment='center', family='serif')
def _plot_info_box(fig, ra, dec, offset_df, ra_column_name, dec_column_name,
mag_column_name,):
target_info = 'Target: RA={:.4f}, DEC={:.4f}'.format(ra, dec)
info_list = [target_info]
abc_dict = {0: 'A', 1: 'B', 2: 'C', 3: 'D', 4:'E'}
for num, idx in enumerate(offset_df.index):
ra_off = offset_df.loc[idx, ra_column_name]
dec_off = offset_df.loc[idx, dec_column_name]
target_coords = SkyCoord(ra=ra, dec=dec,
unit=(u.deg, u.deg),
frame='icrs')
offset_coords = SkyCoord(ra=ra_off,
dec=dec_off, unit=(u.deg, u.deg),
frame='icrs')
# Calculate position angles and separations (East of North)
pos_angles = offset_coords.position_angle(target_coords).to(u.deg)
separations = offset_coords.separation(target_coords).to(u.arcsecond)
dra, ddec = offset_coords.spherical_offsets_to(target_coords)
mag = offset_df.loc[idx, mag_column_name]
info = '{}:\t RA={:.4f}, DEC={:.4f}, {}={:.2f}, PosAngle={' \
':.2f}'.format(abc_dict[num],
ra_off,
dec_off, mag_column_name,
mag, pos_angles)
info_off = 'Sep={:.2f}, Dra={:.2f}, ' \
'Ddec={:.2f}'.format(separations, dra.to(
'arcsecond'), ddec.to('arcsecond'))
info_list.append(info)
info_list.append(info_off)
ax = plt.gca()
boxdict = dict(facecolor='white', alpha=0.5, edgecolor='none')
ax.text(.02, 0.02, "\n".join(info_list), transform=ax.transAxes,
fontsize='small',
bbox=boxdict)
# ------------------------------------------------------------------------------
# Determine forced photometry for sources in cutouts.
# ------------------------------------------------------------------------------
def get_forced_photometry(table, ra_col_name, dec_col_name, surveys,
bands, apertures, fovs, image_folder_path,
auto_download=True,
verbosity=0):
"""Calculate forced photometry for all objects in the table Data Frame.
In the current version of this routine forced photometry calculations for
the following surveys and bands is available:
survey: 'desdr1'
bands: 'grizy'
survey: "unwise-allwise, unwise-neo1, unwise-neo2, "unwise-neo3",
"unwise-neo4", "unwise-neo5", "unwise-neo6"
bands: 'w1w2w3w4
This function takes a table object (astropy table, astropy fitstable or
DataFrame) with specified Ra and Dec. It eiher looks for the image
cutouts associated with each survey/band/fov entry or automatically
downloads them, if specified. If the image cutouts are found forced
photometry is calculated within the specified aperture.
A note on confusing terminology in the function:
img_name : Name of the image to be opened
[Epoch Identifier][RA in HHMMSS.SS][DEC in DDMMSS.SS]_
[SURVEY]_[PASSBAND]_fov[FIELD OF VIEW].fits
filename : Path to the image without field of view. This variable is used
to find all images of the source with different field of views
[Image folder path]/[Epoch Identifier][RA in HHMMSS.SS]
[DEC in DDMMSS.SS]_[SURVEY]_[PASSBAND]_*.fits
file_path : Path to the image to be opened
[Image folder path]/[Epoch Identifier][RA in HHMMSS.SS]
[DEC in DDMMSS.SS]_[SURVEY]_[PASSBAND]_fov[FIELD OF VIEW].fits
For each survey/band the following columns are added to the input table:
forced_[survey]_mag_[band]
Forced photometry magnitude for the object in the given survey/band.
The magnitudes are all in the AB system
forced_[survey]_flux_[band]
Forced photometry flux for the object in the given survey/band
forced_[survey]_sn_[band]
Forced photometry S/N for the object in the given survey/band
forced_[survey]_magerr_[band]
Forced photometry magnitude error for the object in the given
survey/band
forced_[survey]_comment_[band]
A comment with regard to the forced photometry calculation for each
object in the given survey/band.
If the forced photometry calculation is successful the comment will
give the aperture used: 'ap_[aperture in arcseconds]'
If the forced photometry calculation is unsuccessful the comment will
reflect the problem:
'image_too_small': cutout image is too small to calculate the forced
photometry (minimum pixel size 50)
'image_not_available': cutout image could not be found and/or downloaded
'crashed': bad things happened! (Check try-except clause in
calculate_forced_aperture_photometry)
Lists of equal length need to be supplied to surveys, bands, apertures and
fovs.
:param table: table object
Input data table with at least RA and Decl. columns
:param ra_col_name: string
Exact string for the RA column in the table
:param dec_col_name: string
Exact string for the Decl. column in the table
:param surveys: list of strings
List of survey names, length has to be equal to bands, apertures and
fovs
:param bands: list of strings
List of band names, length has to be equal to surveys, apertures and
fovs
:param apertures: list of floats
List of apertures in arcseconds for forced photometry calculated,
length has to be equal to surveys, bands and fovs
:param fovs: list of floats
Field of view in arcseconds of image cutouts, length has to be equal to
surveys, bands and apertures
:param image_folder_path: string
Path to the directory where all the images will be stored
:param auto_download: Boolean
Switch to enable/disable auto-downloading the cutouts images
:param verbosity:
Verbosity > 0 will print verbose statements during the execution
:return: DataFrame
Returns a DataFrame with the added columns for the forced photometry
calculation.
"""
# Check if table is pandas DataFrame otherwise convert to one
table, format = ct.check_if_table_is_pandas_dataframe(table)
# Add a column to the table specifying the object name used
# for the image name
table.loc[:, 'temp_object_name'] = ut.coord_to_name(table.loc[:,
ra_col_name].values,
table.loc[
:, dec_col_name].values,
epoch="J")
for jdx, survey in enumerate(surveys):
band = bands[jdx]
aperture = apertures[jdx]
fov = fovs[jdx]
for idx in table.index:
ra = table.loc[idx, ra_col_name]
dec = table.loc[idx, dec_col_name]
filename = image_folder_path + '/' + \
table.loc[idx, 'temp_object_name'] + "_" \
+ survey + "_" + band + "*.fits"
data, hdr, file_path = open_image(filename, ra, dec, fov,
image_folder_path, verbosity)
if data is not None:
img_name = file_path.split('/')[-1]
if data is None and auto_download is True:
if survey in ["desdr1", "desdr2"]:
url = ct.get_des_deepest_image_url(ra,
dec,
data_release=survey[-3:],
fov=fov,
band=band,
verbosity=verbosity)
elif survey.split("-")[0] == "unwise" and band in ["w1",
"w2",
"w3",
"w4"]:
# Hack to create npix from fov approximately
npix = int(round(fov / 60. / 4. * 100))
data_release = survey.split("-")[1]
wband = band[1]
url = ct.get_unwise_image_url(ra, dec, npix, wband,
data_release)
else:
raise ValueError(
"Survey and band name not recognized: {} {}. "
"\n "
"Possible survey names include: desdr1, "
"unwise-allwise, unwise-neo1, unwise-neo2, "
"unwise-neo3, unwise-neo4, unwise-neo5,"
"unwise-neo6".format(survey, band))
if url is not None:
img_name = table.loc[idx,'temp_object_name'] + "_" + \
survey + \
"_" + band + "_fov" + '{:d}'.format(fov)
ct.download_image(url, image_name=img_name,
image_folder_path=image_folder_path,
verbosity=verbosity)
file_path = image_folder_path + '/' + img_name + '.fits'
data, hdr = fits.getdata(file_path, header=True)
file_size_sufficient = False
if data is not None:
# Check if file is sufficient
file_size_sufficient = check_image_size(img_name,
file_path,
verbosity)
if data is not None and file_size_sufficient is True:
mag, flux, sn, err, comment = \
calculate_forced_aperture_photometry(file_path,
ra, dec, survey, band,
aperture,
verbosity=verbosity)
table.loc[idx, 'forced_{}_mag_{}'.format(survey, band)] = mag
table.loc[idx, 'forced_{}_flux_{}'.format(survey, band)] = flux
table.loc[idx, 'forced_{}_sn_{}'.format(survey, band)] = sn
table.loc[idx, 'forced_{}_magerr_{}'.format(survey, band)] = \
err
table.loc[idx, 'forced_{}_{}_comment'.format(survey, band)] =\
comment
if data is not None and file_size_sufficient is not True:
table.loc[idx, 'forced_{}_{}_comment'.format(survey, band)] = \
'image_too_small'.format(aperture)
if data is None:
table.loc[idx, 'forced_{}_{}_comment'.format(survey, band)] = \
'image_not_available'.format(aperture)
table.drop(columns='temp_object_name', inplace=True)
table = ct.convert_table_to_format(table, format)
return table
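# Example (illustrative; column names are assumptions, any RA/Decl. columns work):
#   df = pd.DataFrame({'ra': [150.1], 'dec': [2.2]})
#   df = get_forced_photometry(df, 'ra', 'dec',
#                              surveys=['desdr1'], bands=['z'],
#                              apertures=[2.0], fovs=[50],
#                              image_folder_path='./cutouts',
#                              auto_download=True)
#   # adds columns such as forced_desdr1_mag_z, forced_desdr1_sn_z, ...
#   # get_forced_photometry_mp below accepts the same arguments plus n_jobs.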
def get_forced_photometry_mp(table, ra_col_name, dec_col_name, surveys,
bands, apertures, fovs, image_folder_path, n_jobs=5,
auto_download=True,
verbosity=0):
"""Calculate forced photometry in multiprocessing mode.
This function works analogous to get_forced_photometry only allowing to
use multiple processor (python multiprocessing module).
:param table: table object
Input data table with at least RA and Decl. columns
:param ra_col_name: string
Exact string for the RA column in the table
:param dec_col_name: string
Exact string for the Decl. column in the table
:param surveys: list of strings
List of survey names, length has to be equal to bands, apertures and
fovs
:param bands: list of strings
List of band names, length has to be equal to surveys, apertures and
fovs
:param apertures: list of floats
List of apertures in arcseconds for forced photometry calculated,
length has to be equal to surveys, bands and fovs
:param fovs: list of floats
Field of view in arcseconds of image cutouts, length has to be equal to
surveys,
bands and apertures
:param image_folder_path: string
Path to the directory where all the images will be stored
:param n_jobs:
Number of cores to be used
:param auto_download: Boolean
Switch to enable/disable auto-downloading the cutouts images
:param verbosity:
Verbosity > 0 will print verbose statements during the execution
:return: DataFrame
Returns a DataFrame with the added columns for the forced photometry
calculation.
"""
# Check if table is pandas DataFrame otherwise convert to one
table, format = ct.check_if_table_is_pandas_dataframe(table)
# Add a column to the table specifying the object name used
# for the image name
table['temp_object_name'] = ut.coord_to_name(table[ra_col_name].values,
table[dec_col_name].values,
epoch="J")
for jdx, survey in enumerate(surveys):
band = bands[jdx]
aperture = apertures[jdx]
fov = fovs[jdx]
# Create list with image names
ra = table[ra_col_name].values
dec = table[dec_col_name].values
index = table.index
# # Create image names without the fov ending.
# temp = table.temp_object_name
mp_args = list(zip(index,
ra,
dec,
itertools.repeat(survey),
itertools.repeat(band),
itertools.repeat(aperture),
itertools.repeat(fov),
itertools.repeat(image_folder_path),
table.temp_object_name,
itertools.repeat(auto_download),
itertools.repeat(verbosity)))
# Start multiprocessing pool
with mp.Pool(n_jobs) as pool:
results = pool.starmap(_mp_get_forced_photometry, mp_args)
for result in results:
idx, mag, flux, sn, err, comment = result
table.loc[idx, 'forced_{}_mag_{}'.format(survey, band)] = mag
table.loc[idx, 'forced_{}_flux_{}'.format(survey, band)] = flux
table.loc[idx, 'forced_{}_sn_{}'.format(survey, band)] = sn
table.loc[idx, 'forced_{}_magerr_{}'.format(survey, band)] = \
err
table.loc[idx, 'forced_{}_{}_comment'.format(survey, band)] = \
comment
table.drop(columns='temp_object_name', inplace=True)
table = ct.convert_table_to_format(table, format)
return table
def _mp_get_forced_photometry(index, ra, dec, survey,
band, aperture, fov, image_folder_path,
temp_object_name,
auto_download=True,
verbosity=0):
"""Calculate forced photometry for one object at a time.
:param index:
:param ra: float
Right Ascension of the target
:param dec: float
Declination of the target
:param survey: string
Survey name
:param band: string
Passband name
:param aperture: float
Aperture to calculate forced photometry in in arcseconds
:param fov: float
Field of view in arcseconds
:param image_folder_path: string
Path to where the image will be stored
:param temp_object_name: string
The base name of the image to be opened for the forced photometry
calculation (excluding the fov):
[Epoch Identifier][RA in HHMMSS.SS][DEC in DDMMSS.SS]_
[SURVEY]_[PASSBAND]
:param auto_download: Boolean
Switch to enable/disable auto-downloading the cutouts images
:param verbosity:
Verbosity > 0 will print verbose statements during the execution
:return: tuple(int, float, float, float, float, string)
Returns a tuple with the forced photometry values:
index, mag, flux, sn, err, comment
"""
# Create image filename to check against files in cutout folder
filename = image_folder_path + '/' + temp_object_name + "_" + survey + "_" \
+ band + "*.fits"
data, hdr, file_path = open_image(filename, ra, dec, fov,
image_folder_path, verbosity)
if data is not None:
img_name = file_path.split('/')[-1]
if data is None and auto_download is True:
if survey in ["desdr1", "desdr2"]:
url = ct.get_des_deepest_image_url(ra,
dec,
data_release=survey[-3:],
fov=fov,
band=band,
verbosity=verbosity)
elif survey.split("-")[0] == "unwise" and band in ["w1",
"w2",
"w3",
"w4"]:
# Hack to create npix from fov approximately
npix = int(round(fov / 60. / 4. * 100))
data_release = survey.split("-")[1]
wband = band[1]
url = ct.get_unwise_image_url(ra, dec, npix, wband,
data_release)
else:
raise ValueError(
"Survey and band name not recognized: {} {}. "
"\n "
"Possible survey names include: desdr1, "
"unwise-allwise, unwise-neo1, unwise-neo2, "
"unwise-neo3, unwise-neo4, unwise-neo5,"
"unwise-neo6".format(survey, band))
if url is not None:
img_name = temp_object_name + "_" + survey + \
"_" + band + "_fov" + '{:d}'.format(fov)
ct.download_image(url, image_name=img_name,
image_folder_path=image_folder_path,
verbosity=verbosity)
file_path = image_folder_path + '/' + img_name + '.fits'
data, hdr = fits.getdata(file_path, header=True)
file_size_sufficient = False
if data is not None:
# Check if file is sufficient
file_size_sufficient = check_image_size(img_name,
file_path,
verbosity)
if data is not None and file_size_sufficient is True:
mag, flux, sn, err, comment = \
calculate_forced_aperture_photometry(file_path,
ra, dec, survey, band,
aperture,
verbosity=verbosity)
return index, mag, flux, sn, err, comment
if data is not None and file_size_sufficient is not True:
comment = 'image_too_small'.format(aperture)
return index, np.nan, np.nan, np.nan, np.nan, comment
if data is None:
comment = 'image_not_available'.format(aperture)
return index, np.nan, np.nan, np.nan, np.nan, comment
def calculate_forced_aperture_photometry(filepath, ra, dec, survey,
band, aperture,
verbosity=0):
"""Calculates the forced photometry for a Ra/Dec position on a given
image file specified by filepath.
:param filepath: string
Path to the image on which to calculate the forced photometry.
:param ra: float
Right ascension of the source for which forced photometry should be
calculated.
:param dec: float
Declination of the source for which forced photometry should be
calculated.
:param survey: string
Survey keyword; The magnitude calculation depends on the survey
photometry and hence this keyword sets the flux to magnitude
conversion accordingly.
:param band: string
Passband name; used together with the survey keyword for the Vega-to-AB
conversion of the resulting magnitude.
:param aperture: float
Aperture in arcseconds in over which the forced photometry is
calculated.
:param verbosity:
Verbosity > 0 will print verbose statements during the execution
:return: tuple(float, float, float, float, string)
Returns a tuple with the forced photometry values:
mag, flux, sn, err, comment
"""
# Open the fits image
data, header = fits.getdata(filepath, header=True)
# Convert radius from arcseconds to pixel
pixelscale = get_pixelscale(header)
aperture_pixel = aperture / pixelscale # pixels
# Transform coordinates of target position to pixel scale
wcs_img = wcs.WCS(header)
pixel_coordinate = wcs_img.wcs_world2pix(ra, dec, 1)
# QUICKFIX to stop aperture photometry from crashing
# try:
# Get photometry
positions = (pixel_coordinate[0], pixel_coordinate[1])
apertures = CircularAperture(positions, r=aperture_pixel)
f = aperture_photometry(data, apertures)
flux = np.ma.masked_invalid(f['aperture_sum'])
# Get the noise
rmsimg, mean_noise, empty_flux = get_noiseaper(data, aperture_pixel)
sn = flux[0] / rmsimg
comment = 'ap_{}'.format(aperture)
if verbosity > 0:
print("flux: ", flux[0], "sn: ", sn)
if sn < 0:
flux[0] = rmsimg
err = -1
mags = flux_to_magnitude(flux, survey)[0]
else:
mags = flux_to_magnitude(flux, survey)[0]
err = mag_err(1. / sn, verbose=False)
if verbosity > 0:
print("mag: ", mags)
if mags is np.ma.masked:
mags = -999
comment = 'masked'
if sn is np.ma.masked:
sn = np.nan
if err is np.ma.masked:
err = np.nan
if flux[0] is np.ma.masked:
flux = np.nan
else:
flux = flux[0]
survey_band = survey+'_'+band
mags = pt.vega_to_ab(mags, survey_band)
return mags, flux, sn, err, comment
# except ValueError:
# return -999, np.nan, np.nan, np.nan, 'crashed'
# ------------------------------------------------------------------------------
# Image utility functions for forced photometry
# (mostly from Eduardo and not modified)
# ------------------------------------------------------------------------------
def check_image_size(image_name, file_path, verbosity):
"""
:param image_name:
:param file_path:
:param verbosity:
:return:
"""
shape = fits.getdata(file_path).shape
min_axis = np.min(shape)
if min_axis < 50 and verbosity > 0:
print("Minimum image dimension : {} (pixels)".format(min_axis))
print("Too few pixels in one axis (<50). Skipping {}".format(
image_name))
if min_axis < 50:
return False
else:
return True
def aperture_inpixels(aperture, hdr):
'''
receives aperture in arcsec. Returns aperture in pixels
'''
pixelscale = get_pixelscale(hdr)
aperture /= pixelscale #pixels
return aperture
def get_pixelscale(hdr):
'''
Get pixelscale from header and return in it in arcsec/pixel
'''
wcs_img = wcs.WCS(hdr)
scale = np.mean(proj_plane_pixel_scales(wcs_img)) * 3600
return scale
def mag_err(noise_flux_ratio, verbose=True):
'''
Calculates the magnitude error from the input noise_flux_ratio
which is basically the inverse of the Signal-to-Noise ratio
'''
err = (2.5 / np.log(10)) * noise_flux_ratio
if verbose:
print(err)
return err
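# For example, a signal-to-noise ratio of 10 (noise_flux_ratio = 0.1) corresponds to
# (2.5 / ln 10) * 0.1 ~ 0.11 mag of photometric uncertainty.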
def get_noiseaper(data, radius):
# print("estimating noise in aperture: ", radius)
sources_mask = make_source_mask(data, nsigma=2.5, npixels=3,
dilate_size=15, filter_fwhm=4.5)
N = 5100
ny, nx = data.shape
x1 = np.int(nx * 0.09)
x2 = np.int(nx * 0.91)
y1 = np.int(ny * 0.09)
y2 = np.int(ny * 0.91)
xx = np.random.uniform(x1, x2, N)
yy = np.random.uniform(y1, y2, N)
mask = sources_mask[np.int_(yy), np.int_(xx)]
xx = xx[~mask]
yy = yy[~mask]
positions = list(zip(xx, yy))
apertures = CircularAperture(positions, r=radius)
f = aperture_photometry(data, apertures, mask=sources_mask)
f = np.ma.masked_invalid(f['aperture_sum'])
m1 = np.isfinite(f) #& (f!=0)
empty_fluxes = f[m1]
emptyapmeanflux, emptyapsigma = gaussian_fit_to_histogram(empty_fluxes)
return emptyapsigma, emptyapmeanflux, empty_fluxes
def gaussian_fit_to_histogram(dataset):
""" fit a gaussian function to the histogram of the given dataset
:param dataset: a series of measurements that is presumed to be normally
distributed, probably around a mean that is close to zero.
:return: mean, mu and width, sigma of the gaussian model fit.
Taken from
https://github.com/djones1040/PythonPhot/blob/master/PythonPhot/photfunctions.py
"""
def gauss(x, mu, sigma):
return np.exp(-(x - mu) ** 2 / (2 * sigma ** 2))
if np.ndim(dataset) == 2:
musigma = np.array([gaussian_fit_to_histogram(dataset[:, i])
for i in range(np.shape(dataset)[1])])
return musigma[:, 0], musigma[:, 1]
dataset = dataset[np.isfinite(dataset)]
ndatapoints = len(dataset)
stdmean, stdmedian, stderr, = sigma_clipped_stats(dataset, sigma=5.0)
nhistbins = max(10, int(ndatapoints / 20))
histbins = np.linspace(stdmedian - 5 * stderr, stdmedian + 5 * stderr,
nhistbins)
yhist, xhist = np.histogram(dataset, bins=histbins)
binwidth = np.mean(np.diff(xhist))
binpeak = float(np.max(yhist))
param0 = [stdmedian, stderr] # initial guesses for gaussian mu and sigma
xval = xhist[:-1] + (binwidth / 2)
yval = yhist / binpeak
try:
minparam, cov = curve_fit(gauss, xval, yval, p0=param0)
except RuntimeError:
minparam = -99, -99
mumin, sigmamin = minparam
return mumin, sigmamin
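# Illustrative usage sketch (added for clarity, not part of the original module):
# fitting synthetic Gaussian noise should roughly recover the input mean and width.
def _example_gaussian_fit_to_histogram():
    fake_noise = np.random.normal(0.0, 2.0, 5000)
    mu, sigma = gaussian_fit_to_histogram(fake_noise)
    return mu, sigma  # expected to be close to (0.0, 2.0)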
def flux_to_magnitude(flux, survey):
"""
:param flux:
:param survey:
:return:
"""
if survey == "desdr1":
zpt = 30.
elif survey.split("-")[0] == "unwise":
zpt = 22.5
else:
raise ValueError("Survey name not recognized: {}".format(survey))
return -2.5 * np.log10(flux) + zpt
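# Illustrative usage sketch (added for clarity, not part of the original module):
# with the DES DR1 zero point of 30, a flux of 1000 maps to -2.5*log10(1000) + 30 = 22.5 mag.
def _example_flux_to_magnitude():
    return flux_to_magnitude(np.array([1000.0]), "desdr1")  # array([22.5])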
def nmgy2abmag(flux, flux_ivar=None):
"""
Conversion from nanomaggies to AB mag as used in the DECALS survey
    flux_ivar = inverse variance of DECAM_FLUX (1/nanomaggies^2)
"""
lenf = len(flux)
if lenf > 1:
ii = np.where(flux>0)
mag = 99.99 + np.zeros_like(flux)
mag[ii] = 22.5 - 2.5*np.log10(flux[ii])
else:
mag = 22.5 - 2.5*np.log10(flux)
if flux_ivar is None:
return mag
elif lenf>1:
err = np.zeros_like(mag)
df = np.sqrt(1./flux_ivar)
err[ii] = mag_err(df[ii]/flux[ii], verbose=False)
else:
        df = np.sqrt(1./flux_ivar)
"""
Sample trajectories
"""
import sys
sys.path.append('../')
from sampler.ai2thor_env import AI2ThorEnvironment, VISIBILITY_DISTANCE, round_to_factor
import random
import numpy as np
from sampler.data_utils import GCSH5Writer
import json
from tqdm import tqdm
import os
import pandas as pd
import argparse
import glob
parser = argparse.ArgumentParser(description='Sample Trajectories')
parser.add_argument(
'-display',
dest='display',
default=0,
type=int,
help='which display we are on'
)
parser.add_argument(
'-seed',
dest='seed',
default=123456,
type=int,
help='seed to use'
)
parser.add_argument(
'-out_path',
dest='out_path',
type=str,
help='Base path to use.'
)
parser.add_argument(
'-nex',
dest='nex',
default=10000,
type=int,
help='Num examples to generate'
)
args = parser.parse_args()
with open(os.path.join(os.path.dirname(os.path.join(__file__)), '..', 'data', 'size_deltas.json'), 'r') as f:
SIZE_DELTAS = json.load(f)
with open(os.path.join(os.path.dirname(os.path.join(__file__)), '..', 'data', 'knob_to_burner.json'), 'r') as f:
KNOB_TO_BURNER = json.load(f)
def rotation_angle_to_object(object_info, point):
"""
Given a point, compute the best rotation angle to object
the coordinate system is a bit weird
90 degrees
^ +x
180 degrees ------> +z 0 degrees
|
|
V
270 degrees
:param object_info:
:param point: [x,y,z]
:return:
"""
object_coords = object_info['axisAlignedBoundingBox']['center'] if 'axisAlignedBoundingBox' in object_info else \
object_info['position']
x_delta = object_coords['x'] - point['x']
z_delta = object_coords['z'] - point['z']
r = np.sqrt(np.square(x_delta) + np.square(z_delta))
angle = np.arctan2(x_delta / r, z_delta / r) * 180 / np.pi
if angle < 0:
angle += 360
if angle > 360.0:
angle -= 360.0
return angle
def horizon_angle_to_object(object_info, point):
"""
^ - angle
/
me reference pt
\
V + angle
we're facing the object already
:param object_info:
:param point: [x,y,z]
:return:
"""
object_coords = object_info['axisAlignedBoundingBox']['center'] if 'axisAlignedBoundingBox' in object_info else \
object_info['position']
my_height = 1.575
y_delta = object_coords['y'] - my_height
xz_dist = np.sqrt(np.square(object_coords['x'] - point['x']) + np.square(object_coords['z'] - point['z']))
r = np.sqrt(np.square(xz_dist) + np.square(y_delta))
angle = np.arctan2(-y_delta / r, xz_dist / r) * 180 / np.pi
if angle < 0:
angle += 360
if angle > 360.0:
angle -= 360.0
return angle
def clip_angle_to_nearest(true_angle, possible_angles=(0, 90, 180, 270)):
"""
Clips angle to the nearest one
:param true_angle: float
:param possible_angles: other angles
:return:
"""
pa = np.array(possible_angles)
dist = np.abs(pa - true_angle)
dist = np.minimum(dist, 360.0 - dist)
return int(pa[np.argmin(dist)])
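# Illustrative usage sketch (added for clarity, not part of the original file):
# the clipping honours the 360-degree wrap-around, so 93 snaps to 90 and 350 snaps to 0.
def _example_clip_angle_to_nearest():
    return clip_angle_to_nearest(93.0), clip_angle_to_nearest(350.0)  # (90, 0)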
def get_object_zxwh_and_expansion(target_object, openable_object_pad_distance=0.0, object_pad_distance=0.0):
"""
Compute object bounding box coordinates, with possible expansion
:param target_object:
:param openable_object_pad_distance: pad openable objects that are currently closed this much EXTRA
:param object_pad_distance: pad all objects this much
:return:
"""
object_zxwh = np.array([target_object['axisAlignedBoundingBox']['center']['z'],
target_object['axisAlignedBoundingBox']['center']['x'],
target_object['axisAlignedBoundingBox']['size']['z'],
target_object['axisAlignedBoundingBox']['size']['x'],
])
object_zxwh2 = np.copy(object_zxwh)
# Increase size a bit accordingly
if target_object['openable'] and not target_object['isOpen']:
sd = SIZE_DELTAS['object_name_to_size_delta'].get(target_object['name'],
SIZE_DELTAS['object_type_to_size_delta'].get(
target_object['objectType'], [0.0, 0.0]))
# Rotation matrix
theta = target_object['rotation']['y'] * np.pi / 180
R = np.array([
[np.cos(theta), -np.sin(theta)],
[np.sin(theta), np.cos(theta)]
])
        sd = np.abs(R @ np.array(sd)) + openable_object_pad_distance # Agent radius is technically 0.2 (I dealt with that elsewhere) but 0.4 seems
        # needed for some annoying objects
object_zxwh2[2:] += sd
object_zxwh2[2:] += object_pad_distance # agent radius is 0.2
return object_zxwh, object_zxwh2
def zxwh_to_zzxx(bbox_xywh):
sizes = bbox_xywh[..., 2:] / 2.0
return np.concatenate((bbox_xywh[..., :2] - sizes, bbox_xywh[..., :2] + sizes), -1)
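# Illustrative usage sketch (added for clarity, not part of the original file):
# a box given as (z_center, x_center, z_size, x_size) becomes (z_min, x_min, z_max, x_max).
def _example_zxwh_to_zzxx():
    return zxwh_to_zzxx(np.array([1.0, 2.0, 0.5, 0.5]))  # [0.75, 1.75, 1.25, 2.25]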
def interval_pos(test_x, x0, x1):
"""
In 1D space:
x0 x1
############## ################## ##############
# ---- position 0 position + position
:param test_x:
:param x0:
:param x1:
:return:
"""
assert x1 >= x0
if test_x < x0:
return test_x - x0
if test_x > x1:
return test_x - x1
return 0.0
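# Illustrative usage sketch (added for clarity, not part of the original file):
# interval_pos returns a signed distance to the interval and 0.0 for points inside it.
def _example_interval_pos():
    return (interval_pos(0.5, 1.0, 2.0),  # -0.5, left of the interval
            interval_pos(1.5, 1.0, 2.0),  # 0.0, inside the interval
            interval_pos(2.7, 1.0, 2.0))  # ~0.7, right of the interval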
def distance_to_object_manifold(agent_pos, target_object, openable_object_pad_distance=0.0, object_pad_distance=0.0):
"""
Distance from the agent to the object manifold. If the object is openable we'll pretend it's open and hallucinate accordingly
    For simplicity we'll just use 2D distances
The agent is a circle around its position with radius 0.2
:param agent_pos:
    :param target_object:
:return:
"""
if target_object['objectType'] == 'Floor':
return 0.0
# Just return the expanded box.
_, object_zxwh = get_object_zxwh_and_expansion(target_object,
openable_object_pad_distance=openable_object_pad_distance,
object_pad_distance=object_pad_distance)
# Compute distance to the manifold. negative distance if INSIDE.
# Check if we're inside
object_zzxx = zxwh_to_zzxx(object_zxwh)
z_dist = interval_pos(agent_pos['z'], object_zzxx[0], object_zzxx[2])
x_dist = interval_pos(agent_pos['x'], object_zzxx[1], object_zzxx[3])
if z_dist == 0.0 and x_dist == 0.0:
# Inside the boundary?
return -0.01
r = np.sqrt(np.square(z_dist) + np.square(x_dist))
return r
# Subactions. they aren't DOING anything yet just planning ======================================
def path_to_object(env: AI2ThorEnvironment, target_object, angle_noise=10, dist_noise=0.1, dist_to_obj_penalty=2.0,
faces_good_side_penalty=10.0):
"""
Go to the object (starting from the Agent's location)
:param env:
:param target_object:
:param angle_noise:
:param dist_noise: standard deviation for noise we'll add to shortest pathfinder
:param dist_to_obj_penalty:
:return:
"""
reachable_pts = env.currently_reachable_points
start_location = env.get_agent_location()
receptacle = None
if target_object['openable'] and not target_object['isOpen']:
receptacle = target_object
for pr in target_object['parentReceptacles']:
mr = env.get_object_by_id(pr)
if mr['openable'] and not mr['isOpen']:
receptacle = mr
# Find all pts
reachable_points_touching_object = []
for pt in reachable_pts:
ctr_dist = env.position_dist(pt, target_object['position'])
pt_dist = distance_to_object_manifold(pt, target_object, object_pad_distance=0.0)
if receptacle is not None:
# I really don't know what to set this to safely
oopd = random.random()
pt_dist_to_receptacle = distance_to_object_manifold(pt, receptacle,
openable_object_pad_distance=oopd,
object_pad_distance=0.2)
if pt_dist_to_receptacle < 0.0:
continue
vd = 1.49
if target_object['objectType'] in ('Fridge',):
vd += 0.5
if (ctr_dist > vd) and (pt_dist > vd):
continue
# Might need a minimum distance away from the object
# Get angle
ra = rotation_angle_to_object(target_object, pt) + float(np.random.uniform(-angle_noise, angle_noise))
ra = clip_angle_to_nearest(ra)
ha = horizon_angle_to_object(target_object, pt) + float(np.random.uniform(-angle_noise, angle_noise))
ha = clip_angle_to_nearest(ha, [-30, 0, 30, 60])
pt2 = {k: v for k, v in pt.items()}
pt2['horizon'] = ha
pt2['rotation'] = ra
pt2['dist'] = pt_dist
pt2['dist_to_me'] = env.position_dist(start_location, pt)
# Check if we're facing the good side of the object for an object like fridge
pt2['faces_good_side'] = True
if receptacle is not None:
obj, obj_expanded = get_object_zxwh_and_expansion(receptacle,
openable_object_pad_distance=0.2,
object_pad_distance=0.2)
object_zzxx = zxwh_to_zzxx(obj_expanded)
# Figure out what quadrant we're in
z_dist = interval_pos(pt2['z'], object_zzxx[0], object_zzxx[2])
x_dist = interval_pos(pt2['x'], object_zzxx[1], object_zzxx[3])
# Z expansion, X expansion.
size_delta_z, size_delta_x = (obj_expanded - obj)[2:]
if (abs(z_dist) > 0) and (abs(x_dist) > 0):
pt2['faces_good_side'] = False
else:
                # If X expansion is a lot longer, we want Z coordinates to be 0 dist and X coordinates to be + dist
if size_delta_x > (size_delta_z + 0.25):
pt2['faces_good_side'] = (z_dist == 0.0)
if size_delta_z > (size_delta_x + 0.25):
pt2['faces_good_side'] = (x_dist == 0.0)
reachable_points_touching_object.append(pt2)
if len(reachable_points_touching_object) == 0:
raise ValueError("No points touching object {}".format(target_object['objectId']))
dists = np.array(
[(pt2['dist'], pt2['dist_to_me'], float(pt2['faces_good_side'])) for pt2 in reachable_points_touching_object])
# 1 standard dist -> 0.25
joint_dist = dist_to_obj_penalty * dists[:, 0] + dists[:, 1] + np.random.randn(
dists.shape[0]) * dist_noise + faces_good_side_penalty * (1.0 - dists[:, 2])
pt = reachable_points_touching_object[int(np.argmin(joint_dist))]
res = env.get_fancy_shortest_path(start_location, pt)
if res is None:
raise ValueError("Shortest path failed")
return res
def objects_equal(obj1, obj2):
def _bbox_equal(b1, b2, tolerance=4e-2):
b1_np = np.array(b1['cornerPoints'])
b2_np = np.array(b2['cornerPoints'])
return np.abs(b1_np - b2_np).max() < tolerance
def _xyz_equal(b1, b2, tolerance=4e-2, mod360=False):
p1_np = np.array([b1[k2] for k2 in 'xyz'])
p2_np = np.array([b2[k2] for k2 in 'xyz'])
dist = np.abs(p1_np - p2_np)
if mod360:
            dist = np.minimum(dist, 360.0 - dist)
import numpy as np
from tqdm import tqdm
def jitter(x, sigma=0.03):
# https://arxiv.org/pdf/1706.00527.pdf
return x + np.random.normal(loc=0., scale=sigma, size=x.shape)
def scaling(x, sigma=0.1):
# https://arxiv.org/pdf/1706.00527.pdf
factor = np.random.normal(loc=1., scale=sigma, size=(x.shape[0],x.shape[2]))
return np.multiply(x, factor[:,np.newaxis,:])
def rotation(x):
flip = np.random.choice([-1, 1], size=(x.shape[0],x.shape[2]))
rotate_axis = np.arange(x.shape[2])
np.random.shuffle(rotate_axis)
return flip[:,np.newaxis,:] * x[:,:,rotate_axis]
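# Illustrative usage sketch (added for clarity, not part of the original file):
# the augmentations expect batches shaped (n_samples, time_steps, channels),
# preserve that shape, and can be chained. The batch below is hypothetical.
def _example_augmentations():
    batch = np.random.randn(8, 128, 3)
    augmented = rotation(scaling(jitter(batch)))
    return augmented.shape  # (8, 128, 3)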
def permutation(x, max_segments=5, seg_mode="equal"):
orig_steps = np.arange(x.shape[1])
num_segs = np.random.randint(1, max_segments, size=(x.shape[0]))
ret = np.zeros_like(x)
for i, pat in enumerate(x):
if num_segs[i] > 1:
if seg_mode == "random":
split_points = np.random.choice(x.shape[1]-2, num_segs[i]-1, replace=False)
split_points.sort()
splits = np.split(orig_steps, split_points)
else:
                splits = np.array_split(orig_steps, num_segs[i])
import time
import logging
import fire
import numpy as np
from tqdm import tqdm
from torch.utils.data import DataLoader
import models
import utils
from dataset import ImageDataset
logging.getLogger().setLevel(logging.INFO)
def run(model_name, output_dir, dataname, data_dir='./data', batch_size=16, test_run=-1):
data_path = '%s/%s' % (data_dir, dataname)
logging.info('Load data from %s' % data_path)
logging.info('Using model=%s' % model_name)
ds = ImageDataset(data_path)
model = models.get_model(model_name)
data_loader = DataLoader(ds, batch_size=batch_size)
features_list = []
count = 0
iterator = tqdm(data_loader)
for batch in iterator:
output = model.forward_pass(batch.to(utils.torch_device()))
features_list.append(output.cpu().detach().numpy())
if test_run != -1 and count > test_run:
iterator.close()
break
count = count + 1
    features = np.vstack(features_list)
'''
Retrieval Network, Written by Xiao
For robot localization in a dynamic environment.
'''
import torch
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torchvision import models, ops
from lib.params import OBJ_TYPE_NUM
from Network.retrieval_network.params import BBOX_EMDEDDING_VEC_LENGTH, ROI_EMDEDDING_VEC_LENGTH, WORD_EMDEDDING_VEC_LENGTH, OBJ_FEATURE_VEC_LENGTH
from Network.retrieval_network.params import SG_ENCODING_VEC_LENGTH, IMAGE_ENCODING_VEC_LENGTH, SCENE_ENCODING_VEC_LENGTH
from Network.retrieval_network.params import CHECKPOINTS_DIR, IMAGE_SIZE, GCN_TIER, DROPOUT_RATE
import math
import numpy as np
# ------------------------------------------------------------------------------
# -----------------------------retrieval_network--------------------------------
# ------------------------------------------------------------------------------
class RetrievalTriplet(torch.nn.Module):
def __init__(self, GCN_dropout_rate=DROPOUT_RATE, GCN_layers=GCN_TIER, GCN_bias=True, self_pretrained_image=False, pretrainedXXXNet=False):
super(RetrievalTriplet, self).__init__()
self.ModelName = 'RetrievalTriplet'
'''
Default: load pre-trained (on self generated triplets dataset) parameters for image branch,
then, freeze the image branch (both backbone and head) which means we will rely on pretrained
visuial features (RoI feature and entire image feature). By this, the backward gradient flow
from the sg branch through the RoI align to image branch is cut-off.
'''
self.image_branch = TripletNetImage(pretrainedXXXNet=pretrainedXXXNet, XXXNetName='rnet')
if self_pretrained_image:
            self.load_self_pretrained_image(CHECKPOINTS_DIR + 'image_best_fit.pkl') # remain to be fixed, do not use
self.SG_branch = TripletNetSG(dropout_rate=GCN_dropout_rate, layer_structure=GCN_layers, bias=GCN_bias)
self.fcn = torch.nn.Sequential(
torch.nn.Linear(IMAGE_ENCODING_VEC_LENGTH + SG_ENCODING_VEC_LENGTH, 2048, bias=True),
torch.nn.ReLU(inplace=True),
torch.nn.Linear(2048, SCENE_ENCODING_VEC_LENGTH, bias=True),
torch.nn.ReLU(inplace=True)
)
    # remain to be fixed, do not use
def load_self_pretrained_image(self, checkpoints):
pretrained_model = torch.load(checkpoints)
model_dict = self.image_branch.state_dict()
pretrained_dict = {k: v for k, v in pretrained_model.items() if k in model_dict}
model_dict.update(pretrained_dict)
self.image_branch.load_state_dict(model_dict)
def forward(self, A_img, P_img, N_img, A_on, P_on, N_on,
A_in, P_in, N_in, A_prox, P_prox, N_prox,
A_bbox, P_bbox, N_bbox, A_vec, P_vec, N_vec):
anchor = self.get_embedding(A_img, A_on, A_in, A_prox, A_bbox, A_vec)
positive = self.get_embedding(P_img, P_on, P_in, P_prox, P_bbox, P_vec)
negative = self.get_embedding(N_img, N_on, N_in, N_prox, N_bbox, N_vec)
return anchor, positive, negative
def get_embedding(self, img, R_on, R_in, R_prox, bbox, obj_vec):
img_embedding, X = self.image_branch.get_embedding(img, bbox, obj_vec)
sg_embedding = self.SG_branch.get_embedding(R_on, R_in, R_prox, X)
        concatenated = torch.cat((img_embedding, sg_embedding), dim=1)
        embedding = self.fcn(concatenated)
return embedding
# ------------------------------------------------------------------------------
# --------------------------------RoI Bridge------------------------------------
# ------------------------------------------------------------------------------
# Import vector embeding related parameters
from lib.params import idx_2_obj_list, THOR_2_VEC
# size of OBJ_TYPE_NUM * WORD_EMDEDDING_VEC_LENGTH = (256,300)
def get_glove_matrix():
features = []
# Normalize feature vectors to unit vector
for obj_name in idx_2_obj_list:
features.append(np.true_divide(THOR_2_VEC[obj_name], np.linalg.norm(THOR_2_VEC[obj_name])))
return torch.FloatTensor(np.asarray(features))
def denseTensor_to_SparseTensor(denseTensor):
indices = torch.nonzero(denseTensor, as_tuple=False).t()
if indices.shape[0] == 3:
values = denseTensor[indices[0], indices[1], indices[2]] # modify this based on dimensionality
elif indices.shape[0] == 2:
values = denseTensor[indices[0], indices[1]]
elif indices.shape[0] == 1:
values = denseTensor[indices[0]]
else:
print("Tensor dim should be smaller than 4")
sparseTensor = torch.sparse.FloatTensor(indices, values, denseTensor.size())
return sparseTensor
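# Illustrative usage sketch (added for clarity, not part of the original file):
# converting a dense tensor to sparse and back should reproduce the original values.
def _example_dense_to_sparse_roundtrip():
    dense = torch.eye(3)
    sparse = denseTensor_to_SparseTensor(dense)
    return torch.equal(sparse.to_dense(), dense)  # True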
# Code from https://www.tensorflow.org/tutorials/text/transformer
def get_angles(pos, i, d_model):
angle_rates = 1 / np.power(5000, (2 * (i//2)) / np.float32(d_model))
return pos * angle_rates
def positional_encoding(position=IMAGE_SIZE+1, d_model=int(BBOX_EMDEDDING_VEC_LENGTH/4)):
angle_rads = get_angles(np.arange(position)[:, np.newaxis],
np.arange(d_model)[np.newaxis, :],
d_model)
# apply sin to even indices in the array; 2i
angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
# apply cos to odd indices in the array; 2i+1
    angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 27 11:01:48 2019
@author: cvaneck
This routine will determine the RMSF and related parameters,
giving the following input information. One of:
a file with channel frequencies and weights
OR
a file with channel frequencies (assumes equal weights)
OR
Input values for minimum frequency, maximum frequency, and channel width.
(assumes equal weights and all channels present)
The outputs are a list of relevant RMSF properties, and a plot of the RMSF
shape.
"""
#import sys
import argparse
import numpy as np
from RMutils.util_RM import get_rmsf_planes
from matplotlib import pyplot as plt
C = 2.997924538e8 # Speed of light [m/s]
def main():
"""
Determines what set of input parameters were defined, reads in file or
generates frequency array as appropriate, and passes frequency and weight
arrays to the function that works out the RMSF properties.
"""
descStr = """
Calculate and plot RMSF and report main properties, given a supplied
frequency coverage and optional weights (either as second column of
frequency file, or as separate file)."""
parser = argparse.ArgumentParser(description=descStr,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("freqFile", metavar="freqFile.dat", nargs='?',default=None,
help="ASCII file containing frequencies and optionally weights.")
parser.add_argument("weightFile", metavar="weightFile.dat", nargs='?',
help="Optional ASCII file containing weights.")
parser.add_argument("-f", dest=("freq_parms"),nargs=3,default=None,
help="Generate frequencies (in Hz): minfreq, maxfreq, channel_width",
)
parser.add_argument("-m", dest="phiMax_radm2", type=float, default=None,
help="absolute max Faraday depth sampled [Auto, ~10xFWHM].")
parser.add_argument("-d", dest="dphi_radm2", type=float, default=None,
help="Delta phi [Auto, ~10/FWHM].")
parser.add_argument("-s", dest="plotfile", default=None,
help="Filename to save plot to. [do not save]")
parser.add_argument("-n", dest="plotname", default=None,
help="Name of plot [\"Simulated RMSF\"]")
args = parser.parse_args()
#Check that at least one frequency input has been given:
if args.freqFile == None and args.freq_parms == None:
print("Please supply either a file with frequency values or use the -f flag.")
raise(Exception("No frequency input! Use -h flag for help on inputs."))
# if args.phiMax_radm2 != None:
# if args.phiMax_radm2
#Order of priority: frequency file takes precedence over -i flag.
# weight file takes precedence over 2nd column of frequency file.
if args.freqFile != None:
data=np.genfromtxt(args.freqFile,encoding=None,dtype=None)
if len(data.shape) == 2:
freq_array=data[:,0]
weights_array=data[:,1]
else:
freq_array=data
weights_array=np.ones_like(freq_array)
else:
#Generate frequency and weight arrays from intput values.
freq_array=np.arange(float(args.freq_parms[0]),float(args.freq_parms[1]),
float(args.freq_parms[2]))
weights_array=np.ones_like(freq_array)
if args.weightFile != None:
weights_array=np.genfromtxt(args.weightFile,encoding=None,dtype=None)
if len(weights_array) != len(freq_array):
raise Exception('Weights file does not have same number of channels as frequency source')
determine_RMSF_parameters(freq_array,weights_array,args.phiMax_radm2,args.dphi_radm2,args.plotfile,args.plotname)
def determine_RMSF_parameters(freq_array,weights_array,phi_max,dphi,plotfile=None,plotname=None):
"""
Characterizes an RMSF given the supplied frequency and weight arrays.
Prints the results to terminal and produces a plot.
Inputs:
freq_array: array of frequency values (in Hz)
weights_array: array of channel weights (arbitrary units)
phi_max (float): maximum Faraday depth to compute RMSF out to.
dphi (float): step size in Faraday depth
plotfile (str): file name and path to save RMSF plot.
plotname (str): title of plot
"""
lambda2_array=C**2/freq_array**2
l2_min=np.min(lambda2_array)
l2_max=np.max(lambda2_array)
dl2=np.median(np.abs(np.diff(lambda2_array)))
if phi_max == None:
phi_max = 10*2*np.sqrt(3.0) / (l2_max-l2_min) #~10*FWHM
if dphi == None:
        dphi = 0.1*2*np.sqrt(3.0) / (l2_max-l2_min) #~FWHM/10
phi_array=np.arange(-1*phi_max/2,phi_max/2+1e-6,dphi) #division by two accounts for how RMSF is always twice as wide as FDF.
RMSFcube, phi2Arr, fwhmRMSFArr, statArr=get_rmsf_planes(lambda2_array,phi_array,weightArr=weights_array,fitRMSF=True)
#Output key results to terminal:
print('RMSF PROPERTIES:')
print('Theoretical (unweighted) FWHM: {:.4g} rad m^-2'.format(3.8 / (l2_max-l2_min)))
print('Measured FWHM: {:.4g} rad m^-2'.format(fwhmRMSFArr))
print('Theoretical largest FD scale probed: {:.4g} rad m^-2'.format(np.pi/l2_min))
print('Theoretical maximum FD*: {:.4g} rad m^-2'.format(np.sqrt(3.0)/dl2))
    print('*50% bandwidth depolarization threshold, for median channel width in Delta-lambda^2')
print('* may not be reliable over very large fractional bandwidths or in data with ')
print('differing channel widths or many frequency gaps.')
    #Explanation for below: This code finds the local maxima in the positive half of the RMSF,
    #takes the highest-amplitude one, and calls that the first sidelobe.
try:
x=np.diff(np.sign(np.diff(np.abs(RMSFcube[RMSFcube.size//2:])))) #-2=local max, +2=local min
        y = 1 + np.where(x == -2)
# pylint: disable=no-self-use
import unittest
import numpy as np
from src.fingerflow.extractor import utils
class PreprocessImageDataTest(unittest.TestCase):
def test_correct_image_data(self):
mock_input_data = np.zeros((100, 100, 3), dtype=np.uint8)
        mock_input_data_size = np.array(mock_input_data.shape, dtype=np.int32)
import numpy as np
import matplotlib
matplotlib.use('agg')
from matplotlib import pyplot as plt
from matplotlib import dates as md
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
from sklearn.preprocessing import normalize, scale
import sklearn.metrics as metrics
import pickle
import stat_tools as st
import configparser
import os, subprocess, sys
from datetime import datetime, timezone, timedelta
from ast import literal_eval as le
import pytz
def localToUTCtimestamp(t, local_tz):
t_local = local_tz.localize(t, is_dst=None)
t_utc = t_local.astimezone(pytz.utc)
return t_utc.timestamp()
def UTCtimestampTolocal(ts, local_tz):
t_utc = datetime.fromtimestamp(ts,tz=pytz.timezone("UTC"))
t_local = t_utc.astimezone(local_tz)
return t_local
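# Illustrative usage sketch (added for clarity, not part of the original script):
# a naive local datetime converted to a UTC timestamp and back should round-trip.
# The timezone below is a hypothetical example, not a value from the config.
def _example_timezone_roundtrip():
    tz = pytz.timezone("America/New_York")
    t_local = datetime(2020, 6, 1, 12, 0, 0)
    ts = localToUTCtimestamp(t_local, tz)
    return UTCtimestampTolocal(ts, tz)  # 2020-06-01 12:00 in New York time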
try:
try:
config_path = sys.argv[1]
except Exception:
config_path = "./config.conf"
cp = configparser.ConfigParser()
cp.read(config_path)
inpath=le(cp["paths"]["feature_path"])
GHI_path=le(cp["paths"]["GHI_path"])
forecast_path=le(cp["paths"]["forecast_path"])
lead_minutes=le(cp["forecast"]["lead_minutes"])
days=le(cp["forecast"]["days"])
#lead_minutes=[1,3,5,10,15,30,45];
#sensors = np.arange(99,100)
try:
sensors = le(cp["forecast"]["sensors"])
except Exception:
GHI_Coor = le(cp["GHI_sensors"]["GHI_Coor"]) #if sensor list isn't provided, forecast for all GHI points
sensors = range(0,len(GHI_Coor))
try:
forecast_timezone=pytz.timezone(cp["forecast"]["forecast_timezone"])
print("Using camera timezone: %s" % str(forecast_timezone))
except Exception:
forecast_timezone=pytz.timezone("utc")
print("Error processsing forecast timezone config, assuming UTC")
except KeyError as e:
print("Error loading config: %s" % e)
if not os.path.isdir(forecast_path):
try:
os.mkdir(forecast_path[:-1])
except:
print('Cannot create directory,', forecast_path[:-1])
plt.ioff() #Turn off interactive plotting for running automatically
for day in days:
MAE, MSE = [], []
MAE2, MSE2 = [], []
print("Predicting for " + day)
if not os.path.isdir(forecast_path+day[:8]):
try:
subprocess.call(['mkdir', forecast_path+day[:8]])
except:
print('Cannot create directory,',forecast_path+day[:8])
continue
if not os.path.isdir(forecast_path+day[:8] + "/plots"):
try:
os.mkdir(forecast_path+day[:8] + "/plots")
except:
print('Cannot create directory,', forecast_path+day[:8] + "/plots")
for forward in lead_minutes:
timestamp, DataX, DataY = {},{},{}
MAE_period, MSE_period = [], []
MAE2_period, MSE2_period = [], []
for sensor in sensors:
timestamp[sensor] = []
DataX[sensor] = []
DataY[sensor] = []
try:
x = np.genfromtxt(inpath+day[:8]+'/GHI'+str(sensor)+'.csv',delimiter=',',skip_header=1); # < ORIGINAL
#x = np.genfromtxt(inpath+'/GHI'+str(sensor)+'.csv',delimiter=','); # Temp change to allow running of old data in dhuang3
x = x[x[:,0]==forward]; #Take all rows where forecast period == forward
#if sensor == 26: # Temp added for 2018-09-22 test with location 99
# with np.load(GHI_path+day[:6]+'/GHI_'+str(99)+'.npz') as data: #
# ty, y = data['timestamp'], data['ghi'] #
#else: #
with np.load(GHI_path+day[:6]+'/GHI_'+str(sensor)+'.npz') as data: # < ORIGINAL
ty, y = data['timestamp'], data['ghi'] # < ORIGINAL
#ty -= 3600 #Add an hour (testing only!)
x = x[x[:,1]<=ty[-1]] #Take all "feature" elements where timestamp is less than last GHI timestamp
tx=x[:,1].copy(); #Create copy of feature timestamps
itx = ((tx-ty[0]+30)//60).astype(int) #Create array of relative time based on first GHI timestamp, add 30 secs, floor to minutes, convert to int
print("len(x): %i\tlen y: %i\n" % (len(tx), len(ty)))
try:
print("tx: %i\ty: %i\titx: %i\n" % (tx[0],ty[0],itx[0]))
except IndexError:
pass
x[:,1] = (y[itx]) #Select x values corresponding to times in itx
DataX[sensor] += [x[:,1:]] #Append timestamp and x values to DataX (does NOT copy forecast period "forward" column)
DataY[sensor] += [(y[itx + forward])] #Get future actual GHI
timestamp[sensor] += [tx];
DataX[sensor] = np.vstack(DataX[sensor]) #stack time series for all GHI locations vertically
DataY[sensor] = np.hstack(DataY[sensor]) #stack time series for persistence horizontally
timestamp[sensor] = np.hstack(timestamp[sensor]) #stack timestamps horizontally
#print( DataX[sensor] )
#print(DataY[sensor], DataX[sensor][:,0])
#try:
mk = (DataY[sensor] > 0) & (DataX[sensor][:,0] > 0) #create boolean list where persistence value and timestamp are both >0
DataX[sensor] = DataX[sensor][mk] #take subset selected above
# cf2 column (was 15 but we dropped the leadminutes column already)
cld_frac = np.copy(DataX[sensor][:,14])
DataX[sensor][:,0]/=400; #scale GHI by 400? (note: data in *.npz is already scaled?)
DataX[sensor][:,1:-1] = scale(DataX[sensor][:,1:-1]); #normalize other x values
except ValueError as e:
print("Skipping sensor %i, %s" % (sensor, str(e)))
continue #This will get thrown if there's no GHI data and DataY is filled with NaNs
except IndexError as e:
print("Skipping sensor %i, %s" % (sensor, str(e)))
continue
except FileNotFoundError as e:
print("Skipping sensor %i, %s" % (sensor, str(e)))
continue
# DataX[:,1:] = normalize(DataX[:,1:],axis=0);
DataY[sensor] = DataY[sensor][mk] #take subset to match x values
timestamp[sensor] = timestamp[sensor][mk] #take subset to match x values
print("%i minute forecast, location %i" % (forward, sensor))
print("\t",DataX[sensor].shape,DataY[sensor].shape)
            print('\tMean GHI:', np.nanmean(DataY[sensor]))
# !/usr/bin/env python
"""Define the unit tests for the :mod:`colour.utilities.array` module."""
import numpy as np
import unittest
from dataclasses import dataclass, field, fields
from copy import deepcopy
from colour.constants import DEFAULT_FLOAT_DTYPE, DEFAULT_INT_DTYPE
from colour.hints import NDArray, Optional, Type, Union
from colour.utilities import (
MixinDataclassFields,
MixinDataclassIterable,
MixinDataclassArray,
MixinDataclassArithmetic,
as_array,
as_int,
as_float,
as_int_array,
as_float_array,
as_int_scalar,
as_float_scalar,
set_default_int_dtype,
set_default_float_dtype,
get_domain_range_scale,
set_domain_range_scale,
domain_range_scale,
to_domain_1,
to_domain_10,
to_domain_100,
to_domain_int,
to_domain_degrees,
from_range_1,
from_range_10,
from_range_100,
from_range_int,
from_range_degrees,
closest_indexes,
closest,
interval,
is_uniform,
in_array,
tstack,
tsplit,
row_as_diagonal,
orient,
centroid,
fill_nan,
has_only_nan,
ndarray_write,
zeros,
ones,
full,
index_along_last_axis,
)
from colour.utilities import is_networkx_installed
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "<EMAIL>"
__status__ = "Production"
__all__ = [
"TestMixinDataclassFields",
"TestMixinDataclassIterable",
"TestMixinDataclassArray",
"TestMixinDataclassArithmetic",
"TestAsArray",
"TestAsInt",
"TestAsFloat",
"TestAsIntArray",
"TestAsFloatArray",
"TestAsIntScalar",
"TestAsFloatScalar",
"TestSetDefaultIntegerDtype",
"TestSetDefaultFloatDtype",
"TestGetDomainRangeScale",
"TestSetDomainRangeScale",
"TestDomainRangeScale",
"TestToDomain1",
"TestToDomain10",
"TestToDomain100",
"TestToDomainDegrees",
"TestToDomainInt",
"TestFromRange1",
"TestFromRange10",
"TestFromRange100",
"TestFromRangeDegrees",
"TestFromRangeInt",
"TestClosestIndexes",
"TestClosest",
"TestInterval",
"TestIsUniform",
"TestInArray",
"TestTstack",
"TestTsplit",
"TestRowAsDiagonal",
"TestOrient",
"TestCentroid",
"TestFillNan",
"TestHasNanOnly",
"TestNdarrayWrite",
"TestZeros",
"TestOnes",
"TestFull",
"TestIndexAlongLastAxis",
]
class TestMixinDataclassFields(unittest.TestCase):
"""
Define :class:`colour.utilities.array.MixinDataclassFields` class unit
tests methods.
"""
def setUp(self):
"""Initialise the common tests attributes."""
@dataclass
class Data(MixinDataclassFields):
a: str
b: str
c: str
self._data: Data = Data(a="Foo", b="Bar", c="Baz")
def test_required_attributes(self):
"""Test the presence of required attributes."""
required_attributes = ("fields",)
for method in required_attributes:
self.assertIn(method, dir(MixinDataclassFields))
def test_fields(self):
"""
        Test :meth:`colour.utilities.array.MixinDataclassFields.fields`
method.
"""
self.assertTupleEqual(
self._data.fields,
fields(self._data),
)
class TestMixinDataclassIterable(unittest.TestCase):
"""
Define :class:`colour.utilities.array.MixinDataclassIterable` class unit
tests methods.
"""
def setUp(self):
"""Initialise the common tests attributes."""
@dataclass
class Data(MixinDataclassIterable):
a: str
b: str
c: str
self._data: Data = Data(a="Foo", b="Bar", c="Baz")
def test_required_attributes(self):
"""Test the presence of required attributes."""
required_attributes = (
"keys",
"values",
"items",
)
for method in required_attributes:
self.assertIn(method, dir(MixinDataclassIterable))
def test_required_methods(self):
"""Test the presence of required methods."""
required_methods = ("__iter__",)
for method in required_methods:
self.assertIn(method, dir(MixinDataclassIterable))
def test__iter__(self):
"""
Test :meth:`colour.utilities.array.MixinDataclassIterable.__iter__`
method.
"""
self.assertDictEqual(
{key: value for key, value in self._data},
{"a": "Foo", "b": "Bar", "c": "Baz"},
)
def test_keys(self):
"""
Test :meth:`colour.utilities.array.MixinDataclassIterable.keys`
method.
"""
self.assertTupleEqual(
tuple(self._data.keys),
("a", "b", "c"),
)
def test_values(self):
"""
Test :meth:`colour.utilities.array.MixinDataclassIterable.values`
method.
"""
self.assertTupleEqual(
tuple(self._data.values),
("Foo", "Bar", "Baz"),
)
def test_items(self):
"""
Test :meth:`colour.utilities.array.MixinDataclassIterable.items`
method.
"""
self.assertTupleEqual(
tuple(self._data.items),
(("a", "Foo"), ("b", "Bar"), ("c", "Baz")),
)
class TestMixinDataclassArray(unittest.TestCase):
"""
Define :class:`colour.utilities.array.MixinDataclassArray` class unit
tests methods.
"""
def setUp(self):
"""Initialise the common tests attributes."""
@dataclass
class Data(MixinDataclassArray):
a: Optional[Union[float, list, tuple, np.ndarray]] = field(
default_factory=lambda: None
)
b: Optional[Union[float, list, tuple, np.ndarray]] = field(
default_factory=lambda: None
)
c: Optional[Union[float, list, tuple, np.ndarray]] = field(
default_factory=lambda: None
)
self._data: Data = Data(
b=np.array([0.1, 0.2, 0.3]), c=np.array([0.4, 0.5, 0.6])
)
self._array: NDArray = np.array(
[
[np.nan, 0.1, 0.4],
[np.nan, 0.2, 0.5],
[np.nan, 0.3, 0.6],
]
)
def test_required_methods(self):
"""Test the presence of required methods."""
required_methods = ("__array__",)
for method in required_methods:
self.assertIn(method, dir(MixinDataclassArray))
def test__array__(self):
"""
Test :meth:`colour.utilities.array.MixinDataclassArray.__array__`
method.
"""
np.testing.assert_array_equal(np.array(self._data), self._array)
self.assertEqual(
np.array(self._data, dtype=DEFAULT_INT_DTYPE).dtype,
DEFAULT_INT_DTYPE,
)
class TestMixinDataclassArithmetic(unittest.TestCase):
"""
Define :class:`colour.utilities.array.MixinDataclassArithmetic` class unit
tests methods.
"""
def setUp(self):
"""Initialise the common tests attributes."""
@dataclass
class Data(MixinDataclassArithmetic):
a: Optional[Union[float, list, tuple, np.ndarray]] = field(
default_factory=lambda: None
)
b: Optional[Union[float, list, tuple, np.ndarray]] = field(
default_factory=lambda: None
)
c: Optional[Union[float, list, tuple, np.ndarray]] = field(
default_factory=lambda: None
)
self._factory: Type[Data] = Data
self._data: Data = Data(
b=np.array([0.1, 0.2, 0.3]), c=np.array([0.4, 0.5, 0.6])
)
self._array: NDArray = np.array(
[
[np.nan, 0.1, 0.4],
[np.nan, 0.2, 0.5],
[np.nan, 0.3, 0.6],
]
)
def test_required_methods(self):
"""Test the presence of required methods."""
required_methods = (
"__iadd__",
"__add__",
"__isub__",
"__sub__",
"__imul__",
"__mul__",
"__idiv__",
"__div__",
"__ipow__",
"__pow__",
"arithmetical_operation",
)
for method in required_methods:
self.assertIn(method, dir(MixinDataclassArithmetic))
def test_arithmetical_operation(self):
"""
Test :meth:`colour.utilities.array.MixinDataclassArithmetic.\
arithmetical_operation` method.
"""
np.testing.assert_almost_equal(
np.array(self._data.arithmetical_operation(10, "+", False)),
self._array + 10,
decimal=7,
)
np.testing.assert_almost_equal(
np.array(self._data.arithmetical_operation(10, "-", False)),
self._array - 10,
decimal=7,
)
np.testing.assert_almost_equal(
np.array(self._data.arithmetical_operation(10, "*", False)),
self._array * 10,
decimal=7,
)
np.testing.assert_almost_equal(
np.array(self._data.arithmetical_operation(10, "/", False)),
self._array / 10,
decimal=7,
)
np.testing.assert_almost_equal(
np.array(self._data.arithmetical_operation(10, "**", False)),
self._array**10,
decimal=7,
)
np.testing.assert_almost_equal(
np.array(self._data + 10), self._array + 10, decimal=7
)
np.testing.assert_almost_equal(
np.array(self._data - 10), self._array - 10, decimal=7
)
np.testing.assert_almost_equal(
np.array(self._data * 10), self._array * 10, decimal=7
)
np.testing.assert_almost_equal(
np.array(self._data / 10), self._array / 10, decimal=7
)
np.testing.assert_almost_equal(
np.array(self._data**10), self._array**10, decimal=7
)
data = deepcopy(self._data)
np.testing.assert_almost_equal(
np.array(data.arithmetical_operation(10, "+", True)),
self._array + 10,
decimal=7,
)
np.testing.assert_almost_equal(
np.array(data.arithmetical_operation(10, "-", True)),
self._array,
decimal=7,
)
np.testing.assert_almost_equal(
np.array(data.arithmetical_operation(10, "*", True)),
self._array * 10,
decimal=7,
)
np.testing.assert_almost_equal(
np.array(data.arithmetical_operation(10, "/", True)),
self._array,
decimal=7,
)
np.testing.assert_almost_equal(
np.array(data.arithmetical_operation(10, "**", True)),
self._array**10,
decimal=7,
)
data = deepcopy(self._data)
np.testing.assert_almost_equal(
np.array(data.arithmetical_operation(self._array, "+", False)),
data + self._array,
decimal=7,
)
np.testing.assert_almost_equal(
np.array(data.arithmetical_operation(data, "+", False)),
data + data,
decimal=7,
)
data = self._factory(1, 2, 3)
data += 1
self.assertEqual(data.a, 2)
data -= 1
self.assertEqual(data.a, 1)
data *= 2
self.assertEqual(data.a, 2)
data /= 2
self.assertEqual(data.a, 1)
data **= 0.5
self.assertEqual(data.a, 1)
class TestAsArray(unittest.TestCase):
"""
Define :func:`colour.utilities.array.as_array` definition unit tests
methods.
"""
def test_as_array(self):
"""Test :func:`colour.utilities.array.as_array` definition."""
np.testing.assert_equal(as_array([1, 2, 3]), np.array([1, 2, 3]))
self.assertEqual(
as_array([1, 2, 3], DEFAULT_FLOAT_DTYPE).dtype, DEFAULT_FLOAT_DTYPE
)
self.assertEqual(
as_array([1, 2, 3], DEFAULT_INT_DTYPE).dtype, DEFAULT_INT_DTYPE
)
np.testing.assert_equal(
as_array(dict(zip("abc", [1, 2, 3])).values()), np.array([1, 2, 3])
)
class TestAsInt(unittest.TestCase):
"""
Define :func:`colour.utilities.array.as_int` definition unit tests
methods.
"""
def test_as_int(self):
"""Test :func:`colour.utilities.array.as_int` definition."""
self.assertEqual(as_int(1), 1)
self.assertEqual(as_int(np.array([1])), 1)
np.testing.assert_almost_equal(
as_int(np.array([1.0, 2.0, 3.0])), np.array([1, 2, 3])
)
self.assertEqual(
as_int(np.array([1.0, 2.0, 3.0])).dtype, DEFAULT_INT_DTYPE
)
self.assertIsInstance(as_int(1), DEFAULT_INT_DTYPE)
class TestAsFloat(unittest.TestCase):
"""
Define :func:`colour.utilities.array.as_float` definition unit tests
methods.
"""
def test_as_float(self):
"""Test :func:`colour.utilities.array.as_float` definition."""
self.assertEqual(as_float(1), 1.0)
self.assertEqual(as_float(np.array([1])), 1.0)
np.testing.assert_almost_equal(
            as_float(np.array([1, 2, 3])), np.array([1.0, 2.0, 3.0])
        )
# -*- coding: utf-8 -*-1
"""
2014, LAAS/CNRS
@author: <NAME>
"""
from __future__ import print_function
from dynamic_graph import plug
import numpy as np
from dynamic_graph.sot.core.latch import Latch
from dynamic_graph.sot.core.operator import Selec_of_vector, Mix_of_vector
from dynamic_graph.sot.torque_control.numerical_difference import NumericalDifference
from dynamic_graph.sot.torque_control.joint_torque_controller import JointTorqueController
from dynamic_graph.sot.torque_control.joint_trajectory_generator import JointTrajectoryGenerator
from sot_talos_balance.nd_trajectory_generator import NdTrajectoryGenerator
from dynamic_graph.sot.torque_control.se3_trajectory_generator import SE3TrajectoryGenerator
from dynamic_graph.sot.torque_control.control_manager import ControlManager
from dynamic_graph.sot.torque_control.current_controller import CurrentController
from sot_talos_balance.simple_admittance_controller import SimpleAdmittanceController as AdmittanceController
from dynamic_graph.sot.torque_control.position_controller import PositionController
from dynamic_graph.tracer_real_time import TracerRealTime
from dynamic_graph.sot.torque_control.talos.motors_parameters import NJ
from dynamic_graph.sot.torque_control.talos.motors_parameters import *
from dynamic_graph.sot.torque_control.talos.sot_utils_talos import Bunch
from dynamic_graph.sot.torque_control.utils.filter_utils import create_butter_lp_filter_Wn_05_N_3
#from dynamic_graph.sot.torque_control.talos.joint_pos_ctrl_gains import *
def get_default_conf():
import dynamic_graph.sot.torque_control.talos.balance_ctrl_conf as balance_ctrl_conf
import dynamic_graph.sot.torque_control.talos.base_estimator_conf as base_estimator_conf
import dynamic_graph.sot.torque_control.talos.control_manager_conf as control_manager_conf
import dynamic_graph.sot.torque_control.talos.current_controller_conf as current_controller_conf
import dynamic_graph.sot.torque_control.talos.force_torque_estimator_conf as force_torque_estimator_conf
import dynamic_graph.sot.torque_control.talos.joint_torque_controller_conf as joint_torque_controller_conf
import dynamic_graph.sot.torque_control.talos.joint_pos_ctrl_gains as pos_ctrl_gains
import dynamic_graph.sot.torque_control.talos.motors_parameters as motor_params
import dynamic_graph.sot.torque_control.talos.ddp_controller_conf as ddp_controller_conf
conf = Bunch()
conf.balance_ctrl = balance_ctrl_conf
conf.base_estimator = base_estimator_conf
conf.control_manager = control_manager_conf
conf.current_ctrl = current_controller_conf
conf.force_torque_estimator = force_torque_estimator_conf
conf.joint_torque_controller = joint_torque_controller_conf
conf.pos_ctrl_gains = pos_ctrl_gains
conf.motor_params = motor_params
conf.ddp_controller = ddp_controller_conf
return conf
def get_sim_conf():
import dynamic_graph.sot.torque_control.talos.balance_ctrl_sim_conf as balance_ctrl_conf
import dynamic_graph.sot.torque_control.talos.base_estimator_sim_conf as base_estimator_conf
import dynamic_graph.sot.torque_control.talos.control_manager_sim_conf as control_manager_conf
import dynamic_graph.sot.torque_control.talos.current_controller_sim_conf as current_controller_conf
import dynamic_graph.sot.torque_control.talos.force_torque_estimator_conf as force_torque_estimator_conf
import dynamic_graph.sot.torque_control.talos.joint_torque_controller_conf as joint_torque_controller_conf
import dynamic_graph.sot.torque_control.talos.joint_pos_ctrl_gains_sim as pos_ctrl_gains
import dynamic_graph.sot.torque_control.talos.motors_parameters as motor_params
import dynamic_graph.sot.torque_control.talos.ddp_controller_conf as ddp_controller_conf
conf = Bunch()
conf.balance_ctrl = balance_ctrl_conf
conf.base_estimator = base_estimator_conf
conf.control_manager = control_manager_conf
conf.current_ctrl = current_controller_conf
conf.force_torque_estimator = force_torque_estimator_conf
conf.joint_torque_controller = joint_torque_controller_conf
conf.pos_ctrl_gains = pos_ctrl_gains
conf.motor_params = motor_params
conf.ddp_controller = ddp_controller_conf
return conf
def create_encoders(robot):
encoders = Selec_of_vector('qn')
plug(robot.device.robotState, encoders.sin);
encoders.selec(6,NJ+6);
return encoders
def create_encoders_velocity(robot):
encoders = Selec_of_vector('dqn')
plug(robot.device.robotVelocity, encoders.sin);
encoders.selec(6,NJ+6);
return encoders
def create_joint_pos_selector(robot, conf):
encoders = Selec_of_vector('selecDdpJointPos')
plug(robot.device.robotState, encoders.sin);
encoders.selec(conf.controlled_joint+6, conf.controlled_joint+7);
return encoders
def create_joint_vel_selector(robot, conf):
encoders = Selec_of_vector('selecDdpJointVel')
plug(robot.device.robotVelocity, encoders.sin);
encoders.selec(conf.controlled_joint+6, conf.controlled_joint+7);
return encoders
def create_joint_torque_selector(robot, conf):
encoders = Selec_of_vector('selecDdpJointTorque')
plug(robot.device.ptorque, encoders.sin);
encoders.selec(conf.controlled_joint, conf.controlled_joint+1);
return encoders
def create_pos_des_selector(robot, conf):
encoders = Selec_of_vector('selecDdpJointPosDes')
plug(robot.traj_gen.q, encoders.sin);
encoders.selec(conf.controlled_joint, conf.controlled_joint+1);
return encoders
def create_motor_pos_selector(robot, conf):
encoders = Selec_of_vector('selecDdpMotorPos')
plug(robot.device.motor_angles, encoders.sin);
encoders.selec(conf.controlled_joint, conf.controlled_joint+1);
return encoders
def create_tau_des_selector(robot, conf):
encoders = Selec_of_vector('selecDdpTauDes')
plug(robot.inv_dyn.tau_des, encoders.sin);
encoders.selec(conf.controlled_joint, conf.controlled_joint+1);
return encoders
def create_torque_des_selector(robot, conf):
encoders = Selec_of_vector('selecDdpTorqueDes')
plug(robot.torque_ctrl.u, encoders.sin);
encoders.selec(0, 31);
return encoders
def create_torque_des_selector2(robot, conf):
encoders = Selec_of_vector('selecDdpTorqueDes2')
plug(robot.torque_ctrl.u, encoders.sin);
encoders.selec(31, 32);
return encoders
def create_signal_mixer(robot, conf):
signal_mixer = Mix_of_vector('mix');
signal_mixer.setSignalNumber(2);
plug(robot.torque_des_selec_ddp.sout, signal_mixer.default);
#plug(robot.inv_dyn.tau_des, signal_mixer.default);
plug(robot.ddp_ctrl.tau, signal_mixer.sin1);
#plug(robot.torque_des_selec_ddp2.sout, signal_mixer.sin1);
#plug(robot.inv_dyn.tau_des, signal_mixer.sin1);
#signal_mixer.addSelec(1, 1, 31);
signal_mixer.addSelec(1, 0, 1);
#signal_mixer.addSelec(1, conf.controlled_joint+1, conf.NJ-conf.controlled_joint);
#plug(signal_mixer.sout, robot.torque_ctrl.jointsTorquesDesired);
return signal_mixer
def create_base_estimator(robot, dt, conf, robot_name="robot"):
from dynamic_graph.sot.torque_control.base_estimator import BaseEstimator
base_estimator = BaseEstimator('base_estimator');
plug(robot.encoders.sout, base_estimator.joint_positions);
#plug(robot.device.forceRLEG, base_estimator.forceRLEG);
#plug(robot.device.forceLLEG, base_estimator.forceLLEG);
plug(robot.filters.ft_LF_filter.x_filtered, base_estimator.forceLLEG)
plug(robot.filters.ft_RF_filter.x_filtered, base_estimator.forceRLEG)
plug(robot.filters.ft_LF_filter.dx, base_estimator.dforceLLEG)
plug(robot.filters.ft_RF_filter.dx, base_estimator.dforceRLEG)
plug(robot.filters.estimator_kin.dx, base_estimator.joint_velocities);
plug(robot.imu_filter.imu_quat, base_estimator.imu_quaternion);
#plug(robot.imu_offset_compensation.accelerometer_out, base_estimator.accelerometer);
#plug(robot.imu_offset_compensation.gyrometer_out, base_estimator.gyroscope);
plug(robot.filters.gyro_filter.x_filtered, base_estimator.gyroscope);
plug(robot.filters.acc_filter.x_filtered, base_estimator.accelerometer);
base_estimator.K_fb_feet_poses.value = conf.K_fb_feet_poses;
try:
base_estimator.w_lf_in.value = conf.w_lf_in;
base_estimator.w_rf_in.value = conf.w_rf_in;
except:
pass;
base_estimator.set_imu_weight(conf.w_imu);
base_estimator.set_stiffness_right_foot(conf.K);
base_estimator.set_stiffness_left_foot(conf.K);
base_estimator.set_zmp_std_dev_right_foot(conf.std_dev_zmp)
base_estimator.set_zmp_std_dev_left_foot(conf.std_dev_zmp)
base_estimator.set_normal_force_std_dev_right_foot(conf.std_dev_fz)
base_estimator.set_normal_force_std_dev_left_foot(conf.std_dev_fz)
base_estimator.set_zmp_margin_right_foot(conf.zmp_margin)
base_estimator.set_zmp_margin_left_foot(conf.zmp_margin)
base_estimator.set_normal_force_margin_right_foot(conf.normal_force_margin)
base_estimator.set_normal_force_margin_left_foot(conf.normal_force_margin)
base_estimator.set_right_foot_sizes(conf.RIGHT_FOOT_SIZES)
base_estimator.set_left_foot_sizes(conf.LEFT_FOOT_SIZES)
base_estimator.init(dt, robot_name);
return base_estimator;
def create_imu_offset_compensation(robot, dt):
from dynamic_graph.sot.torque_control.imu_offset_compensation import ImuOffsetCompensation
imu_offset_compensation = ImuOffsetCompensation('imu_offset_comp');
plug(robot.device.accelerometer, imu_offset_compensation.accelerometer_in);
plug(robot.device.gyrometer, imu_offset_compensation.gyrometer_in);
imu_offset_compensation.init(dt);
return imu_offset_compensation;
def create_imu_filter(robot, dt):
from dynamic_graph.sot.core.madgwickahrs import MadgwickAHRS
imu_filter = MadgwickAHRS('imu_filter');
imu_filter.init(dt);
plug(robot.imu_offset_compensation.accelerometer_out, imu_filter.accelerometer);
plug(robot.imu_offset_compensation.gyrometer_out, imu_filter.gyroscope);
return imu_filter;
def create_com_traj_gen(robot, dt):
com_traj_gen = NdTrajectoryGenerator("com_traj_gen");
com_traj_gen.initial_value.value = robot.dynamic.com.value
com_traj_gen.trigger.value = 1.0
com_traj_gen.init(dt,3)
return com_traj_gen
def create_force_traj_gen(name, initial_value, dt):
force_traj_gen = NdTrajectoryGenerator(name);
force_traj_gen.initial_value.value = initial_value;
force_traj_gen.init(dt,6);
return force_traj_gen ;
def create_waist_traj_gen(name, robot, dt):
waist_traj_gen = SE3TrajectoryGenerator(name)
ref_waist = robot.dynamic.data.oMi[robot.dynamic.model.getJointId('root_joint')]
trans = ref_waist.translation
rot = ref_waist.rotation
rot = rot.reshape(9)
    initial_value = np.concatenate((trans, rot))
"""
Tests for dataset creation
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import unittest
import tempfile
import os
import shutil
import numpy as np
import deepchem as dc
class TestDatasets(unittest.TestCase):
"""
Test basic top-level API for dataset objects.
"""
def test_sparsify_and_densify(self):
"""Test that sparsify and densify work as inverses."""
# Test on identity matrix
num_samples = 10
num_features = num_samples
X = np.eye(num_samples)
X_sparse = dc.data.sparsify_features(X)
X_reconstructed = dc.data.densify_features(X_sparse, num_features)
np.testing.assert_array_equal(X, X_reconstructed)
# Generate random sparse features dataset
np.random.seed(123)
p = .05
X = np.random.binomial(1, p, size=(num_samples, num_features))
X_sparse = dc.data.sparsify_features(X)
X_reconstructed = dc.data.densify_features(X_sparse, num_features)
np.testing.assert_array_equal(X, X_reconstructed)
# Test edge case with array of all zeros
X = np.zeros((num_samples, num_features))
X_sparse = dc.data.sparsify_features(X)
X_reconstructed = dc.data.densify_features(X_sparse, num_features)
np.testing.assert_array_equal(X, X_reconstructed)
def test_pad_features(self):
"""Test that pad_features pads features correctly."""
batch_size = 100
num_features = 10
num_tasks = 5
# Test cases where n_samples < 2*n_samples < batch_size
n_samples = 29
X_b = np.zeros((n_samples, num_features))
X_out = dc.data.pad_features(batch_size, X_b)
assert len(X_out) == batch_size
# Test cases where n_samples < batch_size
n_samples = 79
X_b = np.zeros((n_samples, num_features))
X_out = dc.data.pad_features(batch_size, X_b)
assert len(X_out) == batch_size
# Test case where n_samples == batch_size
n_samples = 100
X_b = np.zeros((n_samples, num_features))
X_out = dc.data.pad_features(batch_size, X_b)
assert len(X_out) == batch_size
# Test case for object featurization.
n_samples = 2
X_b = np.array([{"a": 1}, {"b": 2}])
X_out = dc.data.pad_features(batch_size, X_b)
assert len(X_out) == batch_size
# Test case for more complicated object featurization
n_samples = 2
X_b = np.array([(1, {"a": 1}), (2, {"b": 2})])
X_out = dc.data.pad_features(batch_size, X_b)
assert len(X_out) == batch_size
# Test case with multidimensional data
n_samples = 50
num_atoms = 15
d = 3
X_b = np.zeros((n_samples, num_atoms, d))
X_out = dc.data.pad_features(batch_size, X_b)
assert len(X_out) == batch_size
def test_pad_batches(self):
"""Test that pad_batch pads batches correctly."""
batch_size = 100
num_features = 10
num_tasks = 5
# Test cases where n_samples < 2*n_samples < batch_size
n_samples = 29
X_b = np.zeros((n_samples, num_features))
y_b = np.zeros((n_samples, num_tasks))
w_b = np.zeros((n_samples, num_tasks))
ids_b = np.zeros((n_samples,))
X_out, y_out, w_out, ids_out = dc.data.pad_batch(batch_size, X_b, y_b, w_b,
ids_b)
assert len(X_out) == len(y_out) == len(w_out) == len(ids_out) == batch_size
# Test cases where n_samples < batch_size
n_samples = 79
X_b = np.zeros((n_samples, num_features))
y_b = np.zeros((n_samples, num_tasks))
w_b = np.zeros((n_samples, num_tasks))
ids_b = np.zeros((n_samples,))
X_out, y_out, w_out, ids_out = dc.data.pad_batch(batch_size, X_b, y_b, w_b,
ids_b)
assert len(X_out) == len(y_out) == len(w_out) == len(ids_out) == batch_size
# Test case where n_samples == batch_size
n_samples = 100
X_b = np.zeros((n_samples, num_features))
y_b = np.zeros((n_samples, num_tasks))
w_b = np.zeros((n_samples, num_tasks))
ids_b = np.zeros((n_samples,))
X_out, y_out, w_out, ids_out = dc.data.pad_batch(batch_size, X_b, y_b, w_b,
ids_b)
assert len(X_out) == len(y_out) == len(w_out) == len(ids_out) == batch_size
# Test case for object featurization.
n_samples = 2
X_b = np.array([{"a": 1}, {"b": 2}])
y_b = np.zeros((n_samples, num_tasks))
w_b = np.zeros((n_samples, num_tasks))
ids_b = np.zeros((n_samples,))
X_out, y_out, w_out, ids_out = dc.data.pad_batch(batch_size, X_b, y_b, w_b,
ids_b)
assert len(X_out) == len(y_out) == len(w_out) == len(ids_out) == batch_size
# Test case for more complicated object featurization
n_samples = 2
X_b = np.array([(1, {"a": 1}), (2, {"b": 2})])
y_b = np.zeros((n_samples, num_tasks))
w_b = np.zeros((n_samples, num_tasks))
ids_b = np.zeros((n_samples,))
X_out, y_out, w_out, ids_out = dc.data.pad_batch(batch_size, X_b, y_b, w_b,
ids_b)
assert len(X_out) == len(y_out) == len(w_out) == len(ids_out) == batch_size
# Test case with multidimensional data
n_samples = 50
num_atoms = 15
d = 3
X_b = np.zeros((n_samples, num_atoms, d))
y_b = np.zeros((n_samples, num_tasks))
w_b = np.zeros((n_samples, num_tasks))
ids_b = np.zeros((n_samples,))
X_out, y_out, w_out, ids_out = dc.data.pad_batch(batch_size, X_b, y_b, w_b,
ids_b)
assert len(X_out) == len(y_out) == len(w_out) == len(ids_out) == batch_size
def test_get_task_names(self):
"""Test that get_task_names returns correct task_names"""
solubility_dataset = dc.data.tests.load_solubility_data()
assert solubility_dataset.get_task_names() == ["log-solubility"]
multitask_dataset = dc.data.tests.load_multitask_data()
assert sorted(multitask_dataset.get_task_names()) == sorted([
"task0", "task1", "task2", "task3", "task4", "task5", "task6", "task7",
"task8", "task9", "task10", "task11", "task12", "task13", "task14",
"task15", "task16"
])
def test_get_data_shape(self):
"""Test that get_data_shape returns currect data shape"""
solubility_dataset = dc.data.tests.load_solubility_data()
assert solubility_dataset.get_data_shape() == (1024,)
multitask_dataset = dc.data.tests.load_multitask_data()
assert multitask_dataset.get_data_shape() == (1024,)
def test_len(self):
"""Test that len(dataset) works."""
solubility_dataset = dc.data.tests.load_solubility_data()
assert len(solubility_dataset) == 10
def test_reshard(self):
"""Test that resharding the dataset works."""
solubility_dataset = dc.data.tests.load_solubility_data()
X, y, w, ids = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
assert solubility_dataset.get_number_shards() == 1
solubility_dataset.reshard(shard_size=1)
assert solubility_dataset.get_shard_size() == 1
X_r, y_r, w_r, ids_r = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
assert solubility_dataset.get_number_shards() == 10
solubility_dataset.reshard(shard_size=10)
assert solubility_dataset.get_shard_size() == 10
X_rr, y_rr, w_rr, ids_rr = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
# Test first resharding worked
np.testing.assert_array_equal(X, X_r)
np.testing.assert_array_equal(y, y_r)
np.testing.assert_array_equal(w, w_r)
np.testing.assert_array_equal(ids, ids_r)
# Test second resharding worked
np.testing.assert_array_equal(X, X_rr)
np.testing.assert_array_equal(y, y_rr)
np.testing.assert_array_equal(w, w_rr)
np.testing.assert_array_equal(ids, ids_rr)
def test_select(self):
"""Test that dataset select works."""
num_datapoints = 10
num_features = 10
num_tasks = 1
X = np.random.rand(num_datapoints, num_features)
y = np.random.randint(2, size=(num_datapoints, num_tasks))
w = np.ones((num_datapoints, num_tasks))
ids = np.array(["id"] * num_datapoints)
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
indices = [0, 4, 5, 8]
select_dataset = dataset.select(indices)
X_sel, y_sel, w_sel, ids_sel = (select_dataset.X, select_dataset.y,
select_dataset.w, select_dataset.ids)
np.testing.assert_array_equal(X[indices], X_sel)
np.testing.assert_array_equal(y[indices], y_sel)
np.testing.assert_array_equal(w[indices], w_sel)
np.testing.assert_array_equal(ids[indices], ids_sel)
def test_get_shape(self):
"""Test that get_shape works."""
num_datapoints = 100
num_features = 10
num_tasks = 10
# Generate data
X = np.random.rand(num_datapoints, num_features)
y = np.random.randint(2, size=(num_datapoints, num_tasks))
w = np.random.randint(2, size=(num_datapoints, num_tasks))
ids = np.array(["id"] * num_datapoints)
dataset = dc.data.NumpyDataset(X, y, w, ids)
X_shape, y_shape, w_shape, ids_shape = dataset.get_shape()
assert X_shape == X.shape
assert y_shape == y.shape
assert w_shape == w.shape
assert ids_shape == ids.shape
def test_iterbatches(self):
"""Test that iterating over batches of data works."""
solubility_dataset = dc.data.tests.load_solubility_data()
batch_size = 2
data_shape = solubility_dataset.get_data_shape()
tasks = solubility_dataset.get_task_names()
for (X_b, y_b, w_b, ids_b) in solubility_dataset.iterbatches(batch_size):
assert X_b.shape == (batch_size,) + data_shape
assert y_b.shape == (batch_size,) + (len(tasks),)
assert w_b.shape == (batch_size,) + (len(tasks),)
assert ids_b.shape == (batch_size,)
def test_itersamples_numpy(self):
"""Test that iterating over samples in a NumpyDataset works."""
num_datapoints = 100
num_features = 10
num_tasks = 10
# Generate data
X = np.random.rand(num_datapoints, num_features)
y = np.random.randint(2, size=(num_datapoints, num_tasks))
w = | np.random.randint(2, size=(num_datapoints, num_tasks)) | numpy.random.randint |
# -*- coding: utf-8 -*-
"""
Laboratory set-up for non-linear spectroscopy
This class controls calculations of non-linear optical spectra and
other experiments in which the laboratory setup needs to be controlled.
Examples are pulse polarization settings, pulse shapes, and spectra
in non-linear spectroscopy.
Class Details
-------------
"""
import numpy
from ..utils import Integer
from ..utils.vectors import X
from ..core.time import TimeAxis
from ..core.frequency import FrequencyAxis
from ..core.dfunction import DFunction
class LabSetup:
"""Laboratory set-up for non-linear spectroscopy
Class representing laboratory setup for non-linear spectroscopic
experiments. It holds information about pulse shapes and polarizations.
Pulses can be set in the time and/or frequency domain. **Consistency between
the domains is neither checked nor enforced**. Consistent conversion between
domains is provided by convenience routines [TO BE IMPLEMENTED]
Parameters
----------
nopulses : int
Number of pulses in the experiment. Default is 3.
"""
number_of_pulses = Integer("number_of_pulses")
def __init__(self, nopulses = 3):
self.number_of_pulses = nopulses
self.pulse_effects = "rescale_dip" # Pulse shape effects accounted
# for by rescaling transition dipoles
# When the pulses are not defined no
# rescaling is used.
self.M4 = numpy.array([[4.0, -1.0, -1.0],
[-1.0, 4.0, -1.0],
[-1.0,-1.0, 4.0]])/30.0
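# M4 is presumably the isotropic orientational-averaging matrix for fourth-rank
# dipole tensors; combined with the pulse polarization vectors it gives the
# rotationally averaged four-wave-mixing response.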
self.timeaxis = None
self.freqaxis = None
self.F4eM4 = None
self.e = None
self.has_polarizations = False
self.has_freqdomain = False
self.has_timedomain = False
# time or frequency
self.axis_type = None
self.pulse_t = [None]*nopulses
self.pulse_f = [None]*nopulses
self.dscaling = None
self.omega = None
def set_pulse_shapes(self, axis, params):
"""Sets the pulse properties
Pulse shapes or spectra are set in this routine. If axis is of
`TimeAxis` type, the parameters are understood as time domain,
if axis is of `FrequencyAxis` type, they are understood as frequency
domain.
Parameters
----------
axis : TimeAxis or FrequencyAxis
Quantarhei time axis object, which specifies the values for which
pulse properties are defined. If `TimeAxis` is specified, the
parameters are understood as time domain, if `FrequencyAxis`
is specified, they are understood as frequency domain.
params : dictionary
Dictionary of pulse parameters. The parameters are the following:
`ptype` is the pulse type with possible values `Gaussian` and
`numeric`. Time domain pulses are specified with their center
at t = 0.
**Gaussian** pulse has further parameters `amplitude`, `FWHM`,
and `frequency` with obvious meanings. `FWHM` is specified in `fs`,
`frequency` is specified in energy units, while `amplitude`
is in units of [energy]/[transition dipole moment]. The formula
for the lineshape is
.. math::
\\rm{shape}(\\omega) =
\\frac{2}{\\Delta}\\sqrt{\\frac{\\ln(2)}{\\pi}}
\\exp\\left\\{-\\frac{4\\ln(2)\\omega^2}{\\Delta^2}\\right\\}
The same formulae are used for time- and frequency domain
definitions. For time domain, :math:`t` should be used instead of
:math:`\\omega`.
**numeric** pulse is specified by a second parameter `function`
which should be of DFunction type and specifies line shape around
zero frequency.
Examples
--------
>>> import quantarhei as qr
>>> import matplotlib.pyplot as plt
>>> lab = LabSetup()
...
>>> # Time axis around 0
>>> time = qr.TimeAxis(-500.0, 1000, 1.0, atype="complete")
Gaussian pulse shape in time domain
>>> pulse2 = dict(ptype="Gaussian", FWHM=150, amplitude=1.0)
>>> params = (pulse2, pulse2, pulse2)
>>> lab.set_pulse_shapes(time, params)
Testing the pulse shape
>>> dfc = lab.get_pulse_envelop(1, time.data) # doctest: +SKIP
>>> pl = plt.plot(time.data, dfc) # doctest: +SKIP
>>> plt.show() # doctest: +SKIP
.. plot::
import quantarhei as qr
import matplotlib.pyplot as plt
lab = qr.LabSetup()
time = qr.TimeAxis(-500.0, 1000, 1.0, atype="complete")
pulse2 = dict(ptype="Gaussian", FWHM=150.0, amplitude=1.0)
params = (pulse2, pulse2, pulse2)
lab.set_pulse_shapes(time, params)
dfc = lab.get_pulse_envelop(1, time.data)
pl = plt.plot(time.data, dfc)
plt.show()
`numeric` pulse shape in time domain
>>> # We take the DFunction for creation of `numeric`ly defined
>>> # pulse shape from the previous example
>>> pls = lab.pulse_t[2]
>>> # new lab object
>>> lab2 = LabSetup()
>>> pulse1 = dict(ptype="numeric", function=pls)
>>> params = (pulse1, pulse1, pulse1)
>>> lab2.set_pulse_shapes(time, params)
Testing the pulse shape
>>> dfc = lab2.get_pulse_envelop(1, time.data) # doctest: +SKIP
>>> pl = plt.plot(time.data, dfc) # doctest: +SKIP
>>> plt.show() # we skip output here # doctest: +SKIP
Gaussian pulse shape in frequency domain
>>> lab = LabSetup()
>>> # FrequencyAxis around 0
>>> freq = qr.FrequencyAxis(-2500, 1000, 5.0)
...
>>> pulse2 = dict(ptype="Gaussian", FWHM=800, amplitude=1.0)
>>> params = (pulse2, pulse2, pulse2)
>>> lab.set_pulse_shapes(freq, params)
Testing the pulse shape
>>> # getting a different frequency axis
>>> freq2 = qr.FrequencyAxis(-1003, 100, 20.0)
>>> # and reading spectrum at two different sets of points
>>> dfc1 = lab.get_pulse_spectrum(1, freq.data)
>>> dfc2 = lab.get_pulse_spectrum(1, freq2.data)
>>> pl1 = plt.plot(freq.data, dfc1) # doctest: +SKIP
>>> pl2 = plt.plot(freq2.data, dfc2) # doctest: +SKIP
>>> plt.show() # doctest: +SKIP
We plot at two different sets of points.
.. plot::
import quantarhei as qr
import matplotlib.pyplot as plt
lab = qr.LabSetup()
freq = qr.FrequencyAxis(-2500, 1000, 5.0)
pulse2 = dict(ptype="Gaussian", FWHM=800.0, amplitude=1.0)
params = (pulse2, pulse2, pulse2)
lab.set_pulse_shapes(freq, params)
freq2 = qr.FrequencyAxis(-1000, 100, 20.0)
dfc1 = lab.get_pulse_spectrum(1, freq.data)
dfc2 = lab.get_pulse_spectrum(1, freq2.data)
pl1 = plt.plot(freq.data, dfc1)
pl2 = plt.plot(freq2.data, dfc2)
plt.show()
`numeric` pulse shape in frequency domain
>>> # We take the DFunction for creation of `numeric`ly defined
>>> # pulse shape from the previous example
>>> pls = lab.pulse_f[2]
>>> # new lab object
>>> lab2 = LabSetup()
>>> pulse1 = dict(ptype="numeric", function=pls)
>>> params = (pulse1, pulse1, pulse1)
>>> lab2.set_pulse_shapes(freq, params)
Testing the pulse shape
>>> dfc = lab2.get_pulse_envelop(1, freq.data) # doctest: +SKIP
>>> pl = plt.plot(freq.data, dfc) # doctest: +SKIP
>>> plt.show() # we skip output here # doctest: +SKIP
Situations in which Exceptions are thrown
>>> pulse3 = dict(ptype="other", FWHM=10, amplitude=1.0)
>>> params = (pulse3, pulse3, pulse3)
>>> lab.set_pulse_shapes(time, params)
Traceback (most recent call last):
...
Exception: Unknown pulse type
>>> params = (pulse2, pulse2)
>>> lab.set_pulse_shapes(time, params)
Traceback (most recent call last):
...
Exception: set_pulses requires 3 parameter sets
>>> params = (pulse2, pulse2)
>>> lab.set_pulse_shapes(time.data, params)
Traceback (most recent call last):
...
Exception: Wrong axis parameter
>>> time = qr.TimeAxis(0.0, 1000, 1.0)
>>> lab.set_pulse_shapes(time, params)
Traceback (most recent call last):
...
Exception: TimeAxis has to be of 'complete' type use atype='complete' as a parameter of TimeAxis
"""
if isinstance(axis, TimeAxis):
if axis.atype == "complete":
self.timeaxis = axis
self.axis_type = "time"
else:
raise Exception("TimeAxis has to be of 'complete' type"+
" use atype='complete' as a parameter"+
" of TimeAxis")
elif isinstance(axis, FrequencyAxis):
self.freqaxis = axis
self.axis_type = "frequency"
else:
raise Exception("Wrong axis paramater")
if len(params) == self.number_of_pulses:
k_p = 0
for par in params:
if par["ptype"] == "Gaussian":
if self.axis_type == "time":
#
# Time domain Gaussian pulse around 0.0 as a DFunction
#
tma = self.timeaxis
fwhm = par["FWHM"]
amp = par["amplitude"]
# normalized Gaussian multiplied by amplitude
val = (2.0/fwhm)*numpy.sqrt(numpy.log(2.0)/3.14159) \
*amp*numpy.exp(-4.0*numpy.log(2.0)*(tma.data/fwhm)**2)
self.pulse_t[k_p] = DFunction(tma, val)
elif self.axis_type == "frequency":
#
# Frequency domain Gaussian pulse around 0.0
# as a DFunction
#
fra = self.freqaxis
fwhm = par["FWHM"]
amp = par["amplitude"]
try:
freq = par["frequency"]
except KeyError:
freq = 0.0
# normalized Gaussian multiplied by amplitude
val = (2.0/fwhm)*numpy.sqrt(numpy.log(2.0)/3.14159) \
*amp*numpy.exp(-4.0*numpy.log(2.0)\
*((fra.data-freq)/fwhm)**2)
self.pulse_f[k_p] = DFunction(fra, val)
elif par["ptype"] == "numeric":
fce = par["function"]
if self.axis_type == "time":
#
# Create a new DFunction based on the submitted time
# axis
#
data = numpy.zeros(self.timeaxis.length)
i_p = 0
for t_p in self.timeaxis.data:
data[i_p] = fce.at(t_p)
i_p += 1
self.pulse_t[k_p] = DFunction(self.timeaxis, data)
elif self.axis_type == "frequency":
data = | numpy.zeros(self.freqaxis.length) | numpy.zeros |
#!/usr/bin/env python
from random import randint
import numpy as np
import argparse
import os
class OptionParser(argparse.ArgumentParser):
"""OptionParser"""
def __init__(self):
super(OptionParser, self).__init__()
self.add_argument('-c', '--class_inst', type=int, nargs='+',
help='class instance',
default=[1, 2, 3, 4, 5, 6])
self.add_argument('-n', '--number_jobs', type=int, nargs='+',
help='number of jobs', default=[20, 50, 100, 150])
self.add_argument('-m', '--number_machines', type=int, nargs='+',
help='number of machines', default=[3, 5, 8, 10, 12])
class ValidationError(Exception):
"""Vaildation Error"""
def __init__(self, msg, err):
super(ValidationError, self).__init__()
self.msg = msg
self.err = err
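# give_weight_duration draws a (weight, processing-time) pair for instance classes 1-6;
# classes 5 and 6 correlate the weight with the sampled processing time.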
def give_weight_duration(n):
if n > 6:
raise ValidationError('Error occurred: number too large', n)
elif n == 1:
return randint(10, 100), randint(1, 10)
elif n == 2:
return randint(1, 100), randint(1, 100)
elif n == 3:
return randint(10, 20), randint(10, 20)
elif n == 4:
return randint(90, 100), randint(90, 100)
elif n == 5:
p = randint(90, 100)
w = randint(p - 5, p + 5)
return w, p
elif n == 6:
p = randint(10, 100)
w = randint(p - 5, p + 5)
return w, p
def main():
parser = OptionParser()
args = parser.parse_args()
for n in args.number_jobs:
directory = './wt%03d' % (n)
os.mkdir(directory)
i = 1
for rdd in [0.2 + 0.2 * i for i in range(0, 5)]:
for tdf in [0.2 + 0.2 * i for i in range(0, 5)]:
for j in range(0, 5):
p = np.random.randint(low=1, high=100, size=n)
w = | np.random.randint(low=1, high=10, size=n) | numpy.random.randint |
import src.utils.helper_embedding as f
import pytest
from numpy import array_equal, round_
def test_reshape_df(df_embedding_mess, df_embedding_clean):
df = f.reshape_df(df=df_embedding_mess,
col_name='embedding')
assert df.equals(df_embedding_clean)
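# Reminder: cosine similarity is dot(a, b) / (||a|| * ||b||); the vectors fixture is
# presumably built so that a and b reproduce the expected vectors_cos_sim value.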
def test_calculate_cosine_similarity(vectors, vectors_cos_sim):
with pytest.raises(TypeError):
f.calculate_cosine_similarity(a=vectors['a'], b=vectors['c'])
with pytest.raises(ValueError):
f.calculate_cosine_similarity(a=vectors['a'], b=vectors['d'])
assert f.calculate_cosine_similarity(a=vectors['a'],
b=vectors['b']) == vectors_cos_sim
def test_extract_cosine_similarity(df_embedding_clean, series_cos_sim):
series = f.extract_cosine_similarity(df=df_embedding_clean,
word='sat',
col_embedding='embedding')
series = round(number=series, ndigits=6)
assert series.equals(series_cos_sim)
def test_get_embedding_synonyms(df_embedding_clean, similar_words):
assert f.get_embedding_synonyms(df=df_embedding_clean,
word='sat',
col_embedding='embedding',
threshold=0.65) == similar_words['bow']
def test_extract_paragraphs(content_embed):
assert f.extract_paragraphs(txt=content_embed['details_html']) == content_embed['details']
def test_get_document_embedding(content_embed):
input_embed = f.get_document_embedding(txt=content_embed['details'])
input_embed = round_(a=input_embed, decimals=4)
output_embed = round_(a=content_embed['embedding'], decimals=4)
assert array_equal(input_embed, output_embed)
def test_get_paragraphs_and_embeddings(content_embed):
input_id, input_details, input_embed = f.get_paragraphs_and_embeddings(id=content_embed['id'],
txt=content_embed['details_html'])
input_embed = | round_(a=input_embed, decimals=4) | numpy.round_ |
import abc
from unittest import TestCase
import numpy as np
from nose_parameterized import parameterized
from numpy.testing import assert_array_almost_equal
import convolutional.operators as op
class _BaseTest(TestCase, metaclass=abc.ABCMeta):
def setUp(self):
np.random.seed(0)
@parameterized.expand([
((1, 1),),
((32, 32),),
((124, 43),),
((1066, 1024),),
])
def test_add_operator(self, expected_shape):
a, b = np.random.rand(*expected_shape), np.random.rand(*expected_shape)
expected = a + b
actual = op.add(a, b)
self.assertEqual(actual.shape, expected_shape)
assert_array_almost_equal(actual, expected, decimal=6)
@parameterized.expand([
((1, 1),),
((32, 32),),
((124, 43),),
((1066, 1024),),
])
def test_sub_operator(self, expected_shape):
a, b = np.random.rand(*expected_shape), np.random.rand(*expected_shape)
expected = a - b
actual = op.sub(a, b)
self.assertEqual(actual.shape, expected_shape)
assert_array_almost_equal(actual, expected, decimal=6)
@parameterized.expand([
((1, 1), (1, 1),),
((2, 4), (4, 2),),
((12, 24), (24, 32),),
((243, 45), (45, 67),),
])
def test_dot_operator(self, expected_s_a, espected_s_b):
a, b = np.random.rand(*expected_s_a), np.random.rand(*espected_s_b)
expected = np.dot(a, b)
actual = op.dot(a, b)
self.assertEqual(actual.shape, (expected_s_a[0], espected_s_b[1]))
assert_array_almost_equal(actual, expected, decimal=5)
@parameterized.expand([
((1, 1),),
((32, 32),),
((50, 15),),
((3, 24),),
((4014, 1025),),
((4096, 4096),),
])
def test_hadamard_operator(self, shape):
a, b = np.random.randn(*shape), np.random.randn(*shape)
expected = a * b
actual = op.hadamard(a, b)
c = expected - actual
self.assertEqual(actual.shape, shape)
# Almost equal is required because my video card only accepts float32.
assert_array_almost_equal(actual, expected, decimal=6)
@parameterized.expand([
((1, 1),),
((32, 1),),
((1, 32),),
((32, 32),),
((125, 35),),
((4014, 1025),),
((4096, 4096),),
])
def test_scale_operator(self, shape):
alpha, a = np.random.rand(), np.random.randn(*shape)
expected = alpha * a
actual = op.scale(alpha, a)
c = expected - actual
self.assertEqual(actual.shape, shape)
assert_array_almost_equal(actual, expected, decimal=6)
@parameterized.expand([
((3, 3, 1), (3, 3, 1), np.array([[[77], [136], [89]],
[[179], [227], [137]],
[[91], [165], [175]]])),
((2, 2, 1), (3, 3, 2), np.array([[[112, 128], [105, 176]],
[[113, 115], [115, 161]]])),
])
def test_conv_operator(self, a_shape, b_shape, expected):
a, k = 10 * np.random.rand(*a_shape), 10 * np.random.rand(*b_shape)
a, k = a.astype(int), k.astype(int)
actual = op.conv(a, k)
assert_array_almost_equal(actual, expected)
@parameterized.expand([
((1, 1),),
((10, 10),),
((32, 17),),
((40,),),
((2, 12, 4, 3,),),
])
def test_sum_operator(self, a_shape):
a = np.random.rand(*a_shape)
expected = | np.sum(a) | numpy.sum |
# Copyright 2021 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Divide multiple tasks based on data
Parameters
----------
samples: Train data, see `sedna.datasources.BaseDataSource` for more detail.
Returns
-------
tasks: All tasks based on training data.
task_extractor: Model with a method for predicting target tasks
"""
from typing import List, Any, Tuple
import numpy as np
import pandas as pd
from sedna.datasources import BaseDataSource
from sedna.common.class_factory import ClassType, ClassFactory
from .artifact import Task
__all__ = ('TaskDefinitionBySVC', 'TaskDefinitionByDataAttr')
@ClassFactory.register(ClassType.MTL)
class TaskDefinitionBySVC:
"""
Divide the dataset with `AgglomerativeClustering` based on kernel distance,
then use an SVC to fit the clustering result.
Parameters
----------
n_class: int or None
The number of clusters to find, default=2.
"""
def __init__(self, **kwargs):
n_class = kwargs.get("n_class", "")
self.n_class = max(2, int(n_class)) if str(n_class).isdigit() else 2
def __call__(self,
samples: BaseDataSource) -> Tuple[List[Task],
Any,
BaseDataSource]:
from sklearn.svm import SVC
from sklearn.cluster import AgglomerativeClustering
d_type = samples.data_type
x_data = samples.x
y_data = samples.y
if not isinstance(x_data, pd.DataFrame):
raise TypeError(f"{d_type} data should only be pd.DataFrame")
tasks = []
legal = list(
filter(lambda col: x_data[col].dtype == 'float64', x_data.columns))
df = x_data[legal]
c1 = AgglomerativeClustering(n_clusters=self.n_class).fit_predict(df)
c2 = SVC(gamma=0.01)
c2.fit(df, c1)
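# The agglomerative cluster labels act as pseudo-targets: the SVC fitted on them becomes
# the task extractor that maps unseen feature rows to a task (cluster) index.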
for task in range(self.n_class):
g_attr = f"svc_{task}"
task_df = BaseDataSource(data_type=d_type)
task_df.x = x_data.iloc[np.where(c1 == task)]
task_df.y = y_data.iloc[ | np.where(c1 == task) | numpy.where |
"""
Adapted from PASTIS: https://github.com/hiclib/pastis
"""
import numpy as np
from scipy import optimize
from scipy import sparse
from sklearn.utils import check_random_state
from sklearn.metrics import euclidean_distances
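# compute_wish_distances maps contact counts to target distances via the power law
# d_ij = (c_ij / beta)**(1 / alpha), with an optional bias correction in the sparse branch.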
def compute_wish_distances(counts, alpha=-3., beta=1., bias=None):
if beta == 0:
raise ValueError("beta cannot be equal to 0.")
counts = counts.copy()
if sparse.issparse(counts):
if not sparse.isspmatrix_coo(counts):
counts = counts.tocoo()
if bias is not None:
bias = bias.flatten()
counts.data /= bias[counts.row] * bias[counts.col]
wish_distances = counts / beta
wish_distances.data[wish_distances.data != 0] **= 1. / alpha
return wish_distances
else:
wish_distances = counts.copy() / beta
wish_distances[wish_distances != 0] **= 1. / alpha
return wish_distances
def smooth_intra(distances, h, diag=0):
if sparse.issparse(distances):
d = distances.toarray()
else:
d = np.array(distances)
n = d.shape[0]
d[d == 0] = np.nan
d[ | np.isinf(d) | numpy.isinf |
import numpy as np
import sys
import warnings
warnings.filterwarnings('ignore')
import george
from george import kernels
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF,WhiteKernel, ConstantKernel as C, DotProduct, RationalQuadratic, Matern
from scipy.optimize import minimize
from scipy.interpolate import PchipInterpolator, interp1d
import scipy.io as sio
from .priors import *
import pkg_resources
def get_file(folder, filename):
resource_package = __name__
resource_path = '/'.join((folder, filename)) # Do not use os.path.join()
template = pkg_resources.resource_stream(resource_package, resource_path)
return template
fsps_mlc = sio.loadmat(get_file('train_data','fsps_mass_loss_curve.mat'))
#fsps_mlc = sio.loadmat('dense_basis/train_data/fsps_mass_loss_curve.mat')
fsps_time = fsps_mlc['timeax_fsps'].ravel()
fsps_massloss = fsps_mlc['mass_loss_fsps'].ravel()
# basic SFH tuples
rising_sfh = np.array([10.0,1.0,3,0.5,0.7,0.9])
regular_sfg_sfh = np.array([10.0,0.3,3,0.25,0.5,0.75])
young_quenched_sfh = np.array([10.0,-1.0,3,0.3,0.6,0.8])
old_quenched_sfh = np.array([10.0,-1.0,3,0.1,0.2,0.4])
old_very_quenched_sfh = np.array([10.0,-10.0,3,0.1,0.2,0.4])
double_peaked_SF_sfh = np.array([10.0,0.5,3,0.25,0.4,0.7])
double_peaked_Q_sfh = np.array([10.0,-1.0,3,0.2,0.4,0.8])
# functions:
def neg_ln_like(p, gp, y):
gp.set_parameter_vector(p)
return -gp.log_likelihood(y)
def grad_neg_ln_like(p, gp, y):
gp.set_parameter_vector(p)
return -gp.grad_log_likelihood(y)
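# correct_for_mass_loss rescales the SFH at each time step by the interpolated
# correction factor, presumably the surviving-mass fraction from the FSPS curve above.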
def correct_for_mass_loss(sfh, time, mass_loss_curve_time, mass_loss_curve):
correction_factors = np.interp(time, mass_loss_curve_time, mass_loss_curve)
return sfh * correction_factors
def gp_interpolator(x,y,res = 1000, Nparam = 3):
yerr = np.zeros_like(y)
yerr[2:(2+Nparam)] = 0.001/np.sqrt(Nparam)
if len(yerr) > 26:
yerr[2:(2+Nparam)] = 0.1/np.sqrt(Nparam)
#kernel = np.var(yax) * kernels.ExpSquaredKernel(np.median(yax)+np.std(yax))
#k2 = np.var(yax) * kernels.LinearKernel(np.median(yax),order=1)
#kernel = np.var(y) * kernels.Matern32Kernel(np.median(y)) #+ k2
kernel = np.var(y) * (kernels.Matern32Kernel(np.median(y)) + kernels.LinearKernel(np.median(y), order=2))
gp = george.GP(kernel)
#print(xax.shape, yerr.shape)
gp.compute(x.ravel(), yerr.ravel())
x_pred = np.linspace(np.amin(x), np.amax(x), res)
y_pred, pred_var = gp.predict(y.ravel(), x_pred, return_var=True)
return x_pred, y_pred
def gp_sklearn_interpolator(x,y,res = 1000):
kernel = DotProduct(10.0, (1e-2, 1e2)) * RationalQuadratic(0.1)
gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=9)
gp.fit(x.reshape(-1,1),(y-x).reshape(-1,1))
x_pred = np.linspace(0,1,1000)
y_pred, sigma = gp.predict(x_pred[:,np.newaxis], return_std=True)
y_pred = y_pred.ravel() + x_pred
return x_pred, y_pred
def linear_interpolator(x,y,res = 1000):
interpolator = interp1d(x,y)
x_pred = np.linspace(np.amin(x), np.amax(x), res)
y_pred = interpolator(x_pred)
return x_pred, y_pred
def Pchip_interpolator(x,y,res = 1000):
interpolator = PchipInterpolator(x,y)
x_pred = np.linspace(np.amin(x), np.amax(x), res)
y_pred = interpolator(x_pred)
return x_pred, y_pred
def tuple_to_sfh(sfh_tuple, zval, interpolator = 'gp_george', set_sfr_100Myr = False, vb = False):
# generate an SFH from an input tuple (Mass, SFR, {tx}) at a specified redshift
Nparam = int(sfh_tuple[2])
mass_quantiles = np.linspace(0,1,Nparam+2)
time_quantiles = np.zeros_like(mass_quantiles)
time_quantiles[-1] = 1
time_quantiles[1:-1] = sfh_tuple[3:]
# now add SFR constraints
# SFR smoothly increasing from 0 at the big bang
mass_quantiles = | np.insert(mass_quantiles,1,[0.00]) | numpy.insert |
from pathlib import Path
from numpy import arange, array, ceil, empty, floor, isnan, linspace, \
log10, meshgrid, nan, tile, transpose, where
from numpy.ma import masked_where
from matplotlib.pyplot import clf, close, cm, colorbar, figure, savefig, show
from mpl_toolkits.basemap import Basemap
from os.path import dirname, isdir, join, realpath
from os import mkdir
import pyapex, seaborn
from scipy.interpolate import interp2d#, RectBivariateSpline
#
from pyigrf.pyigrf import GetIGRF
from pyiri2016 import IRI2016
from pyiri2016 import IRI2016Profile
from pyiri2016.iriweb import irisubgl, firisubl
from timeutil import TimeUtilities
#
cwd = Path(__file__).parent
DataFolder = cwd / 'data'
class IRI2016_2DProf(IRI2016Profile):
#def __init__(self):
# pass
#def _GetTitle(self):
# IRI2016Profile()._GetTitle(__self__)
def HeightVsTime(self, FIRI=False, hrlim=[0., 24.], hrstp=1.):
self.option = 1
nhrstp = int((hrlim[1] + hrstp - hrlim[0]) / hrstp) + 1
hrbins = list(map(lambda x: hrlim[0] + float(x) * hrstp, range(nhrstp)))
Ne = empty((nhrstp, self.numstp))
if FIRI: NeFIRI = | empty((nhrstp, self.numstp)) | numpy.empty |
# --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file
path
#Code starts here
data = pd.read_csv(path)
data.rename(columns={"Total" : "Total_Medals"}, inplace = True)
data.head()
# --------------
#Code starts here
data['Better_Event'] = np.where(data['Total_Summer'] > data['Total_Winter'], 'Summer', ( | np.where(data['Total_Summer'] < data['Total_Winter'], 'Winter', 'Both') | numpy.where |
"""
@Author : <NAME>
"""
import os, sys
from basic.common import add_path,env
import numpy as np
from scipy.linalg import logm, norm
from math import pi, sqrt
from multiprocessing import Pool
from txt_table_v1 import TxtTable
# add_path(env.Home+'/working/eccv18varpose/dataset')
# from PASCAL3D import get_anno_dbs_tbl, get_anno, categories
this_dir = os.path.dirname(os.path.realpath(__file__))
add_path(this_dir+'/../../../dataset')
from Pascal3D import get_anno_dbs_tbl, get_anno, categories
def compute_RotMats(a, e, t):
"""
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
# Warning from Shuai #
# #
# This function is just a replication of matlab implementation for reproducibility purpose only! #
# However, I believe the logic is not correct. But since Pascal3D+ dataset itself is annotated #
# in such way, we have to follow this definition for evaluation purpose. #
# #
# In short words: The resulting rotation matrix can still be valid since it guarantees the CAD model #
# to be projected roughly aligned with the 2D object in image. However, the way in interpreting #
# a, e, t used in this function to construct the rotation matrix deviates from the true definition #
# of Azimuth, Elevation and In-plane rotation. #
# #
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
"""
assert len(a)==len(e)==len(t)
M = len(a)
# camera intrinsic matrix
Rz = np.zeros((M, 3, 3), dtype=np.float32)
Rx = np.zeros((M, 3, 3), dtype=np.float32)
Rz2 = np.zeros((M, 3, 3), dtype=np.float32)
# C = np.zeros((M, 1, 3), dtype=np.float32)
# initial "1" positions.
Rz [:, 2, 2] = 1
Rx [:, 0, 0] = 1
Rz2[:, 2, 2] = 1
#
R = np.zeros((M, 3, 3), dtype=np.float32)
# convert to radius
a = a * pi / 180.
e = e * pi / 180.
t = t * pi / 180.
# update a, e, t
a = -a
e = pi/2.+e
t = -t
#
sin_a, cos_a = np.sin(a), np.cos(a)
sin_e, cos_e = np.sin(e), np.cos(e)
sin_t, cos_t = np.sin(t), np.cos(t)
# ===========================
# rotation matrix
# ===========================
"""
# [Transposed]
Rz = np.matrix( [[ cos(a), sin(a), 0 ], # model rotate by a
[ -sin(a), cos(a), 0 ],
[ 0, 0, 1 ]] )
# [Transposed]
Rx = np.matrix( [[ 1, 0, 0 ], # model rotate by e
[ 0, cos(e), sin(e) ],
[ 0, -sin(e), cos(e) ]] )
# [Transposed]
Rz2= np.matrix( [[ cos(t), sin(t), 0 ], # camera rotate by t (in-plane rotation)
[-sin(t), cos(t), 0 ],
[ 0, 0, 1 ]] )
R = Rz2*Rx*Rz
"""
# Original matrix (None-transposed.)
# No need to set back to zero?
Rz[:, 0, 0], Rz[:, 0, 1] = cos_a, -sin_a
Rz[:, 1, 0], Rz[:, 1, 1] = sin_a, cos_a
#
Rx[:, 1, 1], Rx[:, 1, 2] = cos_e, -sin_e
Rx[:, 2, 1], Rx[:, 2, 2] = sin_e, cos_e
#
Rz2[:, 0, 0], Rz2[:, 0, 1] = cos_t, -sin_t
Rz2[:, 1, 0], Rz2[:, 1, 1] = sin_t, cos_t
# R = Rz2*Rx*Rz
R[:] = np.einsum("nij,njk,nkl->nil", Rz2, Rx, Rz)
# Return the original matrix without transpose!
return R
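# Illustrative usage (not part of the original code): a single rotation matrix can be
# obtained as R = compute_RotMats(np.array([30.]), np.array([10.]), np.array([-5.]))[0].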
#-# def geodesic_dist(R, R_gt): # _geo_err
#-# R, R_gt = map(np.matrix, [R, R_gt])
#-# R_angle = norm(logm(R.transpose()*R_gt), 2) / sqrt(2)
#-# # About different of numpy/scipy norm and matlab norm:
#-# # http://stackoverflow.com/questions/26680412/getting-different-answers-with-matlab-and-python-norm-functions
#-# # https://nl.mathworks.com/help/matlab/ref/norm.html
#-# return R_angle # R_angle_results < pi/6. is treated as correct in VpsKps
def geodesic_dist(R, R_gt): # _geo_err
R, R_gt = map(np.matrix, [R, R_gt])
# With out disp annoying error
_logRR, errest = logm(R.transpose()*R_gt, disp=False)
R_angle = norm(_logRR, 2) / sqrt(2)
# This will do print("logm result may be inaccurate, approximate err =", errest)
# R_angle = norm(logm(R.transpose()*R_gt), 2) / sqrt(2)
#
# About different of numpy/scipy norm and matlab norm:
# http://stackoverflow.com/questions/26680412/getting-different-answers-with-matlab-and-python-norm-functions
# https://nl.mathworks.com/help/matlab/ref/norm.html
return R_angle
def geodesic_dist_new(R, R_gt): # _geo_err
'''ICCV17, From 3D Pose Regression using Convolutional Neural Networks.
Note: the geodesic distance used by vpskps: d(R1, R2)
the simplified version by this paper: d_A(R1, R2)
Their relation is: d(R1, R2) = d_A(R1, R2) / sqrt(2)
'''
R, R_gt = map(np.matrix, [R, R_gt])
# Do clipping to [-1,1].
# For a few cases, (tr(R)-1)/2 can be a little bit less/greater than -1/1.
logR_F = np.clip( (np.trace(R.transpose()*R_gt)-1.)/2., -1, 1)
R_angle = np.arccos( logR_F ) / np.sqrt(2)
# This can return nan when inside is out of range [-1,1]
# R_angle = np.arccos( (np.trace(R.transpose()*R_gt)-1.)/2. ) / np.sqrt(2)
return R_angle
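# Sanity check (illustrative): rotations differing by a 30 degree turn give
# arccos(cos(30 deg)) / sqrt(2) = (pi/6) / sqrt(2), consistent with d(R1, R2) = d_A(R1, R2) / sqrt(2).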
def _geodesic_dist(args):
R, R_gt = args
return geodesic_dist(R, R_gt)
def compute_geo_dists(GT_aet, Pred_aet):
geo_dists= []
gt_As, gt_Es, gt_Ts = GT_aet
pr_As, pr_Es, pr_Ts = Pred_aet
gt_Rs = compute_RotMats(gt_As, gt_Es, gt_Ts)
pr_Rs = compute_RotMats(pr_As, pr_Es, pr_Ts)
# for gt_a, gt_e, gt_t, pr_a, pr_e, pr_t in zip(gt_As, gt_Es, gt_Ts, pr_As, pr_Es, pr_Ts):
for gt_R, pr_R in zip(gt_Rs, pr_Rs):
geo_dists.append( geodesic_dist_new(gt_R, pr_R) )
return np.array(geo_dists)
def parse_rslt_txt(rslt_txt_file):
lines = [x.strip() for x in open(rslt_txt_file).readlines() if not x.strip().startswith('#')]
objID2aet = {}
for line in lines:
lineSp = line.split()
objID = lineSp[0]
a,e,t = map(float, lineSp[1:])
objID2aet[objID] = (a,e,t)
return objID2aet
def eval_one(objID2aet_pred, cate='aeroplane', theta_levels=[pi/6.], nr_worker=20):
# objID2aet_pred = parse_rslt_txt(rslt_txt_file)
keys, rcobjs = get_anno(cate, collection='val', filter='easy')
# print('--->[eval_one] %s '%cate, len(keys))
vps = rcobjs.gt_view
gt_rot_Mats = compute_RotMats(vps.a, vps.e, vps.t)
a_preds, e_preds, t_preds = [],[],[]
for rcobj in rcobjs:
_a,_e,_t = objID2aet_pred[rcobj.obj_id]
a_preds.append(_a)
e_preds.append(_e)
t_preds.append(_t)
a_preds = | np.array(a_preds, np.float32) | numpy.array |
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib import colors
from matplotlib.ticker import MaxNLocator
from astropy.io import ascii
import copy
import os
import numpy as np
from scipy.interpolate import interp1d
from scipy.integrate import quad
from spectractor import parameters
from spectractor.config import set_logger
from spectractor.simulation.simulator import SimulatorInit
from spectractor.simulation.atmosphere import Atmosphere, AtmosphereGrid
from spectractor.fit.fitter import FitWorkspace, run_minimisation_sigma_clipping, run_minimisation, RegFitWorkspace
from spectractor.tools import from_lambda_to_colormap, fftconvolve_gaussian
from spectractor.extractor.spectrum import Spectrum
from spectractor.extractor.spectroscopy import HALPHA, HBETA, HGAMMA, HDELTA, O2_1, O2_2, O2B
class MultiSpectraFitWorkspace(FitWorkspace):
def __init__(self, output_file_name, file_names, fixed_A1s=True, inject_random_A1s=False, bin_width=-1,
nwalkers=18, nsteps=1000, burnin=100, nbins=10,
verbose=0, plot=False, live_fit=False):
"""Class to fit jointly multiple spectra extracted with Spectractor.
The spectrum is supposed to be the product of the star SED, a common instrumental throughput,
a grey term (clouds) and a common atmospheric transmission, with the second order diffraction removed.
The truth parameters are loaded from the file header if provided.
If provided, the atmospheric grid files are used for the atmospheric transmission simulations and interpolated
with splines, otherwise Libradtran is called at each step (slower). The files should have the same name as
the spectrum files but with the atmsim suffix.
Parameters
----------
output_file_name: str
Generic file name to output results.
file_names: list
List of spectrum file names.
bin_width: float
Size of the wavelength bins in nm. If negative, no binning.
nwalkers: int, optional
Number of walkers for MCMC fitting.
nsteps: int, optional
Number of steps for MCMC fitting.
burnin: int, optional
Number of burn-in steps for MCMC fitting.
nbins: int, optional
Number of bins for MCMC chains analysis.
verbose: int, optional
Verbosity level (default: 0).
plot: bool, optional
If True, many plots are produced (default: False).
live_fit: bool, optional
If True, many plots along the fitting procedure are produced to see convergence in live (default: False).
Examples
--------
>>> file_names = ["./tests/data/reduc_20170530_134_spectrum.fits"]
>>> w = MultiSpectraFitWorkspace("./outputs/test", file_names, bin_width=5, verbose=True)
>>> w.output_file_name
'./outputs/test'
>>> w.spectra #doctest: +ELLIPSIS
[<spectractor.extractor.spectrum.Spectrum object at ...>]
>>> w.lambdas #doctest: +ELLIPSIS
array([[ ...
"""
FitWorkspace.__init__(self, output_file_name, nwalkers, nsteps, burnin, nbins, verbose, plot, live_fit)
for name in file_names:
if "spectrum" not in name:
raise ValueError(f"ALl file names must contain spectrum keyword and be an output from Spectractor. "
f"I found {name} in file_names list.")
self.my_logger = set_logger(self.__class__.__name__)
self.output_file_name = output_file_name
self.bin_widths = bin_width
self.spectrum, self.telescope, self.disperser, self.target = SimulatorInit(file_names[0], fast_load=True)
self.spectra = []
self.atmospheres = []
self.file_names = file_names
for name in file_names:
spectrum = Spectrum(name, fast_load=True)
self.spectra.append(spectrum)
atmgrid_file_name = name.replace("sim", "reduc").replace("spectrum.fits", "atmsim.fits")
if os.path.isfile(atmgrid_file_name):
self.atmospheres.append(AtmosphereGrid(name, atmgrid_file_name))
else:
self.my_logger.warning(f"\n\tNo atmosphere grid {atmgrid_file_name}, the fit will be slower...")
self.atmospheres.append(Atmosphere(spectrum.airmass, spectrum.pressure, spectrum.temperature))
self.nspectra = len(self.spectra)
self.spectrum_lambdas = [self.spectra[k].lambdas for k in range(self.nspectra)]
self.spectrum_data = [self.spectra[k].data for k in range(self.nspectra)]
self.spectrum_err = [self.spectra[k].err for k in range(self.nspectra)]
self.spectrum_data_cov = [self.spectra[k].cov_matrix for k in range(self.nspectra)]
self.lambdas = np.empty(1)
self.lambdas_bin_edges = None
self.ref_spectrum_cube = []
self.random_A1s = None
self._prepare_data()
self.ozone = 260.
self.pwv = 3
self.aerosols = 0.015
self.reso = -1
self.A1s = np.ones(self.nspectra)
self.p = np.array([self.ozone, self.pwv, self.aerosols, self.reso, *self.A1s])
self.A1_first_index = 4
self.fixed = [False] * self.p.size
# self.fixed[0] = True
self.fixed[3] = True
self.fixed[self.A1_first_index] = True
if fixed_A1s:
for ip in range(self.A1_first_index, len(self.fixed)):
self.fixed[ip] = True
self.input_labels = ["ozone", "PWV", "VAOD", "reso"] + [f"A1_{k}" for k in range(self.nspectra)]
self.axis_names = ["ozone", "PWV", "VAOD", "reso"] + ["$A_1^{(" + str(k) + ")}$" for k in range(self.nspectra)]
self.bounds = [(100, 700), (0, 10), (0, 0.01), (0.1, 100)] + [(1e-3, 2)] * self.nspectra
for atmosphere in self.atmospheres:
if isinstance(atmosphere, AtmosphereGrid):
self.bounds[0] = (min(self.atmospheres[0].OZ_Points), max(self.atmospheres[0].OZ_Points))
self.bounds[1] = (min(self.atmospheres[0].PWV_Points), max(self.atmospheres[0].PWV_Points))
self.bounds[2] = (min(self.atmospheres[0].AER_Points), max(self.atmospheres[0].AER_Points))
break
self.nwalkers = max(2 * self.ndim, nwalkers)
self.amplitude_truth = None
self.lambdas_truth = None
self.atmosphere = Atmosphere(airmass=1,
pressure=float(np.mean([self.spectra[k].header["OUTPRESS"]
for k in range(self.nspectra)])),
temperature=float(np.mean([self.spectra[k].header["OUTTEMP"]
for k in range(self.nspectra)])))
self.true_instrumental_transmission = None
self.true_atmospheric_transmission = None
self.true_A1s = None
self.get_truth()
if inject_random_A1s:
self.inject_random_A1s()
# design matrix
self.M = np.zeros((self.nspectra, self.lambdas.size, self.lambdas.size))
self.M_dot_W_dot_M = np.zeros((self.lambdas.size, self.lambdas.size))
# prepare results
self.amplitude_params = np.ones(self.lambdas.size)
self.amplitude_params_err = np.zeros(self.lambdas.size)
self.amplitude_cov_matrix = np.zeros((self.lambdas.size, self.lambdas.size))
# regularisation
self.amplitude_priors_method = "noprior"
self.reg = parameters.PSF_FIT_REG_PARAM * self.bin_widths
if self.amplitude_priors_method == "spectrum":
self.amplitude_priors = np.copy(self.true_instrumental_transmission)
self.amplitude_priors_cov_matrix = np.eye(self.lambdas[0].size) # np.diag(np.ones_like(self.lambdas))
self.U = np.diag([1 / np.sqrt(self.amplitude_priors_cov_matrix[i, i]) for i in range(self.lambdas[0].size)])
L = np.diag(-2 * np.ones(self.lambdas[0].size)) + np.diag(np.ones(self.lambdas[0].size), -1)[:-1, :-1] \
+ np.diag(np.ones(self.lambdas[0].size), 1)[:-1, :-1]
L[0, 0] = -1
L[-1, -1] = -1
self.L = L.astype(float)
self.Q = L.T @ np.linalg.inv(self.amplitude_priors_cov_matrix) @ L
self.Q_dot_A0 = self.Q @ self.amplitude_priors
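# L above is a discrete second-difference (curvature) operator, so Q = L^T C0^-1 L penalizes
# wiggles in the amplitude vector and pulls the regularized solution toward the prior spectrum.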
def _prepare_data(self):
# rebin wavelengths
if self.bin_widths > 0:
lambdas_bin_edges = np.arange(int(np.min(np.concatenate(list(self.spectrum_lambdas)))),
int(np.max(np.concatenate(list(self.spectrum_lambdas)))) + 1,
self.bin_widths)
self.lambdas_bin_edges = lambdas_bin_edges
lbdas = []
for i in range(1, lambdas_bin_edges.size):
lbdas.append(0.5 * (0*lambdas_bin_edges[i] + 2*lambdas_bin_edges[i - 1])) # lambda bin value on left
self.lambdas = []
for k in range(self.nspectra):
self.lambdas.append(np.asarray(lbdas))
self.lambdas = np.asarray(self.lambdas)
else:
for k in range(1, len(self.spectrum_lambdas)):
if self.spectrum_lambdas[k].size != self.spectrum_lambdas[0].size or \
not np.all(np.isclose(self.spectrum_lambdas[k], self.spectrum_lambdas[0])):
raise ValueError("\nIf you don't rebin your spectra, "
"they must share the same wavelength arrays (in length and values).")
self.lambdas = np.copy(self.spectrum_lambdas)
dlbda = self.lambdas[0, -1] - self.lambdas[0, -2]
lambdas_bin_edges = list(self.lambdas[0]) + [self.lambdas[0, -1] + dlbda]
# mask
lambdas_to_mask = [np.arange(300, 355, self.bin_widths)]
for line in [HALPHA, HBETA, HGAMMA, HDELTA, O2_1, O2_2, O2B]:
width = line.width_bounds[1]
lambdas_to_mask += [np.arange(line.wavelength - width, line.wavelength + width, self.bin_widths)]
lambdas_to_mask = np.concatenate(lambdas_to_mask).ravel()
lambdas_to_mask_indices = []
for k in range(self.nspectra):
lambdas_to_mask_indices.append(np.asarray([np.argmin(np.abs(self.lambdas[k] - lambdas_to_mask[i]))
for i in range(lambdas_to_mask.size)]))
# rebin atmosphere
if self.bin_widths > 0 and isinstance(self.atmospheres[0], AtmosphereGrid):
self.atmosphere_lambda_bins = []
for i in range(0, lambdas_bin_edges.size):
self.atmosphere_lambda_bins.append([])
for j in range(0, self.atmospheres[0].lambdas.size):
if self.atmospheres[0].lambdas[j] >= lambdas_bin_edges[i]:
self.atmosphere_lambda_bins[-1].append(j)
if i < lambdas_bin_edges.size - 1 and self.atmospheres[0].lambdas[j] >= lambdas_bin_edges[i + 1]:
self.atmosphere_lambda_bins[-1] = np.array(self.atmosphere_lambda_bins[-1])
break
self.atmosphere_lambda_bins = np.array(self.atmosphere_lambda_bins, dtype=object)
self.atmosphere_lambda_step = np.gradient(self.atmospheres[0].lambdas)[0]
# rescale data lambdas
# D2CCD = np.median([self.spectra[k].header["D2CCD"] for k in range(self.nspectra)])
# for k in range(self.nspectra):
# self.spectra[k].disperser.D = self.spectra[k].header["D2CCD"]
# dist = self.spectra[k].disperser.grating_lambda_to_pixel(self.spectra[k].lambdas, x0=self.spectra[k].x0)
# self.spectra[k].disperser.D = D2CCD
# self.spectra[k].lambdas = self.spectra[k].disperser.grating_pixel_to_lambda(dist, x0=self.spectra[k].x0)
# rebin data
self.data = np.empty(self.nspectra, dtype=np.object)
if self.bin_widths > 0:
for k in range(self.nspectra):
data_func = interp1d(self.spectra[k].lambdas, self.spectra[k].data,
kind="cubic", fill_value="extrapolate", bounds_error=None)
# lambdas_truth = np.fromstring(self.spectra[k].header['LBDAS_T'][1:-1], sep=' ')
# amplitude_truth = np.fromstring(self.spectra[k].header['AMPLIS_T'][1:-1], sep=' ', dtype=float)
# data_func = interp1d(lambdas_truth, amplitude_truth,
# kind="cubic", fill_value="extrapolate", bounds_error=None)
data = []
for i in range(1, lambdas_bin_edges.size):
data.append(quad(data_func, lambdas_bin_edges[i - 1], lambdas_bin_edges[i])[0] / self.bin_widths)
self.data[k] = np.copy(data)
# if parameters.DEBUG:
# if "LBDAS_T" in self.spectra[k].header:
# lambdas_truth = np.fromstring(self.spectra[k].header['LBDAS_T'][1:-1], sep=' ')
# amplitude_truth = np.fromstring(self.spectra[k].header['AMPLIS_T'][1:-1],sep=' ',dtype=float)
# plt.plot(lambdas_truth, amplitude_truth, label="truth") # -amplitude_truth)
# plt.plot(self.lambdas, self.data_cube[-1], label="binned data") # -amplitude_truth)
# plt.plot(self.spectra[k].lambdas, self.spectra[k].data, label="raw data") # -amplitude_truth)
# # plt.title(self.spectra[k].filename)
# # plt.xlim(480,700)
# plt.grid()
# plt.legend()
# plt.show()
else:
for k in range(self.nspectra):
self.data[k] = np.copy(self.spectrum_data[k])
# rebin reference star
self.ref_spectrum_cube = []
if self.bin_widths > 0:
for k in range(self.nspectra):
data_func = interp1d(self.spectra[k].target.wavelengths[0], self.spectra[k].target.spectra[0],
kind="cubic", fill_value="extrapolate", bounds_error=None)
data = []
for i in range(1, lambdas_bin_edges.size):
data.append(quad(data_func, lambdas_bin_edges[i - 1], lambdas_bin_edges[i])[0] / self.bin_widths)
self.ref_spectrum_cube.append(np.copy(data))
else:
for k in range(self.nspectra):
ref = interp1d(self.spectra[k].target.wavelengths[0], self.spectra[k].target.spectra[0],
kind="cubic", fill_value="extrapolate", bounds_error=None)(self.lambdas[k])
self.ref_spectrum_cube.append(np.copy(ref))
self.ref_spectrum_cube = np.asarray(self.ref_spectrum_cube)
# rebin errors
self.err = np.empty(self.nspectra, dtype=np.object)
if self.bin_widths > 0:
for k in range(self.nspectra):
err_func = interp1d(self.spectra[k].lambdas, self.spectra[k].err ** 2,
kind="cubic", fill_value="extrapolate", bounds_error=False)
err = []
for i in range(1, lambdas_bin_edges.size):
if i in lambdas_to_mask_indices[k]:
err.append(np.nan)
else:
err.append(np.sqrt(np.abs(quad(err_func, lambdas_bin_edges[i - 1], lambdas_bin_edges[i])[0])
/ self.bin_widths))
self.err[k] = np.copy(err)
else:
for k in range(self.nspectra):
self.err[k] = np.copy(self.spectrum_err[k])
if parameters.DEBUG:
for k in range(self.nspectra):
plt.errorbar(self.lambdas[k], self.data[k], self.err[k], label=f"spectrum {k}")
plt.ylim(0, 1.2 * np.max(self.data[k]))
plt.grid()
# plt.legend()
plt.show()
# rebin W matrices
# import time
# start = time.time()
self.data_cov = np.empty(self.nspectra, dtype=np.object)
self.W = np.empty(self.nspectra, dtype=np.object)
if self.bin_widths > 0:
lmins = []
lmaxs = []
for k in range(self.nspectra):
lmins.append([])
lmaxs.append([])
for i in range(self.lambdas[k].size):
lmins[-1].append(max(0, int(np.argmin(np.abs(self.spectrum_lambdas[k] - lambdas_bin_edges[i])))))
lmaxs[-1].append(min(self.spectrum_data_cov[k].shape[0] - 1,
np.argmin(np.abs(self.spectrum_lambdas[k] - lambdas_bin_edges[i + 1]))))
for k in range(self.nspectra):
cov = np.zeros((self.lambdas[k].size, self.lambdas[k].size))
for i in range(cov.shape[0]):
# imin = max(0, int(np.argmin(np.abs(self.spectrum_lambdas[k] - lambdas_bin_edges[i]))))
# imax = min(self.spectrum_data_cov[k].shape[0] - 1,
# np.argmin(np.abs(self.spectrum_lambdas[k] - lambdas_bin_edges[i + 1])))
imin = lmins[k][i]
imax = lmaxs[k][i]
if imin == imax:
cov[i, i] = (i + 1) * 1e10
continue
if i in lambdas_to_mask_indices[k]:
cov[i, i] = (i + 1e10)
continue
for j in range(i, cov.shape[1]):
# jmin = max(0, int(np.argmin(np.abs(self.spectrum_lambdas[k] - lambdas_bin_edges[j]))))
# jmax = min(self.spectrum_data_cov[k].shape[0] - 1,
# np.argmin(np.abs(self.spectrum_lambdas[k] - lambdas_bin_edges[j + 1])))
jmin = lmins[k][j]
jmax = lmaxs[k][j]
# if imin == imax:
# cov[i, i] = (i + 1) * 1e10
# elif jmin == jmax:
# cov[j, j] = (j + 1) * 1e10
# else:
if jmin == jmax:
cov[j, j] = (j + 1) * 1e10
else:
if j in lambdas_to_mask_indices[k]:
cov[j, j] = (j + 1e10)
else:
mean = np.mean(self.spectrum_data_cov[k][imin:imax, jmin:jmax])
cov[i, j] = mean
cov[j, i] = mean
self.data_cov[k] = np.copy(cov)
# self.data_cov = np.zeros(self.nspectra * np.array(self.data_cov_cube[0].shape))
# for k in range(self.nspectra):
# self.data_cov[k * self.lambdas[k].size:(k + 1) * self.lambdas[k].size,
# k * self.lambdas[k].size:(k + 1) * self.lambdas[k].size] = \
# self.data_cov_cube[k]
# self.data_cov = self.data_cov_cube
# print("fill data_cov_cube", time.time() - start)
# start = time.time()
for k in range(self.nspectra):
try:
L = np.linalg.inv(np.linalg.cholesky(self.data_cov[k]))
invcov_matrix = L.T @ L
except np.linalg.LinAlgError:
invcov_matrix = np.linalg.inv(self.data_cov[k])
self.W[k] = invcov_matrix
# self.data_invcov = np.zeros(self.nspectra * np.array(self.data_cov_cube[0].shape))
# for k in range(self.nspectra):
# self.data_invcov[k * self.lambdas[k].size:(k + 1) * self.lambdas[k].size,
# k * self.lambdas[k].size:(k + 1) * self.lambdas[k].size] = \
# self.data_invcov_cube[k]
# self.data_invcov = self.data_invcov_cube
# print("inv data_cov_cube", time.time() - start)
# start = time.time()
else:
self.W = np.empty(self.nspectra, dtype=np.object)
for k in range(self.nspectra):
try:
L = np.linalg.inv(np.linalg.cholesky(self.spectrum_data_cov[k]))
invcov_matrix = L.T @ L
except np.linalg.LinAlgError:
invcov_matrix = np.linalg.inv(self.spectrum_data_cov[k])
invcov_matrix[lambdas_to_mask_indices[k], :] = 0
invcov_matrix[:, lambdas_to_mask_indices[k]] = 0
self.W[k] = invcov_matrix
def inject_random_A1s(self):
random_A1s = np.random.uniform(0.5, 1, size=self.nspectra)
for k in range(self.nspectra):
self.data[k] *= random_A1s[k]
self.err[k] *= random_A1s[k]
self.data_cov[k] *= random_A1s[k] ** 2
self.W[k] /= random_A1s[k] ** 2
if self.true_A1s is not None:
self.true_A1s *= random_A1s
def get_truth(self):
"""Load the truth parameters (if provided) from the file header.
"""
if 'A1_T' in list(self.spectra[0].header.keys()):
ozone_truth = self.spectrum.header['OZONE_T']
pwv_truth = self.spectrum.header['PWV_T']
aerosols_truth = self.spectrum.header['VAOD_T']
self.truth = (ozone_truth, pwv_truth, aerosols_truth)
self.true_atmospheric_transmission = []
tatm = self.atmosphere.simulate(ozone=ozone_truth, pwv=pwv_truth, aerosols=aerosols_truth)
if self.bin_widths > 0:
for i in range(1, self.lambdas_bin_edges.size):
self.true_atmospheric_transmission.append(quad(tatm, self.lambdas_bin_edges[i - 1],
self.lambdas_bin_edges[i])[0] / self.bin_widths)
else:
self.true_atmospheric_transmission = tatm(self.lambdas[0])
self.true_atmospheric_transmission = np.array(self.true_atmospheric_transmission)
self.true_A1s = np.array([self.spectra[k].header["A1_T"] for k in range(self.nspectra)], dtype=float)
else:
self.truth = None
self.true_instrumental_transmission = []
tinst = lambda lbda: self.disperser.transmission(lbda) * self.telescope.transmission(lbda)
if self.bin_widths > 0:
for i in range(1, self.lambdas_bin_edges.size):
self.true_instrumental_transmission.append(quad(tinst, self.lambdas_bin_edges[i - 1],
self.lambdas_bin_edges[i])[0] / self.bin_widths)
else:
self.true_instrumental_transmission = tinst(self.lambdas[0])
self.true_instrumental_transmission = np.array(self.true_instrumental_transmission)
def simulate(self, ozone, pwv, aerosols, reso, *A1s):
"""Interface method to simulate multiple spectra with a single atmosphere.
Parameters
----------
ozone: float
Ozone parameter for Libradtran (in db).
pwv: float
Precipitable Water Vapor quantity for Libradtran (in mm).
aerosols: float
Vertical Aerosols Optical Depth quantity for Libradtran (no units).
reso: float
Width of the gaussian kernel to smooth the spectra (if <0: no convolution).
Returns
-------
lambdas: array_like
Array of wavelengths (1D).
model: array_like
2D array of the spectrogram simulation.
model_err: array_like
2D array of the spectrogram simulation uncertainty.
Examples
--------
>>> file_names = ["./tests/data/reduc_20170530_134_spectrum.fits"]
>>> w = MultiSpectraFitWorkspace("./outputs/test", file_names, bin_width=5, verbose=True)
>>> lambdas, model, model_err = w.simulate(*w.p)
>>> assert np.sum(model) > 0
>>> assert np.all(lambdas == w.lambdas)
>>> assert np.sum(w.amplitude_params) > 0
"""
# linear regression for the instrumental transmission parameters T
# first: force the grey terms to have an average of 1
A1s = np.array(A1s)
if A1s.size > 1:
m = 1
A1s[0] = m * A1s.size - np.sum(A1s[1:])
self.p[self.A1_first_index] = A1s[0]
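# Solving for the first grey factor as above constrains the A1 coefficients to average
# exactly to m = 1 across all spectra.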
# Matrix M filling: hereafter a fast integration is used
M = []
for k in range(self.nspectra):
atm = []
a = self.atmospheres[k].simulate(ozone, pwv, aerosols)
lbdas = self.atmospheres[k].lambdas
for i in range(1, self.lambdas_bin_edges.size):
delta = self.atmosphere_lambda_bins[i][-1] - self.atmosphere_lambda_bins[i][0]
if delta > 0:
atm.append(
np.trapz(a(lbdas[self.atmosphere_lambda_bins[i]]), dx=self.atmosphere_lambda_step) / delta)
else:
atm.append(1)
if reso > 0:
M.append(A1s[k] * np.diag(fftconvolve_gaussian(self.ref_spectrum_cube[k] * np.array(atm), reso)))
else:
M.append(A1s[k] * np.diag(self.ref_spectrum_cube[k] * np.array(atm)))
# hereafter: no binning but gives unbiased result on extracted spectra from simulations and truth spectra
# if self.reso > 0:
# M = np.array([A1s[k] * np.diag(fftconvolve_gaussian(self.ref_spectrum_cube[k] *
# self.atmospheres[k].simulate(ozone, pwv, aerosols)(self.lambdas[k]), reso))
# for k in range(self.nspectra)])
# else:
# M = np.array([A1s[k] * np.diag(self.ref_spectrum_cube[k] *
# self.atmospheres[k].simulate(ozone, pwv, aerosols)(self.lambdas[k]))
# for k in range(self.nspectra)])
# print("compute M", time.time() - start)
# start = time.time()
# for k in range(self.nspectra):
# plt.plot(self.atmospheres[k].lambdas, [M[k][i,i] for i in range(self.atmospheres[k].lambdas.size)])
# # plt.plot(self.lambdas, self.ref_spectrum_cube[k], linestyle="--")
# plt.grid()
# plt.title(f"reso={reso:.3f}")
# plt.show()
# Matrix W filling: if spectra are not independent, use these lines with einstein summations:
# W = np.zeros((self.nspectra, self.nspectra, self.lambdas.size, self.lambdas.size))
# for k in range(self.nspectra):
# W[k, k, ...] = self.data_invcov[k]
# W_dot_M = np.einsum('lkji,kjh->lih', W, M)
# M_dot_W_dot_M = np.einsum('lkj,lki->ij', M, W_dot_M)
# M_dot_W_dot_M = np.zeros_like(M_dot_W_dot_M)
# otherwise, this is much faster:
M_dot_W_dot_M = np.sum([M[k].T @ self.W[k] @ M[k] for k in range(self.nspectra)], axis=0)
M_dot_W_dot_D = np.sum([M[k].T @ self.W[k] @ self.data[k] for k in range(self.nspectra)], axis=0)
if self.amplitude_priors_method != "spectrum":
for i in range(self.lambdas[0].size):
if np.sum(M_dot_W_dot_M[i]) == 0:
M_dot_W_dot_M[i, i] = 1e-10 * np.mean(M_dot_W_dot_M) * np.random.random()
try:
L = np.linalg.inv(np.linalg.cholesky(M_dot_W_dot_M))
cov_matrix = L.T @ L
except np.linalg.LinAlgError:
cov_matrix = np.linalg.inv(M_dot_W_dot_M)
amplitude_params = cov_matrix @ M_dot_W_dot_D
else:
M_dot_W_dot_M_plus_Q = M_dot_W_dot_M + self.reg * self.Q
try:
L = np.linalg.inv(np.linalg.cholesky(M_dot_W_dot_M_plus_Q))
cov_matrix = L.T @ L
except np.linalg.LinAlgError:
cov_matrix = np.linalg.inv(M_dot_W_dot_M_plus_Q)
amplitude_params = cov_matrix @ (M_dot_W_dot_D + self.reg * self.Q_dot_A0)
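# Regularized normal equations: (M^T W M + reg * Q) T = M^T W D + reg * Q A0, i.e. a
# generalized least-squares solve with a smoothness prior on the transmission amplitudes.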
self.M = M
self.M_dot_W_dot_M = M_dot_W_dot_M
self.M_dot_W_dot_D = M_dot_W_dot_D
model_cube = []
model_err_cube = []
for k in range(self.nspectra):
model_cube.append(M[k] @ amplitude_params)
model_err_cube.append(np.zeros_like(model_cube[-1]))
self.model = np.asarray(model_cube)
self.model_err = np.asarray(model_err_cube)
self.amplitude_params = np.copy(amplitude_params)
self.amplitude_params_err = np.array([np.sqrt(cov_matrix[i, i])
if cov_matrix[i, i] > 0 else 0 for i in range(self.lambdas[0].size)])
self.amplitude_cov_matrix = np.copy(cov_matrix)
# print("algebra", time.time() - start)
# start = time.time()
return self.lambdas, self.model, self.model_err
def plot_fit(self):
"""Plot the fit result.
Examples
--------
>>> file_names = 3 * ["./tests/data/reduc_20170530_134_spectrum.fits"]
>>> w = MultiSpectraFitWorkspace("./outputs/test", file_names, bin_width=5, verbose=True)
>>> w.simulate(*w.p) #doctest: +ELLIPSIS
(array(...
>>> w.plot_fit()
"""
cmap_bwr = copy.copy(cm.get_cmap('bwr'))
cmap_bwr.set_bad(color='lightgrey')
cmap_viridis = copy.copy(cm.get_cmap('viridis'))
cmap_viridis.set_bad(color='lightgrey')
data = copy.deepcopy(self.data)
for k in range(self.nspectra):
data[k][np.isnan(data[k]/self.err[k])] = np.nan
if len(self.outliers) > 0:
bad_indices = self.get_bad_indices()
for k in range(self.nspectra):
data[k][bad_indices[k]] = np.nan
data[k] = np.ma.masked_invalid(data[k])
data = np.array([data[k] for k in range(self.nspectra)], dtype=float)
model = np.array([self.model[k] for k in range(self.nspectra)], dtype=float)
err = np.array([self.err[k] for k in range(self.nspectra)], dtype=float)
gs_kw = dict(width_ratios=[3, 0.13], height_ratios=[1, 1, 1])
fig, ax = plt.subplots(nrows=3, ncols=2, figsize=(7, 6), gridspec_kw=gs_kw)
ozone, pwv, aerosols, reso, *A1s = self.p
#plt.suptitle(f'VAOD={aerosols:.3f}, ozone={ozone:.0f}db, PWV={pwv:.2f}mm, reso={reso:.2f}', y=0.995)
norm = np.nanmax(data)
y = np.arange(0, self.nspectra+1).astype(int) - 0.5
xx, yy = np.meshgrid(self.lambdas[0], y)
ylbda = -0.45 * np.ones_like(self.lambdas[0][1:-1])
# model
im = ax[1, 0].pcolormesh(xx, yy, model / norm, vmin=0, vmax=1, cmap=cmap_viridis)
plt.colorbar(im, cax=ax[1, 1], label='1/max(data)', format="%.1f")
ax[1, 0].set_title("Model", fontsize=12, color='white', x=0.91, y=0.76)
ax[1, 0].grid(color='silver', ls='solid')
ax[1, 0].scatter(self.lambdas[0][1:-1], ylbda, cmap=from_lambda_to_colormap(self.lambdas[0][1:-1]),
edgecolors='None', c=self.lambdas[0][1:-1], label='', marker='o', s=20)
# data
im = ax[0, 0].pcolormesh(xx, yy, data / norm, vmin=0, vmax=1, cmap=cmap_viridis)
plt.colorbar(im, cax=ax[0, 1], label='1/max(data)', format="%.1f")
ax[0, 0].set_title("Data", fontsize=12, color='white', x=0.91, y=0.76)
ax[0, 0].grid(color='silver', ls='solid')
ax[0, 0].scatter(self.lambdas[0][1:-1], ylbda, cmap=from_lambda_to_colormap(self.lambdas[0][1:-1]),
edgecolors='None', c=self.lambdas[0][1:-1], label='', marker='o', s=20)
# residuals
residuals = (data - model)
norm = err
residuals /= norm
std = float(np.nanstd(residuals))
im = ax[2, 0].pcolormesh(xx, yy, residuals, vmin=-3 * std, vmax=3 * std, cmap=cmap_bwr)
plt.colorbar(im, cax=ax[2, 1], label='(Data-Model)/Err', format="%.0f")
# ax[2, 0].set_title('(Data-Model)/Err', fontsize=10, color='black', x=0.84, y=0.76)
ax[2, 0].grid(color='silver', ls='solid')
ax[2, 0].scatter(self.lambdas[0][1:-1], ylbda, cmap=from_lambda_to_colormap(self.lambdas[0][1:-1]),
edgecolors='None', c=self.lambdas[0][1:-1], label='', marker='o', s=10*self.nspectra)
ax[2, 0].text(0.05, 0.8, f'mean={np.nanmean(residuals):.3f}\nstd={np.nanstd(residuals):.3f}',
horizontalalignment='left', verticalalignment='bottom',
color='black', transform=ax[2, 0].transAxes)
ax[2, 0].set_xlabel(r"$\lambda$ [nm]")
for i in range(3):
ax[i, 0].set_xlim(self.lambdas[0, 0], self.lambdas[0, -1])
ax[i, 0].set_ylim(-0.5, self.nspectra-0.5)
ax[i, 0].yaxis.set_major_locator(MaxNLocator(integer=True))
ax[i, 0].set_ylabel("Spectrum index")
ax[i, 1].get_yaxis().set_label_coords(2.6, 0.5)
ax[i, 0].get_yaxis().set_label_coords(-0.06, 0.5)
fig.tight_layout()
if parameters.SAVE:
fig.savefig(self.output_file_name + '_bestfit.pdf', dpi=100, bbox_inches='tight')
if self.live_fit: # pragma: no cover
plt.draw()
plt.pause(1e-8)
plt.close()
else: # pragma: no cover
if parameters.DISPLAY and self.verbose:
plt.show()
def plot_transmissions(self):
"""Plot the fit result for transmissions.
Examples
--------
>>> file_names = ["./tests/data/sim_20170530_134_spectrum.fits"]
>>> w = MultiSpectraFitWorkspace("./outputs/test", file_names, bin_width=5, verbose=True)
>>> w.plot_transmissions()
"""
gs_kw = dict(width_ratios=[1, 1], height_ratios=[1, 0.15])
fig, ax = plt.subplots(nrows=2, ncols=2, figsize=(9, 6), gridspec_kw=gs_kw, sharex="all")
ozone, pwv, aerosols, reso, *A1s = self.p
plt.suptitle(f'VAOD={aerosols:.3f}, ozone={ozone:.0f}db, PWV={pwv:.2f}mm', y=1)
masked = self.amplitude_params_err > 1e6
transmission = np.copy(self.amplitude_params)
transmission_err = np.copy(self.amplitude_params_err)
transmission[masked] = np.nan
transmission_err[masked] = np.nan
ax[0, 0].errorbar(self.lambdas[0], transmission, yerr=transmission_err,
label=r'$T_{\mathrm{inst}} * \left\langle A_1 \right\rangle$', fmt='k.') # , markersize=0.1)
ax[0, 0].set_ylabel(r'Instrumental transmission')
ax[0, 0].set_xlim(self.lambdas[0][0], self.lambdas[0][-1])
ax[0, 0].set_ylim(0, 1.1 * np.nanmax(transmission))
ax[0, 0].grid(True)
ax[0, 0].set_xlabel(r'$\lambda$ [nm]')
if self.true_instrumental_transmission is not None:
ax[0, 0].plot(self.lambdas[0], self.true_instrumental_transmission, "g-",
label=r'true $T_{\mathrm{inst}}* \left\langle A_1 \right\rangle$')
ax[1, 0].set_xlabel(r'$\lambda$ [nm]')
ax[1, 0].grid(True)
ax[1, 0].set_ylabel(r'(Data-Truth)/Err')
norm = transmission_err
residuals = (self.amplitude_params - self.true_instrumental_transmission) / norm
residuals[masked] = np.nan
ax[1, 0].errorbar(self.lambdas[0], residuals, yerr=transmission_err / norm,
label=r'$T_{\mathrm{inst}}$', fmt='k.') # , markersize=0.1)
ax[1, 0].set_ylim(-1.1 * np.nanmax(np.abs(residuals)), 1.1 * np.nanmax(np.abs(residuals)))
else:
ax[1, 0].remove()
ax[0, 0].legend()
tatm = self.atmosphere.simulate(ozone=ozone, pwv=pwv, aerosols=aerosols)
tatm_binned = []
for i in range(1, self.lambdas_bin_edges.size):
tatm_binned.append(quad(tatm, self.lambdas_bin_edges[i - 1], self.lambdas_bin_edges[i])[0] /
(self.lambdas_bin_edges[i] - self.lambdas_bin_edges[i - 1]))
ax[0, 1].errorbar(self.lambdas[0], tatm_binned,
label=r'$T_{\mathrm{atm}}$', fmt='k.') # , markersize=0.1)
ax[0, 1].set_ylabel(r'Atmospheric transmission')
ax[0, 1].set_xlabel(r'$\lambda$ [nm]')
ax[0, 1].set_xlim(self.lambdas[0][0], self.lambdas[0][-1])
ax[0, 1].grid(True)
if self.truth is not None:
ax[0, 1].plot(self.lambdas[0], self.true_atmospheric_transmission, "b-", label=r'true $T_{\mathrm{atm}}$')
ax[1, 1].set_xlabel(r'$\lambda$ [nm]')
ax[1, 1].set_ylabel(r'Data-Truth')
ax[1, 1].grid(True)
residuals = np.asarray(tatm_binned) - self.true_atmospheric_transmission
ax[1, 1].errorbar(self.lambdas[0], residuals, label=r'$T_{\mathrm{inst}}$', fmt='k.') # , markersize=0.1)
ax[1, 1].set_ylim(-1.1 * np.max(np.abs(residuals)), 1.1 * np.max( | np.abs(residuals) | numpy.abs |
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "<NAME>" at 07:03, 18/03/2020 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieu1995 %
#-------------------------------------------------------------------------------------------------------%
from numpy import exp, sign, ones, mean, multiply
from numpy.random import uniform, randint, normal, random, choice
from copy import deepcopy
from mealpy.root import Root
class BaseEO(Root):
"""
The original version of: Equilibrium Optimizer (EO)
(Equilibrium Optimizer: A Novel Optimization Algorithm)
Link:
https://doi.org/10.1016/j.knosys.2019.105190
https://www.mathworks.com/matlabcentral/fileexchange/73352-equilibrium-optimizer-eo
"""
def __init__(self, obj_func=None, lb=None, ub=None, problem_size=50, batch_size=10, verbose=True, epoch=750, pop_size=100):
Root.__init__(self, obj_func, lb, ub, problem_size, batch_size, verbose)
self.epoch = epoch
self.pop_size = pop_size
self.V = 1
self.a1 = 2
self.a2 = 1
self.GP = 0.5
def train(self):
#c_eq1 = [None, float("inf")] # it is global best position
c_eq2 = [None, float("inf")]
c_eq3 = [None, float("inf")]
c_eq4 = [None, float("inf")]
# ---------------- Memory saving-------------------
pop = [self.create_solution() for _ in range(self.pop_size)]
g_best = self.get_global_best_solution(pop, self.ID_FIT, self.ID_MIN_PROB)
c_eq1 = deepcopy(g_best)
for epoch in range(0, self.epoch):
for i in range(0, self.pop_size):
if pop[i][self.ID_FIT] < c_eq1[self.ID_FIT]:
c_eq1 = deepcopy(pop[i])
elif c_eq1[self.ID_FIT] < pop[i][self.ID_FIT] < c_eq2[self.ID_FIT]:
c_eq2 = deepcopy(pop[i])
elif c_eq1[self.ID_FIT] < pop[i][self.ID_FIT] and c_eq2[self.ID_FIT] < pop[i][self.ID_FIT] < c_eq3[self.ID_FIT]:
c_eq3 = deepcopy(pop[i])
elif c_eq1[self.ID_FIT] < pop[i][self.ID_FIT] and c_eq2[self.ID_FIT] < pop[i][self.ID_FIT] and c_eq3[self.ID_FIT] < pop[i][self.ID_FIT] < c_eq4[self.ID_FIT]:
c_eq4 = deepcopy(pop[i])
# make equilibrium pool
c_eq_ave = (c_eq1[self.ID_POS] + c_eq2[self.ID_POS] + c_eq3[self.ID_POS] + c_eq4[self.ID_POS]) / 4
fit_ave = self.get_fitness_position(c_eq_ave)
c_pool = [c_eq1, c_eq2, c_eq3, c_eq4, [c_eq_ave, fit_ave]]
# Eq. 9
t = (1 - epoch/self.epoch) ** (self.a2 * epoch / self.epoch)
for i in range(0, self.pop_size):
lamda = uniform(0, 1, self.problem_size) # lambda in Eq. 11
r = uniform(0, 1, self.problem_size) # r in Eq. 11
c_eq = c_pool[randint(0, len(c_pool))][self.ID_POS] # random selection 1 of candidate from the pool
f = self.a1 * sign(r - 0.5) * (exp(-lamda * t) - 1.0) # Eq. 11
r1 = uniform()
r2 = uniform() # r1, r2 in Eq. 15
gcp = 0.5 * r1 * ones(self.problem_size) * (r2 >= self.GP) # Eq. 15
g0 = gcp * (c_eq - lamda * pop[i][self.ID_POS]) # Eq. 14
g = g0 * f # Eq. 13
temp = c_eq + (pop[i][self.ID_POS] - c_eq) * f + (g * self.V / lamda) * (1.0 - f) # Eq. 16
fit = self.get_fitness_position(temp)
pop[i] = [temp, fit]
g_best = self.update_global_best_solution(pop, self.ID_MIN_PROB, g_best)
self.loss_train.append(g_best[self.ID_FIT])
if self.verbose:
print(">Epoch: {}, Best fit: {}".format(epoch + 1, g_best[self.ID_FIT]))
self.solution = g_best
return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train
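# Minimal usage sketch for BaseEO with a simple sphere objective. The objective function, bounds and
# run lengths below are illustrative assumptions; the Root base class is expected to supply
# create_solution/get_fitness_position/update_global_best_solution as used in train() above.
def _demo_base_eo():
    import numpy as np

    def sphere(solution):
        # convex benchmark with its global minimum of 0 at the origin
        return np.sum(np.array(solution) ** 2)

    model = BaseEO(obj_func=sphere, lb=[-10] * 30, ub=[10] * 30,
                   problem_size=30, verbose=False, epoch=100, pop_size=50)
    best_pos, best_fit, loss_history = model.train()
    return best_pos, best_fit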
class ModifiedEO(BaseEO):
"""
Original version of: Modified Equilibrium Optimizer (MEO)
(An efficient equilibrium optimizer with mutation strategy for numerical optimization)
Link:
https://doi.org/10.1016/j.asoc.2020.106542
"""
def __init__(self, obj_func=None, lb=None, ub=None, problem_size=50, batch_size=10, verbose=True, epoch=750, pop_size=100):
BaseEO.__init__(self, obj_func, lb, ub, problem_size, batch_size, verbose, epoch, pop_size)
def _make_equilibrium_pool__(self, list_equilibrium=None):
pos_list = [item[self.ID_POS] for item in list_equilibrium]
pos_mean = | mean(pos_list, axis=0) | numpy.mean |
from __future__ import unicode_literals, absolute_import, division
import os
import sqlite3
import numpy as np
from .base import BaseValidationTest, TestResult
from .plotting import plt
__all__ = ['QuickBkgTest']
def compute_bkg(image):
"""
Routine to give an estimate of the mean, median and std
of the background level from a given image
Args:
-----
image : np.array
Returns:
--------
mean_bkg : Mean background level
median_bkg : Median background level
bkg_noise: Background noise level
"""
image = image.flatten()
q_low, q_high = np.percentile(image, [5, 95]) # This is kind of arbitrary but it works fine
image = image[(image > q_low) & (image < q_high)]
return np.mean(image), np.median(image), np.std(image)
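# Quick self-check sketch for compute_bkg on a synthetic frame: a flat Gaussian background plus a few
# bright "source" pixels. The level and noise values are arbitrary illustrative numbers; the 5-95
# percentile clipping should keep the returned mean/median near the injected 1000-count level, with a
# noise estimate somewhat below the injected sigma because of the clipping.
def _demo_compute_bkg():
    rng = np.random.RandomState(42)
    image = rng.normal(loc=1000.0, scale=10.0, size=(100, 100))
    image[10:12, 10:12] = 50000.0  # bright pixels that the percentile cut should reject
    return compute_bkg(image)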
def get_predicted_bkg(visit, validation_dataset, db_file, band):
if validation_dataset.lower() == 'opsim':
return get_opsim_bkg(visit, db_file, band)
else:
raise NotImplementedError('only "opsim" is currently supported')
# TODO add imSim option
#if validation_dataset == 'imSim':
# return get_imsim_bkg(visit,band)
def compute_sky_counts(mag, band, nsnap):
# Data from https://github.com/lsst-pst/syseng_throughputs/blob/master/plots/table2
if band == 'u':
mag0 = 22.95
counts0 = 50.2
if band == 'g':
mag0 = 22.24
counts0 = 384.6
if band == 'r':
mag0 = 21.20
counts0 = 796.2
if band == 'i':
mag0 = 20.47
counts0 = 1108.1
if band == 'z':
mag0 = 19.60
counts0 = 1687.9
if band == 'y':
mag0 = 18.63
counts0 = 2140.8
return nsnap * counts0 * 10**(-0.4 * (mag - mag0))
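# Worked example of the scaling in compute_sky_counts: at the tabulated reference magnitude the
# function returns nsnap * counts0, and each magnitude fainter multiplies the counts by
# 10**(-0.4) ~ 0.398. The r-band numbers are the ones hard-coded above; the call itself is illustrative.
def _demo_sky_counts():
    at_reference = compute_sky_counts(mag=21.20, band='r', nsnap=1)     # ~796.2 counts
    one_mag_fainter = compute_sky_counts(mag=22.20, band='r', nsnap=1)  # ~796.2 * 0.398 ~ 317 counts
    return at_reference, one_mag_fainter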
def get_airmass_raw_seeing(visit, db_file):
conn = sqlite3.connect(db_file)
cur = conn.cursor()
cur.execute(
"SELECT airmass, filtSkyBrightness, finSeeing, rawSeeing, visitExpTime, fiveSigmaDepth FROM ObsHistory WHERE obsHistID==%d"
% (visit))
rows = cur.fetchall()
return rows[0]
def get_opsim_bkg(visit, db_file, band):
skybrightness = get_airmass_raw_seeing(int(visit), db_file)[1]
# We are going to compute the background counts given OpSim's sky-brightness
mean_bkg = compute_sky_counts(skybrightness, band, 1)
median_bkg = mean_bkg # We assume that the background is completely homogeneous
bkg_noise = np.sqrt(mean_bkg) # We assume Poisson noise
return mean_bkg, median_bkg, bkg_noise
class QuickBkgTest(BaseValidationTest):
"""
Check of mean, median and standard deviation of the image background.
We compare to expected values from OpSim or imSim.
Args:
-----
label (str): x-label for the validation plots
visit (int): Visit number to analyze
band (str): Filter/band to analyze
bkg_validation_dataset (str): Name of the validation dataset to compare against; for now,
only 'opsim' is available.
"""
def __init__(self, label, bkg_validation_dataset, visit, band, db_file, **kwargs):
# pylint: disable=W0231
self.validation_data = get_predicted_bkg(visit, bkg_validation_dataset, db_file, band)
self.label = label
self.visit = visit
self.band = band
self.bkg_validation_dataset = bkg_validation_dataset
def post_process_plot(self, ax):
ymin, ymax = ax[0].get_ylim()
ax[0].plot(
np.ones(3) * self.validation_data[0],
| np.linspace(ymin, ymax, 3) | numpy.linspace |
from __future__ import division
from builtins import object
import pickle
import numpy as np
from sporco.admm import parcbpdn
from sporco.fft import fftn, ifftn
import sporco.linalg as sl
class TestSet01(object):
def setup_method(self, method):
np.random.seed(12345)
def test_01(self):
N = 16
Nd = 5
Cs = 3
M = 4
D = np.random.randn(Nd, Nd, M)
s = np.random.randn(N, N, Cs)
lmbda = 1e-1
b = parcbpdn.ParConvBPDN(D, s, lmbda, dimK=0)
assert b.cri.dimC == 1
assert b.cri.dimK == 0
def test_02(self):
N = 16
Nd = 5
Cs = 3
K = 5
M = 4
D = np.random.randn(Nd, Nd, M)
s = np.random.randn(N, N, Cs, K)
lmbda = 1e-1
b = parcbpdn.ParConvBPDN(D, s, lmbda)
assert b.cri.dimC == 1
assert b.cri.dimK == 1
def test_03(self):
N = 16
Nd = 5
Cd = 3
M = 4
D = np.random.randn(Nd, Nd, Cd, M)
s = np.random.randn(N, N, Cd)
lmbda = 1e-1
b = parcbpdn.ParConvBPDN(D, s, lmbda)
assert b.cri.dimC == 1
assert b.cri.dimK == 0
def test_04(self):
N = 16
Nd = 5
Cd = 3
K = 5
M = 4
D = np.random.randn(Nd, Nd, Cd, M)
s = np.random.randn(N, N, Cd, K)
lmbda = 1e-1
b = parcbpdn.ParConvBPDN(D, s, lmbda)
assert b.cri.dimC == 1
assert b.cri.dimK == 1
def test_05(self):
N = 16
Nd = 5
K = 2
M = 4
D = np.random.randn(Nd, Nd, M)
s = np.random.randn(N, N, K)
lmbda = 1e-1
b = parcbpdn.ParConvBPDN(D, s, lmbda)
assert b.cri.dimC == 0
assert b.cri.dimK == 1
def test_06(self):
N = 16
Nd = 5
K = 2
M = 4
D = np.random.randn(Nd, Nd, M)
s = np.random.randn(N, N, K)
dt = np.float32
opt = parcbpdn.ParConvBPDN.Options({'Verbose': False,
'MaxMainIter': 20, 'AutoRho': {'Enabled':
True}, 'DataType': dt})
lmbda = 1e-1
b = parcbpdn.ParConvBPDN(D, s, lmbda, opt=opt)
b.solve()
assert b.X.dtype == dt
assert b.Y.dtype == dt
assert b.U.dtype == dt
def test_07(self):
N = 16
Nd = 5
K = 2
M = 4
D = np.random.randn(Nd, Nd, M)
s = np.random.randn(N, N, K)
dt = np.float64
opt = parcbpdn.ParConvBPDN.Options({'Verbose': False,
'MaxMainIter': 20, 'AutoRho': {'Enabled':
True}, 'DataType': dt})
lmbda = 1e-1
b = parcbpdn.ParConvBPDN(D, s, lmbda, opt=opt)
b.solve()
assert b.X.dtype == dt
assert b.Y.dtype == dt
assert b.U.dtype == dt
def test_08(self):
N = 16
Nd = 5
M = 4
D = np.random.randn(Nd, Nd, M)
s = | np.random.randn(N, N) | numpy.random.randn |
__doc__ = """ Test Boundary conditions for in Elastica implementation"""
import sys
# System imports
import numpy as np
from test_rod.test_rods import MockTestRod
from elastica.boundary_conditions import (
ConstraintBase,
FreeBC,
OneEndFixedBC,
FixedConstraint,
HelicalBucklingBC,
)
from numpy.testing import assert_allclose
from elastica.utils import Tolerance
import pytest
from pytest import main
test_built_in_boundary_condition_impls = [
FreeBC,
OneEndFixedBC,
FixedConstraint,
HelicalBucklingBC,
]
def test_constraint_base():
test_rod = MockTestRod()
test_rod.position_collection = np.ones(3) * 3.0
test_rod.velocity_collection = np.ones(3) * 5.0
test_rod.director_collection = np.ones(3) * 7.0
test_rod.omega_collection = np.ones(3) * 11.0
class TestBC(ConstraintBase):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def constrain_values(self, rod, time):
rod.position_collection *= time
rod.director_collection *= time
def constrain_rates(self, rod, time):
rod.velocity_collection *= time
rod.omega_collection *= time
testBC = TestBC(_system=test_rod)
testBC.constrain_values(test_rod, 2)
testBC.constrain_rates(test_rod, 2)
assert_allclose(test_rod.position_collection, 6.0, atol=Tolerance.atol())
assert_allclose(test_rod.director_collection, 14.0, atol=Tolerance.atol())
assert_allclose(test_rod.velocity_collection, 10.0, atol=Tolerance.atol())
assert_allclose(test_rod.omega_collection, 22.0, atol=Tolerance.atol())
def test_constraint_base_properties_access():
test_rod = MockTestRod()
class TestBC(ConstraintBase):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# Able to access properties in constraint class
assert self._system == test_rod
assert self.constrained_position_idx == 11
assert self.constrained_director_idx == 17
def constrain_values(self, rod, time):
assert self._system == test_rod
assert self.constrained_position_idx == 11
assert self.constrained_director_idx == 17
def constrain_rates(self, rod, time):
assert self._system == test_rod
assert self.constrained_position_idx == 11
assert self.constrained_director_idx == 17
testBC = TestBC(
constrained_position_idx=11, constrained_director_idx=17, _system=test_rod
)
testBC.constrain_values(test_rod, 2)
testBC.constrain_rates(test_rod, 2)
# tests free rod boundary conditions
def test_free_rod():
test_rod = MockTestRod()
free_rod = FreeBC(_system=test_rod)
test_position_collection = np.random.rand(3, 20)
test_rod.position_collection = (
test_position_collection.copy()
) # We need copy of the list not a reference to this array
test_director_collection = np.random.rand(3, 3, 20)
test_rod.director_collection = (
test_director_collection.copy()
) # We need copy of the list not a reference to this array
free_rod.constrain_values(test_rod, time=0)
assert_allclose(
test_position_collection, test_rod.position_collection, atol=Tolerance.atol()
)
assert_allclose(
test_director_collection, test_rod.director_collection, atol=Tolerance.atol()
)
test_velocity_collection = np.random.rand(3, 20)
test_rod.velocity_collection = (
test_velocity_collection.copy()
) # We need copy of the list not a reference to this array
test_omega_collection = np.random.rand(3, 20)
test_rod.omega_collection = (
test_omega_collection.copy()
) # We need copy of the list not a reference to this array
free_rod.constrain_rates(test_rod, time=0)
assert_allclose(
test_velocity_collection, test_rod.velocity_collection, atol=Tolerance.atol()
)
assert_allclose(
test_omega_collection, test_rod.omega_collection, atol=Tolerance.atol()
)
def test_one_end_fixed_bc():
test_rod = MockTestRod()
start_position_collection = np.random.rand(3)
start_director_collection = np.random.rand(3, 3)
fixed_rod = OneEndFixedBC(
start_position_collection, start_director_collection, _system=test_rod
)
test_position_collection = np.random.rand(3, 20)
test_rod.position_collection = (
test_position_collection.copy()
) # We need copy of the list not a reference to this array
test_director_collection = np.random.rand(3, 3, 20)
test_rod.director_collection = (
test_director_collection.copy()
) # We need copy of the list not a reference to this array
fixed_rod.constrain_values(test_rod, time=0)
test_position_collection[..., 0] = start_position_collection
test_director_collection[..., 0] = start_director_collection
assert_allclose(
test_position_collection, test_rod.position_collection, atol=Tolerance.atol()
)
assert_allclose(
test_director_collection, test_rod.director_collection, atol=Tolerance.atol()
)
test_velocity_collection = np.random.rand(3, 20)
test_rod.velocity_collection = (
test_velocity_collection.copy()
) # We need copy of the list not a reference to this array
test_omega_collection = np.random.rand(3, 20)
test_rod.omega_collection = (
test_omega_collection.copy()
) # We need copy of the list not a reference to this array
fixed_rod.constrain_rates(test_rod, time=0)
test_velocity_collection[..., 0] = np.array((0, 0, 0))
test_omega_collection[..., 0] = np.array((0, 0, 0))
assert_allclose(
test_velocity_collection, test_rod.velocity_collection, atol=Tolerance.atol()
)
assert_allclose(
test_omega_collection, test_rod.omega_collection, atol=Tolerance.atol()
)
@pytest.mark.parametrize("seed", [1, 10, 100])
@pytest.mark.parametrize("n_position_constraint", [0, 1, 3, 5])
@pytest.mark.parametrize("n_director_constraint", [0, 2, 6, 9])
def test_fixed_constraint(seed, n_position_constraint, n_director_constraint):
rng = np.random.default_rng(seed)
N = 20
test_rod = MockTestRod()
start_position_collection = rng.random((n_position_constraint, 3))
start_director_collection = rng.random((n_director_constraint, 3, 3))
fixed_rod = FixedConstraint(
*start_position_collection, *start_director_collection, _system=test_rod
)
pos_indices = rng.choice(N, size=n_position_constraint, replace=False)
dir_indices = rng.choice(N, size=n_director_constraint, replace=False)
fixed_rod._constrained_position_idx = pos_indices.copy()
fixed_rod._constrained_director_idx = dir_indices.copy()
test_position_collection = rng.random((3, N))
test_rod.position_collection = (
test_position_collection.copy()
) # We need copy of the list not a reference to this array
test_director_collection = rng.random((3, 3, N))
test_rod.director_collection = (
test_director_collection.copy()
) # We need copy of the list not a reference to this array
fixed_rod.constrain_values(test_rod, time=0)
test_position_collection[..., pos_indices] = start_position_collection.transpose(
(1, 0)
)
test_director_collection[..., dir_indices] = start_director_collection.transpose(
(1, 2, 0)
)
assert_allclose(
test_position_collection, test_rod.position_collection, atol=Tolerance.atol()
)
assert_allclose(
test_director_collection, test_rod.director_collection, atol=Tolerance.atol()
)
test_velocity_collection = rng.random((3, N))
test_rod.velocity_collection = (
test_velocity_collection.copy()
) # We need copy of the list not a reference to this array
test_omega_collection = rng.random((3, N))
test_rod.omega_collection = (
test_omega_collection.copy()
) # We need copy of the list not a reference to this array
fixed_rod.constrain_rates(test_rod, time=0)
test_velocity_collection[..., pos_indices] = 0.0
test_omega_collection[..., dir_indices] = 0.0
assert_allclose(
test_velocity_collection, test_rod.velocity_collection, atol=Tolerance.atol()
)
assert_allclose(
test_omega_collection, test_rod.omega_collection, atol=Tolerance.atol()
)
def test_helical_buckling_bc():
twisting_time = 500.0
slack = 3.0
number_of_rotations = 27.0 # number of 2pi rotations
start_position_collection = | np.array([0.0, 0.0, 0.0]) | numpy.array |
from cnntools import cnntools
from torchvision import models, transforms
from os.path import join as pjoin
import torch
import numpy as np
import pandas as pd
from scipy import stats, linalg
import os
from dnnbrain.dnn import models as dnn_models
import torch.nn as nn
from PIL import Image
from ATT.iofunc import iofiles
from sklearn.decomposition import PCA
def avg_by_imglabel(imgname, actval, label=0):
"""
"""
lblidx = np.array([imgname[i][1]==label for i in range(len(imgname))])
return actval[lblidx,:].mean(axis=0)
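# Small illustrative sketch of avg_by_imglabel, assuming imgname holds (path, label) pairs as it is
# indexed above and actval has one activation row per image. Averaging the two label-0 rows below
# gives array([2., 3.]).
def _demo_avg_by_imglabel():
    imgname = [("img0.jpg", 0), ("img1.jpg", 0), ("img2.jpg", 1)]
    actval = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
    return avg_by_imglabel(imgname, actval, label=0)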
class PaddingImage(object):
"""
"""
def __init__(self, prop):
self.prop = prop
def __call__(self, img):
return cnntools.resize_padding_image(img, prop=self.prop)
# Extract PC2
cnn_net = models.alexnet(pretrained=False)
# cnn_net.classifier[-1] = torch.nn.Linear(4096,2)
# cnn_net.classifier = torch.nn.Sequential(*cnn_net.classifier, torch.nn.Linear(1000,2))
# cnn_net.load_state_dict(torch.load('/nfs/a1/userhome/huangtaicheng/workingdir/models/DNNmodel_param/alexnet_twocate.pth'))
# cnn_net.load_state_dict(torch.load('/nfs/a1/userhome/huangtaicheng/workingdir/models/DNNmodel_param/alexnet_object100_singleobj.pth'))
# cnn_net.load_state_dict(torch.load('/nfs/a1/userhome/huangtaicheng/workingdir/models/DNNmodel_param/alexnetcate2_noaddlayer.pth', map_location='cuda:0'))
cnn_net.load_state_dict(torch.load('/nfs/a1/userhome/huangtaicheng/workingdir/models/DNNmodel_param/alexnet.pth', map_location='cuda:0'))
transform = transforms.Compose([transforms.Resize((224,224)), PaddingImage(0.2), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
# transform = transforms.Compose([ShuffleImage(), transforms.Resize((224,224)), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
imgpath_bsobject = '/nfs/a1/userhome/huangtaicheng/workingdir/data/PhysicalSize/ObjectSize/SizeDataset_2021/Object100_origin'
imgname, object_act = cnntools.extract_activation(cnn_net, imgpath_bsobject, layer_loc=('features', '8'), imgtransforms=transform, isgpu=True)
if object_act.ndim == 4:
object_act = object_act.reshape(*object_act.shape[:2], -1).mean(axis=-1)
object_act_avg = np.zeros((100,object_act.shape[-1]))
for lbl in range(100):
object_act_avg[lbl,:] = avg_by_imglabel(imgname, object_act, lbl)
object_act_avg = object_act_avg/np.tile(linalg.norm(object_act_avg, axis=1), (object_act_avg.shape[-1],1)).T
iopkl = iofiles.make_ioinstance('/nfs/a1/userhome/huangtaicheng/workingdir/models/pca_imgnetval_conv4_alexnet.pkl')
pca_model = iopkl.load()
pca_act = np.dot(object_act_avg, np.linalg.pinv(pca_model.components_))
pc2_act = pca_act[:,1]
# Load real-world size
# retin_size_pd = pd.read_csv('/nfs/a1/userhome/huangtaicheng/workingdir/data/PhysicalSize/RetinSizes.csv')
rw_size_pd = pd.read_csv('/nfs/a1/userhome/huangtaicheng/workingdir/data/PhysicalSize/Real_SizeRanks8.csv')
rw_size_pd = rw_size_pd.sort_values('name')
rw_size = rw_size_pd['diag_size']
retin_size_pd = pd.read_csv('/nfs/a1/userhome/huangtaicheng/workingdir/data/PhysicalSize/RetinSizes.csv')
rw_size_log10 = np.log10(rw_size)
rw_size_log2 = | np.log2(rw_size) | numpy.log2 |
#!/usr/bin/env python
"""
@package ion_functions.data.prs_functions
@file ion_functions/data/prs_functions.py
@author <NAME>, <NAME>
@brief Module containing calculations related to instruments in the Seafloor
Pressure family.
"""
import numexpr as ne
import numpy as np
import scipy as sp
from scipy import signal
"""
Listing of functions, in order encountered.
Functions calculating data products.
BOTTILT:
prs_bottilt_ccmp -- computes the BOTTILT-CCMP_L1 data product
prs_bottilt_tmag -- computes the BOTTILT-TMAG_L1 data product
prs_bottilt_tdir -- computes the BOTTILT-TDIR_L1 data product
BOTSFLU:
prs_botsflu_time15s -- computes the TIME15S-AUX auxiliary data product
prs_botsflu_meanpres -- computes the BOTSFLU-MEANPRES_L2 data product
prs_botsflu_predtide -- computes the BOTSFLU-PREDTIDE_L2 data product
prs_botsflu_meandepth -- computes the BOTSFLU-MEANDEPTH_L2 data product
prs_botsflu_5minrate -- computes the BOTSFLU-5MINRATE_L2 data product
prs_botsflu_10minrate -- computes the BOTSFLU-10MINRATE_L2 data product
prs_botsflu_time24h -- computes the TIME24H-AUX auxiliary data product
prs_botsflu_daydepth -- computes the BOTSFLU-DAYDEPTH_L2 data product
prs_botsflu_4wkrate -- computes the BOTSFLU-4WKRATE_L2 data product
prs_botsflu_8wkrate -- computes the BOTSFLU-8WKRATE_L2 data product
Functions calculating event notifications; they return either True or False.
BOTSFLU:
prs_tsunami_detection -- event notification specified by DPS
prs_eruption_imminent -- event notification specified by DPS
prs_eruption_occurred -- event notification specified by DPS
Worker functions called by functions calculating data products.
BOTSFLU
anchor_bin
calc_daydepth_plus
calc_meandepth_plus
calculate_sliding_means
calculate_sliding_slopes
"""
def prs_bottilt_ccmp(scmp, sn):
"""
Description:
OOI Level 1 Seafloor High-Resolution tilt (BOTTILT) core data product,
derived from data output by the Applied Geomechanics LILY tilt sensor
on board the Bottom Pressure Tilt (BOTPT) instruments on the Regional
Scale Nodes (RSN) at Axial Seamount. This function computes
BOTTILT-CCMP_L1.
Implemented by:
2013-06-10: <NAME>. Initial code.
2014-03-20: <NAME>. Alternate code: faster, but less direct.
Usage:
ccmp = prs_bottilt_ccmp(scmp, sn)
where
ccmp = Corrected compass direction (BOTTILT-CCMP_L1) [degrees]
scmp = Uncorrected sensor compass direction (BOTTILT-SCMP_L0) [degrees]
sn = LILY sensor serial number [unitless]
References:
OOI (2013). Data Product Specification for Seafloor High-Resolution
Tilt. Document Control Number 1341-00060.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00060_Data_Product_SPEC_BOTTILT_OOI.pdf)
"""
"""
Currently, there are two coded algorithms:
(1) the straightforward original, which uses a two-element keyed dictionary;
(2) a faster version, which uses serial number keys to the dictionary.
Since each algorithm uses its own dictionary, the corresponding import statements
are TEMPORARILY placed at the beginning of their respective code sections
instead of at module top.
"""
### Original coding, using a dictionary constructed with 2-element keys.
# load the corrected compass directions table [(sn, scmp) keys]
from ion_functions.data.prs_functions_ccmp import cmp_lookup
# use the lookup table to get the ccmp
ccmp = np.zeros(len(scmp))
for i in range(len(scmp)):
ccmp[i] = cmp_lookup[(sn[i], int(round(scmp[i])))]
return ccmp
#### Faster coding, using a dictionary constructed with 1-element keys.
#
## load the corrected compass directions table [sn keys]
#from ion_functions.data.prs_functions_ccmp_lily_compass_cals import cmp_cal
#
## initialize output array for vectorized masking operations. this will 'break'
## the code if an invalid serial number is specified in the argument list.
#ccmp = np.zeros(len(scmp)) + np.nan
#
## round the uncorrected compass values to the nearest integer as specified in the DPS,
## which uses a lookup table consisting of integral values to do the correction.
#scmp = np.round(scmp)
#
## find the supported tilt sensor serial numbers, which are keys in the dictionary
#sernum = cmp_cal.keys()
#
#for ii in range(len(sernum)):
# # get the cal coeffs as a function of the iterated serial number;
# # x is the raw, uncorrected reading (scmp)
# # y is the corrected reading (ccmp)
# [x, y] = cmp_cal[sernum[ii]]
#
# # the boolean mask has 'true' entries where the elements of input vector sn
# # agree with the iterated serial number.
# # np.core.defchararray.equal handles vector string comparisons.
# mask = np.core.defchararray.equal(sn, sernum[ii])
#
# ## np.interp is used to do the 'lookup' for performance reasons (vectorized)
# ccmp[mask] = np.interp(scmp[mask], x, y)
#
## round to make sure we get an integral value (but not int type)
#return np.round(ccmp)
def prs_bottilt_tmag(x_tilt, y_tilt):
"""
Description:
OOI Level 1 Seafloor High-Resolution Tilt (BOTTILT) core data product,
derived from data output by the Applied Geomechanics LILY tilt sensor
on board the Bottom Pressure Tilt (BOTPT) instruments on the Regional
Scale Nodes (RSN) at Axial Seamount. This function computes
BOTTILT-TMAG_L1.
Implemented by:
2013-06-10: <NAME>. Initial code.
Usage:
tmag = prs_bottilt(x_tilt, y_tilt)
where
tmag = Resultant tilt magnitude (BOTTILT-TMAG_L1) [microradians]
x_tilt = Sensor X_tilt (BOTTILT-XTLT_L0) [microradians]
y_tilt = Sensor Y_tilt (BOTTILT-YTLT_L0) [microradians]
References:
OOI (2013). Data Product Specification for Seafloor High-Resolution
Tilt. Document Control Number 1341-00060.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00060_Data_Product_SPEC_BOTTILT_OOI.pdf)
"""
tmag = ne.evaluate('sqrt(x_tilt**2 + y_tilt**2)')
return tmag
def prs_bottilt_tdir(x_tilt, y_tilt, ccmp):
"""
Description:
OOI Level 1 Seafloor High-Resolution Tilt (BOTTILT) core data product,
derived from data output by the Applied Geomechanics LILY tilt sensor
on board the Bottom Pressure Tilt (BOTPT) instruments on the Regional
Scale Nodes (RSN) at Axial Seamount. This function computes
BOTTILT-TDIR_L1.
Implemented by:
2013-06-10: <NAME>. Initial code.
2014-03-20: <NAME>. Replaced initial code with arctan2 implementation.
Usage:
tdir = prs_bottilt(x_tilt, y_tilt, ccmp)
where
tdir = Resultant tilt direction (BOTTILT-TDIR_L1) [degrees]
x_tilt = Sensor X_tilt (BOTTILT-XTLT_L0) [microradians]
y_tilt = Sensor Y_tilt (BOTTILT-YTLT_L0) [microradians]
ccmp = Corrected compass direction (BOTTILT-CCMP_L1) [degrees]
References:
OOI (2013). Data Product Specification for Seafloor High-Resolution
Tilt. Document Control Number 1341-00060.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00060_Data_Product_SPEC_BOTTILT_OOI.pdf)
"""
### As originally coded, according to the algorithm specified in the DPS:
## Calculate the angle to use in the tilt direction formula
## default angle calculation -- in degrees
#angle = ne.evaluate('arctan(y_tilt / x_tilt)')
#angle = np.degrees(angle)
#
## if X-Tilt == 0 and Y-Tilt > 0
#mask = np.logical_and(x_tilt == 0, y_tilt > 0)
#angle[mask] = 90.0
#
## if X-Tilt == 0 and Y-Tilt < 0
#mask = np.logical_and(x_tilt == 0, y_tilt < 0)
#angle[mask] = -90.0
#
## if Y-Tilt == 0
#mask = np.equal(y_tilt, np.zeros(len(y_tilt)))
#angle[mask] = 0.0
#
### Calculate the tilt direction, using the X-Tilt to set the equation
## default tilt direction equation
#tdir = ne.evaluate('(270 - angle + ccmp) % 360')
#
## if X-Tilt >= 0
#tmp = ne.evaluate('(90 - angle + ccmp) % 360')
#mask = np.greater_equal(x_tilt, np.zeros(len(x_tilt)))
#tdir[mask] = tmp[mask]
#
#return np.round(tdir)
# The preceding calculation is faster and simpler if the arctan2 function is used.
# Use 450 as an addend in the first argument to the mod function to make sure the result is positive.
return np.round(np.mod(450 - np.degrees(np.arctan2(y_tilt, x_tilt)) + ccmp, 360))
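# Spot-check sketch of the arctan2 form of prs_bottilt_tdir against the piecewise DPS algebra kept in
# the comments above; the tilt and compass values are arbitrary illustrative inputs. The DPS logic
# gives 90, 0 and 270 degrees for these three points, matching the one-line arctan2 expression.
def _demo_bottilt_tdir():
    x_tilt = np.array([100.0, 0.0, -100.0])
    y_tilt = np.array([0.0, 100.0, 0.0])
    ccmp = np.array([0.0, 0.0, 0.0])
    return prs_bottilt_tdir(x_tilt, y_tilt, ccmp)  # -> [90., 0., 270.]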
def prs_botsflu_time15s(timestamp):
"""
Description:
Calculates the auxiliary BOTSFLU data product TIME15S-AUX. These are timestamps
anchored at multiples of 15 seconds past the minute which correspond to the time
base for the BOTSFLU data products which are binned on 15 seconds.
Implemented by:
2015-01-13: <NAME>. Initial code.
Usage
time15s = prs_botsflu_time15s(timestamp)
where
time15s = BOTSFLU-TIME15S-AUX [sec since 01-01-1900]
timestamp = OOI system timestamps [sec since 01-01-1900]
Notes:
The BOTSFLU data products associated with this timebase are:
MEANPRES
PREDTIDE
MEANDEPTH
5MINRATE
10MINRATE
References:
OOI (2015). Data Product Specification for Seafloor Uplift and Subsidence
(BOTSFLU) from the BOTPT instrument. Document Control Number 1341-00080.
"""
bin_duration = 15.0 # seconds
# the second calling argument is a placeholder
time15s = anchor_bin(timestamp, None, bin_duration, 'time')
return time15s
def prs_botsflu_meanpres(timestamp, botpres):
"""
Description:
Calculates the BOTSFLU data product MEANPRES_L1.
Implemented by:
2015-01-13: <NAME>. Initial code.
Usage
meanpres = prs_botsflu_meanpres(timestamp, botpres)
where
meanpres = BOTSFLU-MEANPRES_L2 [psi]
timestamp = OOI system timestamps [sec since 01-01-1900]
botpres = BOTPRES_L1 [psia]
Notes:
The timebase data product associated with this data product is TIME15S.
References:
OOI (2015). Data Product Specification for Seafloor Uplift and Subsidence
(BOTSFLU) from the BOTPT instrument. Document Control Number 1341-00080.
"""
bin_duration = 15.0 # seconds
meanpres, _ = anchor_bin(timestamp, botpres, bin_duration, 'data')
return meanpres
def prs_botsflu_predtide(time):
"""
Description:
Assigns tide values for the 3 BOTPT instrument sites about 500 km west of Astoria.
When the input argument is the data product TIME15S, the output of this function
will be the BOTSFLU data product PREDTIDE.
Implemented by:
2015-01-13: <NAME>. Initial code.
Usage:
PREDTIDE = prs_botsflu_predtide(TIME15S)
where
PREDTIDE = BOTSFLU-PREDTIDE data product [m]
TIME15S = BOTSFLU-TIME15S data product [sec since 01-01-1900].
Notes:
Lookup table in binary file: 'ion_functions/data/prs_functions_tides_2014_thru_2019.mat'
The lookup table contains tide values every 15 seconds from 2014-01-01 to 2020-01-01
at lat = 45.95547 lon = -130.00957 calculated by the Tide Model Driver software
written in Matlab (Mathworks, Natick, MA) using the TPXO7.2 global model. The tides
corresponding to time are determined by positional indexing (the first value is for
2014-01-01 00:00:00, the second is for 2014-01-01 00:00:15, etc). The 3 BOTPT sites
are close enough together that the caldera center location can be used for all, as
above: lat = 45.95547 lon = -130.00957.
References:
OOI (2015). Data Product Specification for Seafloor Uplift and Subsidence
(BOTSFLU) from the BOTPT instrument. Document Control Number 1341-00080.
Matlab code to calculate tides using TPXO7.2 global model:
http://polaris.esr.org/ptm_index.html
Further documentation for the TPXO7.2 global tide model:
http://volkov.oce.orst.edu/tides/global.html
"""
time0 = 3597523200.0 # midnight, 2014-01-01
time_interval = 15.0 # seconds
# for unit test data, only, feb-apr 2011
if time[0] < time0:
time0 = 3502828800.0 # midnight, 2011-01-01
# tide values are signed 4 byte integers, units [0.001mm]
matpath = 'ion_functions/data/matlab_scripts/botpt/'
dict_tides = sp.io.loadmat(matpath + 'tides_15sec_2011_for_unit_tests.mat')
tidevector = 0.000001 * dict_tides['tides_mat']
tidevector = tidevector.reshape((-1))
# calculate tide vector index as a function of timestamp
idx = np.around((time - time0)/time_interval)
tide = tidevector[idx.astype(int)]
return tide
# else, OOI data from 2014 onwards
# tide values are signed 4 byte integers, units [0.001mm]
dict_tides = sp.io.loadmat('ion_functions/data/prs_functions_tides_2014_thru_2019.mat')
tidevector = 0.000001 * dict_tides['tides_mat']
tidevector = tidevector.reshape((-1))
# calculate tide vector index as a function of timestamp
idx = np.around((time - time0)/time_interval)
tide = tidevector[idx.astype(int)]
return tide
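# Index arithmetic used by prs_botsflu_predtide, shown for a single illustrative timestamp: one day
# after the 2014-01-01 anchor falls 86400 s / 15 s = 5760 entries into the lookup table.
def _demo_predtide_index():
    time0 = 3597523200.0             # 2014-01-01 00:00:00, seconds since 1900
    one_day_later = time0 + 86400.0  # 2014-01-02 00:00:00
    return int(np.around((one_day_later - time0) / 15.0))  # 5760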
def prs_botsflu_meandepth(timestamp, botpres):
"""
Description:
Calculates the BOTSFLU data product MEANDEPTH_L2, de-tided bottom depth
as a function of time (15sec bins).
Implemented by:
2015-01-14: <NAME>. Initial code.
Usage
meandepth = prs_botsflu_meandepth(timestamp, botpres)
where
meandepth = BOTSFLU-MEANDEPTH_L2 [m]
timestamp = OOI system timestamps [sec since 01-01-1900]
botpres = BOTPRES_L1 [psia]
Notes:
The timebase data product associated with this data product is TIME15S.
The DPS specifies that atmospheric pressure not be subtracted from the
L1 pressure data even though its units are [psia].
References:
OOI (2015). Data Product Specification for Seafloor Uplift and Subsidence
(BOTSFLU) from the BOTPT instrument. Document Control Number 1341-00080.
"""
_, meandepth, _ = calc_meandepth_plus(timestamp, botpres)
return meandepth
def prs_botsflu_5minrate(timestamp, botpres):
"""
Description:
Calculates the BOTSFLU data product 5MINRATE_L2, the instantaneous rate of
depth change using 5 minute backwards-looking meandepth data.
Implemented by:
2015-01-14: <NAME>. Initial code.
Usage
botsflu_5minrate = pprs_botsflu_5minrate(timestamp, botpres)
where
botsflu_5minrate = BOTSFLU-5MINRATE_L2 [cm/min]
timestamp = CI system timestamps [sec since 01-01-1900]
botpres = BOTPRES_L1 [psia]
Notes:
The timebase data product associated with this data product is TIME15S.
References:
OOI (2015). Data Product Specification for Seafloor Uplift and Subsidence
(BOTSFLU) from the BOTPT instrument. Document Control Number 1341-00080.
"""
# calculate de-tided depth and the positions of non-zero bins in the original data.
_, meandepth, mask_nonzero = calc_meandepth_plus(timestamp, botpres)
# initialize data product including elements representing data gap positions
botsflu_5minrate = np.zeros(mask_nonzero.size) + np.nan
# re-constitute the original data, with data gaps represented by nans.
data_w_gaps = np.copy(botsflu_5minrate)
data_w_gaps[mask_nonzero] = meandepth
# for 15s binned data, 5 minutes comes out to (5 minutes)/(0.25 min) = 20 intervals
shift = 20
# units of the subtraction are meter/5min; to convert to cm/min,
# multiply by 100cm/m and divide by 5 = 20.
botsflu_5minrate[shift:] = 20.0 * (data_w_gaps[shift:] - data_w_gaps[:-shift])
# this rate product now has potentially two sources of nans;
# definitely those at the start of the data record, and any that might
# have been propagated into the calculation because of the presence of
# data gaps. remove those only at the data dropout positions (if present)
# so that this data product will have a 1:1 correspondence with
# its associated timestamp variable (TIME15S).
botsflu_5minrate = botsflu_5minrate[mask_nonzero]
return botsflu_5minrate
def prs_botsflu_10minrate(timestamp, botpres):
"""
Description:
Calculates the BOTSFLU data product 10MINRATE_L2, the mean seafloor uplift rate
calculated using 10 minute backwards-looking 10 minute running mean depth data.
Implemented by:
2015-01-14: <NAME>. Initial code.
Usage
botsflu_10minrate = pprs_botsflu_10minrate(timestamp, botpres)
where
botsflu_10minrate = BOTSFLU-10MINRATE_L2 [cm/hr]
timestamp = OOI system timestamps [sec since 01-01-1900]
botpres = BOTPRES_L1 [psia]
Notes:
The timebase data product associated with this data product is TIME15S.
References:
OOI (2015). Data Product Specification for Seafloor Uplift and Subsidence
(BOTSFLU) from the BOTPT instrument. Document Control Number 1341-00080.
"""
# calculate de-tided depth and the positions of non-zero bins in the original data.
_, meandepth, mask_nonzero = calc_meandepth_plus(timestamp, botpres)
# initialize data product including elements representing data gap positions
botsflu_10minrate = np.zeros(mask_nonzero.size) + np.nan
# re-constitute the original data, with data gaps represented by nans.
data_w_gaps = | np.copy(botsflu_10minrate) | numpy.copy |
import pickle
import numpy as np
import torch
from torch.utils.data import Dataset
def to_pickle(thing, path): # save something
with open(path, 'wb') as handle:
pickle.dump(thing, handle, protocol=pickle.HIGHEST_PROTOCOL)
def from_pickle(path): # load something
thing = None
with open(path, 'rb') as handle:
thing = pickle.load(handle)
return thing
def arrange_data(x, us, t, num_points=2):
# assume x has shape
# n_u, ts, bs, 3
# or n_u, ts, bs, 32, 32
# or n_u, ts, bs, 3, 64, 64
# output x has shape n_u, num_points, bs * (ts-num_points+1), ...
assert num_points>=2 and num_points<=len(t)
n_u, ts, bs = x.shape[0:3]
x_list = []
u_list = []
for u_ind in range(n_u):
temp = np.zeros((num_points, bs*(ts-num_points+1), *x.shape[3:]), dtype=np.float32)
for i in range(ts-num_points+1):
temp[:, i*bs:(i+1)*bs] = x[u_ind, i:i+num_points] # n_u, num_points, bs, ...
x_list.append(temp)
u_array = np.array(us[u_ind:u_ind+1], dtype=np.float32)
u_list.append(u_array * np.ones((temp.shape[1], len(u_array)), dtype=np.float32))
t_eval=t[0:num_points]
return | np.concatenate(x_list, axis=1) | numpy.concatenate |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Provide the 2D Spring Loaded Inverted Pendulum (SLIP) model.
"""
import numpy as np
import matplotlib.pyplot as plt
__author__ = ["<NAME>", "<NAME>"]
__copyright__ = "Copyright 2018, PyRoboLearn"
__credits__ = ["Songyan Xin"]
__license__ = "GNU GPLv3"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
class SLIP2D(object):
r"""2D Spring Loaded Inverted Pendulum (SLIP) model
The axes are :math:`x` and :math:`y`, where the former points to the right and the latter is up.
The SLIP model consists of a point mass :math:`m`, a massless spring with stiffness :math:`k`, and a rest length
:math:`l_0`. Three phases (flight, stance, and flight) are involved in one rollout of the running motion separated
by a touchdown (TD) and takeoff (TO) events. The state of the system is given by the position and velocity of the
mass :math:`[x, y, \dot{x}, \dot{y}]`, while the angle of the spring :math:`\theta` is the control parameter.
"""
def __init__(self, mass, length, stiffness, position=(0., 1.), velocity=(0., 0.), angle=0.,
angle_limits=(0, 2*np.pi), gravity=9.81, dt=1e-3):
"""
Initialize the 2D SLIP model.
Args:
mass (float): mass.
length (float): the rest length of the spring.
stiffness (float): spring stiffness.
position (np.float[2]): initial position.
velocity (np.float[2]): initial velocity.
angle (float): initial angle
angle_limits (tuple of 2 floats): angle limits (lower bound, upper bound).
gravity (float): gravity in the z direction.
dt (float): integration time step.
"""
# properties
self.m = mass
self.l0 = length
self.k = stiffness
self.g = gravity
self.dt = dt
# state
self.pos = np.array(position)
self.vel = np.array(velocity)
self.acc = np.zeros(self.pos.shape)
# control
self.theta = angle
##############
# Properties #
##############
@property
def state(self):
"""
Return the state of the system, i.e. the position and velocity vectors.
"""
return np.concatenate((self.pos, self.vel))
###########
# Methods #
###########
def kinetic_energy(self, vel=None):
"""
Return the kinetic energy of the inverted pendulum.
Args:
vel (np.ndarray, None): velocity of the inverted pendulum
Returns:
float: kinetic energy
"""
if vel is None:
vel = self.vel
return 0.5 * self.m * vel.dot(vel)
def potential_energy(self, pos=None):
"""
Return the potential energy due to gravity.
Args:
pos (np.ndarray, None): position of the inverted pendulum
Returns:
float: potential energy
"""
if pos is None:
pos = self.pos
return self.m * self.g * pos[-1]
def energy(self, X=None):
"""
Compute the total energy :math:`E=K+P` where :math:`K` is the kinetic energy of the system, and :math:`P`
is the potential energy.
Args:
X (np.array, None): state [pos, vel] of the inverted pendulum
Returns:
float: total energy
"""
if X is None:
X = self.state
P = self.potential_energy(X[:len(X)//2])
K = self.kinetic_energy(X[len(X)//2:])
E = K + P
return E
def max_velocity(self, Emax):
"""
Compute the max velocity in the x-direction when in the flight phase, given the total energy of the system
and assuming that this energy is conserved.
Args:
Emax (float): maximum total energy of the system
Returns:
float: velocity in the x direction
"""
P_min = self.m * self.g * self.l0
K_max = Emax - P_min # conservation of energy
v_max = np.sqrt(2 * K_max / self.m)
return v_max
def apex_energy(self, y, vx):
"""
Compute the energy at the apex (which is the maximum height and is the point where the velocity in the
y-direction is equal to 0).
Args:
y (float): height
vx (float): velocity in the x-direction
Returns:
float: total energy
"""
P = self.m * self.g * y
K = 0.5 * self.m * (vx ** 2)
E = K + P
return E
def apex_height_to_vel(self, E, y):
"""
Return the velocity in the x-direction at the apex given the energy.
Args:
E (float): total energy of the system
y (float): apex height of the pendulum
Returns:
float: velocity in x
"""
P = self.m * self.g * y
K = E - P
v = | np.sqrt(2 / self.m * K) | numpy.sqrt |
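# Numeric sketch of the apex energy bookkeeping used by apex_energy/apex_height_to_vel above:
# E = m*g*y + 0.5*m*vx**2 at the apex, and inverting the kinetic term recovers vx. The mass, height
# and speed values are arbitrary illustrative numbers.
def _demo_apex_energy_roundtrip(m=80.0, g=9.81, y=1.0, vx=3.0):
    E = m * g * y + 0.5 * m * vx ** 2  # apex_energy(y, vx)
    K = E - m * g * y                  # kinetic part left at that height
    vx_back = np.sqrt(2.0 * K / m)     # apex_height_to_vel(E, y)
    return E, vx_back                  # vx_back == vx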
import numpy as np
from clize import run
class YOLO_Kmeans:
def __init__(self, cluster_number, filename, out):
self.cluster_number = cluster_number
self.filename = filename
self.out = out
def iou(self, boxes, clusters): # 1 box -> k clusters
n = boxes.shape[0]
k = self.cluster_number
box_area = boxes[:, 0] * boxes[:, 1]
box_area = box_area.repeat(k)
box_area = np.reshape(box_area, (n, k))
cluster_area = clusters[:, 0] * clusters[:, 1]
cluster_area = np.tile(cluster_area, [1, n])
cluster_area = np.reshape(cluster_area, (n, k))
box_w_matrix = np.reshape(boxes[:, 0].repeat(k), (n, k))
cluster_w_matrix = np.reshape(np.tile(clusters[:, 0], (1, n)), (n, k))
min_w_matrix = np.minimum(cluster_w_matrix, box_w_matrix)
box_h_matrix = np.reshape(boxes[:, 1].repeat(k), (n, k))
cluster_h_matrix = np.reshape(np.tile(clusters[:, 1], (1, n)), (n, k))
min_h_matrix = np.minimum(cluster_h_matrix, box_h_matrix)
inter_area = np.multiply(min_w_matrix, min_h_matrix)
result = inter_area / (box_area + cluster_area - inter_area)
return result
def avg_iou(self, boxes, clusters):
accuracy = np.mean([np.max(self.iou(boxes, clusters), axis=1)])
return accuracy
def kmeans(self, boxes, k, dist=np.median):
box_number = boxes.shape[0]
distances = np.empty((box_number, k))
last_nearest = np.zeros((box_number,))
np.random.seed()
clusters = boxes[np.random.choice(
box_number, k, replace=False)] # init k clusters
while True:
distances = 1 - self.iou(boxes, clusters)
current_nearest = np.argmin(distances, axis=1)
if (last_nearest == current_nearest).all():
break # clusters won't change
for cluster in range(k):
clusters[cluster] = dist( # update clusters
boxes[current_nearest == cluster], axis=0)
last_nearest = current_nearest
return clusters
def result2txt(self, data):
f = open(self.out, 'w')
row = np.shape(data)[0]
for i in range(row):
if i == 0:
x_y = "%d,%d" % (data[i][0], data[i][1])
else:
x_y = ", %d,%d" % (data[i][0], data[i][1])
f.write(x_y)
f.close()
def txt2boxes(self):
f = open(self.filename, 'r')
dataSet = []
for line in f:
infos = line.split(" ")
length = len(infos)
for i in range(1, length):
width = int(infos[i].split(",")[2]) - \
int(infos[i].split(",")[0])
height = int(infos[i].split(",")[3]) - \
int(infos[i].split(",")[1])
dataSet.append([width, height])
result = | np.array(dataSet) | numpy.array |
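# Illustrative sketch of the IoU machinery above, bypassing the annotation file: iou()/avg_iou() only
# use cluster_number and the arrays passed in, so the filename/out constructor arguments are dummies here.
def _demo_yolo_kmeans_iou():
    km = YOLO_Kmeans(cluster_number=2, filename="train.txt", out="anchors.txt")
    boxes = np.array([[10, 12], [11, 13], [50, 60], [52, 58]])
    clusters = np.array([[10, 12], [50, 60]])
    ious = km.iou(boxes, clusters)  # shape (4, 2); each box overlaps its own size group most
    return ious, km.avg_iou(boxes, clusters)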
#!/usr/bin/python
#-*- coding: utf-8 -*
# SAMPLE FOR SIMPLE CONTROL LOOP TO IMPLEMENT BAXTER_CONTROL MPC ALGORITHMS
"""
MPC sample tracking for Baxter's right limb with specific references.
Authors: <NAME> and <NAME>.
"""
# Built-int imports
import time
import random
# Own imports
import baxter_essentials.baxter_class as bc
import baxter_essentials.transformation as transf
import baxter_control.mpc_controller as b_mpc
# General module imports
import numpy as np
import matplotlib.pyplot as plt
def create_plots(iteration_vector, x_matrix, u_matrix, sample_time, title, name1, name2):
"""
Create simple simulation plots from the given state and input matrices.
Each call draws one pop-out matplotlib figure with one subplot per degree of freedom.
"""
# Define a figure for the creation of the plot
figure_1, all_axes = plt.subplots(x_matrix.shape[0], 1)
current_axes = 0
for axes_i in all_axes:
# Generate the plot and its axes for each Xi and Ui.
axes_i.plot(iteration_vector,
x_matrix[current_axes, :].T, 'b', linewidth=1)
axes_i.plot(iteration_vector,
u_matrix[current_axes, :].T, 'g', linewidth=1)
current_axes = current_axes + 1
# Customize figure with the specific "x"-"y" labels
if (current_axes <= 3):
if (name1 == "x"):
axes_i.set_ylabel("[rad]")
else:
axes_i.set_ylabel("[m]")
else:
axes_i.set_ylabel("[rad]")
# Add labels to each subplot
axes_i.legend(["{}{}".format(name1, current_axes),
"{}{}".format(name2, current_axes)])
# Remove inner axes layers (only leave the outer ones)
axes_i.label_outer()
# Add personalized text to each subplot (at lower-right side)
axes_i.text(0.98,
0.02,
'SGA-EJGG',
verticalalignment='bottom',
horizontalalignment='right',
transform=axes_i.transAxes,
color='black',
fontsize=6
)
# Add grid
axes_i.grid(color='black', linestyle='-', alpha=0.2, linewidth=1)
# Change the background color of the external part
figure_1.patch.set_facecolor((0.2, 1, 1))
# Configure plot title and horizontal x-label
all_axes[0].set_title(title)
all_axes[len(
all_axes) - 1].set_xlabel("Iterations [k] (Ts={} seconds)".format(sample_time))
def calculate_cartesian_vectors(current_thetas):
# CURRENT CARTESIAN POSITION CALCULATIONS...
tm_current = bc.BaxterClass().fpk(current_thetas, "right", 7)
current_position = tm_current[0:3, 3]
current_orientation = transf.Transformation(
0, 0, 0, [0, 0, 0]).get_fixed_angles_from_tm(tm_current)
return np.concatenate([current_position, current_orientation], axis=0).reshape(6, 1)
def test_1_step_response_without_feedback(show_results=True):
"""
Sample loop to plot step response with constant change in each DOF without
any control algorithm (just to see Baxter's "chaos" response)
"""
# Variables for simulation
x_k = np.matrix([[0.1], [0.15], [0.2], [0.25], [0.3], [0.35], [0.4]])
u_k = np.matrix([[0.01], [0.01], [0.01], [0.01], [0.01], [0.01], [0.01]])
# Desired cartesian goal [x_g, y_g, z_g, x_angle_g, y_angle_g, z_angle_g]
# (NOT any goal, just to be able to plot)
cartesian_goal = np.array([0, 0, 0, 0, 0, 0]).reshape(6, 1)
iteration_vector = list()
x_matrix = np.zeros((x_k.shape[0], 0))
u_matrix = np.zeros((u_k.shape[0], 0))
cartesian_matrix = np.zeros((cartesian_goal.shape[0], 0))
cartesian_goal_matrix = np.zeros((cartesian_goal.shape[0], 0))
total_time_in_seconds = 5
sample_time_in_seconds = 0.01
final_time = time.time() + total_time_in_seconds
last_time = 0
iteration = 0
while (time.time() < final_time):
if (time.time() - last_time >= sample_time_in_seconds):
last_time = time.time()
iteration_vector.append(iteration)
if (show_results == True):
print("Iteration (k): ", iteration)
iteration = iteration + 1
x_k_plus_1 = x_k + u_k
x_k = x_k_plus_1
cartesian_k = calculate_cartesian_vectors(x_k)
x_matrix = np.hstack((x_matrix, x_k_plus_1))
u_matrix = np.hstack((u_matrix, u_k))
cartesian_matrix = np.hstack((cartesian_matrix, cartesian_k))
cartesian_goal_matrix = np.hstack(
(cartesian_goal_matrix, cartesian_goal))
if (show_results == True):
print("iteration_vector:")
print(iteration_vector)
print("len(iteration_vector):")
print(len(iteration_vector))
print("u_matrix:")
print(u_matrix)
print("x_matrix:")
print(x_matrix)
print("x_matrix.shape:")
print(x_matrix.shape)
create_plots(
iteration_vector,
cartesian_matrix,
cartesian_goal_matrix,
sample_time_in_seconds,
"Cartesian Values responses based on step respone no feedback",
"current",
"fake-goal"
)
create_plots(
iteration_vector,
x_matrix,
u_matrix,
sample_time_in_seconds,
"X and U vectors response based on step respone no feedback",
"x",
"u"
)
plt.show()
def test_2_mpc_first_attempt(show_results=True):
"""
Sample control loop to test the MPC algorithm on Baxter's right limb for custom
variables such as N, total_time, sample_time, cartesian_goal, x0, u0 and
validate the resulting plots with or without noise.
"""
# Main conditions for executing the control loop with MPC algorithm
N = 1 # Prediction horizon
total_time_in_seconds = 20
sample_time_in_seconds = 0.1
# Initial conditions for states and inputs
x0 = np.array(
[
0.39500005288049406,
-1.2831749290661485,
-0.18867963690990588,
2.5905100555414924,
-0.11428156869746332,
-1.3506700837331067,
0.11504855909140603
]
).reshape(7, 1)
u0 = np.array([0, 0, 0, 0, 0, 0, 0]).reshape(7, 1)
# Number inputs (same as number of degrees of freedom)
nu = u0.shape[0]
# Initial cartesian_goal "default" value
cartesian_goal = np.array(
[
[
-0.9,
-1.0,
1.1,
0.6660425877100662,
1.5192944057794895,
-1.3616725381467032
],
] * N
).transpose().reshape(6, N)
# ---------- Main Control loop -------------
# Variables for control loop
x_k = x0
u_k = u0
iteration_vector = list()
x_matrix = np.zeros((x_k.shape[0], 0))
u_matrix = np.zeros((u_k.shape[0], 0))
cartesian_matrix = np.zeros((cartesian_goal.shape[0], 0))
cartesian_goal_matrix = np.zeros((cartesian_goal.shape[0], 0))
# Instead of running the algorithms in real time, we will run the total
# amount of discrete iterations (to get the total time)...
iteration = 0
total_iterations = int(total_time_in_seconds/sample_time_in_seconds)
for _ in range(total_iterations):
iteration_vector.append(iteration)
if (show_results == True):
print("Iteration (k): ", iteration)
iteration = iteration + 1
# Apply MPC prediction
mpc = b_mpc.MpcController(N, True, True)
cartesian_goal = cartesian_goal + np.array(
[
[
0.001 * np.sin(iteration/5),
0.001 * np.sin(iteration/5),
0.001 * np.sin(iteration/5),
0,
0,
0
],
] * N
).transpose().reshape((6, N))
dict_results = mpc.execute_mpc(cartesian_goal, x_k)
u = dict_results["optimal_dthetas"]
# Prediction Horizon for 1 iteration at a time
u_k = u[:, 0].reshape((nu, 1))
# Calculate new states based on StateSpace representation
x_k_plus_1 = x_k + u_k
# Add "random noise" to measurements (like real-life)
x_k_plus_1 = x_k_plus_1 + np.array(
[
1 * random.uniform(-0.005, 0.005),
1 * random.uniform(-0.005, 0.005),
1 * random.uniform(-0.005, 0.005),
1 * random.uniform(-0.005, 0.005),
1 * random.uniform(-0.005, 0.005),
1 * random.uniform(-0.005, 0.005),
1 * random.uniform(-0.005, 0.005)
]
).reshape((7, 1))
# Update current state for the next iteration
x_k = x_k_plus_1
cartesian_k = calculate_cartesian_vectors(x_k)
# Save current x and u values to plot latter
x_matrix = np.hstack((x_matrix, x_k))
u_matrix = np.hstack((u_matrix, u_k))
cartesian_matrix = np.hstack((cartesian_matrix, cartesian_k))
cartesian_goal_matrix = np.hstack(
(cartesian_goal_matrix, cartesian_goal[:, 0].reshape(6, 1)))
if (show_results == True):
print("len(iteration_vector):")
print(len(iteration_vector))
print("iteration_vector:")
print(iteration_vector)
print("u_matrix.shape:")
print(u_matrix.shape)
print("u_matrix:")
print(u_matrix)
print("x_matrix.shape:")
print(x_matrix.shape)
print("x_matrix:")
print(x_matrix)
create_plots(
iteration_vector,
cartesian_matrix,
cartesian_goal_matrix,
sample_time_in_seconds,
"Cartesian Values responses based on MPC with N={}".format(N),
"current",
"goal"
)
create_plots(
iteration_vector,
x_matrix,
u_matrix,
sample_time_in_seconds,
"X and U responses based on MPC with N={}".format(N),
"x",
"u"
)
plt.show()
def test_3_mpc_with_control_horizon(show_results=True):
"""
Sample control loop to test the MPC algorithm on Baxter's right limb for custom
variables such as N, M, total_time, sample_time, cartesian_goal, x0, u0 and
validate the resulting plots with or without noise.
"""
# Main conditions for executing the control loop with MPC algorithm
N = 1 # Prediction horizon
M = 1 # Control horizon
m_count = 1 # Control horizon counter (1, 2, ... , M, 1, 2, ..., M, 1, 2, ..., M ...)
total_time_in_seconds = 10
sample_time_in_seconds = 0.1
# Initial conditions for states and inputs
x0 = np.array(
[
0.39500005288049406,
-1.2831749290661485,
-0.18867963690990588,
2.5905100555414924,
-0.11428156869746332,
-1.3506700837331067,
0.11504855909140603
]
).reshape(7, 1)
u0 = np.array([0, 0, 0, 0, 0, 0, 0]).reshape(7, 1)
# Number inputs (same as number of degrees of freedom)
nu = u0.shape[0]
# Initial cartesian_goal "default" value
cartesian_goal = np.array(
[
[
-0.9,
-1.0,
1.1,
0.6660425877100662,
1.5192944057794895,
-1.3616725381467032
],
] * N
).transpose().reshape(6, N)
# ---------- Main Control loop -------------
# Variables for control loop
x_k = x0
u_k = u0
iteration_vector = list()
x_matrix = np.zeros((x_k.shape[0], 0))
u_matrix = np.zeros((u_k.shape[0], 0))
cartesian_matrix = | np.zeros((cartesian_goal.shape[0], 0)) | numpy.zeros |
import numpy as np
import matplotlib.pyplot as plt
from math import pi
import math, os, datetime
class data_manipulation_module:
def __init__(self):
self.a = 1
self.list_x = None
self.list_y = None
def init_graph_list(self):
self.list_x = []
self.list_y = []
def add_graph_list(self, element_x, element_y):
self.list_x.append(element_x)
self.list_y.append(element_y)
# Simply extends the length of the array.
# The remaining part is filled with zeros.
def data_stretched_no_modify(self, data :np.ndarray, target_length :int):
self.a = 1
if data.size > target_length:
print("sizes are wrong")
return -1
ret = np.zeros(target_length)
ret[:data.size] = data
return ret
# Performs an operation similar to np.interp.
# Unlike interp, every point outside the origin_axis range is filled with 0.
# interp by itself only fills the low parts with 0.
# target_axis
# - The x-axis coordinates at which to evaluate y values.
# - Their order does not matter.
# origin_axis
# - The original x-axis coordinates.
# - x[i] <= x[j] for all i <= j
# data
# - The original y-axis coordinates.
# - Must have the same size as origin_axis.
def data_interp_zeros(self, target_axis :np.ndarray, origin_axis :np.ndarray, data :np.ndarray):
self.a = 1
# if data.size is not origin_axis.size:
# print("DataManipulation__data_interp_zeros : origin data sizes are wrong %d %d" % (data.size, origin_axis.size))
return np.interp(target_axis, origin_axis, data) * ((origin_axis[0] <= target_axis) & (target_axis <= origin_axis[-1]))
# Computes arrays of x-axis coordinates for the time domain and the frequency domain of the measured data.
# Time domain units
# - N or n : Nano seconds
# - U or u : Micro seconds
# - M or m : Milli seconds
# Frequency domain units
# - G or g : Giga
# - M or m : Mega
# - K or k : Kilo
def get_sample_spacing(self, samples_per_second :int, size :int, unit_output_time :str, unit_output_freq :str):
self.a = 1
if unit_output_time[0] == 'N' or unit_output_time[0] == 'n':
u_output_time = 1e9
elif unit_output_time[0] == 'U' or unit_output_time[0] == 'u':
u_output_time = 1e6
elif unit_output_time[0] == 'M' or unit_output_time[0] == 'm':
u_output_time = 1e3
else:
u_output_time = 1
if unit_output_freq[0] == 'G' or unit_output_freq[0] == 'g':
u_output_freq = 1e-9
elif unit_output_freq[0] == 'M' or unit_output_freq[0] == 'm':
u_output_freq = 1e-6
elif unit_output_freq[0] == 'K' or unit_output_freq[0] == 'u':
u_output_freq = 1e-3
else:
u_output_freq = 1
ret_time = np.arange(size) * u_output_time / samples_per_second
ret_freq = | np.arange(size) | numpy.arange |
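# Illustrative sketch of data_interp_zeros: points outside the origin_axis range come back as 0 rather
# than being clamped to the edge values as plain np.interp would do. The arrays are arbitrary small examples.
def _demo_data_interp_zeros():
    dm = data_manipulation_module()
    origin_axis = np.array([1.0, 2.0, 3.0])
    data = np.array([10.0, 20.0, 30.0])
    target_axis = np.array([0.0, 1.5, 4.0])
    return dm.data_interp_zeros(target_axis, origin_axis, data)  # -> [0., 15., 0.]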